1272503Smjg/*-
2272503Smjg * Copyright (c) 2014 Mateusz Guzik <mjg@FreeBSD.org>
3272503Smjg *
4272503Smjg * Redistribution and use in source and binary forms, with or without
5272503Smjg * modification, are permitted provided that the following conditions
6272503Smjg * are met:
7272503Smjg * 1. Redistributions of source code must retain the above copyright
8272503Smjg *    notice, this list of conditions and the following disclaimer.
9272503Smjg * 2. Redistributions in binary form must reproduce the above copyright
10272503Smjg *    notice, this list of conditions and the following disclaimer in the
11272503Smjg *    documentation and/or other materials provided with the distribution.
12272503Smjg *
13272503Smjg * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14272503Smjg * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15272503Smjg * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16272503Smjg * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17272503Smjg * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18272503Smjg * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19272503Smjg * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20272503Smjg * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21272503Smjg * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22272503Smjg * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23272503Smjg * SUCH DAMAGE.
24272503Smjg *
25272503Smjg * $FreeBSD$
26272503Smjg */
27272503Smjg
28272503Smjg#ifndef _SYS_SEQ_H_
29272503Smjg#define _SYS_SEQ_H_
30272503Smjg
31272503Smjg#ifdef _KERNEL
32273109Smjg#include <sys/systm.h>
33273109Smjg#endif
34273109Smjg#include <sys/types.h>
35272503Smjg
36272503Smjg/*
37273109Smjg * seq_t may be included in structs visible to userspace
38273109Smjg */
39273109Smjgtypedef uint32_t seq_t;
40273109Smjg
41273109Smjg#ifdef _KERNEL
42273109Smjg
43273109Smjg/*
44272503Smjg * Typical usage:
45272503Smjg *
46272503Smjg * writers:
47272503Smjg * 	lock_exclusive(&obj->lock);
48272503Smjg * 	seq_write_begin(&obj->seq);
49272503Smjg * 	.....
50272503Smjg * 	seq_write_end(&obj->seq);
 * 	unlock_exclusive(&obj->lock);
52272503Smjg *
53272503Smjg * readers:
54272503Smjg * 	obj_t lobj;
55272503Smjg * 	seq_t seq;
56272503Smjg *
57272503Smjg * 	for (;;) {
58272503Smjg * 		seq = seq_read(&gobj->seq);
59272503Smjg * 		lobj = gobj;
60272503Smjg * 		if (seq_consistent(&gobj->seq, seq))
61272503Smjg * 			break;
62272503Smjg * 		cpu_spinwait();
63272503Smjg * 	}
64272503Smjg * 	foo(lobj);
65272503Smjg */
66272503Smjg
67272503Smjg/* A hack to get MPASS macro */
68272503Smjg#include <sys/lock.h>
69272503Smjg
70272503Smjg#include <machine/cpu.h>
71272503Smjg
72272503Smjg/*
73272503Smjg * This is a temporary hack until memory barriers are cleaned up.
74272503Smjg *
75272503Smjg * atomic_load_acq_int at least on amd64 provides a full memory barrier,
 * in a way which affects performance.
77272503Smjg *
78272503Smjg * Hack below covers all architectures and avoids most of the penalty at least
79272503Smjg * on amd64.
80272503Smjg */
static __inline int
atomic_load_acq_rmb_int(volatile u_int *p)
{
	volatile u_int v;

	/*
	 * Plain-read the target first, then perform the acquire load on
	 * the local copy.  The acquire supplies the read memory barrier
	 * ordering the read of *p before subsequent loads, while avoiding
	 * the full-barrier cost of atomic_load_acq_int(p) on amd64 (see
	 * the comment above this function).  The statement order here is
	 * load-bearing; do not "simplify".
	 */
	v = *p;
	atomic_load_acq_int(&v);
	return (v);
}
90272503Smjg
91272503Smjgstatic __inline bool
92272503Smjgseq_in_modify(seq_t seqp)
93272503Smjg{
94272503Smjg
95272503Smjg	return (seqp & 1);
96272503Smjg}
97272503Smjg
static __inline void
seq_write_begin(seq_t *seqp)
{

	/* No write may already be in progress (counter must be even). */
	MPASS(!seq_in_modify(*seqp));
	/*
	 * Bump the counter to an odd value so readers can detect the
	 * in-flight modification.  NOTE(review): this relies on acquire
	 * semantics on the increment to keep the protected stores from
	 * being reordered before the counter update — confirm this is
	 * sufficient on all supported architectures.
	 */
	atomic_add_acq_int(seqp, 1);
}
105272503Smjg
static __inline void
seq_write_end(seq_t *seqp)
{

	/*
	 * Return the counter to an even value with release semantics, so
	 * the protected stores are ordered before readers can observe the
	 * write as complete.
	 */
	atomic_add_rel_int(seqp, 1);
	/* The counter must be even again once the write section is over. */
	MPASS(!seq_in_modify(*seqp));
}
113272503Smjg
114272503Smjgstatic __inline seq_t
115272503Smjgseq_read(seq_t *seqp)
116272503Smjg{
117272503Smjg	seq_t ret;
118272503Smjg
119272503Smjg	for (;;) {
120272503Smjg		ret = atomic_load_acq_rmb_int(seqp);
121272503Smjg		if (seq_in_modify(ret)) {
122272503Smjg			cpu_spinwait();
123272503Smjg			continue;
124272503Smjg		}
125272503Smjg		break;
126272503Smjg	}
127272503Smjg
128272503Smjg	return (ret);
129272503Smjg}
130272503Smjg
131272503Smjgstatic __inline seq_t
132272503Smjgseq_consistent(seq_t *seqp, seq_t oldseq)
133272503Smjg{
134272503Smjg
135272503Smjg	return (atomic_load_acq_rmb_int(seqp) == oldseq);
136272503Smjg}
137272503Smjg
138272503Smjgstatic __inline seq_t
139272503Smjgseq_consistent_nomb(seq_t *seqp, seq_t oldseq)
140272503Smjg{
141272503Smjg
142272503Smjg	return (*seqp == oldseq);
143272503Smjg}
144272503Smjg
145272503Smjg#endif	/* _KERNEL */
146272503Smjg#endif	/* _SYS_SEQ_H_ */
147