/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2018 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _ASM_ATOMIC_H_
#define	_ASM_ATOMIC_H_

#include <linux/compiler.h>
#include <sys/types.h>
#include <machine/atomic.h>

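/*
 * Linux's atomic_t is emulated here as a plain volatile int manipulated
 * with the native FreeBSD primitives from <machine/atomic.h>.
 * ATOMIC_INIT() provides static initialization, e.g.
 * "static atomic_t refs = ATOMIC_INIT(1);" (name purely illustrative).
 */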
#define	ATOMIC_INIT(x)	{ .counter = (x) }

typedef struct {
	volatile int counter;
} atomic_t;

/*------------------------------------------------------------------------*
 *	32-bit atomic operations
 *------------------------------------------------------------------------*/

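/*
 * Aliases expressing the remaining Linux helpers in terms of the
 * *_return and add_unless primitives defined below.  Note that unlike
 * Linux, atomic_add() and atomic_sub() also yield the new value here,
 * which callers are free to ignore.
 */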
#define	atomic_add(i, v)		atomic_add_return((i), (v))
#define	atomic_sub(i, v)		atomic_sub_return((i), (v))
#define	atomic_inc_return(v)		atomic_add_return(1, (v))
#define	atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
#define	atomic_add_and_test(i, v)	(atomic_add_return((i), (v)) == 0)
#define	atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define	atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define	atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define	atomic_dec_return(v)		atomic_sub_return(1, (v))
#define	atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

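/*
 * FreeBSD's atomic_fetchadd_int() returns the previous value, so the
 * Linux *_return semantics (return the new value) are obtained by
 * re-applying the operation to the result.
 */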
static inline int
atomic_add_return(int i, atomic_t *v)
{
	return i + atomic_fetchadd_int(&v->counter, i);
}

static inline int
atomic_sub_return(int i, atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, -i) - i;
}

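/*
 * Plain set/read are unordered; READ_ONCE()/WRITE_ONCE() only
 * guarantee a single untorn access.  atomic_set_release() adds
 * release semantics via atomic_store_rel_int().
 */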
static inline void
atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

static inline void
atomic_set_release(atomic_t *v, int i)
{
	atomic_store_rel_int(&v->counter, i);
}

static inline void
atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_set_int(&v->counter, mask);
}

static inline int
atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}

static inline int
atomic_inc(atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, 1) + 1;
}

static inline int
atomic_dec(atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, -1) - 1;
}

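/*
 * Add "a" to the counter unless it currently holds "u".  The loop
 * relies on atomic_fcmpset_int() reloading "c" with the observed value
 * on failure.  Returns non-zero if the addition was performed;
 * atomic_fetch_add_unless() is identical but returns the prior value.
 */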
static inline int
atomic_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	for (;;) {
		if (unlikely(c == u))
			break;
		if (likely(atomic_fcmpset_int(&v->counter, &c, c + a)))
			break;
	}
	return (c != u);
}

static inline int
atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	for (;;) {
		if (unlikely(c == u))
			break;
		if (likely(atomic_fcmpset_int(&v->counter, &c, c + a)))
			break;
	}
	return (c);
}

static inline void
atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_clear_int(&v->counter, mask);
}

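/*
 * MIPS has no atomic_swap_int(), so the exchange is emulated there
 * with an fcmpset loop; all other architectures use the native swap.
 */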
static inline int
atomic_xchg(atomic_t *v, int i)
{
#if !defined(__mips__)
	return (atomic_swap_int(&v->counter, i));
#else
	int ret = atomic_read(v);

	while (!atomic_fcmpset_int(&v->counter, &ret, i))
		;
	return (ret);
#endif
}

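/*
 * atomic_fcmpset_int() may fail spuriously and updates "ret" with the
 * value it observed, so retry only while the counter still reads as
 * "old"; otherwise return the conflicting value, as Linux expects.
 */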
static inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret = old;

	for (;;) {
		if (atomic_fcmpset_int(&v->counter, &ret, new))
			break;
		if (ret != old)
			break;
	}
	return (ret);
}

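/*
 * LINUXKPI_ATOMIC_8/16/64() expand their argument only on
 * architectures providing the corresponding fcmpset/swap width,
 * compiling the unsupported cmpxchg()/xchg() cases out entirely.
 */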
#if defined(__amd64__) || defined(__arm64__) || defined(__i386__)
#define	LINUXKPI_ATOMIC_8(...) __VA_ARGS__
#define	LINUXKPI_ATOMIC_16(...) __VA_ARGS__
#else
#define	LINUXKPI_ATOMIC_8(...)
#define	LINUXKPI_ATOMIC_16(...)
#endif

#if !(defined(__i386__) || (defined(__mips__) && !(defined(__mips_n32) ||	\
    defined(__mips_n64))) || (defined(__powerpc__) &&			\
    !defined(__powerpc64__)))
#define	LINUXKPI_ATOMIC_64(...) __VA_ARGS__
#else
#define	LINUXKPI_ATOMIC_64(...)
#endif

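/*
 * Type-generic compare-and-swap.  The union puns the operand into the
 * width-specific view selected by the switch below, and the CTASSERT
 * rejects any operand size the architecture cannot handle atomically.
 * Returns the value found at "ptr", whether or not the swap occurred.
 */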
#define	cmpxchg(ptr, old, new) ({					\
	union {								\
		__typeof(*(ptr)) val;					\
		u8 u8[0];						\
		u16 u16[0];						\
		u32 u32[0];						\
		u64 u64[0];						\
	} __ret = { .val = (old) }, __new = { .val = (new) };		\
									\
	CTASSERT(							\
	    LINUXKPI_ATOMIC_8(sizeof(__ret.val) == 1 ||)		\
	    LINUXKPI_ATOMIC_16(sizeof(__ret.val) == 2 ||)		\
	    LINUXKPI_ATOMIC_64(sizeof(__ret.val) == 8 ||)		\
	    sizeof(__ret.val) == 4);					\
									\
	switch (sizeof(__ret.val)) {					\
	LINUXKPI_ATOMIC_8(						\
	case 1:								\
		while (!atomic_fcmpset_8((volatile u8 *)(ptr),		\
		    __ret.u8, __new.u8[0]) && __ret.val == (old))	\
			;						\
		break;							\
	)								\
	LINUXKPI_ATOMIC_16(						\
	case 2:								\
		while (!atomic_fcmpset_16((volatile u16 *)(ptr),	\
		    __ret.u16, __new.u16[0]) && __ret.val == (old))	\
			;						\
		break;							\
	)								\
	case 4:								\
		while (!atomic_fcmpset_32((volatile u32 *)(ptr),	\
		    __ret.u32, __new.u32[0]) && __ret.val == (old))	\
			;						\
		break;							\
	LINUXKPI_ATOMIC_64(						\
	case 8:								\
		while (!atomic_fcmpset_64((volatile u64 *)(ptr),	\
		    __ret.u64, __new.u64[0]) && __ret.val == (old))	\
			;						\
		break;							\
	)								\
	}								\
	__ret.val;							\
})
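
/*
 * Example use (names purely illustrative):
 *
 *	static uint32_t lockword;
 *
 *	if (cmpxchg(&lockword, 0, 1) == 0)
 *		... the caller observed 0 and atomically installed 1 ...
 */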

#define	cmpxchg_relaxed(...)	cmpxchg(__VA_ARGS__)

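/*
 * Type-generic exchange.  The 32- and 64-bit cases map directly onto
 * the native atomic_swap_*() operations; the 8- and 16-bit cases are
 * emulated with an fcmpset loop seeded from READ_ONCE().
 */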
#define	xchg(ptr, new) ({						\
	union {								\
		__typeof(*(ptr)) val;					\
		u8 u8[0];						\
		u16 u16[0];						\
		u32 u32[0];						\
		u64 u64[0];						\
	} __ret, __new = { .val = (new) };				\
									\
	CTASSERT(							\
	    LINUXKPI_ATOMIC_8(sizeof(__ret.val) == 1 ||)		\
	    LINUXKPI_ATOMIC_16(sizeof(__ret.val) == 2 ||)		\
	    LINUXKPI_ATOMIC_64(sizeof(__ret.val) == 8 ||)		\
	    sizeof(__ret.val) == 4);					\
									\
	switch (sizeof(__ret.val)) {					\
	LINUXKPI_ATOMIC_8(						\
	case 1:								\
		__ret.val = READ_ONCE(*(ptr));				\
		while (!atomic_fcmpset_8((volatile u8 *)(ptr),		\
		    __ret.u8, __new.u8[0]))				\
			;						\
		break;							\
	)								\
	LINUXKPI_ATOMIC_16(						\
	case 2:								\
		__ret.val = READ_ONCE(*(ptr));				\
		while (!atomic_fcmpset_16((volatile u16 *)(ptr),	\
		    __ret.u16, __new.u16[0]))				\
			;						\
		break;							\
	)								\
	case 4:								\
		__ret.u32[0] = atomic_swap_32((volatile u32 *)(ptr),	\
		    __new.u32[0]);					\
		break;							\
	LINUXKPI_ATOMIC_64(						\
	case 8:								\
		__ret.u64[0] = atomic_swap_64((volatile u64 *)(ptr),	\
		    __new.u64[0]);					\
		break;							\
	)								\
	}								\
	__ret.val;							\
})

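/*
 * Decrement the counter only if the result would remain non-negative.
 * Returns the decremented value; a negative return means the counter
 * was left untouched.
 */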
static inline int
atomic_dec_if_positive(atomic_t *v)
{
	int retval;
	int old;

	old = atomic_read(v);
	for (;;) {
		retval = old - 1;
		if (unlikely(retval < 0))
			break;
		if (likely(atomic_fcmpset_int(&v->counter, &old, retval)))
			break;
	}
	return (retval);
}

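/*
 * Generate the Linux bitwise helpers (atomic_or(), atomic_and(),
 * atomic_andnot() and atomic_xor()) and their atomic_fetch_*()
 * counterparts, which return the value seen before the operation,
 * each built on an atomic_cmpxchg() loop.
 */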
#define	LINUX_ATOMIC_OP(op, c_op)				\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	int c, old;						\
								\
	c = v->counter;						\
	while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)	\
		c = old;					\
}

#define	LINUX_ATOMIC_FETCH_OP(op, c_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{								\
	int c, old;						\
								\
	c = v->counter;						\
	while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)	\
		c = old;					\
								\
	return (c);						\
}

LINUX_ATOMIC_OP(or, |)
LINUX_ATOMIC_OP(and, &)
LINUX_ATOMIC_OP(andnot, &~)
LINUX_ATOMIC_OP(xor, ^)

LINUX_ATOMIC_FETCH_OP(or, |)
LINUX_ATOMIC_FETCH_OP(and, &)
LINUX_ATOMIC_FETCH_OP(andnot, &~)
LINUX_ATOMIC_FETCH_OP(xor, ^)

#endif					/* _ASM_ATOMIC_H_ */