/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/i386/include/atomic.h 327195 2017-12-26 10:07:17Z kib $
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#include <sys/atomic_common.h>

#ifdef _KERNEL
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif

#ifndef __OFFSETOF_MONITORBUF
/*
 * __OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in i386/vm_machdep.c ensures that the value is correct.
 */
#define	__OFFSETOF_MONITORBUF	0x180

static __inline void
__mbk(void)
{

	__asm __volatile("lock; addl $0,%%fs:%0"
	    : "+m" (*(u_int *)__OFFSETOF_MONITORBUF) : : "memory", "cc");
}

static __inline void
__mbu(void)
{

	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc");
}
#endif

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */

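/*
 * Illustrative sketch, not part of the original header: typical use of the
 * plain (unordered) operations above on words shared between CPUs.  The
 * names example_stats, example_flags and EXAMPLE_NEEDWAKE are hypothetical
 * and exist only for this example.
 *
 *	static volatile u_int example_stats;
 *	static volatile u_int example_flags;
 *	#define	EXAMPLE_NEEDWAKE	0x0001
 *
 *	atomic_add_int(&example_stats, 1);
 *	atomic_set_int(&example_flags, EXAMPLE_NEEDWAKE);
 *	atomic_clear_int(&example_flags, EXAMPLE_NEEDWAKE);
 *
 * Each call is one locked read-modify-write; none of them implies acquire
 * or release ordering.  Use the _acq_/_rel_ variants defined further down
 * when ordering against surrounding accesses is required.
 */
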
/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int	atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
int	atomic_testandset_int(volatile u_int *p, u_int v);
int	atomic_testandclear_int(volatile u_int *p, u_int v);
void	atomic_thread_fence_acq(void);
void	atomic_thread_fence_acq_rel(void);
void	atomic_thread_fence_rel(void);
void	atomic_thread_fence_seq_cst(void);

#define	ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int		atomic_cmpset_64(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64(volatile uint64_t *);
void		atomic_store_rel_64(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64(volatile uint64_t *, uint64_t);
uint64_t	atomic_fetchadd_64(volatile uint64_t *, uint64_t);

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is marked volatile so that the compiler cannot discard it
 * as dead code.  GCC aggressively reorders operations, so the "memory"
 * clobber is required for the barrier variants to also act as compiler
 * memory barriers.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * cmpset: if (*dst == expect) *dst = src (all 32 bit words)
 * Returns 0 on failure, non-zero on success.
 *
 * fcmpset: the same operation, but on failure the value observed in *dst
 * is written back to *expect, so callers can retry without a separate
 * reload.
 */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %3,%1 ;	"
	"       sete	%0 ;		"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (expect)			/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");
	return (res);
}

static __inline int
atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %3,%1 ;	"
	"       sete	%0 ;		"
	"# atomic_fcmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (*expect)		/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");
	return (res);
}

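/*
 * Illustrative sketch, not part of the original header: the usual retry
 * loop around compare-and-set.  atomic_fcmpset_int() refreshes "old" on
 * failure, so no explicit reload is needed.  example_max and
 * example_update_max() are hypothetical names.
 *
 *	static volatile u_int example_max;
 *
 *	static __inline void
 *	example_update_max(u_int v)
 *	{
 *		u_int old;
 *
 *		old = example_max;
 *		while (old < v && !atomic_fcmpset_int(&example_max, &old, v))
 *			;
 *	}
 *
 * With atomic_cmpset_int() the caller would have to re-read example_max
 * itself after every failed attempt.
 */
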
/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}

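/*
 * Illustrative sketch, not part of the original header: because
 * atomic_fetchadd_int() returns the old value, it can hand out unique,
 * monotonically increasing tickets.  example_idgen and example_next_id()
 * are hypothetical names.
 *
 *	static volatile u_int example_idgen;
 *
 *	static __inline u_int
 *	example_next_id(void)
 *	{
 *
 *		return (atomic_fetchadd_int(&example_idgen, 1));
 *	}
 *
 * Two concurrent callers are guaranteed to observe different return
 * values, which is the basis of ticket locks and per-CPU statistics
 * indices.
 */
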
static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandclear_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

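/*
 * Illustrative sketch, not part of the original header: the test-and-*
 * primitives return the previous state of the bit, which gives a simple
 * one-bit claim/release protocol for a small slot bitmap.  example_busy,
 * example_claim() and example_release() are hypothetical names; bit
 * numbers are interpreted modulo 32.
 *
 *	static volatile u_int example_busy;
 *
 *	static __inline int
 *	example_claim(u_int slot)
 *	{
 *
 *		return (atomic_testandset_int(&example_busy, slot) == 0);
 *	}
 *
 *	static __inline void
 *	example_release(u_int slot)
 *	{
 *
 *		atomic_testandclear_int(&example_busy, slot);
 *	}
 *
 * Only the caller that flips the bit from 0 to 1 sees a previous value of
 * zero and wins the slot.  Like the other plain operations, these imply
 * no compiler-level ordering; combine them with the fences below if the
 * slot guards other data.
 */
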
/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  In the kernel, we use a private per-cpu
 * cache line for "mem", to avoid introducing false data
 * dependencies.  In user space, we use the word at the top of the
 * stack.
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */

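/*
 * Illustrative sketch, not part of the original header: the classic
 * publish/consume pattern that the acquire/release semantics described
 * above are meant for.  example_data, example_ready and consume() are
 * hypothetical names.
 *
 *	static u_int example_data;
 *	static volatile u_int example_ready;
 *
 * Producer:
 *	example_data = 42;
 *	atomic_store_rel_int(&example_ready, 1);
 *
 * Consumer:
 *	if (atomic_load_acq_int(&example_ready) != 0)
 *		consume(example_data);
 *
 * The release store keeps the write of example_data from sinking below
 * the flag update, and the acquire load keeps the read of example_data
 * from rising above the flag check, so a consumer that sees the flag also
 * sees the data.  Only when a store must be ordered before a later load
 * of a different location (Dekker-style mutual exclusion, for instance)
 * is the Store/Load barrier, atomic_thread_fence_seq_cst(), required.
 */
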
#if defined(_KERNEL)
#if defined(SMP)
#define	__storeload_barrier()	__mbk()
#else /* _KERNEL && UP */
#define	__storeload_barrier()	__compiler_membar()
#endif /* SMP */
#else /* !_KERNEL */
#define	__storeload_barrier()	__mbu()
#endif /* _KERNEL */

#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack

static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}

#ifdef _KERNEL

#ifdef WANT_FUNCTIONS
int		atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
int		atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64_i386(volatile uint64_t *);
uint64_t	atomic_load_acq_64_i586(volatile uint64_t *);
void		atomic_store_rel_64_i386(volatile uint64_t *, uint64_t);
void		atomic_store_rel_64_i586(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i386(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i586(volatile uint64_t *, uint64_t);
#endif

/* I486 does not support SMP or CMPXCHG8B. */
static __inline int
atomic_cmpset_64_i386(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	volatile uint32_t *p;
	u_char res;

	p = (volatile uint32_t *)dst;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	xorl	%1,%%eax ;	"
	"	xorl	%2,%%edx ;	"
	"	orl	%%edx,%%eax ;	"
	"	jne	1f ;		"
	"	movl	%4,%1 ;		"
	"	movl	%5,%2 ;		"
	"1:				"
	"	sete	%3 ;		"
	"	popfl"
	: "+A" (expect),		/* 0 */
	  "+m" (*p),			/* 1 */
	  "+m" (*(p + 1)),		/* 2 */
	  "=q" (res)			/* 3 */
	: "r" ((uint32_t)src),		/* 4 */
	  "r" ((uint32_t)(src >> 32))	/* 5 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	popfl"
	: "=&A" (res)			/* 0 */
	: "m" (*q),			/* 1 */
	  "m" (*(q + 1))		/* 2 */
	: "memory");
	return (res);
}

static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%%eax,%0 ;	"
	"	movl	%%edx,%1 ;	"
	"	popfl"
	: "=m" (*q),			/* 0 */
	  "=m" (*(q + 1))		/* 1 */
	: "A" (v)			/* 2 */
	: "memory");
}

static __inline uint64_t
atomic_swap_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	movl	%4,%2 ;		"
	"	movl	%3,%1 ;		"
	"	popfl"
	: "=&A" (res),			/* 0 */
	  "+m" (*q),			/* 1 */
	  "+m" (*(q + 1))		/* 2 */
	: "r" ((uint32_t)v),		/* 3 */
	  "r" ((uint32_t)(v >> 32)));	/* 4 */
	return (res);
}

static __inline int
atomic_cmpset_64_i586(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchg8b %1 ;		"
	"	sete	%0"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+A" (expect)			/* 2 */
	: "b" ((uint32_t)src),		/* 3 */
	  "c" ((uint32_t)(src >> 32))	/* 4 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
	uint64_t res;

	__asm __volatile(
	"	movl	%%ebx,%%eax ;	"
	"	movl	%%ecx,%%edx ;	"
	"	" MPLOCKED "		"
	"	cmpxchg8b %1"
	: "=&A" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "memory", "cc");
	return (res);
}

static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
}

static __inline uint64_t
atomic_swap_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
	return (v);
}

static __inline int
atomic_cmpset_64(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_cmpset_64_i386(dst, expect, src));
	else
		return (atomic_cmpset_64_i586(dst, expect, src));
}

static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_load_acq_64_i386(p));
	else
		return (atomic_load_acq_64_i586(p));
}

static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		atomic_store_rel_64_i386(p, v);
	else
		atomic_store_rel_64_i586(p, v);
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_swap_64_i386(p, v));
	else
		return (atomic_swap_64_i586(p, v));
}

static __inline uint64_t
atomic_fetchadd_64(volatile uint64_t *p, uint64_t v)
{

	for (;;) {
		uint64_t t = *p;
		if (atomic_cmpset_64(p, t, t + v))
			return (t);
	}
}

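/*
 * Illustrative sketch, not part of the original header: the 64-bit
 * operations above are kernel-only on i386 and transparently select the
 * interrupt-disabling *_i386 variants when CMPXCHG8B is not available,
 * since the dispatch on (cpu_feature & CPUID_CX8) happens at run time.
 * example_bytes and snapshot are hypothetical names.
 *
 *	static volatile uint64_t example_bytes;
 *	uint64_t snapshot;
 *
 *	atomic_fetchadd_64(&example_bytes, 1);
 *	snapshot = atomic_load_acq_64(&example_bytes);
 *
 * The same source therefore runs on a 486 (cli-protected critical
 * section) and on i586 and later (lock cmpxchg8b).
 */
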
#endif /* _KERNEL */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

#define	ATOMIC_LOADSTORE(TYPE)				\
	ATOMIC_LOAD(TYPE);				\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE

#ifndef WANT_FUNCTIONS

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

	return (atomic_testandset_int((volatile u_int *)p, v));
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{

	return (atomic_testandclear_int((volatile u_int *)p, v));
}

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	return (atomic_swap_int((volatile u_int *)p, (u_int)v));
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int
#define	atomic_fcmpset_acq_int		atomic_fcmpset_int
#define	atomic_fcmpset_rel_int		atomic_fcmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long
#define	atomic_fcmpset_acq_long		atomic_fcmpset_long
#define	atomic_fcmpset_rel_long		atomic_fcmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)

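/*
 * Illustrative sketch, not part of the original header: because
 * atomic_readandclear_int() is an exchange with zero, a consumer can
 * drain a word of pending event bits in a single step.  The names
 * example_pending, example_process() and n are hypothetical; ffs() is
 * the usual find-first-set routine.
 *
 *	static volatile u_int example_pending;
 *
 *	atomic_set_int(&example_pending, 1u << n);
 *
 *	u_int bits = atomic_readandclear_int(&example_pending);
 *	while (bits != 0) {
 *		int ev = ffs(bits) - 1;
 *		example_process(ev);
 *		bits &= ~(1u << ev);
 *	}
 *
 * The first call posts event n; the loop then handles every event that
 * was pending at the moment of the exchange, while new events accumulate
 * in example_pending for the next pass.
 */
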
/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_fcmpset_32	atomic_fcmpset_int
#define	atomic_fcmpset_acq_32	atomic_fcmpset_acq_int
#define	atomic_fcmpset_rel_32	atomic_fcmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int
#define	atomic_testandclear_32	atomic_testandclear_int

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_fcmpset_ptr(dst, old, new) \
	atomic_fcmpset_int((volatile u_int *)(dst), (u_int *)(old), (u_int)(new))
#define	atomic_fcmpset_acq_ptr(dst, old, new) \
	atomic_fcmpset_acq_int((volatile u_int *)(dst), (u_int *)(old), \
	    (u_int)(new))
#define	atomic_fcmpset_rel_ptr(dst, old, new) \
	atomic_fcmpset_rel_int((volatile u_int *)(dst), (u_int *)(old), \
	    (u_int)(new))
#define	atomic_swap_ptr(p, v) \
	atomic_swap_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))

#endif /* !WANT_FUNCTIONS */

#if defined(_KERNEL)
#define	mb()	__mbk()
#define	wmb()	__mbk()
#define	rmb()	__mbk()
#else
#define	mb()	__mbu()
#define	wmb()	__mbu()
#define	rmb()	__mbu()
#endif

#endif /* !_MACHINE_ATOMIC_H_ */