/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: src/sys/alpha/include/atomic.h,v 1.21.2.3 2005/10/06 18:12:05 jhb
 * $FreeBSD$
 */

#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

/*
 * Note: All the 64-bit atomic operations are only atomic when running
 * in 64-bit mode.  It is assumed that code compiled for n32 and n64
 * fits into this definition and that no further safeties are needed.
 *
 * It is also assumed that the add, subtract and other arithmetic is
 * done on numbers, not pointers.  No atomic operations are defined
 * for n32 pointers, which follow special rules, but such pointers
 * generally should not need atomic operations.
 */
#ifndef __MIPS_PLATFORM_SYNC_NOPS
#define __MIPS_PLATFORM_SYNC_NOPS ""
#endif

static __inline void
mips_sync(void)
{
	__asm __volatile (".set noreorder\n"
			"\tsync\n"
			__MIPS_PLATFORM_SYNC_NOPS
			".set reorder\n"
			: : : "memory");
}

#define mb()	mips_sync()
#define wmb()	mips_sync()
#define rmb()	mips_sync()

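/*
 * An illustrative sketch, not part of this header: a producer/consumer
 * handoff using the barriers above.  The names "data", "flag",
 * "compute" and "consume" are hypothetical.
 *
 *	data = compute();
 *	wmb();			(order the data write before the flag write)
 *	flag = 1;
 *
 *	while (flag == 0)	(consumer side)
 *		;
 *	rmb();			(order the flag read before the data read)
 *	consume(data);
 *
 * All three barriers expand to the full "sync" here, so the separate
 * names mainly document intent.
 */
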
/*
 * Various simple arithmetic operations on memory, atomic in the
 * presence of interrupts and safe on SMP.
 */

void atomic_set_8(__volatile uint8_t *, uint8_t);
void atomic_clear_8(__volatile uint8_t *, uint8_t);
void atomic_add_8(__volatile uint8_t *, uint8_t);
void atomic_subtract_8(__volatile uint8_t *, uint8_t);

void atomic_set_16(__volatile uint16_t *, uint16_t);
void atomic_clear_16(__volatile uint16_t *, uint16_t);
void atomic_add_16(__volatile uint16_t *, uint16_t);
void atomic_subtract_16(__volatile uint16_t *, uint16_t);

static __inline void
atomic_set_32(__volatile uint32_t *p, uint32_t v)
{
	uint32_t temp;

	__asm __volatile (
		"1:\tll	%0, %3\n\t"		/* load old value */
		"or	%0, %2, %0\n\t"		/* calculate new value */
		"sc	%0, %1\n\t"		/* attempt to store */
		"beqz	%0, 1b\n\t"		/* spin if failed */
		: "=&r" (temp), "=m" (*p)
		: "r" (v), "m" (*p)
		: "memory");
}
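
/*
 * The asm above is the standard MIPS load-linked/store-conditional
 * retry loop.  In rough pseudo-C:
 *
 *	do {
 *		temp = *p;	(ll: load and open a link to the address)
 *		temp |= v;
 *	} while (sc fails to store temp back to *p);
 *
 * sc writes 1 to its register on success and 0 on failure (e.g. if
 * another CPU wrote the location in between), hence the beqz retry.
 * The remaining read-modify-write primitives below follow the same
 * pattern with a different middle instruction.
 */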

static __inline void
atomic_clear_32(__volatile uint32_t *p, uint32_t v)
{
	uint32_t temp;
	v = ~v;

	__asm __volatile (
		"1:\tll	%0, %3\n\t"		/* load old value */
		"and	%0, %2, %0\n\t"		/* calculate new value */
		"sc	%0, %1\n\t"		/* attempt to store */
		"beqz	%0, 1b\n\t"		/* spin if failed */
		: "=&r" (temp), "=m" (*p)
		: "r" (v), "m" (*p)
		: "memory");
}

static __inline void
atomic_add_32(__volatile uint32_t *p, uint32_t v)
{
	uint32_t temp;

	__asm __volatile (
		"1:\tll	%0, %3\n\t"		/* load old value */
		"addu	%0, %2, %0\n\t"		/* calculate new value */
		"sc	%0, %1\n\t"		/* attempt to store */
		"beqz	%0, 1b\n\t"		/* spin if failed */
		: "=&r" (temp), "=m" (*p)
		: "r" (v), "m" (*p)
		: "memory");
}

static __inline void
atomic_subtract_32(__volatile uint32_t *p, uint32_t v)
{
	uint32_t temp;

	__asm __volatile (
		"1:\tll	%0, %3\n\t"		/* load old value */
		"subu	%0, %2\n\t"		/* calculate new value */
		"sc	%0, %1\n\t"		/* attempt to store */
		"beqz	%0, 1b\n\t"		/* spin if failed */
		: "=&r" (temp), "=m" (*p)
		: "r" (v), "m" (*p)
		: "memory");
}

static __inline uint32_t
atomic_readandclear_32(__volatile uint32_t *addr)
{
	uint32_t result, temp;

	__asm __volatile (
		"1:\tll	 %0, %3\n\t"		/* load old value */
		"li	 %1, 0\n\t"		/* value to store */
		"sc	 %1, %2\n\t"		/* attempt to store */
		"beqz	 %1, 1b\n\t"		/* if the store failed, spin */
		: "=&r" (result), "=&r" (temp), "=m" (*addr)
		: "m" (*addr)
		: "memory");

	return (result);
}

static __inline uint32_t
atomic_readandset_32(__volatile uint32_t *addr, uint32_t value)
{
	uint32_t result, temp;

	__asm __volatile (
		"1:\tll	 %0, %3\n\t"		/* load old value */
		"or	 %1, $0, %4\n\t"	/* value to store */
		"sc	 %1, %2\n\t"		/* attempt to store */
		"beqz	 %1, 1b\n\t"		/* if the store failed, spin */
		: "=&r" (result), "=&r" (temp), "=m" (*addr)
		: "m" (*addr), "r" (value)
		: "memory");

	return (result);
}
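
/*
 * An illustrative sketch, not part of this header: atomic_readandset_32()
 * is enough for a simple test-and-set spinlock.  The variable "lock" is
 * hypothetical.
 *
 *	while (atomic_readandset_32(&lock, 1) != 0)
 *		;				(spin until the old value was 0)
 *	mips_sync();				(acquire barrier)
 *	... critical section ...
 *	atomic_store_rel_32(&lock, 0);		(defined below: barrier, then store)
 */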

#if defined(__mips_n64) || defined(__mips_n32)
static __inline void
atomic_set_64(__volatile uint64_t *p, uint64_t v)
{
	uint64_t temp;

	__asm __volatile (
		"1:\n\t"
		"lld	%0, %3\n\t"		/* load old value */
		"or	%0, %2, %0\n\t"		/* calculate new value */
		"scd	%0, %1\n\t"		/* attempt to store */
		"beqz	%0, 1b\n\t"		/* spin if failed */
		: "=&r" (temp), "=m" (*p)
		: "r" (v), "m" (*p)
		: "memory");
}

static __inline void
atomic_clear_64(__volatile uint64_t *p, uint64_t v)
{
	uint64_t temp;
	v = ~v;

	__asm __volatile (
		"1:\n\t"
		"lld	%0, %3\n\t"		/* load old value */
		"and	%0, %2, %0\n\t"		/* calculate new value */
		"scd	%0, %1\n\t"		/* attempt to store */
		"beqz	%0, 1b\n\t"		/* spin if failed */
		: "=&r" (temp), "=m" (*p)
		: "r" (v), "m" (*p)
		: "memory");
}

static __inline void
atomic_add_64(__volatile uint64_t *p, uint64_t v)
{
	uint64_t temp;

	__asm __volatile (
		"1:\n\t"
		"lld	%0, %3\n\t"		/* load old value */
		"daddu	%0, %2, %0\n\t"		/* calculate new value */
		"scd	%0, %1\n\t"		/* attempt to store */
		"beqz	%0, 1b\n\t"		/* spin if failed */
		: "=&r" (temp), "=m" (*p)
		: "r" (v), "m" (*p)
		: "memory");
}

static __inline void
atomic_subtract_64(__volatile uint64_t *p, uint64_t v)
{
	uint64_t temp;

	__asm __volatile (
		"1:\n\t"
		"lld	%0, %3\n\t"		/* load old value */
		"dsubu	%0, %2\n\t"		/* calculate new value */
		"scd	%0, %1\n\t"		/* attempt to store */
		"beqz	%0, 1b\n\t"		/* spin if failed */
		: "=&r" (temp), "=m" (*p)
		: "r" (v), "m" (*p)
		: "memory");
}

static __inline uint64_t
atomic_readandclear_64(__volatile uint64_t *addr)
{
	uint64_t result, temp;

	__asm __volatile (
		"1:\n\t"
		"lld	 %0, %3\n\t"		/* load old value */
		"li	 %1, 0\n\t"		/* value to store */
		"scd	 %1, %2\n\t"		/* attempt to store */
		"beqz	 %1, 1b\n\t"		/* if the store failed, spin */
		: "=&r" (result), "=&r" (temp), "=m" (*addr)
		: "m" (*addr)
		: "memory");

	return (result);
}

static __inline uint64_t
atomic_readandset_64(__volatile uint64_t *addr, uint64_t value)
{
	uint64_t result, temp;

	__asm __volatile (
		"1:\n\t"
		"lld	 %0, %3\n\t"		/* load old value */
		"or	 %1, $0, %4\n\t"	/* value to store */
		"scd	 %1, %2\n\t"		/* attempt to store */
		"beqz	 %1, 1b\n\t"		/* if the store failed, spin */
		: "=&r" (result), "=&r" (temp), "=m" (*addr)
		: "m" (*addr), "r" (value)
		: "memory");

	return (result);
}
#endif

#define	ATOMIC_ACQ_REL(NAME, WIDTH)					\
static __inline void							\
atomic_##NAME##_acq_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
{									\
	atomic_##NAME##_##WIDTH(p, v);					\
	mips_sync();							\
}									\
									\
static __inline void							\
atomic_##NAME##_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
{									\
	mips_sync();							\
	atomic_##NAME##_##WIDTH(p, v);					\
}
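
/*
 * For each generated pair, the acquire variant performs the operation
 * and then issues a sync, so later accesses cannot be reordered before
 * it; the release variant issues the sync first, so earlier accesses
 * complete before the operation.  For example, ATOMIC_ACQ_REL(set, 32)
 * produces atomic_set_acq_32() and atomic_set_rel_32() wrapping
 * atomic_set_32().  mips_sync() is a full barrier, which is stronger
 * than either semantic strictly requires.
 */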

/* Variants of simple arithmetic with memory barriers. */
ATOMIC_ACQ_REL(set, 8)
ATOMIC_ACQ_REL(clear, 8)
ATOMIC_ACQ_REL(add, 8)
ATOMIC_ACQ_REL(subtract, 8)
ATOMIC_ACQ_REL(set, 16)
ATOMIC_ACQ_REL(clear, 16)
ATOMIC_ACQ_REL(add, 16)
ATOMIC_ACQ_REL(subtract, 16)
ATOMIC_ACQ_REL(set, 32)
ATOMIC_ACQ_REL(clear, 32)
ATOMIC_ACQ_REL(add, 32)
ATOMIC_ACQ_REL(subtract, 32)
#if defined(__mips_n64) || defined(__mips_n32)
ATOMIC_ACQ_REL(set, 64)
ATOMIC_ACQ_REL(clear, 64)
ATOMIC_ACQ_REL(add, 64)
ATOMIC_ACQ_REL(subtract, 64)
#endif

#undef ATOMIC_ACQ_REL

/*
 * We assume that a = b will do atomic loads and stores.
 */
#define	ATOMIC_STORE_LOAD(WIDTH)			\
static __inline uint##WIDTH##_t				\
atomic_load_acq_##WIDTH(__volatile uint##WIDTH##_t *p)	\
{							\
	uint##WIDTH##_t v;				\
							\
	v = *p;						\
	mips_sync();					\
	return (v);					\
}							\
							\
static __inline void					\
atomic_store_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
{							\
	mips_sync();					\
	*p = v;						\
}

ATOMIC_STORE_LOAD(32)
ATOMIC_STORE_LOAD(64)
#if !defined(__mips_n64) && !defined(__mips_n32)
/*
 * 32-bit MIPS has no 64-bit ll/sc pair, so these are implemented
 * out of line.
 */
void atomic_store_64(__volatile uint64_t *, uint64_t *);
void atomic_load_64(__volatile uint64_t *, uint64_t *);
#else
static __inline void
atomic_store_64(__volatile uint64_t *p, uint64_t *v)
{
	*p = *v;
}

static __inline void
atomic_load_64(__volatile uint64_t *p, uint64_t *v)
{
	*v = *p;
}
#endif

#undef ATOMIC_STORE_LOAD

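/*
 * An illustrative sketch, not part of this header: publishing a value
 * with store-release and observing it with load-acquire.  The names
 * "datum" and "ready" are hypothetical.
 *
 *	datum = 42;
 *	atomic_store_rel_32(&ready, 1);			(sync, then store)
 *
 *	while (atomic_load_acq_32(&ready) == 0)		(load, then sync)
 *		;
 *	(datum is now guaranteed to read as 42)
 */
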
/*
 * Atomically compare the value stored at *p with cmpval and if the
 * two values are equal, update the value of *p with newval. Returns
 * zero if the compare failed, nonzero otherwise.
 */
static __inline uint32_t
atomic_cmpset_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
	uint32_t ret;

	__asm __volatile (
		"1:\tll	%0, %4\n\t"		/* load old value */
		"bne	%0, %2, 2f\n\t"		/* compare */
		"move	%0, %3\n\t"		/* value to store */
		"sc	%0, %1\n\t"		/* attempt to store */
		"beqz	%0, 1b\n\t"		/* if it failed, spin */
		"j	3f\n\t"
		"2:\n\t"
		"li	%0, 0\n\t"
		"3:\n"
		: "=&r" (ret), "=m" (*p)
		: "r" (cmpval), "r" (newval), "m" (*p)
		: "memory");

	return (ret);
}
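
/*
 * An illustrative sketch, not part of this header: the usual cmpset
 * retry loop, here computing an atomic maximum.  The function name
 * atomic_max_32 is hypothetical.
 *
 *	static __inline void
 *	atomic_max_32(__volatile uint32_t *p, uint32_t v)
 *	{
 *		uint32_t old;
 *
 *		do {
 *			old = *p;
 *			if (old >= v)
 *				return;
 *		} while (atomic_cmpset_32(p, old, v) == 0);
 *	}
 *
 * Re-reading *p on every iteration matters: cmpset succeeds only if *p
 * still holds the value previously read, so a concurrent update simply
 * sends the loop around again.
 */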

/*
 * Acquire and release variants of atomic_cmpset_32(): the acquire
 * form issues the barrier after the operation, the release form
 * before it.
 */
static __inline uint32_t
atomic_cmpset_acq_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
	uint32_t retval;

	retval = atomic_cmpset_32(p, cmpval, newval);
	mips_sync();
	return (retval);
}

static __inline uint32_t
atomic_cmpset_rel_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
	mips_sync();
	return (atomic_cmpset_32(p, cmpval, newval));
}

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline uint32_t
atomic_fetchadd_32(__volatile uint32_t *p, uint32_t v)
{
	uint32_t value, temp;

	__asm __volatile (
		"1:\tll	%0, %1\n\t"		/* load old value */
		"addu	%2, %3, %0\n\t"		/* calculate new value */
		"sc	%2, %1\n\t"		/* attempt to store */
		"beqz	%2, 1b\n\t"		/* spin if failed */
		: "=&r" (value), "=m" (*p), "=&r" (temp)
		: "r" (v), "m" (*p));
	return (value);
}

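/*
 * An illustrative sketch, not part of this header: because fetchadd
 * returns the old value, it makes a natural unique-ticket allocator.
 * The variable "next_ticket" is hypothetical.
 *
 *	uint32_t my_ticket;
 *
 *	my_ticket = atomic_fetchadd_32(&next_ticket, 1);
 *
 * Each caller obtains a distinct old value even under contention,
 * which the non-returning atomic_add_32() cannot provide.
 */
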
#if defined(__mips_n64) || defined(__mips_n32)
/*
 * Atomically compare the value stored at *p with cmpval and if the
 * two values are equal, update the value of *p with newval. Returns
 * zero if the compare failed, nonzero otherwise.
 */
static __inline uint64_t
atomic_cmpset_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
{
	uint64_t ret;

	__asm __volatile (
		"1:\n\t"
		"lld	%0, %4\n\t"		/* load old value */
		"bne	%0, %2, 2f\n\t"		/* compare */
		"move	%0, %3\n\t"		/* value to store */
		"scd	%0, %1\n\t"		/* attempt to store */
		"beqz	%0, 1b\n\t"		/* if it failed, spin */
		"j	3f\n\t"
		"2:\n\t"
		"li	%0, 0\n\t"
		"3:\n"
		: "=&r" (ret), "=m" (*p)
		: "r" (cmpval), "r" (newval), "m" (*p)
		: "memory");

	return (ret);
}

/*
 * Acquire and release variants of atomic_cmpset_64(): the acquire
 * form issues the barrier after the operation, the release form
 * before it.
 */
static __inline uint64_t
atomic_cmpset_acq_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
{
	uint64_t retval;

	retval = atomic_cmpset_64(p, cmpval, newval);
	mips_sync();
	return (retval);
}

static __inline uint64_t
atomic_cmpset_rel_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
{
	mips_sync();
	return (atomic_cmpset_64(p, cmpval, newval));
}

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline uint64_t
atomic_fetchadd_64(__volatile uint64_t *p, uint64_t v)
{
	uint64_t value, temp;

	__asm __volatile (
		"1:\n\t"
		"lld	%0, %1\n\t"		/* load old value */
		"daddu	%2, %3, %0\n\t"		/* calculate new value */
		"scd	%2, %1\n\t"		/* attempt to store */
		"beqz	%2, 1b\n\t"		/* spin if failed */
		: "=&r" (value), "=m" (*p), "=&r" (temp)
		: "r" (v), "m" (*p));
	return (value);
}
#endif

/* Operations on chars. */
#define	atomic_set_char		atomic_set_8
#define	atomic_set_acq_char	atomic_set_acq_8
#define	atomic_set_rel_char	atomic_set_rel_8
#define	atomic_clear_char	atomic_clear_8
#define	atomic_clear_acq_char	atomic_clear_acq_8
#define	atomic_clear_rel_char	atomic_clear_rel_8
#define	atomic_add_char		atomic_add_8
#define	atomic_add_acq_char	atomic_add_acq_8
#define	atomic_add_rel_char	atomic_add_rel_8
#define	atomic_subtract_char	atomic_subtract_8
#define	atomic_subtract_acq_char	atomic_subtract_acq_8
#define	atomic_subtract_rel_char	atomic_subtract_rel_8

/* Operations on shorts. */
#define	atomic_set_short	atomic_set_16
#define	atomic_set_acq_short	atomic_set_acq_16
#define	atomic_set_rel_short	atomic_set_rel_16
#define	atomic_clear_short	atomic_clear_16
#define	atomic_clear_acq_short	atomic_clear_acq_16
#define	atomic_clear_rel_short	atomic_clear_rel_16
#define	atomic_add_short	atomic_add_16
#define	atomic_add_acq_short	atomic_add_acq_16
#define	atomic_add_rel_short	atomic_add_rel_16
#define	atomic_subtract_short	atomic_subtract_16
#define	atomic_subtract_acq_short	atomic_subtract_acq_16
#define	atomic_subtract_rel_short	atomic_subtract_rel_16

/* Operations on ints. */
#define	atomic_set_int		atomic_set_32
#define	atomic_set_acq_int	atomic_set_acq_32
#define	atomic_set_rel_int	atomic_set_rel_32
#define	atomic_clear_int	atomic_clear_32
#define	atomic_clear_acq_int	atomic_clear_acq_32
#define	atomic_clear_rel_int	atomic_clear_rel_32
#define	atomic_add_int		atomic_add_32
#define	atomic_add_acq_int	atomic_add_acq_32
#define	atomic_add_rel_int	atomic_add_rel_32
#define	atomic_subtract_int	atomic_subtract_32
#define	atomic_subtract_acq_int	atomic_subtract_acq_32
#define	atomic_subtract_rel_int	atomic_subtract_rel_32
#define	atomic_cmpset_int	atomic_cmpset_32
#define	atomic_cmpset_acq_int	atomic_cmpset_acq_32
#define	atomic_cmpset_rel_int	atomic_cmpset_rel_32
#define	atomic_load_acq_int	atomic_load_acq_32
#define	atomic_store_rel_int	atomic_store_rel_32
#define	atomic_readandclear_int	atomic_readandclear_32
#define	atomic_readandset_int	atomic_readandset_32
#define	atomic_fetchadd_int	atomic_fetchadd_32

/*
 * The following should be right even for n32: n32 pointers are still
 * 32 bits wide, so we operate on them as 32-bit quantities, even
 * though they are sign-extended in use.  For longs there is no
 * question, since outside of n64 they are always 32 bits.
 */
#ifdef __mips_n64
/* Operations on longs. */
#define	atomic_set_long		atomic_set_64
#define	atomic_set_acq_long	atomic_set_acq_64
#define	atomic_set_rel_long	atomic_set_rel_64
#define	atomic_clear_long	atomic_clear_64
#define	atomic_clear_acq_long	atomic_clear_acq_64
#define	atomic_clear_rel_long	atomic_clear_rel_64
#define	atomic_add_long		atomic_add_64
#define	atomic_add_acq_long	atomic_add_acq_64
#define	atomic_add_rel_long	atomic_add_rel_64
#define	atomic_subtract_long	atomic_subtract_64
#define	atomic_subtract_acq_long	atomic_subtract_acq_64
#define	atomic_subtract_rel_long	atomic_subtract_rel_64
#define	atomic_cmpset_long	atomic_cmpset_64
#define	atomic_cmpset_acq_long	atomic_cmpset_acq_64
#define	atomic_cmpset_rel_long	atomic_cmpset_rel_64
#define	atomic_load_acq_long	atomic_load_acq_64
#define	atomic_store_rel_long	atomic_store_rel_64
#define	atomic_fetchadd_long	atomic_fetchadd_64
#define	atomic_readandclear_long	atomic_readandclear_64

#else /* !__mips_n64 */

/* Operations on longs. */
#define	atomic_set_long(p, v)						\
	atomic_set_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_long(p, v)					\
	atomic_set_acq_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_long(p, v)					\
	atomic_set_rel_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_long(p, v)						\
	atomic_clear_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_long(p, v)					\
	atomic_clear_acq_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_long(p, v)					\
	atomic_clear_rel_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_long(p, v)						\
	atomic_add_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_long(p, v)					\
	atomic_add_acq_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_long(p, v)					\
	atomic_add_rel_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_long(p, v)					\
	atomic_subtract_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_long(p, v)					\
	atomic_subtract_acq_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_long(p, v)					\
	atomic_subtract_rel_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_cmpset_long(p, cmpval, newval)				\
	atomic_cmpset_32((volatile u_int *)(p), (u_int)(cmpval),	\
	    (u_int)(newval))
#define	atomic_cmpset_acq_long(p, cmpval, newval)			\
	atomic_cmpset_acq_32((volatile u_int *)(p), (u_int)(cmpval),	\
	    (u_int)(newval))
#define	atomic_cmpset_rel_long(p, cmpval, newval)			\
	atomic_cmpset_rel_32((volatile u_int *)(p), (u_int)(cmpval),	\
	    (u_int)(newval))
#define	atomic_load_acq_long(p)						\
	(u_long)atomic_load_acq_32((volatile u_int *)(p))
#define	atomic_store_rel_long(p, v)					\
	atomic_store_rel_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_fetchadd_long(p, v)					\
	atomic_fetchadd_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_readandclear_long(p)					\
	atomic_readandclear_32((volatile u_int *)(p))

#endif /* __mips_n64 */

/* Operations on pointers. */
#define	atomic_set_ptr		atomic_set_long
#define	atomic_set_acq_ptr	atomic_set_acq_long
#define	atomic_set_rel_ptr	atomic_set_rel_long
#define	atomic_clear_ptr	atomic_clear_long
#define	atomic_clear_acq_ptr	atomic_clear_acq_long
#define	atomic_clear_rel_ptr	atomic_clear_rel_long
#define	atomic_add_ptr		atomic_add_long
#define	atomic_add_acq_ptr	atomic_add_acq_long
#define	atomic_add_rel_ptr	atomic_add_rel_long
#define	atomic_subtract_ptr	atomic_subtract_long
#define	atomic_subtract_acq_ptr	atomic_subtract_acq_long
#define	atomic_subtract_rel_ptr	atomic_subtract_rel_long
#define	atomic_cmpset_ptr	atomic_cmpset_long
#define	atomic_cmpset_acq_ptr	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_ptr	atomic_cmpset_rel_long
#define	atomic_load_acq_ptr	atomic_load_acq_long
#define	atomic_store_rel_ptr	atomic_store_rel_long
#define	atomic_readandclear_ptr	atomic_readandclear_long

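/*
 * An illustrative sketch, not part of this header: atomic_cmpset_ptr()
 * supports the classic lock-free singly-linked push.  The structure,
 * the variable "head" and the casts are hypothetical.
 *
 *	struct node { struct node *next; };
 *	struct node *volatile head;
 *
 *	do {
 *		np->next = head;
 *	} while (atomic_cmpset_ptr((volatile u_long *)&head,
 *	    (u_long)np->next, (u_long)np) == 0);
 */
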
#endif /* ! _MACHINE_ATOMIC_H_ */