1/* spinlock.h: 64-bit Sparc spinlock support.
2 *
3 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
4 */
5
6#ifndef __SPARC64_SPINLOCK_H
7#define __SPARC64_SPINLOCK_H
8
9#include <linux/config.h>
10
11#ifndef __ASSEMBLY__
12
13/* To get debugging spinlocks which detect and catch
14 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
15 * and rebuild your kernel.
16 */
17
18/* All of these locking primitives are expected to work properly
19 * even in an RMO memory model, which currently is what the kernel
20 * runs in.
21 *
22 * There is another issue.  Because we play games to save cycles
23 * in the non-contention case, we need to be extra careful about
24 * branch targets into the "spinning" code.  They live in their
25 * own section, but the newer V9 branches have a shorter range
26 * than the traditional 32-bit sparc branch variants.  The rule
27 * is that the branches that go into and out of the spinner sections
28 * must be pre-V9 branches.
29 */
30
31#ifndef CONFIG_DEBUG_SPINLOCK
32
/* Fast-path (non-debug) spinlock: a single byte, 0 = unlocked,
 * non-zero = held (spin_lock's ldstub writes 0xff when acquiring).
 */
typedef unsigned char spinlock_t;
#define SPIN_LOCK_UNLOCKED	0

/* Reset the lock byte to the unlocked state. */
#define spin_lock_init(lock)	(*((unsigned char *)(lock)) = 0)
/* Non-zero lock byte means held; the volatile cast forces a real load. */
#define spin_is_locked(lock)	(*((volatile unsigned char *)(lock)) != 0)
38
/* Spin (loads only, no stores) until the lock is observed free.
 * The #LoadLoad membar orders each re-read of the lock byte after
 * prior loads, as required under the RMO memory model noted above.
 * The argument is parenthesized (matching spin_is_locked above) so
 * expressions such as "p + 1" expand correctly inside the cast.
 */
#define spin_unlock_wait(lock)	\
do {	membar("#LoadLoad");	\
} while(*((volatile unsigned char *)(lock)))
42
/* Acquire the lock, spinning until it is free.
 *
 * 1: ldstub atomically fetches the lock byte into %g7 and stores 0xff.
 *    A non-zero %g7 means the lock was already held, so we branch to
 *    the spinner at 2 (kept out of the hot path in .subsection 2).
 *    The membar in the delay slot orders the acquiring store before
 *    the critical section's subsequent loads and stores (RMO).
 * 2: wait with plain loads only (no stores while spinning) until the
 *    byte reads zero, then branch back to 1 to retry the ldstub; the
 *    #LoadLoad membar orders the re-reads.  See the comment at the
 *    top of this file about which branch variants may enter/leave
 *    the spinner section.
 */
extern __inline__ void spin_lock(spinlock_t *lock)
{
	__asm__ __volatile__(
"1:	ldstub		[%0], %%g7\n"
"	brnz,pn		%%g7, 2f\n"
"	 membar		#StoreLoad | #StoreStore\n"
"	.subsection	2\n"
"2:	ldub		[%0], %%g7\n"
"	brnz,pt		%%g7, 2b\n"
"	 membar		#LoadLoad\n"
"	b,a,pt		%%xcc, 1b\n"
"	.previous\n"
	: /* no outputs */
	: "r" (lock)
	: "g7", "memory");
}
59
/* Attempt to take the lock once, without spinning.
 *
 * ldstub atomically fetches the old lock byte into "result" and
 * stores 0xff; an old value of zero means we just acquired the lock.
 * The membar orders the acquiring store before later memory
 * operations, mirroring the fast path of spin_lock().
 *
 * Returns 1 on success, 0 if the lock was already held.
 */
extern __inline__ int spin_trylock(spinlock_t *lock)
{
	unsigned int result;
	__asm__ __volatile__("ldstub [%1], %0\n\t"
			     "membar #StoreLoad | #StoreStore"
			     : "=r" (result)
			     : "r" (lock)
			     : "memory");
	return (result == 0);
}
70
/* Release the lock.
 *
 * The membar forces all stores and loads inside the critical section
 * to complete before the releasing store of zero to the lock byte,
 * as required under the RMO memory model noted at the top of the
 * file.  The "memory" clobber keeps the compiler from reordering
 * memory accesses across the release as well.
 */
extern __inline__ void spin_unlock(spinlock_t *lock)
{
	__asm__ __volatile__("membar	#StoreStore | #LoadStore\n\t"
			     "stb	%%g0, [%0]"
			     : /* No outputs */
			     : "r" (lock)
			     : "memory");
}
79
80#else /* !(CONFIG_DEBUG_SPINLOCK) */
81
/* Debug spinlock: the lock byte plus bookkeeping fields, presumably
 * recorded by the out-of-line _do_spin_lock() helpers so deadlocks
 * can be reported -- confirm against the debug lock implementation.
 */
typedef struct {
	unsigned char lock;		/* 0 = free, non-zero = held */
	unsigned int owner_pc, owner_cpu;
} spinlock_t;
/* owner_cpu of 0xff marks "no owner" (see spin_lock_init below). */
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0, 0xff }
#define spin_lock_init(__lock)	\
do {	(__lock)->lock = 0; \
	(__lock)->owner_pc = 0; \
	(__lock)->owner_cpu = 0xff; \
} while(0)
/* Same semantics as the non-debug variants, applied to the embedded
 * lock byte; volatile casts force real loads on every check.
 */
#define spin_is_locked(__lock)	(*((volatile unsigned char *)(&((__lock)->lock))) != 0)
#define spin_unlock_wait(__lock)	\
do { \
	membar("#LoadLoad"); \
} while(*((volatile unsigned char *)(&((__lock)->lock))))
97
/* Out-of-line debug implementations; "str" names the calling
 * primitive, presumably for diagnostic printout -- confirm in the
 * debug lock code.
 */
extern void _do_spin_lock (spinlock_t *lock, char *str);
extern void _do_spin_unlock (spinlock_t *lock);
extern int _spin_trylock (spinlock_t *lock);

/* Route the public primitives to the debug helpers. */
#define spin_trylock(lp)	_spin_trylock(lp)
#define spin_lock(lock)		_do_spin_lock(lock, "spin_lock")
#define spin_unlock(lock)	_do_spin_unlock(lock)
105
106#endif /* CONFIG_DEBUG_SPINLOCK */
107
108/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
109
110#ifndef CONFIG_DEBUG_SPINLOCK
111
/* Non-debug reader/writer lock: a single word, 0 = unlocked; the
 * actual acquire/release logic lives in the out-of-line __read_lock()
 * etc. helpers declared below.
 */
typedef unsigned int rwlock_t;
#define RW_LOCK_UNLOCKED	0
#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)

extern void __read_lock(rwlock_t *);
extern void __read_unlock(rwlock_t *);
extern void __write_lock(rwlock_t *);
extern void __write_unlock(rwlock_t *);

/* Public primitives are thin wrappers around the helpers above. */
#define read_lock(p)	__read_lock(p)
#define read_unlock(p)	__read_unlock(p)
#define write_lock(p)	__write_lock(p)
#define write_unlock(p)	__write_unlock(p)
125
126#else /* !(CONFIG_DEBUG_SPINLOCK) */
127
/* Debug reader/writer lock: the lock word plus bookkeeping fields,
 * presumably maintained by the _do_read_lock() etc. helpers for
 * diagnostics -- confirm against the debug lock implementation.
 */
typedef struct {
	unsigned long lock;
	unsigned int writer_pc, writer_cpu;
	unsigned int reader_pc[4];
} rwlock_t;
/* writer_cpu of 0xff marks "no writer" (mirrors the spinlock case). */
#define RW_LOCK_UNLOCKED	(rwlock_t) { 0, 0, 0xff, { 0, 0, 0, 0 } }
#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)

/* Out-of-line debug implementations; "str" names the caller. */
extern void _do_read_lock(rwlock_t *rw, char *str);
extern void _do_read_unlock(rwlock_t *rw, char *str);
extern void _do_write_lock(rwlock_t *rw, char *str);
extern void _do_write_unlock(rwlock_t *rw);
140
/* Debug read_lock: interrupts are disabled across the out-of-line
 * helper, presumably so the debug bookkeeping is not re-entered from
 * IRQ context -- confirm against the helper.  The temporary is named
 * __flags and the argument parenthesized so the macro stays hygienic
 * if the caller's lock expression itself mentions "flags".
 */
#define read_lock(lock)	\
do {	unsigned long __flags; \
	__save_and_cli(__flags); \
	_do_read_lock((lock), "read_lock"); \
	__restore_flags(__flags); \
} while(0)
147
/* Debug read_unlock: helper runs with interrupts disabled, matching
 * read_lock above.  __flags/(lock) keep the macro hygienic against
 * caller expressions that mention "flags".
 */
#define read_unlock(lock) \
do {	unsigned long __flags; \
	__save_and_cli(__flags); \
	_do_read_unlock((lock), "read_unlock"); \
	__restore_flags(__flags); \
} while(0)
154
/* Debug write_lock: helper runs with interrupts disabled, matching
 * read_lock above.  __flags/(lock) keep the macro hygienic against
 * caller expressions that mention "flags".
 */
#define write_lock(lock) \
do {	unsigned long __flags; \
	__save_and_cli(__flags); \
	_do_write_lock((lock), "write_lock"); \
	__restore_flags(__flags); \
} while(0)
161
/* Debug write_unlock: helper runs with interrupts disabled, matching
 * the other debug rwlock wrappers.  __flags/(lock) keep the macro
 * hygienic against caller expressions that mention "flags".
 */
#define write_unlock(lock) \
do {	unsigned long __flags; \
	__save_and_cli(__flags); \
	_do_write_unlock((lock)); \
	__restore_flags(__flags); \
} while(0)
168
169#endif /* CONFIG_DEBUG_SPINLOCK */
170
171#endif /* !(__ASSEMBLY__) */
172
173#endif /* !(__SPARC64_SPINLOCK_H) */
174