#ifndef _ALPHA_SEMAPHORE_H
#define _ALPHA_SEMAPHORE_H

/*
 * SMP- and interrupt-safe semaphores.
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1996, 2000 Richard Henderson
 */

#include <asm/current.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <linux/compiler.h>
#include <linux/wait.h>
#include <linux/rwsem.h>

struct semaphore {
	atomic_t count;
	wait_queue_head_t wait;
};

#define __SEMAPHORE_INITIALIZER(name, n)			\
{								\
	.count	= ATOMIC_INIT(n),				\
	.wait	= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait),	\
}

#define __DECLARE_SEMAPHORE_GENERIC(name,count)		\
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name)	__DECLARE_SEMAPHORE_GENERIC(name,0)
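
/*
 * Illustrative usage sketch (an assumption for documentation, not part
 * of this header): a hypothetical caller serializing a critical section
 * with a statically declared mutex-style semaphore.  The names
 * example_sem and example_work are invented for the example.
 *
 *	static DECLARE_MUTEX(example_sem);
 *
 *	static void example_work(void)
 *	{
 *		down(&example_sem);
 *		... critical section, at most one task inside at a time ...
 *		up(&example_sem);
 *	}
 *
 * DECLARE_MUTEX_LOCKED starts the count at 0 instead, so the first
 * down() blocks until another context calls up().
 */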

static inline void sema_init(struct semaphore *sem, int val)
{
	/*
	 * Logically,
	 *   *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
	 * except that gcc produces better code when the members are
	 * initialized individually.
	 */

	atomic_set(&sem->count, val);
	init_waitqueue_head(&sem->wait);
}

static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}
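
/*
 * Illustrative sketch (an assumption, not part of this header):
 * initializing a semaphore embedded in a dynamically allocated object,
 * where the static DECLARE_* forms cannot be used.  struct example_dev
 * and example_dev_setup are invented names.
 *
 *	struct example_dev {
 *		struct semaphore lock;
 *	};
 *
 *	static void example_dev_setup(struct example_dev *dev)
 *	{
 *		init_MUTEX(&dev->lock);	  equivalent to sema_init(&dev->lock, 1)
 *	}
 */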

extern void down(struct semaphore *);
extern void __down_failed(struct semaphore *);
extern int  down_interruptible(struct semaphore *);
extern int  __down_failed_interruptible(struct semaphore *);
extern int  down_trylock(struct semaphore *);
extern void up(struct semaphore *);
extern void __up_wakeup(struct semaphore *);
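
/*
 * Illustrative sketch (an assumption, not part of this header): the
 * usual pattern for the interruptible variant, which returns 0 once the
 * semaphore is acquired and a negative error if the sleep is broken by
 * a signal.  example_sem and example_wait are invented names.
 *
 *	static int example_wait(void)
 *	{
 *		if (down_interruptible(&example_sem))
 *			return -ERESTARTSYS;
 *		... protected work ...
 *		up(&example_sem);
 *		return 0;
 *	}
 */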

/*
 * Hidden out of line code is fun, but extremely messy.  Rely on newer
 * compilers to do a respectable job with this.  The contention cases
 * are handled out of line in arch/alpha/kernel/semaphore.c.
 */
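
/*
 * Fast-path arithmetic, for illustration (derived from the inline code
 * below, starting from a DECLARE_MUTEX count of 1; the out-of-line slow
 * path manages the count further while tasks sleep):
 *
 *	down():  1 -> 0		uncontended, caller proceeds
 *	down():  0 -> -1	negative, so __down_failed() sleeps
 *	up():   -1 -> 0		result <= 0, so __up_wakeup() wakes a waiter
 *	up():    0 -> 1		positive result, nobody waiting, no slow path
 */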

static inline void __down(struct semaphore *sem)
{
	long count;
	might_sleep();
	count = atomic_dec_return(&sem->count);
	if (unlikely(count < 0))
		__down_failed(sem);
}

static inline int __down_interruptible(struct semaphore *sem)
{
	long count;
	might_sleep();
	count = atomic_dec_return(&sem->count);
	if (unlikely(count < 0))
		return __down_failed_interruptible(sem);
	return 0;
}

/*
 * down_trylock returns 0 on success, 1 if we failed to get the lock.
 */

static inline int __down_trylock(struct semaphore *sem)
{
	long ret;

	/* "Equivalent" C:

	   do {
		ret = ldl_l;
		--ret;
		if (ret < 0)
			break;
		ret = stl_c = ret;
	   } while (ret == 0);
	*/
	__asm__ __volatile__(
		"1:	ldl_l	%0,%1\n"
		"	subl	%0,1,%0\n"
		"	blt	%0,2f\n"
		"	stl_c	%0,%1\n"
		"	beq	%0,3f\n"
		"	mb\n"
		"2:\n"
		".subsection 2\n"
		"3:	br	1b\n"
		".previous"
		: "=&r" (ret), "=m" (sem->count)
		: "m" (sem->count));

	return ret < 0;
}
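
/*
 * Illustrative sketch (an assumption, not part of this header): using
 * the trylock variant where sleeping is not allowed.  Note the return
 * convention spelled out above: 0 means the semaphore was acquired.
 * example_sem is an invented name.
 *
 *	if (!down_trylock(&example_sem)) {
 *		... got it without sleeping, do the work ...
 *		up(&example_sem);
 *	} else {
 *		... semaphore busy, fall back ...
 *	}
 */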

static inline void __up(struct semaphore *sem)
{
	if (unlikely(atomic_inc_return(&sem->count) <= 0))
		__up_wakeup(sem);
}

#if !defined(CONFIG_DEBUG_SEMAPHORE)
extern inline void down(struct semaphore *sem)
{
	__down(sem);
}
extern inline int down_interruptible(struct semaphore *sem)
{
	return __down_interruptible(sem);
}
extern inline int down_trylock(struct semaphore *sem)
{
	return __down_trylock(sem);
}
extern inline void up(struct semaphore *sem)
{
	__up(sem);
}
#endif

#endif