#ifndef _S390_RWSEM_H
#define _S390_RWSEM_H

/*
 * include/asm-s390/rwsem.h
 *
 * S390 version
 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
 */

/*
 *
 * The MSW of the count is the negated number of active writers and waiting
 * lockers, and the LSW is the total number of active locks
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
 * uncontended lock. This can be determined because XADD returns the old value.
 * Readers increment by 1 and see a positive value when uncontended, negative
 * if there are writers (and maybe) readers waiting (in which case it goes to
 * sleep).
 *
 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
 * be extended to 65534 by manually checking the whole MSW rather than relying
 * on the S flag.
 *
 * The value of ACTIVE_BIAS supports up to 65535 active processes.
 *
 * This should be totally fair - if anything is waiting, a process that wants a
 * lock will go to the back of the queue. When the currently active lock is
 * released, if there's a writer at the front of the queue, then that and only
 * that will be woken up; if there's a bunch of consecutive readers at the
 * front, then they'll all be woken up, but no other readers will be.
 */

#ifndef _LINUX_RWSEM_H
#error please dont include asm/rwsem.h directly, use linux/rwsem.h instead
#endif

#ifdef __KERNEL__

#include <linux/list.h>
#include <linux/spinlock.h>

struct rwsem_waiter;

/* Slow paths, implemented out of line; called only on contention. */
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);

/*
 * the semaphore definition
 */
struct rw_semaphore {
	signed long		count;		/* see bias scheme in the header comment above */
	spinlock_t		wait_lock;	/* protects wait_list */
	struct list_head	wait_list;	/* queue of rwsem_waiter entries */
};

/*
 * Bias values applied to ->count.  Readers add ACTIVE_READ_BIAS (+1);
 * a writer adds ACTIVE_WRITE_BIAS (WAITING_BIAS + ACTIVE_BIAS), so an
 * uncontended write lock leaves the count at 0xffff0001.
 */
#define RWSEM_UNLOCKED_VALUE	0x00000000
#define RWSEM_ACTIVE_BIAS	0x00000001
#define RWSEM_ACTIVE_MASK	0x0000ffff
#define RWSEM_WAITING_BIAS	(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

/*
 * initialisation
 */
#define __RWSEM_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

/* Run-time initialisation; equivalent to __RWSEM_INITIALIZER. */
static inline void init_rwsem(struct rw_semaphore *sem)
{
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}

/*
 * lock for reading
 *
 * Atomically adds RWSEM_ACTIVE_READ_BIAS to ->count via a compare-and-swap
 * retry loop (CS fails and branches back to 0: if the count changed under
 * us).  If the old count was negative a writer is active or waiting, so we
 * take the out-of-line slow path and sleep.
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	signed long old, new;

	__asm__ __volatile__(
		" l %0,0(%2)\n"
		"0: lr %1,%0\n"
		" ahi %1,%3\n"
		" cs %0,%1,0(%2)\n"
		" jl 0b"
		: "=&d" (old), "=&d" (new)
		: "a" (&sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
		: "cc", "memory" );
	if (old < 0)
		rwsem_down_read_failed(sem);
}

/*
 * lock for writing
 *
 * Atomically adds RWSEM_ACTIVE_WRITE_BIAS (from a memory operand, since it
 * does not fit an AHI immediate) to ->count.  Only if the old count was 0
 * was the lock completely free; anything else means contention and we take
 * the slow path.
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

	tmp = RWSEM_ACTIVE_WRITE_BIAS;
	__asm__ __volatile__(
		" l %0,0(%2)\n"
		"0: lr %1,%0\n"
		" a %1,%3\n"
		" cs %0,%1,0(%2)\n"
		" jl 0b"
		: "=&d" (old), "=&d" (new)
		: "a" (&sem->count), "m" (tmp)
		: "cc", "memory" );
	if (old != 0)
		rwsem_down_write_failed(sem);
}

/*
 * unlock after reading
 *
 * Atomically subtracts RWSEM_ACTIVE_READ_BIAS.  If the result is negative
 * there are waiters (MSW non-zero); if additionally no active lockers
 * remain (LSW == 0), wake the front of the queue.
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	signed long old, new;

	__asm__ __volatile__(
		" l %0,0(%2)\n"
		"0: lr %1,%0\n"
		" ahi %1,%3\n"
		" cs %0,%1,0(%2)\n"
		" jl 0b"
		: "=&d" (old), "=&d" (new)
		: "a" (&sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
		: "cc", "memory" );
	if (new < 0)
		if ((new & RWSEM_ACTIVE_MASK) == 0)
			rwsem_wake(sem);
}

/*
 * unlock after writing
 *
 * Atomically subtracts RWSEM_ACTIVE_WRITE_BIAS (memory operand, as in
 * __down_write).  A negative result with no active lockers left means
 * waiters are queued and must be woken.
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

	tmp = -RWSEM_ACTIVE_WRITE_BIAS;
	__asm__ __volatile__(
		" l %0,0(%2)\n"
		"0: lr %1,%0\n"
		" a %1,%3\n"
		" cs %0,%1,0(%2)\n"
		" jl 0b"
		: "=&d" (old), "=&d" (new)
		: "a" (&sem->count), "m" (tmp)
		: "cc", "memory" );
	if (new < 0)
		if ((new & RWSEM_ACTIVE_MASK) == 0)
			rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 *
 * CS retry loop adding 'delta' to ->count; the result is discarded.
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	signed long old, new;

	__asm__ __volatile__(
		" l %0,0(%2)\n"
		"0: lr %1,%0\n"
		" ar %1,%3\n"
		" cs %0,%1,0(%2)\n"
		" jl 0b"
		: "=&d" (old), "=&d" (new)
		: "a" (&sem->count), "d" (delta)
		: "cc", "memory" );
}

/*
 * implement exchange and add functionality
 *
 * Same CS retry loop as rwsem_atomic_add, but returns the new count so the
 * caller can inspect the post-update state.
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	signed long old, new;

	__asm__ __volatile__(
		" l %0,0(%2)\n"
		"0: lr %1,%0\n"
		" ar %1,%3\n"
		" cs %0,%1,0(%2)\n"
		" jl 0b"
		: "=&d" (old), "=&d" (new)
		: "a" (&sem->count), "d" (delta)
		: "cc", "memory" );
	return new;
}

#endif /* __KERNEL__ */
#endif /* _S390_RWSEM_H */