/*
 * include/asm-ppc/rwsem.h: R/W semaphores for PPC using the stuff
 * in lib/rwsem.c. Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _PPC64_RWSEM_H
#define _PPC64_RWSEM_H

#ifdef __KERNEL__
#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * The semaphore definition.
 *
 * "count" encodes the whole lock state in one word so the fast paths
 * need a single atomic op:
 *   - the low 16 bits (RWSEM_ACTIVE_MASK) count active holders
 *     (concurrent readers, or the single writer);
 *   - each sleeper on wait_list contributes RWSEM_WAITING_BIAS
 *     (-0x00010000), driving the word negative.
 * So count == 0 is unlocked, count > 0 is reader-held with no waiters,
 * and count < 0 means a writer holds it and/or tasks are queued.  The
 * slow paths (rwsem_down_*_failed/rwsem_wake, in lib/rwsem.c per the
 * header comment above) rely on this encoding.
 */
struct rw_semaphore {
	/* Manipulated atomically by casting &count to atomic_t * in the
	 * accessors below. */
	signed int count;
#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t wait_lock;		/* protects wait_list */
	struct list_head wait_list;	/* tasks sleeping on this sem */
#if RWSEM_DEBUG
	int debug;
#endif
};

/*
 * initialisation
 */
#if RWSEM_DEBUG
#define __RWSEM_DEBUG_INIT	, 0
#else
#define __RWSEM_DEBUG_INIT	/* */
#endif

/* Static initializer; field order must match struct rw_semaphore. */
#define __RWSEM_INITIALIZER(name) \
	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
	  LIST_HEAD_INIT((name).wait_list) \
	  __RWSEM_DEBUG_INIT }

#define DECLARE_RWSEM(name)		\
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

/* Contended slow paths, implemented out of line (lib/rwsem.c). */
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);

/*
 * Run-time initialisation to the unlocked state; the dynamic
 * counterpart of __RWSEM_INITIALIZER.
 */
static inline void init_rwsem(struct rw_semaphore *sem)
{
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#if RWSEM_DEBUG
	sem->debug = 0;
#endif
}

/*
 * lock for reading
 *
 * Fast path: bump the count.  A non-negative result means no writer is
 * active or waiting, so we hold a read lock; otherwise block in the
 * slow path.
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	if (atomic_inc_return((atomic_t *)(&sem->count)) >= 0)
		/* NOTE(review): smp_wmb() orders only stores; acquiring a
		 * lock would conventionally need acquire/full-barrier
		 * semantics here.  Kept byte-identical -- confirm against
		 * this architecture's barrier rules. */
		smp_wmb();
	else
		rwsem_down_read_failed(sem);
}

/*
 * lock for writing
 *
 * Fast path: add the writer bias.  Getting exactly
 * RWSEM_ACTIVE_WRITE_BIAS back means count was previously zero (no
 * holders, no waiters), so we now own the sem exclusively; any other
 * value means contention and we block.
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	int tmp;

	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				(atomic_t *)(&sem->count));
	if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
		/* NOTE(review): same acquire-barrier concern as in
		 * __down_read() above. */
		smp_wmb();
	else
		rwsem_down_write_failed(sem);
}

/*
 * unlock after reading
 *
 * Publish the critical section's stores (smp_wmb) before dropping our
 * active-reader bias.  If the new count is negative with the active
 * mask clear, we were the last active holder and sleepers are queued:
 * wake them.
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_dec_return((atomic_t *)(&sem->count));
	if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}

/*
 * unlock after writing
 *
 * Publish our stores, then remove the writer bias.  A negative result
 * means waiters accumulated while we held the lock, so hand off via
 * rwsem_wake().
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	smp_wmb();
	if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
			      (atomic_t *)(&sem->count)) < 0)
		rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 *
 * Adjusts the count when the caller does not need the resulting value
 * (used by the slow paths).
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
	atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * implement exchange and add functionality
 *
 * Full barrier, then add delta and return the new count (used by the
 * slow paths to transition the lock state).
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
	smp_mb();
	return atomic_add_return(delta, (atomic_t *)(&sem->count));
}

#endif /* __KERNEL__ */
#endif /* _PPC64_RWSEM_H */