/*-
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _THR_FBSD_UMTX_H_
#define _THR_FBSD_UMTX_H_

#include <strings.h>
#include <sys/umtx.h>

/* Static initializers for zero-filled umutex / urwlock objects. */
#define DEFAULT_UMUTEX	{0,0,{0,0},{0,0,0,0}}
#define DEFAULT_URWLOCK {0,0,0,0,{0,0,0,0}}

/*
 * Thin wrapper around the _umtx_op(2) system call; returns an errno
 * value directly instead of -1/errno (hence the _err suffix).
 * NOTE(review): exact return convention inferred from the name and the
 * (void) use below -- confirm against the wrapper's definition.
 */
int _umtx_op_err(void *, int op, u_long, void *, void *) __hidden;

/*
 * Slow-path mutex operations.  The double-underscore versions are the
 * out-of-line fallbacks invoked by the static inline fast paths further
 * down when a userland compare-and-set cannot complete the operation.
 * All return 0 on success or an errno value on failure.
 */
int __thr_umutex_lock(struct umutex *mtx, uint32_t id) __hidden;
int __thr_umutex_lock_spin(struct umutex *mtx, uint32_t id) __hidden;
int __thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
	const struct timespec *timeout) __hidden;
int __thr_umutex_unlock(struct umutex *mtx, uint32_t id) __hidden;
int __thr_umutex_trylock(struct umutex *mtx) __hidden;
int __thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
	uint32_t *oldceiling) __hidden;

/* Initialize a umutex / urwlock to its default (unlocked) state. */
void _thr_umutex_init(struct umutex *mtx) __hidden;
void _thr_urwlock_init(struct urwlock *rwl) __hidden;

/*
 * Low-level wait/wake primitives: sleep while *mtx still holds the
 * expected value, optionally bounded by a timeout; `shared' selects
 * process-shared queueing.
 */
int _thr_umtx_wait(volatile long *mtx, long exp,
	const struct timespec *timeout) __hidden;
int _thr_umtx_wait_uint(volatile u_int *mtx, u_int exp,
	const struct timespec *timeout, int shared) __hidden;
int _thr_umtx_timedwait_uint(volatile u_int *mtx, u_int exp, int clockid,
	const struct timespec *timeout, int shared) __hidden;
int _thr_umtx_wake(volatile void *mtx, int count, int shared) __hidden;

/* Kernel-assisted condition variable operations. */
int _thr_ucond_wait(struct ucond *cv, struct umutex *m,
	const struct timespec *timeout, int flags) __hidden;
void _thr_ucond_init(struct ucond *cv) __hidden;
int _thr_ucond_signal(struct ucond *cv) __hidden;
int _thr_ucond_broadcast(struct ucond *cv) __hidden;

/*
 * Slow-path rwlock operations, called by the inline fast paths below
 * when the lock cannot be taken with a single compare-and-set.
 */
int __thr_rwlock_rdlock(struct urwlock *rwlock, int flags,
	const struct timespec *tsp) __hidden;
int __thr_rwlock_wrlock(struct urwlock *rwlock,
	const struct timespec *tsp) __hidden;
int __thr_rwlock_unlock(struct urwlock *rwlock) __hidden;

/* Internal use only. */
void _thr_rwl_rdlock(struct urwlock *rwlock) __hidden;
void _thr_rwl_wrlock(struct urwlock *rwlock) __hidden;
void _thr_rwl_unlock(struct urwlock *rwlock) __hidden;

/*
 * Try to lock `mtx' for thread `id' without blocking.  Fast path: a
 * single acquire CAS from UNOWNED to our id.  Priority-protected
 * (ceiling) mutexes cannot be taken in userland, so they fall through
 * to the out-of-line trylock; everything else fails with EBUSY.
 */
static inline int
_thr_umutex_trylock(struct umutex *mtx, uint32_t id)
{
	if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id))
		return (0);
	if ((mtx->m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EBUSY);
	return (__thr_umutex_trylock(mtx));
}

/*
 * Userland-only trylock used by the lock fast paths: never enters the
 * kernel.  Besides the UNOWNED case it can also grab a mutex whose
 * owner field holds only the CONTESTED bit (released-while-contested),
 * preserving that bit so the eventual unlock still wakes waiters --
 * but only for plain mutexes, since PP/PI mutexes must be acquired
 * through the kernel.
 */
static inline int
_thr_umutex_trylock2(struct umutex *mtx, uint32_t id)
{
	if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id) != 0)
		return (0);
	if ((uint32_t)mtx->m_owner == UMUTEX_CONTESTED &&
	    __predict_true((mtx->m_flags &
	    (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0))
		if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_CONTESTED,
		    id | UMUTEX_CONTESTED))
			return (0);
	return (EBUSY);
}

/*
 * Lock `mtx', blocking in the kernel slow path if the userland
 * trylock fails.  Returns 0 or an errno value.
 */
static inline int
_thr_umutex_lock(struct umutex *mtx, uint32_t id)
{
	if (_thr_umutex_trylock2(mtx, id) == 0)
		return (0);
	return (__thr_umutex_lock(mtx, id));
}

/* As _thr_umutex_lock(), but the slow path spins before sleeping. */
static inline int
_thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
{
	if (_thr_umutex_trylock2(mtx, id) == 0)
		return (0);
	return (__thr_umutex_lock_spin(mtx, id));
}

/* As _thr_umutex_lock(), but the slow-path wait is bounded by `timeout'. */
static inline int
_thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
	const struct timespec *timeout)
{
	if (_thr_umutex_trylock2(mtx, id) == 0)
		return (0);
	return (__thr_umutex_timedlock(mtx, id, timeout));
}

/*
 * Unlock `mtx' held by thread `id'.  For plain (non-PP/PI) mutexes the
 * release is a userland CAS; EPERM if the caller is not the owner.  If
 * the CONTESTED bit was set, a waiter must be woken: either do it now
 * via UMTX_OP_MUTEX_WAKE2, or -- when `defer' is non-NULL -- just set
 * *defer = 1 and let the caller issue the wakeup later (e.g. after
 * dropping other locks).  PP/PI mutexes try a plain CAS release and
 * otherwise go through the kernel slow path.
 */
static inline int
_thr_umutex_unlock2(struct umutex *mtx, uint32_t id, int *defer)
{
	uint32_t flags = mtx->m_flags;

	if ((flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
		uint32_t owner;
		do {
			owner = mtx->m_owner;
			if (__predict_false((owner & ~UMUTEX_CONTESTED) != id))
				return (EPERM);
		} while (__predict_false(!atomic_cmpset_rel_32(&mtx->m_owner,
		    owner, UMUTEX_UNOWNED)));
		if ((owner & UMUTEX_CONTESTED)) {
			if (defer == NULL)
				(void)_umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE2,
				    flags, 0, 0);
			else
				*defer = 1;
		}
		return (0);
	}
	if (atomic_cmpset_rel_32(&mtx->m_owner, id, UMUTEX_UNOWNED))
		return (0);
	return (__thr_umutex_unlock(mtx, id));
}

/* Unlock with an immediate (non-deferred) wakeup of any waiter. */
static inline int
_thr_umutex_unlock(struct umutex *mtx, uint32_t id)
{
	return _thr_umutex_unlock2(mtx, id, NULL);
}

/*
 * Try to take a read lock without blocking.  When the lock (or caller)
 * prefers readers, only an active writer blocks us; otherwise waiting
 * writers do too.  The reader count lives in the low bits of rw_state
 * and is bumped with an acquire CAS.  EAGAIN when the reader count is
 * saturated, EBUSY when a writer (or waiting writer) holds us off.
 */
static inline int
_thr_rwlock_tryrdlock(struct urwlock *rwlock, int flags)
{
	int32_t state;
	int32_t wrflags;

	if (flags & URWLOCK_PREFER_READER ||
	    rwlock->rw_flags & URWLOCK_PREFER_READER)
		wrflags = URWLOCK_WRITE_OWNER;
	else
		wrflags = URWLOCK_WRITE_OWNER | URWLOCK_WRITE_WAITERS;
	state = rwlock->rw_state;
	while (!(state & wrflags)) {
		if (__predict_false(URWLOCK_READER_COUNT(state) ==
		    URWLOCK_MAX_READERS))
			return (EAGAIN);
		if (atomic_cmpset_acq_32(&rwlock->rw_state, state, state + 1))
			return (0);
		state = rwlock->rw_state;
	}

	return (EBUSY);
}

/*
 * Try to take the write lock without blocking: acquire CAS setting
 * WRITE_OWNER, allowed only while there is no writer and no readers.
 * Returns 0 on success, EBUSY otherwise.
 */
static inline int
_thr_rwlock_trywrlock(struct urwlock *rwlock)
{
	int32_t state;

	state = rwlock->rw_state;
	while (!(state & URWLOCK_WRITE_OWNER) &&
	    URWLOCK_READER_COUNT(state) == 0) {
		if (atomic_cmpset_acq_32(&rwlock->rw_state, state,
		    state | URWLOCK_WRITE_OWNER))
			return (0);
		state = rwlock->rw_state;
	}

	return (EBUSY);
}

/* Read lock: userland fast path, kernel slow path with optional timeout. */
static inline int
_thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp)
{
	if (_thr_rwlock_tryrdlock(rwlock, flags) == 0)
		return (0);
	return (__thr_rwlock_rdlock(rwlock, flags, tsp));
}

/* Write lock: userland fast path, kernel slow path with optional timeout. */
static inline int
_thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp)
{
	if (_thr_rwlock_trywrlock(rwlock) == 0)
		return (0);
	return (__thr_rwlock_wrlock(rwlock, tsp));
}

/*
 * Release a read or write lock.  A writer releases with a release CAS
 * clearing WRITE_OWNER (only valid when no waiter bits are set, since
 * the compare value is exactly URWLOCK_WRITE_OWNER).  A reader
 * decrements the count, except that the last reader leaving while
 * waiter bits are set must enter the kernel to hand the lock off.
 * EPERM when the caller does not appear to hold the lock.
 */
static inline int
_thr_rwlock_unlock(struct urwlock *rwlock)
{
	int32_t state;

	state = rwlock->rw_state;
	if (state & URWLOCK_WRITE_OWNER) {
		if (atomic_cmpset_rel_32(&rwlock->rw_state,
		    URWLOCK_WRITE_OWNER, 0))
			return (0);
	} else {
		for (;;) {
			if (__predict_false(URWLOCK_READER_COUNT(state) == 0))
				return (EPERM);
			if (!((state & (URWLOCK_WRITE_WAITERS |
			    URWLOCK_READ_WAITERS)) &&
			    URWLOCK_READER_COUNT(state) == 1)) {
				if (atomic_cmpset_rel_32(&rwlock->rw_state,
				    state, state-1))
					return (0);
				state = rwlock->rw_state;
			} else {
				break;
			}
		}
	}
	return (__thr_rwlock_unlock(rwlock));
}
#endif