/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/kern_rwlock.c 323870 2017-09-21 19:24:11Z marius $");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define	ADAPTIVE_RWLOCKS
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

/*
 * Return the rwlock address when the lock cookie address is provided.
 * This functionality assumes that struct rwlock has a member named rw_lock.
 */
#define	rwlock2rw(c)	(__containerof(c, struct rwlock, rw_lock))
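
/*
 * Illustrative sketch (not part of the kernel source): rwlock2rw() relies
 * on the layout assumption documented above, so a caller holding only the
 * lock cookie can recover the enclosing lock:
 *
 *	volatile uintptr_t *cookie = &some_rw.rw_lock;	// some_rw: hypothetical
 *	struct rwlock *rw = rwlock2rw(cookie);
 *	MPASS(rw == &some_rw);
 */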

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(const struct lock_object *lock);
#endif
static void	assert_rw(const struct lock_object *lock, int what);
static void	lock_rw(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_rw(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
	.lc_name = "rw",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_rw,
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
	.lc_lock = lock_rw,
	.lc_unlock = unlock_rw,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rw,
#endif
};

#ifdef ADAPTIVE_RWLOCKS
static int rowner_retries = 10;
static int rowner_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL,
    "rwlock debugging");
SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");

static struct lock_delay_config rw_delay = {
	.initial	= 1000,
	.step		= 500,
	.min		= 100,
	.max		= 5000,
};

SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_initial, CTLFLAG_RW, &rw_delay.initial,
    0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_step, CTLFLAG_RW, &rw_delay.step,
    0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_min, CTLFLAG_RW, &rw_delay.min,
    0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_max, CTLFLAG_RW, &rw_delay.max,
    0, "");

static void
rw_delay_sysinit(void *dummy)
{

	rw_delay.initial = mp_ncpus * 25;
	rw_delay.step = (mp_ncpus * 25) / 2;
	rw_delay.min = mp_ncpus * 5;
	rw_delay.max = mp_ncpus * 25 * 10;
}
LOCK_DELAY_SYSINIT(rw_delay_sysinit);
#endif
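
/*
 * Worked example (illustrative only): on a machine with mp_ncpus == 8 the
 * sysinit above rescales the compile-time defaults to initial = 200,
 * step = 100, min = 40 and max = 2000, so the spin-backoff window grows
 * with the number of CPUs instead of staying at the static values.
 */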

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */
#define	rw_wowner(rw)							\
	((rw)->rw_lock & RW_LOCK_READ ? NULL :				\
	    (struct thread *)RW_OWNER((rw)->rw_lock))

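/*
 * Illustrative decoding of the lock word (a sketch mirroring rw_wowner()
 * and db_show_rwlock(), not part of the kernel source): when RW_LOCK_READ
 * is set the word carries a reader count plus flag bits, otherwise it
 * carries the owning thread pointer plus flag bits:
 *
 *	uintptr_t v = rw->rw_lock;
 *	if (v == RW_UNLOCKED)
 *		printf("unlocked\n");
 *	else if (v & RW_LOCK_READ)
 *		printf("read-locked, %ju readers\n", (uintmax_t)RW_READERS(v));
 *	else
 *		printf("write-locked by %p\n", (struct thread *)RW_OWNER(v));
 */
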
/*
 * Returns true if a write owner is recursed.  Write ownership is not
 * assured here and should be checked previously by the caller.
 */
#define	rw_recursed(rw)		((rw)->rw_recurse != 0)

/*
 * Return true if curthread holds the lock.
 */
#define	rw_wlocked(rw)		(rw_wowner((rw)) == curthread)

/*
 * Return a pointer to the thread owning this lock, which should receive
 * any priority lent by threads that block on this lock.  Currently this
 * is identical to rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)

#ifndef INVARIANTS
#define	__rw_assert(c, what, file, line)
#endif

void
assert_rw(const struct lock_object *lock, int what)
{

	rw_assert((const struct rwlock *)lock, what);
}

void
lock_rw(struct lock_object *lock, uintptr_t how)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (how)
		rw_rlock(rw);
	else
		rw_wlock(rw);
}

uintptr_t
unlock_rw(struct lock_object *lock)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
	if (rw->rw_lock & RW_LOCK_READ) {
		rw_runlock(rw);
		return (1);
	} else {
		rw_wunlock(rw);
		return (0);
	}
}
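
/*
 * Illustrative sketch (an assumption about generic callers, not part of
 * this file): lc_lock/lc_unlock let class-agnostic code such as the
 * sleepqueue code drop and reacquire an interlock without knowing its
 * type.  The "how" cookie returned by unlock_rw() records whether the
 * lock was read-held so lock_rw() can reacquire it in the same mode:
 *
 *	uintptr_t how;
 *
 *	how = LOCK_CLASS(lock)->lc_unlock(lock);	// 1 if read-locked
 *	// ... run without the lock held ...
 *	LOCK_CLASS(lock)->lc_lock(lock, how);		// same mode as before
 */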

#ifdef KDTRACE_HOOKS
int
owner_rw(const struct lock_object *lock, struct thread **owner)
{
	const struct rwlock *rw = (const struct rwlock *)lock;
	uintptr_t x = rw->rw_lock;

	*owner = rw_wowner(rw);
	return ((x & RW_LOCK_READ) != 0 ?  (RW_READERS(x) != 0) :
	    (*owner != NULL));
}
#endif

void
_rw_init_flags(volatile uintptr_t *c, const char *name, int opts)
{
	struct rwlock *rw;
	int flags;

	rw = rwlock2rw(c);

	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
	    RW_RECURSE | RW_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
	    ("%s: rw_lock not aligned for %s: %p", __func__, name,
	    &rw->rw_lock));

	flags = LO_UPGRADABLE;
	if (opts & RW_DUPOK)
		flags |= LO_DUPOK;
	if (opts & RW_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & RW_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & RW_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & RW_QUIET)
		flags |= LO_QUIET;
	if (opts & RW_NEW)
		flags |= LO_NEW;

	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
	rw->rw_lock = RW_UNLOCKED;
	rw->rw_recurse = 0;
}
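
/*
 * Usage sketch (illustrative only; "example_lock" is hypothetical):
 *
 *	static struct rwlock example_lock;
 *
 *	rw_init_flags(&example_lock, "example", RW_RECURSE);
 *	rw_wlock(&example_lock);
 *	rw_wlock(&example_lock);	// allowed because of RW_RECURSE
 *	rw_wunlock(&example_lock);
 *	rw_wunlock(&example_lock);
 *	rw_destroy(&example_lock);
 */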

void
_rw_destroy(volatile uintptr_t *c)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw));
	KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw));
	rw->rw_lock = RW_DESTROYED;
	lock_destroy(&rw->lock_object);
}

void
rw_sysinit(void *arg)
{
	struct rw_args *args = arg;

	rw_init((struct rwlock *)args->ra_rw, args->ra_desc);
}

void
rw_sysinit_flags(void *arg)
{
	struct rw_args_flags *args = arg;

	rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc,
	    args->ra_flags);
}

int
_rw_wowned(const volatile uintptr_t *c)
{

	return (rw_wowner(rwlock2rw(c)) == curthread);
}

void
_rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	__rw_wlock(rw, curthread, file, line);
	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

int
__rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));

	if (rw_wlocked(rw) &&
	    (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		rw->rw_recurse++;
		rval = 1;
	} else
		rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED,
		    (uintptr_t)curthread);

	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		if (!rw_recursed(rw))
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_WLOCK_ACQUIRE,
			    rw, 0, 0, file, line);
		curthread->td_locks++;
	}
	return (rval);
}

void
_rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
	    line);
	if (!rw_recursed(rw))
		LOCKSTAT_PROFILE_RELEASE_LOCK(LS_RW_WUNLOCK_RELEASE, rw);
	__rw_wunlock(rw, curthread, file, line);
	curthread->td_locks--;
}

/*
 * Determines whether a new reader can acquire a lock.  Succeeds if the
 * reader already owns a read lock and the lock is locked for read, to
 * prevent deadlock from reader recursion.  Also succeeds if the lock
 * is unlocked and has no writer waiters or spinners.  Failing in all
 * other cases prioritizes writers over readers.
 */
#define	RW_CAN_READ(_rw)						\
    ((curthread->td_rw_rlocks && (_rw) & RW_LOCK_READ) || ((_rw) &	\
    (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) ==	\
    RW_LOCK_READ)
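
/*
 * Worked example (illustrative only): for a thread with td_rw_rlocks == 0,
 * RW_CAN_READ() is true for an unlocked lock (encoded as a read lock with
 * zero readers) and for a read-locked lock with no write waiters or write
 * spinner, but false once RW_LOCK_WRITE_WAITERS or RW_LOCK_WRITE_SPINNER
 * is set.  A thread that already holds read locks (td_rw_rlocks != 0) is
 * allowed past waiting writers whenever the lock is read-held, since
 * blocking it behind a writer could deadlock against its own earlier read
 * acquisition.
 */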

void
__rw_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	uintptr_t v;
#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	uintptr_t state;
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

#if defined(ADAPTIVE_RWLOCKS)
	lock_delay_arg_init(&lda, &rw_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != curthread,
	    ("rw_rlock: wlock already held for %s @ %s:%d",
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);

#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&rw->lock_object);
	state = rw->rw_lock;
#endif
	for (;;) {
		/*
		 * Handle the easy case.  If no other thread has a write
		 * lock, then try to bump up the count of read locks.  Note
		 * that we have to preserve the current state of the
		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
		 * read lock, then rw_lock must have changed, so restart
		 * the loop.  Note that this handles the case of a
		 * completely unlocked rwlock since such a lock is encoded
		 * as a read lock with no waiters.
		 */
		v = rw->rw_lock;
		if (RW_CAN_READ(v)) {
			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if the lock has been unlocked and write waiters
			 * were present.
			 */
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v,
			    v + RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    rw, (void *)v,
					    (void *)(v + RW_ONE_READER));
				break;
			}
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, rw, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", rw->lock_object.lo_name);
				while ((struct thread*)RW_OWNER(rw->rw_lock) ==
				    owner && TD_IS_RUNNING(owner))
					lock_delay(&lda);
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		} else if (spintries < rowner_retries) {
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			for (i = 0; i < rowner_loops; i++) {
				v = rw->rw_lock;
				if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
					break;
				cpu_spinwait();
			}
#ifdef KDTRACE_HOOKS
			lda.spin_cnt += rowner_loops - i;
#endif
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			if (i != rowner_loops)
				continue;
		}
#endif

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock or there are write waiters present, so
		 * acquire the turnstile lock to begin the process of
		 * blocking.
		 */
		ts = turnstile_trywait(&rw->lock_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if needed.
		 */
		v = rw->rw_lock;
		if (RW_CAN_READ(v)) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		}
#endif

		/*
		 * The lock is held in write mode or it already has waiters.
		 */
		MPASS(!RW_CAN_READ(v));

		/*
		 * If the RW_LOCK_READ_WAITERS flag is already set, then
		 * we can go ahead and block.  If it is not set then try
		 * to set it.  If we fail to set it drop the turnstile
		 * lock and restart the loop.
		 */
		if (!(v & RW_LOCK_READ_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_READ_WAITERS)) {
				turnstile_cancel(ts);
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&rw->lock_object);
#endif
		turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&rw->lock_object);
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&rw->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(LS_RW_RLOCK_BLOCK, rw, sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

	/* Record only the loops spinning and not sleeping. */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(LS_RW_RLOCK_SPIN, rw, all_time - sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
#endif
	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  Turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_RLOCK_ACQUIRE, rw, contested,
	    waittime, file, line);
	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, 0, file, line);
	curthread->td_locks++;
	curthread->td_rw_rlocks++;
}

int
__rw_try_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));

	for (;;) {
		x = rw->rw_lock;
		KASSERT(rw->rw_lock != RW_DESTROYED,
		    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
		if (!(x & RW_LOCK_READ))
			break;
		if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) {
			LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
			    line);
			WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_RLOCK_ACQUIRE,
			    rw, 0, 0, file, line);
			curthread->td_locks++;
			curthread->td_rw_rlocks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
	return (0);
}
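
/*
 * Usage sketch (illustrative only; names are hypothetical): a try-lock
 * lets a caller that must not block fall back to another strategy:
 *
 *	if (rw_try_rlock(&example_lock)) {
 *		// examine shared state
 *		rw_runlock(&example_lock);
 *	} else {
 *		// write-locked or writers pending; defer the work
 *	}
 */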

void
_rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t x, v, queue;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		x = rw->rw_lock;
		if (RW_READERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		}
		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WAITERS)) {
			MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
			    RW_READERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
			    RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}
		/*
		 * Ok, we know we have waiters and we think we are the
		 * last reader, so grab the turnstile lock.
		 */
		turnstile_chain_lock(&rw->lock_object);
		v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		MPASS(v & RW_LOCK_WAITERS);

		/*
		 * Try to drop our lock leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wake up actually runs and have the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wake up all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		x = RW_UNLOCKED;
		if (v & RW_LOCK_WRITE_WAITERS) {
			queue = TS_EXCLUSIVE_QUEUE;
			x |= (v & RW_LOCK_READ_WAITERS);
		} else
			queue = TS_SHARED_QUEUE;
		if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
		    x)) {
			turnstile_chain_unlock(&rw->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, queue);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		turnstile_chain_unlock(&rw->lock_object);
		break;
	}
	LOCKSTAT_PROFILE_RELEASE_LOCK(LS_RW_RUNLOCK_RELEASE, rw);
	curthread->td_locks--;
	curthread->td_rw_rlocks--;
}

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
__rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
    int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
	uintptr_t v, x;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	uintptr_t state;
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

#if defined(ADAPTIVE_RWLOCKS)
	lock_delay_arg_init(&lda, &rw_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
	rw = rwlock2rw(c);

	if (rw_wlocked(rw)) {
		KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
		    __func__, rw->lock_object.lo_name, file, line));
		rw->rw_recurse++;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
		return;
	}

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&rw->lock_object);
	state = rw->rw_lock;
#endif
	for (;;) {
		if (rw->rw_lock == RW_UNLOCKED && _rw_write_lock(rw, tid))
			break;
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);
#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		v = rw->rw_lock;
		owner = (struct thread *)RW_OWNER(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				lock_delay(&lda);
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			continue;
		}
		if ((v & RW_LOCK_READ) && RW_READERS(v) &&
		    spintries < rowner_retries) {
			if (!(v & RW_LOCK_WRITE_SPINNER)) {
				if (!atomic_cmpset_ptr(&rw->rw_lock, v,
				    v | RW_LOCK_WRITE_SPINNER)) {
					continue;
				}
			}
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			for (i = 0; i < rowner_loops; i++) {
				if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
					break;
				cpu_spinwait();
			}
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
#ifdef KDTRACE_HOOKS
			lda.spin_cnt += rowner_loops - i;
#endif
			if (i != rowner_loops)
				continue;
		}
#endif
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if (!(v & RW_LOCK_READ)) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		}
#endif
		/*
		 * Check the waiters flags on this rwlock.  If the lock was
		 * released without leaving any pending waiters queue, simply
		 * try to acquire it.  If a pending waiters queue is present,
		 * claim lock ownership and preserve the pending queue.
		 */
		x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		if ((v & ~x) == RW_UNLOCKED) {
			x &= ~RW_LOCK_WRITE_SPINNER;
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) {
				if (x)
					turnstile_claim(ts);
				else
					turnstile_cancel(ts);
				break;
			}
			turnstile_cancel(ts);
			continue;
		}
		/*
		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
		 * set it.  If we fail to set it, then loop back and try
		 * again.
		 */
		if (!(v & RW_LOCK_WRITE_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_WRITE_WAITERS)) {
				turnstile_cancel(ts);
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
				    __func__, rw);
		}
		/*
		 * We were unable to acquire the lock and the write waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&rw->lock_object);
#endif
		turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&rw->lock_object);
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
#ifdef ADAPTIVE_RWLOCKS
		spintries = 0;
#endif
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&rw->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(LS_RW_WLOCK_BLOCK, rw, sleep_time,
		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

	/* Record only the loops spinning and not sleeping. */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(LS_RW_WLOCK_SPIN, rw, all_time - sleep_time,
		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_WLOCK_ACQUIRE, rw, contested,
	    waittime, file, line);
}

/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the two waiter bits must be set indicating that at
 * least one thread is waiting on this lock.
 */
void
__rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
    int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t v;
	int queue;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	if (rw_wlocked(rw) && rw_recursed(rw)) {
		rw->rw_recurse--;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
		return;
	}

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags are set", __func__));

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_chain_lock(&rw->lock_object);
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);

	/*
	 * Use the same algo as sx locks for now.  Prefer waking up shared
	 * waiters if we have any over writers.  This is probably not ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting we wake up the
	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
	 * new writer comes in before a reader it will claim the lock up
	 * above.  There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 */
	v = RW_UNLOCKED;
	if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) {
		queue = TS_EXCLUSIVE_QUEUE;
		v |= (rw->rw_lock & RW_LOCK_READ_WAITERS);
	} else
		queue = TS_SHARED_QUEUE;

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
	turnstile_broadcast(ts, queue);
	atomic_store_rel_ptr(&rw->rw_lock, v);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&rw->lock_object);
}

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
__rw_try_upgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t v, x, tid;
	struct turnstile *ts;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_RLOCKED, file, line);

	/*
	 * Attempt to switch from one reader to a writer.  If there
	 * are any write waiters, then we will have to lock the
	 * turnstile first to prevent races with another writer
	 * calling turnstile_wait() before we have claimed this
	 * turnstile.  So, do the simple case of no waiters first.
	 */
	tid = (uintptr_t)curthread;
	success = 0;
	for (;;) {
		v = rw->rw_lock;
		if (RW_READERS(v) > 1)
			break;
		if (!(v & RW_LOCK_WAITERS)) {
			success = atomic_cmpset_ptr(&rw->rw_lock, v, tid);
			if (!success)
				continue;
			break;
		}

		/*
		 * Ok, we think we have waiters, so lock the turnstile.
		 */
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;
		if (RW_READERS(v) > 1) {
			turnstile_cancel(ts);
			break;
		}
		/*
		 * Try to switch from one reader to a writer again.  This time
		 * we honor the current state of the waiters flags.
		 * If we obtain the lock with the flags set, then claim
		 * ownership of the turnstile.
		 */
		x = rw->rw_lock & RW_LOCK_WAITERS;
		success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
		if (success) {
			if (x)
				turnstile_claim(ts);
			else
				turnstile_cancel(ts);
			break;
		}
		turnstile_cancel(ts);
	}
	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
	if (success) {
		curthread->td_rw_rlocks--;
		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(LS_RW_TRYUPGRADE_UPGRADE, rw);
	}
	return (success);
}
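
/*
 * Usage sketch (illustrative only; names are hypothetical): because the
 * upgrade can fail, callers typically fall back to a full write lock and
 * re-validate whatever they learned under the read lock:
 *
 *	rw_rlock(&example_lock);
 *	if (needs_update && !rw_try_upgrade(&example_lock)) {
 *		rw_runlock(&example_lock);
 *		rw_wlock(&example_lock);
 *		// the state may have changed while unlocked; recheck it
 *	}
 */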

/*
 * Downgrade a write lock into a single read lock.
 */
void
__rw_downgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t tid, v;
	int rwait, wwait;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (rw_recursed(rw))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile and "disown" the lock.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_chain_lock(&rw->lock_object);
	v = rw->rw_lock & RW_LOCK_WAITERS;
	rwait = v & RW_LOCK_READ_WAITERS;
	wwait = v & RW_LOCK_WRITE_WAITERS;
	MPASS(rwait | wwait);

	/*
	 * Downgrade from a write lock while preserving waiters flag
	 * and give up ownership of the turnstile.
	 */
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);
	if (!wwait)
		v &= ~RW_LOCK_READ_WAITERS;
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
	/*
	 * Wake other readers if there are no writers pending.  Otherwise they
	 * won't be able to acquire the lock anyway.
	 */
	if (rwait && !wwait) {
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	} else
		turnstile_disown(ts);
	turnstile_chain_unlock(&rw->lock_object);
out:
	curthread->td_rw_rlocks++;
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(LS_RW_DOWNGRADE_DOWNGRADE, rw);
}
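
/*
 * Usage sketch (illustrative only; names are hypothetical): unlike
 * rw_try_upgrade(), a downgrade always succeeds, so a writer can publish
 * an update and keep reading with no window where the lock is dropped:
 *
 *	rw_wlock(&example_lock);
 *	// modify shared state
 *	rw_downgrade(&example_lock);
 *	// keep reading; other readers may now enter
 *	rw_runlock(&example_lock);
 */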

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef __rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
__rw_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct rwlock *rw;

	if (panicstr != NULL)
		return;

	rw = rwlock2rw(c);

	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
	case RA_RLOCKED | RA_RECURSED:
	case RA_RLOCKED | RA_NOTRECURSED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If some other thread has a write lock or we have one
		 * and are asserting a read lock, fail.  Also, if no one
		 * has a lock at all, fail.
		 */
		if (rw->rw_lock == RW_UNLOCKED ||
		    (!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED ||
		    rw_wowner(rw) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    rw->lock_object.lo_name, (what & RA_RLOCKED) ?
			    "read " : "", file, line);

		if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) {
			if (rw_recursed(rw)) {
				if (what & RA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    rw->lock_object.lo_name, file,
					    line);
			} else if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		}
#endif
		break;
	case RA_WLOCKED:
	case RA_WLOCKED | RA_RECURSED:
	case RA_WLOCKED | RA_NOTRECURSED:
		if (rw_wowner(rw) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		if (rw_recursed(rw)) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If we hold a write lock, fail.  We can't reliably check
		 * to see if we hold a read lock or not.
		 */
		if (rw_wowner(rw) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
void
db_show_rwlock(const struct lock_object *lock)
{
	const struct rwlock *rw;
	struct thread *td;

	rw = (const struct rwlock *)lock;

	db_printf(" state: ");
	if (rw->rw_lock == RW_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (rw->rw_lock == RW_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (rw->rw_lock & RW_LOCK_READ)
		db_printf("RLOCK: %ju locks\n",
		    (uintmax_t)(RW_READERS(rw->rw_lock)));
	else {
		td = rw_wowner(rw);
		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (rw_recursed(rw))
			db_printf(" recursed: %u\n", rw->rw_recurse);
	}
	db_printf(" waiters: ");
	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
	case RW_LOCK_READ_WAITERS:
		db_printf("readers\n");
		break;
	case RW_LOCK_WRITE_WAITERS:
		db_printf("writers\n");
		break;
	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
		db_printf("readers and writers\n");
		break;
	default:
		db_printf("none\n");
		break;
	}
}

#endif