kern_rmlock.c revision 252209
/*-
 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */
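
/*
 * Typical usage, as a rough illustrative sketch only (the names below are
 * hypothetical and not part of this file):
 *
 *	static struct rmlock foo_rm;
 *
 *	rm_init(&foo_rm, "foo");
 *
 *	struct rm_priotracker tracker;
 *	rm_rlock(&foo_rm, &tracker);
 *	... read shared state ...
 *	rm_runlock(&foo_rm, &tracker);
 *
 *	rm_wlock(&foo_rm);
 *	... modify shared state ...
 *	rm_wunlock(&foo_rm);
 */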

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_rmlock.c 252209 2013-06-25 18:44:15Z jhb $");

#include "opt_ddb.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kernel.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * A cookie to mark destroyed rmlocks.  This is stored in the head of
 * rm_activeReaders.
 */
#define	RM_DESTROYED	((void *)0xdead)

#define	rm_destroyed(rm)						\
	(LIST_FIRST(&(rm)->rm_activeReaders) == RM_DESTROYED)

#define RMPF_ONQUEUE	1
#define RMPF_SIGNAL	2

#ifndef INVARIANTS
#define	_rm_assert(c, what, file, line)
#endif

static void	assert_rm(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_rm(const struct lock_object *lock);
#endif
static void	lock_rm(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_rm(const struct lock_object *lock, struct thread **owner);
#endif
static int	unlock_rm(struct lock_object *lock);

struct lock_class lock_class_rm = {
	.lc_name = "rm",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_rm,
#ifdef DDB
	.lc_ddb_show = db_show_rm,
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rm,
#endif
};

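/*
 * A second lock class is used for rmlocks initialized with RM_SLEEPABLE;
 * those are backed by an sx lock rather than a mutex (see rm_init_flags()).
 */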
struct lock_class lock_class_rm_sleepable = {
	.lc_name = "sleepable rm",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE,
	.lc_assert = assert_rm,
#ifdef DDB
	.lc_ddb_show = db_show_rm,
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rm,
#endif
};

static void
assert_rm(const struct lock_object *lock, int what)
{

	rm_assert((const struct rmlock *)lock, what);
}

/*
 * These do not support read locks because it would be hard to make
 * the tracker work correctly with the current lock_class API as you
 * would need to have the tracker pointer available when calling
 * rm_rlock() in lock_rm().
 */
static void
lock_rm(struct lock_object *lock, int how)
{
	struct rmlock *rm;

	rm = (struct rmlock *)lock;
	if (how)
		rm_wlock(rm);
#ifdef INVARIANTS
	else
		panic("lock_rm called in read mode");
#endif
}

static int
unlock_rm(struct lock_object *lock)
{
	struct rmlock *rm;

	rm = (struct rmlock *)lock;
	rm_wunlock(rm);
	return (1);
}

#ifdef KDTRACE_HOOKS
static int
owner_rm(const struct lock_object *lock, struct thread **owner)
{
	const struct rmlock *rm;
	struct lock_class *lc;

	rm = (const struct rmlock *)lock;
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	return (lc->lc_owner(&rm->rm_wlock_object, owner));
}
#endif

static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);

/*
 * Add or remove tracker from per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu.
 */
static void inline
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next;

	/* Initialize all tracker pointers */
	tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
	next = pc->pc_rm_queue.rmq_next;
	tracker->rmp_cpuQueue.rmq_next = next;

	/* rmq_prev is not used during forward traversal. */
	next->rmq_prev = &tracker->rmp_cpuQueue;

	/* Update pointer to first element. */
	pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}

/*
 * Return a count of the number of trackers the thread 'td' already
 * has on this CPU for the lock 'rm'.
 */
static int
rm_trackers_present(const struct pcpu *pc, const struct rmlock *rm,
    const struct thread *td)
{
	struct rm_queue *queue;
	struct rm_priotracker *tracker;
	int count;

	count = 0;
	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td))
			count++;
	}
	return (count);
}

static void inline
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next, *prev;

	next = tracker->rmp_cpuQueue.rmq_next;
	prev = tracker->rmp_cpuQueue.rmq_prev;

	/* Not used during forward traversal. */
	next->rmq_prev = prev;

	/* Remove from list. */
	prev->rmq_next = next;
}

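/*
 * IPI handler run (via smp_rendezvous_cpus() from _rm_wlock()) on every CPU
 * that may still hold a read token for 'arg'.  Any tracker for this lock on
 * the local per-CPU queue is put on the lock's active-reader list so the
 * pending writer can wait for it to drain.
 */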
static void
rm_cleanIPI(void *arg)
{
	struct pcpu *pc;
	struct rmlock *rm = arg;
	struct rm_priotracker *tracker;
	struct rm_queue *queue;

	pc = pcpu_find(curcpu);

	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			mtx_unlock_spin(&rm_spinlock);
		}
	}
}

void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
	struct lock_class *lc;
	int liflags;

	liflags = 0;
	if (!(opts & RM_NOWITNESS))
		liflags |= LO_WITNESS;
	if (opts & RM_RECURSE)
		liflags |= LO_RECURSABLE;
	rm->rm_writecpus = all_cpus;
	LIST_INIT(&rm->rm_activeReaders);
	if (opts & RM_SLEEPABLE) {
		liflags |= LO_SLEEPABLE;
		lc = &lock_class_rm_sleepable;
		sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", SX_NOWITNESS);
	} else {
		lc = &lock_class_rm;
		mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx", MTX_NOWITNESS);
	}
	lock_init(&rm->lock_object, lc, name, NULL, liflags);
}

void
rm_init(struct rmlock *rm, const char *name)
{

	rm_init_flags(rm, name, 0);
}

void
rm_destroy(struct rmlock *rm)
{

	rm_assert(rm, RA_UNLOCKED);
	LIST_FIRST(&rm->rm_activeReaders) = RM_DESTROYED;
	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_destroy(&rm->rm_lock_sx);
	else
		mtx_destroy(&rm->rm_lock_mtx);
	lock_destroy(&rm->lock_object);
}

int
rm_wowned(const struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		return (sx_xlocked(&rm->rm_lock_sx));
	else
		return (mtx_owned(&rm->rm_lock_mtx));
}

void
rm_sysinit(void *arg)
{
	struct rm_args *args = arg;

	rm_init(args->ra_rm, args->ra_desc);
}

void
rm_sysinit_flags(void *arg)
{
	struct rm_args_flags *args = arg;

	rm_init_flags(args->ra_rm, args->ra_desc, args->ra_opts);
}

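/*
 * Read-lock slow path, entered when the fast path in _rm_rlock() finds that
 * this CPU may lack a read token or that a preemption is pending.  The lock
 * may still be granted cheaply (the token was present after all, an IPI
 * already queued our tracker, or the lock is recursive and this thread
 * already holds it); otherwise the backing write lock is acquired (or merely
 * tried for, when trylock is set) to obtain a read token for this CPU.
 */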
static int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct pcpu *pc;

	critical_enter();
	pc = pcpu_find(curcpu);

	/* Check if we just need to do a proper critical_exit. */
	if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
		critical_exit();
		return (1);
	}

	/* Remove our tracker from the per-cpu list. */
	rm_tracker_remove(pc, tracker);

	/* Check to see if the IPI granted us the lock after all. */
	if (tracker->rmp_flags) {
		/* Just add back tracker - we hold the lock. */
		rm_tracker_add(pc, tracker);
		critical_exit();
		return (1);
	}

	/*
	 * We allow readers to acquire a lock even if a writer is blocked if
	 * the lock is recursive and the reader already holds the lock.
	 */
	if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		/*
		 * Just grant the lock if this thread already has a tracker
		 * for this lock on the per-cpu queue.
		 */
		if (rm_trackers_present(pc, rm, curthread) != 0) {
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_unlock_spin(&rm_spinlock);
			rm_tracker_add(pc, tracker);
			critical_exit();
			return (1);
		}
	}

	sched_unpin();
	critical_exit();

	if (trylock) {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			if (!sx_try_xlock(&rm->rm_lock_sx))
				return (0);
		} else {
			if (!mtx_trylock(&rm->rm_lock_mtx))
				return (0);
		}
	} else {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE)
			sx_xlock(&rm->rm_lock_sx);
		else
			mtx_lock(&rm->rm_lock_mtx);
	}

	critical_enter();
	pc = pcpu_find(curcpu);
	CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
	rm_tracker_add(pc, tracker);
	sched_pin();
	critical_exit();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);

	return (1);
}

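/*
 * Read-lock fast path: register the tracker on this CPU's queue inside an
 * (inlined) critical section and pin the thread.  If no preemption is owed
 * and this CPU already holds a read token, the lock is acquired without any
 * atomic operations; otherwise fall back to _rm_rlock_hard().
 */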
int
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct thread *td = curthread;
	struct pcpu *pc;

	if (SCHEDULER_STOPPED())
		return (1);

	tracker->rmp_flags  = 0;
	tracker->rmp_thread = td;
	tracker->rmp_rmlock = rm;

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_NO_SLEEPING();

	td->td_critnest++;	/* critical_enter(); */

	__compiler_membar();

	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */

	rm_tracker_add(pc, tracker);

	sched_pin();

	__compiler_membar();

	td->td_critnest--;

	/*
	 * Fast path to combine two common conditions into a single
	 * conditional jump.
	 */
	if (0 == (td->td_owepreempt |
	    CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))
		return (1);

	/* We do not have a read token and need to acquire one. */
	return (_rm_rlock_hard(rm, tracker, trylock));
}

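/*
 * Read-unlock slow path: pay a deferred preemption if one is owed and, if a
 * writer tagged this tracker (RMPF_ONQUEUE/RMPF_SIGNAL), take it off the
 * active-reader list and wake the writer waiting on the turnstile.
 */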
static void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

	if (td->td_owepreempt) {
		td->td_critnest++;
		critical_exit();
	}

	if (!tracker->rmp_flags)
		return;

	mtx_lock_spin(&rm_spinlock);
	LIST_REMOVE(tracker, rmp_qentry);

	if (tracker->rmp_flags & RMPF_SIGNAL) {
		struct rmlock *rm;
		struct turnstile *ts;

		rm = tracker->rmp_rmlock;

		turnstile_chain_lock(&rm->lock_object);
		mtx_unlock_spin(&rm_spinlock);

		ts = turnstile_lookup(&rm->lock_object);

		turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
		turnstile_chain_unlock(&rm->lock_object);
	} else
		mtx_unlock_spin(&rm_spinlock);
}

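/*
 * Read-unlock fast path: unlink the tracker from this CPU's queue inside an
 * (inlined) critical section and unpin.  The slow path is needed only if a
 * preemption is owed or a writer has flagged this tracker.
 */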
void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
	struct pcpu *pc;
	struct thread *td = tracker->rmp_thread;

	if (SCHEDULER_STOPPED())
		return;

	td->td_critnest++;	/* critical_enter(); */
	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
	rm_tracker_remove(pc, tracker);
	td->td_critnest--;
	sched_unpin();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_SLEEPING_OK();

	if (0 == (td->td_owepreempt | tracker->rmp_flags))
		return;

	_rm_unlock_hard(td, tracker);
}

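/*
 * Write lock: acquire the backing mutex or sx lock, then revoke all read
 * tokens.  An IPI rendezvous (rm_cleanIPI) collects the trackers of any
 * in-flight readers, and the writer sleeps on the lock's turnstile until
 * each of those readers has unlocked.
 */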
void
_rm_wlock(struct rmlock *rm)
{
	struct rm_priotracker *prio;
	struct turnstile *ts;
	cpuset_t readcpus;

	if (SCHEDULER_STOPPED())
		return;

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xlock(&rm->rm_lock_sx);
	else
		mtx_lock(&rm->rm_lock_mtx);

	if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
		/* Get all read tokens back */
		readcpus = all_cpus;
		CPU_NAND(&readcpus, &rm->rm_writecpus);
		rm->rm_writecpus = all_cpus;

		/*
		 * Assumes rm->rm_writecpus update is visible on other CPUs
		 * before rm_cleanIPI is called.
		 */
#ifdef SMP
		smp_rendezvous_cpus(readcpus,
		    smp_no_rendevous_barrier,
		    rm_cleanIPI,
		    smp_no_rendevous_barrier,
		    rm);

#else
		rm_cleanIPI(rm);
#endif

		mtx_lock_spin(&rm_spinlock);
		while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
			ts = turnstile_trywait(&rm->lock_object);
			prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
			mtx_unlock_spin(&rm_spinlock);
			turnstile_wait(ts, prio->rmp_thread,
			    TS_EXCLUSIVE_QUEUE);
			mtx_lock_spin(&rm_spinlock);
		}
		mtx_unlock_spin(&rm_spinlock);
	}
}

void
_rm_wunlock(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);
}

#ifdef LOCK_DEBUG

void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_wlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_UNLOCKED, file, line);

	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	_rm_wlock(rm);

	LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);

	WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);

	curthread->td_locks++;
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_wunlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_wunlock(rm);
	curthread->td_locks--;
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return (1);

#ifdef INVARIANTS
	if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) {
		critical_enter();
		KASSERT(rm_trackers_present(pcpu_find(curcpu), rm,
		    curthread) == 0,
		    ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
		    rm->lock_object.lo_name, file, line));
		critical_exit();
	}
#endif
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
	if (!trylock) {
		KASSERT(!rm_wowned(rm),
		    ("rm_rlock: wlock already held for %s @ %s:%d",
		    rm->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line,
		    NULL);
	}

	if (_rm_rlock(rm, tracker, trylock)) {
		if (trylock)
			LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file,
			    line);
		else
			LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file,
			    line);
		WITNESS_LOCK(&rm->lock_object, 0, file, line);

		curthread->td_locks++;

		return (1);
	} else if (trylock)
		LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line);

	return (0);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_runlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_runlock(rm, tracker);
	curthread->td_locks--;
}

#else

/*
 * Just strip out file and line arguments if no lock debugging is enabled in
 * the kernel - we are called from a kernel module.
 */
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wlock(rm);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wunlock(rm);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	return (_rm_rlock(rm, tracker, trylock));
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	_rm_runlock(rm, tracker);
}

#endif

#ifdef INVARIANT_SUPPORT
/*
 * Note that this does not need to use witness_assert() for read lock
 * assertions since an exact count of read locks held by this thread
 * is computable.
 */
void
_rm_assert(const struct rmlock *rm, int what, const char *file, int line)
{
	int count;

	if (panicstr != NULL)
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
	case RA_RLOCKED | RA_RECURSED:
	case RA_RLOCKED | RA_NOTRECURSED:
		/*
		 * Handle the write-locked case.  Unlike other
		 * primitives, writers can never recurse.
		 */
		if (rm_wowned(rm)) {
			if (what & RA_RLOCKED)
				panic("Lock %s exclusively locked @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			break;
		}

		critical_enter();
		count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
		critical_exit();

		if (count == 0)
			panic("Lock %s not %slocked @ %s:%d\n",
			    rm->lock_object.lo_name, (what & RA_RLOCKED) ?
			    "read " : "", file, line);
		if (count > 1) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_WLOCKED:
		if (!rm_wowned(rm))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
		if (rm_wowned(rm))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);

		critical_enter();
		count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
		critical_exit();

		if (count != 0)
			panic("Lock %s read locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown rm lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
static void
print_tracker(struct rm_priotracker *tr)
{
	struct thread *td;

	td = tr->rmp_thread;
	db_printf("   thread %p (tid %d, pid %d, \"%s\") {", td, td->td_tid,
	    td->td_proc->p_pid, td->td_name);
	if (tr->rmp_flags & RMPF_ONQUEUE) {
		db_printf("ONQUEUE");
		if (tr->rmp_flags & RMPF_SIGNAL)
			db_printf(",SIGNAL");
	} else
		db_printf("0");
	db_printf("}\n");
}

static void
db_show_rm(const struct lock_object *lock)
{
	struct rm_priotracker *tr;
	struct rm_queue *queue;
	const struct rmlock *rm;
	struct lock_class *lc;
	struct pcpu *pc;

	rm = (const struct rmlock *)lock;
	db_printf(" writecpus: ");
	ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus));
	db_printf("\n");
	db_printf(" per-CPU readers:\n");
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			tr = (struct rm_priotracker *)queue;
			if (tr->rmp_rmlock == rm)
				print_tracker(tr);
		}
	db_printf(" active readers:\n");
	LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry)
		print_tracker(tr);
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	db_printf("Backing write-lock (%s):\n", lc->lc_name);
	lc->lc_ddb_show(&rm->rm_wlock_object);
}
#endif