kern_rmlock.c revision 223758
/*-
 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */
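
/*
 * A rough usage sketch, for reference only (see rmlock(9) and the
 * rm_rlock()/rm_runlock() wrapper macros in <sys/rmlock.h>; the names
 * "example_lock" and "example_data" below are purely illustrative):
 *
 *	static struct rmlock example_lock;
 *	static int example_data;
 *
 *	rm_init(&example_lock, "example");
 *
 * Read side: each reader supplies its own priority tracker, which must
 * remain valid for the whole read section.  With the default
 * (non-sleepable) variant, readers must not sleep:
 *
 *	struct rm_priotracker tracker;
 *
 *	rm_rlock(&example_lock, &tracker);
 *	(void)example_data;
 *	rm_runlock(&example_lock, &tracker);
 *
 * Write side, which behaves like an ordinary exclusive lock:
 *
 *	rm_wlock(&example_lock);
 *	example_data = 1;
 *	rm_wunlock(&example_lock);
 */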

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_rmlock.c 223758 2011-07-04 12:04:52Z attilio $");

#include "opt_ddb.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#define RMPF_ONQUEUE	1
#define RMPF_SIGNAL	2

/*
 * To support the use of rmlock in condition variables and msleep(), yet
 * another list for the priority tracker would be needed.  Using this lock
 * for cv and msleep also does not seem very useful, so it is not
 * supported.
 */

static __inline void
compiler_memory_barrier(void)
{

	__asm __volatile("":::"memory");
}

static void	assert_rm(struct lock_object *lock, int what);
static void	lock_rm(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_rm(struct lock_object *lock, struct thread **owner);
#endif
static int	unlock_rm(struct lock_object *lock);

struct lock_class lock_class_rm = {
	.lc_name = "rm",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_rm,
#if 0
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rm,
#endif
};

static void
assert_rm(struct lock_object *lock, int what)
{

	panic("assert_rm called");
}

static void
lock_rm(struct lock_object *lock, int how)
{

	panic("lock_rm called");
}

static int
unlock_rm(struct lock_object *lock)
{

	panic("unlock_rm called");
}

#ifdef KDTRACE_HOOKS
static int
owner_rm(struct lock_object *lock, struct thread **owner)
{

	panic("owner_rm called");
}
#endif

static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);

/*
 * Add or remove tracker from per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu.
 */
static __inline void
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next;

	/* Initialize all tracker pointers */
	tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
	next = pc->pc_rm_queue.rmq_next;
	tracker->rmp_cpuQueue.rmq_next = next;

	/* rmq_prev is not used during forward traversal. */
	next->rmq_prev = &tracker->rmp_cpuQueue;

	/* Update pointer to first element. */
	pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}
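
/*
 * Note that rm_tracker_add() fully links the new element before the final
 * store to pc->pc_rm_queue.rmq_next publishes it, so a forward traversal
 * from an interrupt on this CPU never observes a half-inserted tracker;
 * rmq_prev may briefly be stale, which is why forward traversal must not
 * rely on it.
 */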

static __inline void
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next, *prev;

	next = tracker->rmp_cpuQueue.rmq_next;
	prev = tracker->rmp_cpuQueue.rmq_prev;

	/* Not used during forward traversal. */
	next->rmq_prev = prev;

	/* Remove from list. */
	prev->rmq_next = next;
}

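/*
 * IPI handler: runs on each CPU that may still hold a read token for the
 * lock passed in "arg".  Any tracker for this lock found on the local
 * per-cpu list is flagged and moved onto the lock's active-readers list so
 * that the writer can wait for that reader to drain.
 */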
static void
rm_cleanIPI(void *arg)
{
	struct pcpu *pc;
	struct rmlock *rm = arg;
	struct rm_priotracker *tracker;
	struct rm_queue *queue;

	pc = pcpu_find(curcpu);

	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			mtx_unlock_spin(&rm_spinlock);
		}
	}
}

CTASSERT((RM_SLEEPABLE & LO_CLASSFLAGS) == RM_SLEEPABLE);

void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
	int liflags;

	liflags = 0;
	if (!(opts & RM_NOWITNESS))
		liflags |= LO_WITNESS;
	if (opts & RM_RECURSE)
		liflags |= LO_RECURSABLE;
	rm->rm_writecpus = all_cpus;
	LIST_INIT(&rm->rm_activeReaders);
	if (opts & RM_SLEEPABLE) {
		liflags |= RM_SLEEPABLE;
		sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", SX_RECURSE);
	} else
		mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx", MTX_NOWITNESS);
	lock_init(&rm->lock_object, &lock_class_rm, name, NULL, liflags);
}
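
/*
 * A minimal initialization sketch (the "cfg_lock" name is illustrative):
 * an RM_SLEEPABLE lock is backed by the embedded sx lock, so its write
 * side may sleep, while the default variant is backed by the embedded
 * mutex.
 *
 *	static struct rmlock cfg_lock;
 *
 *	rm_init_flags(&cfg_lock, "cfg", RM_RECURSE);
 *	...
 *	rm_destroy(&cfg_lock);
 */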

void
rm_init(struct rmlock *rm, const char *name)
{

	rm_init_flags(rm, name, 0);
}

void
rm_destroy(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		sx_destroy(&rm->rm_lock_sx);
	else
		mtx_destroy(&rm->rm_lock_mtx);
	lock_destroy(&rm->lock_object);
}

int
rm_wowned(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		return (sx_xlocked(&rm->rm_lock_sx));
	else
		return (mtx_owned(&rm->rm_lock_mtx));
}

void
rm_sysinit(void *arg)
{
	struct rm_args *args = arg;

	rm_init(args->ra_rm, args->ra_desc);
}

void
rm_sysinit_flags(void *arg)
{
	struct rm_args_flags *args = arg;

	rm_init_flags(args->ra_rm, args->ra_desc, args->ra_opts);
}
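
/*
 * These handlers allow an rmlock to be initialized during boot via
 * SYSINIT, typically through the RM_SYSINIT() convenience macro in
 * <sys/rmlock.h> (a sketch; "example_lock" is illustrative):
 *
 *	static struct rmlock example_lock;
 *	RM_SYSINIT(example_lock_init, &example_lock, "example");
 */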
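
/*
 * Slow path for read acquisition, entered when the fast path sees a
 * revoked read token or pending preemption: recheck under a critical
 * section, grant recursive acquisitions, and otherwise take the internal
 * exclusive lock in order to re-establish this CPU's read token.
 */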
255210631Skibstatic int
256104476Ssam_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
257104476Ssam{
258104476Ssam	struct pcpu *pc;
259104476Ssam	struct rm_queue *queue;
260115746Ssam	struct rm_priotracker *atracker;
261104476Ssam
262104476Ssam	critical_enter();
263104476Ssam	pc = pcpu_find(curcpu);
264104476Ssam
265104476Ssam	/* Check if we just need to do a proper critical_exit. */
266104476Ssam	if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
267104476Ssam		critical_exit();
268104476Ssam		return (1);
269104476Ssam	}
270104476Ssam
271104476Ssam	/* Remove our tracker from the per-cpu list. */
272104476Ssam	rm_tracker_remove(pc, tracker);
273104476Ssam
274122908Ssam	/* Check to see if the IPI granted us the lock after all. */
275104476Ssam	if (tracker->rmp_flags) {
276104476Ssam		/* Just add back tracker - we hold the lock. */
277104476Ssam		rm_tracker_add(pc, tracker);
278104476Ssam		critical_exit();
279104476Ssam		return (1);
280104476Ssam	}
281104476Ssam
	/*
	 * We allow readers to acquire a lock even if a writer is blocked if
	 * the lock is recursive and the reader already holds the lock.
	 */
	if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		/*
		 * Just grant the lock if this thread already has a tracker
		 * for this lock on the per-cpu queue.
		 */
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			atracker = (struct rm_priotracker *)queue;
			if ((atracker->rmp_rmlock == rm) &&
			    (atracker->rmp_thread == tracker->rmp_thread)) {
				mtx_lock_spin(&rm_spinlock);
				LIST_INSERT_HEAD(&rm->rm_activeReaders,
				    tracker, rmp_qentry);
				tracker->rmp_flags = RMPF_ONQUEUE;
				mtx_unlock_spin(&rm_spinlock);
				rm_tracker_add(pc, tracker);
				critical_exit();
				return (1);
			}
		}
	}

	sched_unpin();
	critical_exit();

	if (trylock) {
		if (rm->lock_object.lo_flags & RM_SLEEPABLE) {
			if (!sx_try_xlock(&rm->rm_lock_sx))
				return (0);
		} else {
			if (!mtx_trylock(&rm->rm_lock_mtx))
				return (0);
		}
	} else {
		if (rm->lock_object.lo_flags & RM_SLEEPABLE)
			sx_xlock(&rm->rm_lock_sx);
		else
			mtx_lock(&rm->rm_lock_mtx);
	}

	critical_enter();
	pc = pcpu_find(curcpu);
	CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
	rm_tracker_add(pc, tracker);
	sched_pin();
	critical_exit();

	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);

	return (1);
}

int
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct thread *td = curthread;
	struct pcpu *pc;

	tracker->rmp_flags = 0;
	tracker->rmp_thread = td;
	tracker->rmp_rmlock = rm;

	td->td_critnest++;	/* critical_enter(); */

	compiler_memory_barrier();

	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */

	rm_tracker_add(pc, tracker);

	sched_pin();

	compiler_memory_barrier();

	td->td_critnest--;

	/*
	 * Fast path to combine two common conditions into a single
	 * conditional jump.
	 */
	if (0 == (td->td_owepreempt |
	    CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))
		return (1);

	/* We do not have a read token and need to acquire one. */
	return (_rm_rlock_hard(rm, tracker, trylock));
}

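/*
 * Slow path for read release: allow a deferred preemption to take place
 * and, if a writer has flagged this tracker, take it off the
 * active-readers list, handing the lock off through the turnstile when
 * RMPF_SIGNAL is set.
 */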
static void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

	if (td->td_owepreempt) {
		td->td_critnest++;
		critical_exit();
	}

	if (!tracker->rmp_flags)
		return;

	mtx_lock_spin(&rm_spinlock);
	LIST_REMOVE(tracker, rmp_qentry);

	if (tracker->rmp_flags & RMPF_SIGNAL) {
		struct rmlock *rm;
		struct turnstile *ts;

		rm = tracker->rmp_rmlock;

		turnstile_chain_lock(&rm->lock_object);
		mtx_unlock_spin(&rm_spinlock);

		ts = turnstile_lookup(&rm->lock_object);

		turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
		turnstile_chain_unlock(&rm->lock_object);
	} else
		mtx_unlock_spin(&rm_spinlock);
}

void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
	struct pcpu *pc;
	struct thread *td = tracker->rmp_thread;

	td->td_critnest++;	/* critical_enter(); */
	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
	rm_tracker_remove(pc, tracker);
	td->td_critnest--;
	sched_unpin();

	if (0 == (td->td_owepreempt | tracker->rmp_flags))
		return;

	_rm_unlock_hard(td, tracker);
}

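/*
 * Write-side acquire: take the internal exclusive lock, revoke the
 * per-cpu read tokens via an IPI rendezvous and then block on a
 * turnstile until every active reader found by rm_cleanIPI() has
 * drained.
 */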
void
_rm_wlock(struct rmlock *rm)
{
	struct rm_priotracker *prio;
	struct turnstile *ts;
	cpuset_t readcpus;

	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		sx_xlock(&rm->rm_lock_sx);
	else
		mtx_lock(&rm->rm_lock_mtx);

	if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
		/* Get all read tokens back */
		readcpus = all_cpus;
		CPU_NAND(&readcpus, &rm->rm_writecpus);
		rm->rm_writecpus = all_cpus;

		/*
		 * Assumes rm->rm_writecpus update is visible on other CPUs
		 * before rm_cleanIPI is called.
		 */
#ifdef SMP
		smp_rendezvous_cpus(readcpus,
		    smp_no_rendevous_barrier,
		    rm_cleanIPI,
		    smp_no_rendevous_barrier,
		    rm);

#else
		rm_cleanIPI(rm);
#endif

		mtx_lock_spin(&rm_spinlock);
		while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
			ts = turnstile_trywait(&rm->lock_object);
			prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
			mtx_unlock_spin(&rm_spinlock);
			turnstile_wait(ts, prio->rmp_thread,
			    TS_EXCLUSIVE_QUEUE);
			mtx_lock_spin(&rm_spinlock);
		}
		mtx_unlock_spin(&rm_spinlock);
	}
}

void
_rm_wunlock(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);
}

#ifdef LOCK_DEBUG

void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	_rm_wlock(rm);

	LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);

	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		WITNESS_LOCK(&rm->rm_lock_sx.lock_object, LOP_EXCLUSIVE,
		    file, line);
	else
		WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);

	curthread->td_locks++;
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	curthread->td_locks--;
	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		WITNESS_UNLOCK(&rm->rm_lock_sx.lock_object, LOP_EXCLUSIVE,
		    file, line);
	else
		WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_wunlock(rm);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	if (!trylock && (rm->lock_object.lo_flags & RM_SLEEPABLE))
		WITNESS_CHECKORDER(&rm->rm_lock_sx.lock_object, LOP_NEWORDER,
		    file, line, NULL);
	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line, NULL);

	if (_rm_rlock(rm, tracker, trylock)) {
		LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file, line);

		WITNESS_LOCK(&rm->lock_object, 0, file, line);

		curthread->td_locks++;

		return (1);
	}

	return (0);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	curthread->td_locks--;
	WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_runlock(rm, tracker);
}

#else

/*
 * These stubs exist so that we can still be called from kernel modules
 * when no lock debugging is compiled into the kernel; they simply strip
 * out the file and line arguments.
 */
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wlock(rm);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wunlock(rm);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	return (_rm_rlock(rm, tracker, trylock));
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	_rm_runlock(rm, tracker);
}

#endif