/*-
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_adaptive_lockmgrs.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/kern_lock.c 310979 2016-12-31 16:37:47Z mjg $");

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_ADAPTIVE | LK_NOSHARE));
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#else
#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
#endif
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif

#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

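/*
 * Giant drop/pickup helpers: GIANT_SAVE() fully releases Giant (recording
 * how many times it was recursively held in the local "_i" declared by
 * GIANT_DECLARE) before a sleep or spin, and GIANT_RESTORE() reacquires it
 * the same number of times afterwards, preserving the WITNESS state across
 * the cycle.
 */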
#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (_i > 0) {							\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

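/*
 * LK_CAN_SHARE() evaluates whether a shared request may be granted: the
 * lock must be in shared mode and either no exclusive waiter or spinner is
 * queued, or the requesting thread is entitled to deadlock-avoidance
 * treatment (it already holds shared lockmgr locks and LK_NODDLKTREAT was
 * not passed, or TDP_DEADLKTREAT is set).
 */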
#define	LK_CAN_SHARE(x, flags)						\
	(((x) & LK_SHARE) &&						\
	(((x) & (LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) == 0 ||	\
	(curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||	\
	(curthread->td_pflags & TDP_DEADLKTREAT)))
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

#define	LK_CAN_ADAPT(lk, f)						\
	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
	((f) & LK_SLEEPFAIL) == 0)

#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)

static void	assert_lockmgr(const struct lock_object *lock, int how);
#ifdef DDB
static void	db_show_lockmgr(const struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_lockmgr(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_lockmgr,
#endif
};

#ifdef ADAPTIVE_LOCKMGRS
static u_int alk_retries = 10;
static u_int alk_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
    "lockmgr debugging");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
#endif

static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
{
	uintptr_t x;

	x = lk->lk_lock;
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}

/*
 * Put the current thread to sleep on the given lockmgr sleepqueue.
 * The sleepqueue chain lock must be held on entry and is released before
 * returning.  The generic interlock, if any, is assumed to be sane and
 * already validated by the caller.  If LK_INTERLOCK is specified, the
 * interlock is not reacquired after the sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
		lk->lk_exslpfail++;
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Pick the sleep primitive matching the timeout and signal-catching
	 * settings.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}

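/*
 * Release a shared lock, waking up any waiters that can now run.  The
 * return value tells the caller whether proc0 (the swapper) needs to be
 * kicked.
 */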
static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x;
	u_int realexslp;
	int queue, wakeup_swapper;

	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

	wakeup_swapper = 0;
	for (;;) {
		x = lk->lk_lock;

		/*
		 * If there is more than one shared lock held, just drop one
		 * and return.
		 */
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
			    x - LK_ONE_SHARER))
				break;
			continue;
		}

		/*
		 * If there are no waiters on the exclusive queue, drop the
		 * lock quickly.
		 */
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
			    LK_SHARERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
				break;
			continue;
		}

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners-up.
		 * If interruptible sleeps left the exclusive queue empty,
		 * avoid starving the threads sleeping on the shared queue by
		 * giving them precedence and clearing the exclusive waiters
		 * bit anyway.
		 * Note that the lk_exslpfail count may not reflect the real
		 * number of waiters with the LK_SLEEPFAIL flag set, because
		 * such waiters may also be using interruptible sleeps, so
		 * lk_exslpfail should be treated as an upper bound, edge
		 * cases included.
		 */
		realexslp = sleepq_sleepcnt(&lk->lock_object,
		    SQ_EXCLUSIVE_QUEUE);
		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
			if (lk->lk_exslpfail < realexslp) {
				lk->lk_exslpfail = 0;
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				lk->lk_exslpfail = 0;
				LOCK_LOG2(lk,
				    "%s: %p has only LK_SLEEPFAIL sleepers",
				    __func__, lk);
				LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
				    __func__, lk);
				wakeup_swapper =
				    sleepq_broadcast(&lk->lock_object,
				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
				queue = SQ_SHARED_QUEUE;
			}

		} else {

			/*
			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
			 * and using interruptible sleeps/timeout may have
			 * left spurious lk_exslpfail counts behind, so clean
			 * them up anyway.
			 */
			lk->lk_exslpfail = 0;
			queue = SQ_SHARED_QUEUE;
		}

		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    v)) {
			sleepq_release(&lk->lock_object);
			continue;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		    0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lock_profile_release_lock(&lk->lock_object);
	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	return (wakeup_swapper);
}

static void
assert_lockmgr(const struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, uintptr_t how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

static uintptr_t
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}

#ifdef KDTRACE_HOOKS
static int
owner_lockmgr(const struct lock_object *lock, struct thread **owner)
{

	panic("lockmgr locks do not support owner inquiring");
}
#endif

void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
	int iflags;

	MPASS((flags & ~LK_INIT_MASK) == 0);
	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
            ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
            &lk->lk_lock));

	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (flags & LK_CANRECURSE)
		iflags |= LO_RECURSABLE;
	if ((flags & LK_NODUP) == 0)
		iflags |= LO_DUPOK;
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
		iflags |= LO_QUIET;
	if (flags & LK_IS_VNODE)
		iflags |= LO_IS_VNODE;
	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);

	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
	lk->lk_lock = LK_UNLOCKED;
	lk->lk_recurse = 0;
	lk->lk_exslpfail = 0;
	lk->lk_timo = timo;
	lk->lk_pri = pri;
	STACK_ZERO(lk);
}

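/*
 * Usage sketch (illustrative only; "foo_lock", the priority and the wait
 * message are hypothetical, not taken from this file): a consumer pairs
 * lockinit()/lockdestroy() and brackets its critical sections with
 * lockmgr() requests:
 *
 *	struct lock foo_lock;
 *
 *	lockinit(&foo_lock, PVFS, "foolk", 0, 0);
 *	...
 *	lockmgr(&foo_lock, LK_EXCLUSIVE, NULL);
 *	... access the protected data ...
 *	lockmgr(&foo_lock, LK_RELEASE, NULL);
 *	...
 *	lockdestroy(&foo_lock);
 */
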
/*
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization.  Used for certain vnode and buf locks.
 */
void
lockallowshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LK_NOSHARE;
}

void
lockdisableshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LK_NOSHARE;
}

void
lockallowrecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LO_RECURSABLE;
}

void
lockdisablerecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
}

void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
	lock_destroy(&lk->lock_object);
}

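/*
 * Full-blown lockmgr request handler.  The operation is encoded in the
 * flags (LK_SHARED, LK_EXCLUSIVE, LK_UPGRADE/LK_TRYUPGRADE, LK_DOWNGRADE,
 * LK_RELEASE or LK_DRAIN); an optional interlock may be passed and is
 * handled according to LK_INTERLOCK.
 */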
int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
	GIANT_DECLARE;
	struct lock_class *class;
	const char *iwmesg;
	uintptr_t tid, v, x;
	u_int op, realexslp;
	int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#ifdef ADAPTIVE_LOCKMGRS
	volatile struct thread *owner;
	u_int i, spintries = 0;
#endif

	error = 0;
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
	    lk->lock_object.lo_name, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	if (panicstr != NULL) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		return (0);
	}

	if (lk->lock_object.lo_flags & LK_NOSHARE) {
		switch (op) {
		case LK_SHARED:
			op = LK_EXCLUSIVE;
			break;
		case LK_UPGRADE:
		case LK_TRYUPGRADE:
		case LK_DOWNGRADE:
			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
			    file, line);
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			return (0);
		}
	}

	wakeup_swapper = 0;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, flags & LK_INTERLOCK ? ilk : NULL);
		for (;;) {
			x = lk->lk_lock;

			/*
			 * If no other thread has an exclusive lock, or
			 * no exclusive waiter is present, bump the count of
			 * sharers.  Since we have to preserve the state of
			 * waiters, if we fail to acquire the shared lock,
			 * loop back and retry.
			 */
			if (LK_CAN_SHARE(x, flags)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x + LK_ONE_SHARER))
					break;
				continue;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is already held by curthread in
			 * exclusive mode, fail with EDEADLK rather than
			 * deadlocking against ourselves.
			 */
			if (LK_HOLDER(x) == tid) {
				LOCK_LOG2(lk,
				    "%s: %p already held in exclusive mode",
				    __func__, lk);
				error = EDEADLK;
				break;
			}

			/*
			 * If the caller does not want to sleep (LK_NOWAIT),
			 * just give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.  We need to handle two states here
			 * because, after a failed acquisition, the lock can
			 * be held either in exclusive mode or in shared mode
			 * (due to the writer starvation avoidance technique).
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", lk->lock_object.lo_name);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", lk->lock_object.lo_name);
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					x = lk->lk_lock;
					if ((x & LK_SHARE) == 0 ||
					    LK_CAN_SHARE(x, flags) != 0)
						break;
					cpu_spinwait();
				}
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock can be acquired in shared mode, try
			 * again.
			 */
			if (LK_CAN_SHARE(x, flags)) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and
			 * try again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
			 * loop back and retry.
			 */
			if ((x & LK_SHARED_WAITERS) == 0) {
				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x | LK_SHARED_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the shared
			 * lock and the shared waiters flag is set, we will
			 * sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_SHARED_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
			    line);
			TD_LOCKS_INC(curthread);
			TD_SLOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
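
	/*
	 * LK_UPGRADE/LK_TRYUPGRADE: try to trade the single shared reference
	 * we hold for exclusive ownership in one atomic step.  On failure,
	 * LK_TRYUPGRADE keeps the shared lock and returns EBUSY, while
	 * LK_UPGRADE drops it and falls through to a regular exclusive
	 * acquisition.
	 */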
	case LK_UPGRADE:
	case LK_TRYUPGRADE:
		_lockmgr_assert(lk, KA_SLOCKED, file, line);
		v = lk->lk_lock;
		x = v & LK_ALL_WAITERS;
		v &= LK_EXCLUSIVE_SPINNERS;

		/*
		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve waiters flags during the operation.
		 */
		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
		    tid | x)) {
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_SLOCKS_DEC(curthread);
			break;
		}

		/*
		 * In LK_TRYUPGRADE mode, do not drop the lock,
		 * returning EBUSY instead.
		 */
		if (op == LK_TRYUPGRADE) {
			LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
			    __func__, lk);
			error = EBUSY;
			break;
		}

		/*
		 * We have been unable to succeed in upgrading, so just
		 * give up the shared lock.
		 */
		wakeup_swapper |= wakeupshlk(lk, file, line);

		/* FALLTHROUGH */
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
			    ilk : NULL);

		/*
		 * If curthread already holds the lock and this one is
		 * allowed to recurse, simply recurse on it.
		 */
		if (lockmgr_xlocked(lk)) {
			if ((flags & LK_CANRECURSE) == 0 &&
			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {

				/*
				 * If this is a try operation, just give up
				 * and return EBUSY instead of panicking.
				 */
				if (LK_TRYOP(flags)) {
					LOCK_LOG2(lk,
					    "%s: %p fails the try operation",
					    __func__, lk);
					error = EBUSY;
					break;
				}
				if (flags & LK_INTERLOCK)
					class->lc_unlock(ilk);
		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
				    __func__, iwmesg, file, line);
			}
			lk->lk_recurse++;
			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			break;
		}

		for (;;) {
			if (lk->lk_lock == LK_UNLOCKED &&
			    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
				break;
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the caller does not want to sleep (LK_NOWAIT),
			 * just give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.
			 */
			x = lk->lk_lock;
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", lk->lock_object.lo_name);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
				    !atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_SPINNERS))
					continue;
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", lk->lock_object.lo_name);
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					if ((lk->lk_lock &
					    LK_EXCLUSIVE_SPINNERS) == 0)
						break;
					cpu_spinwait();
				}
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and
			 * try again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * The lock can be in a state where there is a
			 * pending queue of waiters but still no owner.
			 * This happens when the lock is contested and an
			 * owner is going to claim the lock.
			 * If curthread is the one that successfully acquires
			 * it, claim lock ownership and return, preserving the
			 * waiters flags.
			 */
			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v &= ~LK_EXCLUSIVE_SPINNERS;
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    tid | v)) {
					sleepq_release(&lk->lock_object);
					LOCK_LOG2(lk,
					    "%s: %p claimed by a new writer",
					    __func__, lk);
					break;
				}
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_EXCLUSIVE_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
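
	/*
	 * LK_DOWNGRADE: convert the exclusive hold into a single shared
	 * reference, preserving the waiters bits; downgrading a recursed
	 * lock is invalid and panics below.
	 */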
	case LK_DOWNGRADE:
		_lockmgr_assert(lk, KA_XLOCKED, file, line);
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);

		/*
		 * Panic if the lock is recursed.
		 */
		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}
		TD_SLOCKS_INC(curthread);

		/*
		 * In order to preserve waiters flags, just spin.
		 */
		for (;;) {
			x = lk->lk_lock;
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			x &= LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))
				break;
			cpu_spinwait();
		}
		break;
	case LK_RELEASE:
		_lockmgr_assert(lk, KA_LOCKED, file, line);
		x = lk->lk_lock;

		if ((x & LK_SHARE) == 0) {

			/*
			 * As a first attempt, treat the lock as if it has no
			 * waiters.
			 * Fix up the tid variable if the lock has been
			 * disowned.
			 */
			if (LK_HOLDER(x) == LK_KERNPROC)
				tid = LK_KERNPROC;
			else {
				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
				    file, line);
				TD_LOCKS_DEC(curthread);
			}
			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);

			/*
			 * The lock is held in exclusive mode.
			 * If the lock is also recursed, then unrecurse it.
			 */
			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
				    lk);
				lk->lk_recurse--;
				break;
			}
			if (tid != LK_KERNPROC)
				lock_profile_release_lock(&lk->lock_object);

			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
			    LK_UNLOCKED))
				break;

			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;
			v = LK_UNLOCKED;

			/*
			 * If the lock has exclusive waiters, give them
			 * preference in order to avoid deadlock with
			 * shared runners-up.
			 * If interruptible sleeps left the exclusive queue
			 * empty, avoid starving the threads sleeping on the
			 * shared queue by giving them precedence and
			 * clearing the exclusive waiters bit anyway.
			 * Note that the lk_exslpfail count may not reflect
			 * the real number of waiters with the LK_SLEEPFAIL
			 * flag set, because such waiters may also be using
			 * interruptible sleeps, so lk_exslpfail should be
			 * treated as an upper bound, edge cases included.
			 */
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			realexslp = sleepq_sleepcnt(&lk->lock_object,
			    SQ_EXCLUSIVE_QUEUE);
			if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
				if (lk->lk_exslpfail < realexslp) {
					lk->lk_exslpfail = 0;
					queue = SQ_EXCLUSIVE_QUEUE;
					v |= (x & LK_SHARED_WAITERS);
				} else {
					lk->lk_exslpfail = 0;
					LOCK_LOG2(lk,
					"%s: %p has only LK_SLEEPFAIL sleepers",
					    __func__, lk);
					LOCK_LOG2(lk,
			"%s: %p waking up threads on the exclusive queue",
					    __func__, lk);
					wakeup_swapper =
					    sleepq_broadcast(&lk->lock_object,
					    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
					queue = SQ_SHARED_QUEUE;
				}
			} else {

				/*
				 * Exclusive waiters sleeping with LK_SLEEPFAIL
				 * on and using interruptible sleeps/timeout
				 * may have left spurious lk_exslpfail counts
				 * behind, so clean them up anyway.
				 */
				lk->lk_exslpfail = 0;
				queue = SQ_SHARED_QUEUE;
			}

			LOCK_LOG3(lk,
			    "%s: %p waking up threads on the %s queue",
			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
			    "exclusive");
			atomic_store_rel_ptr(&lk->lk_lock, v);
			wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
			    SLEEPQ_LK, 0, queue);
			sleepq_release(&lk->lock_object);
			break;
		} else
			wakeup_swapper = wakeupshlk(lk, file, line);
		break;
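
	/*
	 * LK_DRAIN: acquire the lock exclusively, sleeping until no other
	 * thread holds it or waits for it; typically used right before a
	 * lock is deleted or reinitialized.
	 */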
	case LK_DRAIN:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
			    ilk : NULL);

		/*
		 * Trying to drain a lock we already own will result in a
		 * deadlock.
		 */
		if (lockmgr_xlocked(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: draining %s with the lock held @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}

		for (;;) {
			if (lk->lk_lock == LK_UNLOCKED &&
			    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
				break;

#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the caller does not want to sleep (LK_NOWAIT),
			 * just give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v = (x & ~LK_EXCLUSIVE_SPINNERS);

				/*
				 * If interruptible sleeps left the exclusive
				 * queue empty, avoid starving the threads
				 * sleeping on the shared queue by giving them
				 * precedence and clearing the exclusive
				 * waiters bit anyway.
				 * Note that the lk_exslpfail count may not
				 * reflect the real number of waiters with the
				 * LK_SLEEPFAIL flag set, because such waiters
				 * may also be using interruptible sleeps, so
				 * lk_exslpfail should be treated as an upper
				 * bound, edge cases included.
				 */
				if (v & LK_EXCLUSIVE_WAITERS) {
					queue = SQ_EXCLUSIVE_QUEUE;
					v &= ~LK_EXCLUSIVE_WAITERS;
				} else {

					/*
					 * Exclusive waiters sleeping with
					 * LK_SLEEPFAIL on and using
					 * interruptible sleeps/timeout may
					 * have left spurious lk_exslpfail
					 * counts behind, so clean them up
					 * anyway.
					 */
					MPASS(v & LK_SHARED_WAITERS);
					lk->lk_exslpfail = 0;
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
				}
				if (queue == SQ_EXCLUSIVE_QUEUE) {
					realexslp =
					    sleepq_sleepcnt(&lk->lock_object,
					    SQ_EXCLUSIVE_QUEUE);
					if (lk->lk_exslpfail >= realexslp) {
						lk->lk_exslpfail = 0;
						queue = SQ_SHARED_QUEUE;
						v &= ~LK_SHARED_WAITERS;
						if (realexslp != 0) {
							LOCK_LOG2(lk,
					"%s: %p has only LK_SLEEPFAIL sleepers",
							    __func__, lk);
							LOCK_LOG2(lk,
			"%s: %p waking up threads on the exclusive queue",
							    __func__, lk);
							wakeup_swapper =
							    sleepq_broadcast(
							    &lk->lock_object,
							    SLEEPQ_LK, 0,
							    SQ_EXCLUSIVE_QUEUE);
						}
					} else
						lk->lk_exslpfail = 0;
				}
				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG3(lk,
				"%s: %p waking up all threads on the %s queue",
				    __func__, lk, queue == SQ_SHARED_QUEUE ?
				    "shared" : "exclusive");
				wakeup_swapper |= sleepq_broadcast(
				    &lk->lock_object, SLEEPQ_LK, 0, queue);

				/*
				 * If shared waiters have been woken up, we
				 * need to wait for one of them to acquire the
				 * lock before setting the exclusive waiters
				 * flag, in order to avoid a deadlock.
				 */
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);
					    v = lk->lk_lock)
						cpu_spinwait();
				}
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			}
			GIANT_SAVE();
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			GIANT_RESTORE();
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}

		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	default:
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
	}

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (wakeup_swapper)
		kick_proc0();

	return (error);
}

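/*
 * Disown an exclusively held lock: ownership is transferred to LK_KERNPROC
 * so that the lock may later be released by a thread other than the one
 * that acquired it.  Waiters flags are preserved.
 */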
void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
	uintptr_t tid, x;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED, file, line);

	/*
	 * Panic if the lock is recursed.
	 */
	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
		panic("%s: disown a recursed lockmgr @ %s:%d\n",
		    __func__,  file, line);

	/*
	 * If the owner is already LK_KERNPROC just skip the whole operation.
	 */
	if (LK_HOLDER(lk->lk_lock) != tid)
		return;
	lock_profile_release_lock(&lk->lock_object);
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);
	STACK_SAVE(lk);

	/*
	 * In order to preserve waiters flags, just spin.
	 */
	for (;;) {
		x = lk->lk_lock;
		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
		x &= LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
		    LK_KERNPROC | x))
			return;
		cpu_spinwait();
	}
}

void
lockmgr_printinfo(const struct lock *lk)
{
	struct thread *td;
	uintptr_t x;

	if (lk->lk_lock == LK_UNLOCKED)
		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
	else if (lk->lk_lock & LK_SHARE)
		printf("lock type %s: SHARED (count %ju)\n",
		    lk->lock_object.lo_name,
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			printf("lock type %s: EXCL by KERNPROC\n",
			    lk->lock_object.lo_name);
		else
			printf("lock type %s: EXCL by thread %p "
			    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
			    td, td->td_proc->p_pid, td->td_proc->p_comm,
			    td->td_tid);
	}

	x = lk->lk_lock;
	if (x & LK_EXCLUSIVE_WAITERS)
		printf(" with exclusive waiters pending\n");
	if (x & LK_SHARED_WAITERS)
		printf(" with shared waiters pending\n");
	if (x & LK_EXCLUSIVE_SPINNERS)
		printf(" with exclusive spinners pending\n");

	STACK_PRINT(lk);
}

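/*
 * Report the lock state relative to curthread: LK_EXCLUSIVE if held
 * exclusively by curthread or disowned to LK_KERNPROC, LK_EXCLOTHER if held
 * exclusively by another thread, LK_SHARED if held in shared mode, and 0 if
 * unlocked.
 */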
int
lockstatus(const struct lock *lk)
{
	uintptr_t v, x;
	int ret;

	ret = LK_SHARED;
	x = lk->lk_lock;
	v = LK_HOLDER(x);

	if ((x & LK_SHARE) == 0) {
		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
			ret = LK_EXCLUSIVE;
		else
			ret = LK_EXCLOTHER;
	} else if (x == LK_UNLOCKED)
		ret = 0;

	return (ret);
}

#ifdef INVARIANT_SUPPORT

FEATURE(invariant_support,
    "Support for modules compiled with INVARIANTS option");

#ifndef INVARIANTS
#undef	_lockmgr_assert
#endif

void
_lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
{
	int slocked = 0;

	if (panicstr != NULL)
		return;
	switch (what) {
	case KA_SLOCKED:
	case KA_SLOCKED | KA_NOTRECURSED:
	case KA_SLOCKED | KA_RECURSED:
		slocked = 1;
	case KA_LOCKED:
	case KA_LOCKED | KA_NOTRECURSED:
	case KA_LOCKED | KA_RECURSED:
#ifdef WITNESS

		/*
		 * We cannot trust WITNESS if the lock is held in exclusive
		 * mode and a call to lockmgr_disown() happened.
		 * Work around this by skipping the check if the lock is
		 * held in exclusive mode, even for the KA_LOCKED case.
		 */
		if (slocked || (lk->lk_lock & LK_SHARE)) {
			witness_assert(&lk->lock_object, what, file, line);
			break;
		}
#endif
		if (lk->lk_lock == LK_UNLOCKED ||
		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
			panic("Lock %s not %slocked @ %s:%d\n",
			    lk->lock_object.lo_name, slocked ? "share" : "",
			    file, line);

		if ((lk->lk_lock & LK_SHARE) == 0) {
			if (lockmgr_recursed(lk)) {
				if (what & KA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    lk->lock_object.lo_name, file,
					    line);
			} else if (what & KA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		}
		break;
	case KA_XLOCKED:
	case KA_XLOCKED | KA_NOTRECURSED:
	case KA_XLOCKED | KA_RECURSED:
		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_recursed(lk)) {
			if (what & KA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		} else if (what & KA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	case KA_UNLOCKED:
		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
		    line);
	}
}
#endif

#ifdef DDB
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lk;

	lk = td->td_wchan;

	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
		return (0);
	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
	if (lk->lk_lock & LK_SHARE)
		db_printf("SHARED (count %ju)\n",
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else
		db_printf("EXCL\n");
	*ownerp = lockmgr_xholder(lk);

	return (1);
}

static void
db_show_lockmgr(const struct lock_object *lock)
{
	struct thread *td;
	const struct lock *lk;

	lk = (const struct lock *)lock;

	db_printf(" state: ");
	if (lk->lk_lock == LK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (lk->lk_lock & LK_SHARE)
		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			db_printf("XLOCK: LK_KERNPROC\n");
		else
			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
			    td->td_tid, td->td_proc->p_pid,
			    td->td_proc->p_comm);
		if (lockmgr_recursed(lk))
			db_printf(" recursed: %d\n", lk->lk_recurse);
	}
	db_printf(" waiters: ");
	switch (lk->lk_lock & LK_ALL_WAITERS) {
	case LK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case LK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case LK_ALL_WAITERS:
		db_printf("shared and exclusive\n");
		break;
	default:
		db_printf("none\n");
	}
	db_printf(" spinners: ");
	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
		db_printf("exclusive\n");
	else
		db_printf("none\n");
}
#endif