1/*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 3. Berkeley Software Design Inc's name may not be used to endorse or
15 *    promote products derived from this software without specific prior
16 *    written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
31 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
32 */
33
34/*
35 * Machine independent bits of mutex implementation.
36 */
37
38#include <sys/cdefs.h>
39__FBSDID("$FreeBSD$");
40
41#include "opt_adaptive_mutexes.h"
42#include "opt_ddb.h"
43#include "opt_hwpmc_hooks.h"
44#include "opt_sched.h"
45
46#include <sys/param.h>
47#include <sys/systm.h>
48#include <sys/bus.h>
49#include <sys/conf.h>
50#include <sys/kdb.h>
51#include <sys/kernel.h>
52#include <sys/ktr.h>
53#include <sys/lock.h>
54#include <sys/malloc.h>
55#include <sys/mutex.h>
56#include <sys/proc.h>
57#include <sys/resourcevar.h>
58#include <sys/sched.h>
59#include <sys/sbuf.h>
60#include <sys/smp.h>
61#include <sys/sysctl.h>
62#include <sys/turnstile.h>
63#include <sys/vmmeter.h>
64#include <sys/lock_profile.h>
65
66#include <machine/atomic.h>
67#include <machine/bus.h>
68#include <machine/cpu.h>
69
70#include <ddb/ddb.h>
71
72#include <fs/devfs/devfs_int.h>
73
74#include <vm/vm.h>
75#include <vm/vm_extern.h>
76
77#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
78#define	ADAPTIVE_MUTEXES
79#endif
80
81#ifdef HWPMC_HOOKS
82#include <sys/pmckern.h>
83PMC_SOFT_DEFINE( , , lock, failed);
84#endif
85
86/*
87 * Return the mutex address when the lock cookie address is provided.
 * This functionality assumes that struct mtx has a member named mtx_lock.
89 */
90#define	mtxlock2mtx(c)	(__containerof(c, struct mtx, mtx_lock))
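
/*
 * For illustration: the inline wrappers in <sys/mutex.h> pass the address
 * of the mtx_lock word as the lock cookie, so mtxlock2mtx(&m->mtx_lock)
 * simply recovers the enclosing mutex `m'.
 */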
91
92/*
93 * Internal utility macros.
94 */
95#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)
96
97#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)
98
99static void	assert_mtx(const struct lock_object *lock, int what);
100#ifdef DDB
101static void	db_show_mtx(const struct lock_object *lock);
102#endif
103static void	lock_mtx(struct lock_object *lock, uintptr_t how);
104static void	lock_spin(struct lock_object *lock, uintptr_t how);
105#ifdef KDTRACE_HOOKS
106static int	owner_mtx(const struct lock_object *lock,
107		    struct thread **owner);
108#endif
109static uintptr_t unlock_mtx(struct lock_object *lock);
110static uintptr_t unlock_spin(struct lock_object *lock);
111
112/*
113 * Lock classes for sleep and spin mutexes.
114 */
115struct lock_class lock_class_mtx_sleep = {
116	.lc_name = "sleep mutex",
117	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
118	.lc_assert = assert_mtx,
119#ifdef DDB
120	.lc_ddb_show = db_show_mtx,
121#endif
122	.lc_lock = lock_mtx,
123	.lc_unlock = unlock_mtx,
124#ifdef KDTRACE_HOOKS
125	.lc_owner = owner_mtx,
126#endif
127};
128struct lock_class lock_class_mtx_spin = {
129	.lc_name = "spin mutex",
130	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
131	.lc_assert = assert_mtx,
132#ifdef DDB
133	.lc_ddb_show = db_show_mtx,
134#endif
135	.lc_lock = lock_spin,
136	.lc_unlock = unlock_spin,
137#ifdef KDTRACE_HOOKS
138	.lc_owner = owner_mtx,
139#endif
140};
141
142#ifdef ADAPTIVE_MUTEXES
143#ifdef MUTEX_CUSTOM_BACKOFF
144static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
145    "mtx debugging");
146
147static struct lock_delay_config __read_frequently mtx_delay;
148
149SYSCTL_U16(_debug_mtx, OID_AUTO, delay_base, CTLFLAG_RW, &mtx_delay.base,
150    0, "");
151SYSCTL_U16(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max,
152    0, "");
153
154LOCK_DELAY_SYSINIT_DEFAULT(mtx_delay);
155#else
156#define mtx_delay	locks_delay
157#endif
158#endif
159
160#ifdef MUTEX_SPIN_CUSTOM_BACKOFF
161static SYSCTL_NODE(_debug, OID_AUTO, mtx_spin,
162    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
163    "mtx spin debugging");
164
165static struct lock_delay_config __read_frequently mtx_spin_delay;
166
SYSCTL_U16(_debug_mtx_spin, OID_AUTO, delay_base, CTLFLAG_RW,
    &mtx_spin_delay.base, 0, "");
SYSCTL_U16(_debug_mtx_spin, OID_AUTO, delay_max, CTLFLAG_RW,
    &mtx_spin_delay.max, 0, "");
171
172LOCK_DELAY_SYSINIT_DEFAULT(mtx_spin_delay);
173#else
174#define mtx_spin_delay	locks_delay
175#endif
176
177/*
178 * System-wide mutexes
179 */
180struct mtx blocked_lock;
181struct mtx __exclusive_cache_line Giant;
182
183static void _mtx_lock_indefinite_check(struct mtx *, struct lock_delay_arg *);
184
185void
186assert_mtx(const struct lock_object *lock, int what)
187{
188
189	/*
190	 * Treat LA_LOCKED as if LA_XLOCKED was asserted.
191	 *
	 * Some callers of lc_assert use LA_LOCKED to indicate that either
	 * a shared or an exclusive lock is held, while other callers use
	 * the stricter LA_XLOCKED (used as MA_OWNED).
	 *
	 * A mutex is the only lock class that cannot be shared; as a result,
	 * we can reasonably assume the caller really intends to assert
	 * LA_XLOCKED when asserting LA_LOCKED on a mutex object.
199	 */
200	if (what & LA_LOCKED) {
201		what &= ~LA_LOCKED;
202		what |= LA_XLOCKED;
203	}
204	mtx_assert((const struct mtx *)lock, what);
205}
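
/*
 * For illustration: a caller going through the generic lock interface with
 * LOCK_CLASS(&m->lock_object)->lc_assert(&m->lock_object, LA_LOCKED) on a
 * mutex therefore gets the same check as mtx_assert(m, MA_OWNED).
 */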
206
207void
208lock_mtx(struct lock_object *lock, uintptr_t how)
209{
210
211	mtx_lock((struct mtx *)lock);
212}
213
214void
215lock_spin(struct lock_object *lock, uintptr_t how)
216{
217
218	panic("spin locks can only use msleep_spin");
219}
220
221uintptr_t
222unlock_mtx(struct lock_object *lock)
223{
224	struct mtx *m;
225
226	m = (struct mtx *)lock;
227	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
228	mtx_unlock(m);
229	return (0);
230}
231
232uintptr_t
233unlock_spin(struct lock_object *lock)
234{
235
236	panic("spin locks can only use msleep_spin");
237}
238
239#ifdef KDTRACE_HOOKS
240int
241owner_mtx(const struct lock_object *lock, struct thread **owner)
242{
243	const struct mtx *m;
244	uintptr_t x;
245
246	m = (const struct mtx *)lock;
247	x = m->mtx_lock;
248	*owner = (struct thread *)(x & ~MTX_FLAGMASK);
249	return (*owner != NULL);
250}
251#endif
252
253/*
254 * Function versions of the inlined __mtx_* macros.  These are used by
255 * modules and can also be called from assembly language if needed.
256 */
257void
258__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
259{
260	struct mtx *m;
261	uintptr_t tid, v;
262
263	m = mtxlock2mtx(c);
264
265	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
266	    !TD_IS_IDLETHREAD(curthread),
267	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
268	    curthread, m->lock_object.lo_name, file, line));
269	KASSERT(m->mtx_lock != MTX_DESTROYED,
270	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
271	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
272	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
273	    file, line));
274	WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
275	    LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
276
277	tid = (uintptr_t)curthread;
278	v = MTX_UNOWNED;
279	if (!_mtx_obtain_lock_fetch(m, &v, tid))
280		_mtx_lock_sleep(m, v, opts, file, line);
281	else
282		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
283		    m, 0, 0, file, line);
284	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
285	    line);
286	WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
287	    file, line);
288	TD_LOCKS_INC(curthread);
289}
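
/*
 * A minimal usage sketch (illustrative only; `foo_lock' and `foo_count'
 * are hypothetical names, not part of this file):
 *
 *	static struct mtx foo_lock;
 *	static int foo_count;
 *
 *	mtx_init(&foo_lock, "foo lock", NULL, MTX_DEF);
 *	...
 *	mtx_lock(&foo_lock);
 *	foo_count++;
 *	mtx_unlock(&foo_lock);
 *
 * Code that cannot use the inlined macros (e.g. kernel modules) ends up in
 * the __mtx_lock_flags()/__mtx_unlock_flags() functions defined here.
 */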
290
291void
292__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
293{
294	struct mtx *m;
295
296	m = mtxlock2mtx(c);
297
298	KASSERT(m->mtx_lock != MTX_DESTROYED,
299	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
300	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
301	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
302	    file, line));
303	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
304	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
305	    line);
306	mtx_assert(m, MA_OWNED);
307
308#ifdef LOCK_PROFILING
309	__mtx_unlock_sleep(c, (uintptr_t)curthread, opts, file, line);
310#else
311	__mtx_unlock(m, curthread, opts, file, line);
312#endif
313	TD_LOCKS_DEC(curthread);
314}
315
316void
317__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
318    int line)
319{
320	struct mtx *m;
321#ifdef SMP
322	uintptr_t tid, v;
323#endif
324
325	m = mtxlock2mtx(c);
326
327	KASSERT(m->mtx_lock != MTX_DESTROYED,
328	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
329	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
330	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
331	    m->lock_object.lo_name, file, line));
332	if (mtx_owned(m))
333		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
334		    (opts & MTX_RECURSE) != 0,
335	    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
336		    m->lock_object.lo_name, file, line));
337	opts &= ~MTX_RECURSE;
338	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
339	    file, line, NULL);
340#ifdef SMP
341	spinlock_enter();
342	tid = (uintptr_t)curthread;
343	v = MTX_UNOWNED;
344	if (!_mtx_obtain_lock_fetch(m, &v, tid))
345		_mtx_lock_spin(m, v, opts, file, line);
346	else
347		LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire,
348		    m, 0, 0, file, line);
349#else
350	__mtx_lock_spin(m, curthread, opts, file, line);
351#endif
352	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
353	    line);
354	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
355}
356
357int
358__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
359    int line)
360{
361	struct mtx *m;
362
363	if (SCHEDULER_STOPPED())
364		return (1);
365
366	m = mtxlock2mtx(c);
367
368	KASSERT(m->mtx_lock != MTX_DESTROYED,
369	    ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line));
370	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
371	    ("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
372	    m->lock_object.lo_name, file, line));
373	KASSERT((opts & MTX_RECURSE) == 0,
374	    ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n",
375	    m->lock_object.lo_name, file, line));
376	if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
377		LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
378		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
379		return (1);
380	}
381	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
382	return (0);
383}
384
385void
386__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
387    int line)
388{
389	struct mtx *m;
390
391	m = mtxlock2mtx(c);
392
393	KASSERT(m->mtx_lock != MTX_DESTROYED,
394	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
395	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
396	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
397	    m->lock_object.lo_name, file, line));
398	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
399	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
400	    line);
401	mtx_assert(m, MA_OWNED);
402
403	__mtx_unlock_spin(m);
404}
405
/*
 * The important part of mtx_trylock{,_flags}().
 * Tries to acquire lock `m.'  If the mutex is already owned by the caller
 * and is recursable (or MTX_RECURSE was passed), the lock is acquired
 * recursively.
 */
411int
412_mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF)
413{
414	struct thread *td;
415	uintptr_t tid, v;
416#ifdef LOCK_PROFILING
417	uint64_t waittime = 0;
418	int contested = 0;
419#endif
420	int rval;
421	bool recursed;
422
423	td = curthread;
424	tid = (uintptr_t)td;
425	if (SCHEDULER_STOPPED_TD(td))
426		return (1);
427
428	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
429	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
430	    curthread, m->lock_object.lo_name, file, line));
431	KASSERT(m->mtx_lock != MTX_DESTROYED,
432	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
433	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
434	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
435	    file, line));
436
437	rval = 1;
438	recursed = false;
439	v = MTX_UNOWNED;
440	for (;;) {
441		if (_mtx_obtain_lock_fetch(m, &v, tid))
442			break;
443		if (v == MTX_UNOWNED)
444			continue;
445		if (v == tid &&
446		    ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
447		    (opts & MTX_RECURSE) != 0)) {
448			m->mtx_recurse++;
449			atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
450			recursed = true;
451			break;
452		}
453		rval = 0;
454		break;
455	}
456
457	opts &= ~MTX_RECURSE;
458
459	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
460	if (rval) {
461		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
462		    file, line);
463		TD_LOCKS_INC(curthread);
464		if (!recursed)
465			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
466			    m, contested, waittime, file, line);
467	}
468
469	return (rval);
470}
471
472int
473_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
474{
475	struct mtx *m;
476
477	m = mtxlock2mtx(c);
478	return (_mtx_trylock_flags_int(m, opts LOCK_FILE_LINE_ARG));
479}
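
/*
 * A trylock usage sketch (illustrative only; `foo_lock' is hypothetical):
 *
 *	if (mtx_trylock(&foo_lock)) {
 *		... do work that needs the lock ...
 *		mtx_unlock(&foo_lock);
 *	} else {
 *		... lock is busy; defer the work or try again later ...
 *	}
 *
 * mtx_trylock() never sleeps; it returns non-zero only if the lock was
 * acquired (including a recursive acquisition on a recursable mutex).
 */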
480
481/*
482 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
483 *
484 * We call this if the lock is either contested (i.e. we need to go to
485 * sleep waiting for it), or if we need to recurse on it.
486 */
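/*
 * In outline: if this is a recursive acquisition by the current owner, just
 * bump mtx_recurse.  Otherwise, with ADAPTIVE_MUTEXES, spin while the owner
 * is running on another CPU, then set MTX_CONTESTED and block on the lock's
 * turnstile until the owner releases the lock.
 */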
487#if LOCK_DEBUG > 0
488void
489__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts, const char *file,
490    int line)
491#else
492void
493__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
494#endif
495{
496	struct thread *td;
497	struct mtx *m;
498	struct turnstile *ts;
499	uintptr_t tid;
500	struct thread *owner;
501#ifdef LOCK_PROFILING
502	int contested = 0;
503	uint64_t waittime = 0;
504#endif
505#if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
506	struct lock_delay_arg lda;
507#endif
508#ifdef KDTRACE_HOOKS
509	u_int sleep_cnt = 0;
510	int64_t sleep_time = 0;
511	int64_t all_time = 0;
512#endif
513#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
514	int doing_lockprof = 0;
515#endif
516
517	td = curthread;
518	tid = (uintptr_t)td;
519	m = mtxlock2mtx(c);
520
521#ifdef KDTRACE_HOOKS
522	if (LOCKSTAT_PROFILE_ENABLED(adaptive__acquire)) {
523		while (v == MTX_UNOWNED) {
524			if (_mtx_obtain_lock_fetch(m, &v, tid))
525				goto out_lockstat;
526		}
527		doing_lockprof = 1;
528		all_time -= lockstat_nsecs(&m->lock_object);
529	}
530#endif
531#ifdef LOCK_PROFILING
532	doing_lockprof = 1;
533#endif
534
535	if (SCHEDULER_STOPPED_TD(td))
536		return;
537
538	if (__predict_false(v == MTX_UNOWNED))
539		v = MTX_READ_VALUE(m);
540
541	if (__predict_false(lv_mtx_owner(v) == td)) {
542		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
543		    (opts & MTX_RECURSE) != 0,
544	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
545		    m->lock_object.lo_name, file, line));
546#if LOCK_DEBUG > 0
547		opts &= ~MTX_RECURSE;
548#endif
549		m->mtx_recurse++;
550		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
551		if (LOCK_LOG_TEST(&m->lock_object, opts))
552			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
553		return;
554	}
555#if LOCK_DEBUG > 0
556	opts &= ~MTX_RECURSE;
557#endif
558
559#if defined(ADAPTIVE_MUTEXES)
560	lock_delay_arg_init(&lda, &mtx_delay);
561#elif defined(KDTRACE_HOOKS)
562	lock_delay_arg_init_noadapt(&lda);
563#endif
564
565#ifdef HWPMC_HOOKS
566	PMC_SOFT_CALL( , , lock, failed);
567#endif
568	lock_profile_obtain_lock_failed(&m->lock_object, false,
569		    &contested, &waittime);
570	if (LOCK_LOG_TEST(&m->lock_object, opts))
571		CTR4(KTR_LOCK,
572		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
573		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
574
575	for (;;) {
576		if (v == MTX_UNOWNED) {
577			if (_mtx_obtain_lock_fetch(m, &v, tid))
578				break;
579			continue;
580		}
581#ifdef KDTRACE_HOOKS
582		lda.spin_cnt++;
583#endif
584#ifdef ADAPTIVE_MUTEXES
585		/*
586		 * If the owner is running on another CPU, spin until the
587		 * owner stops running or the state of the lock changes.
588		 */
589		owner = lv_mtx_owner(v);
590		if (TD_IS_RUNNING(owner)) {
591			if (LOCK_LOG_TEST(&m->lock_object, 0))
592				CTR3(KTR_LOCK,
593				    "%s: spinning on %p held by %p",
594				    __func__, m, owner);
595			KTR_STATE1(KTR_SCHED, "thread",
596			    sched_tdname((struct thread *)tid),
597			    "spinning", "lockname:\"%s\"",
598			    m->lock_object.lo_name);
599			do {
600				lock_delay(&lda);
601				v = MTX_READ_VALUE(m);
602				owner = lv_mtx_owner(v);
603			} while (v != MTX_UNOWNED && TD_IS_RUNNING(owner));
604			KTR_STATE0(KTR_SCHED, "thread",
605			    sched_tdname((struct thread *)tid),
606			    "running");
607			continue;
608		}
609#endif
610
611		ts = turnstile_trywait(&m->lock_object);
612		v = MTX_READ_VALUE(m);
613retry_turnstile:
614
615		/*
616		 * Check if the lock has been released while spinning for
617		 * the turnstile chain lock.
618		 */
619		if (v == MTX_UNOWNED) {
620			turnstile_cancel(ts);
621			continue;
622		}
623
624#ifdef ADAPTIVE_MUTEXES
625		/*
626		 * The current lock owner might have started executing
627		 * on another CPU (or the lock could have changed
628		 * owners) while we were waiting on the turnstile
629		 * chain lock.  If so, drop the turnstile lock and try
630		 * again.
631		 */
632		owner = lv_mtx_owner(v);
633		if (TD_IS_RUNNING(owner)) {
634			turnstile_cancel(ts);
635			continue;
636		}
637#endif
638
639		/*
640		 * If the mutex isn't already contested and a failure occurs
641		 * setting the contested bit, the mutex was either released
642		 * or the state of the MTX_RECURSED bit changed.
643		 */
644		if ((v & MTX_CONTESTED) == 0 &&
645		    !atomic_fcmpset_ptr(&m->mtx_lock, &v, v | MTX_CONTESTED)) {
646			goto retry_turnstile;
647		}
648
649		/*
650		 * We definitely must sleep for this lock.
651		 */
652		mtx_assert(m, MA_NOTOWNED);
653
654		/*
655		 * Block on the turnstile.
656		 */
657#ifdef KDTRACE_HOOKS
658		sleep_time -= lockstat_nsecs(&m->lock_object);
659#endif
660#ifndef ADAPTIVE_MUTEXES
661		owner = mtx_owner(m);
662#endif
663		MPASS(owner == mtx_owner(m));
664		turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE);
665#ifdef KDTRACE_HOOKS
666		sleep_time += lockstat_nsecs(&m->lock_object);
667		sleep_cnt++;
668#endif
669		v = MTX_READ_VALUE(m);
670	}
671#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
672	if (__predict_true(!doing_lockprof))
673		return;
674#endif
675#ifdef KDTRACE_HOOKS
676	all_time += lockstat_nsecs(&m->lock_object);
677	if (sleep_time)
678		LOCKSTAT_RECORD1(adaptive__block, m, sleep_time);
679
	/*
	 * Only record the time spent spinning, not the time spent sleeping.
	 */
683	if (lda.spin_cnt > sleep_cnt)
684		LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time);
685out_lockstat:
686#endif
687	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested,
688	    waittime, file, line);
689}
690
691#ifdef SMP
692/*
693 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
694 *
695 * This is only called if we need to actually spin for the lock. Recursion
696 * is handled inline.
697 */
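/*
 * In outline: spin with lock_delay() backoff until the lock is released,
 * briefly leaving the spinlock section so that interrupts can run, and let
 * _mtx_lock_indefinite_check() diagnose a lock that is held for too long.
 */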
698#if LOCK_DEBUG > 0
699void
700_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
701    const char *file, int line)
702#else
703void
704_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v)
705#endif
706{
707	struct mtx *m;
708	struct lock_delay_arg lda;
709	uintptr_t tid;
710#ifdef LOCK_PROFILING
711	int contested = 0;
712	uint64_t waittime = 0;
713#endif
714#ifdef KDTRACE_HOOKS
715	int64_t spin_time = 0;
716#endif
717#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
718	int doing_lockprof = 0;
719#endif
720
721	tid = (uintptr_t)curthread;
722	m = mtxlock2mtx(c);
723
724#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(spin__acquire)) {
726		while (v == MTX_UNOWNED) {
727			if (_mtx_obtain_lock_fetch(m, &v, tid))
728				goto out_lockstat;
729		}
730		doing_lockprof = 1;
731		spin_time -= lockstat_nsecs(&m->lock_object);
732	}
733#endif
734#ifdef LOCK_PROFILING
735	doing_lockprof = 1;
736#endif
737
738	if (__predict_false(v == MTX_UNOWNED))
739		v = MTX_READ_VALUE(m);
740
741	if (__predict_false(v == tid)) {
742		m->mtx_recurse++;
743		return;
744	}
745
746	if (SCHEDULER_STOPPED())
747		return;
748
749	if (LOCK_LOG_TEST(&m->lock_object, opts))
750		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
751	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
752	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);
753
754	lock_delay_arg_init(&lda, &mtx_spin_delay);
755
756#ifdef HWPMC_HOOKS
757	PMC_SOFT_CALL( , , lock, failed);
758#endif
759	lock_profile_obtain_lock_failed(&m->lock_object, true, &contested, &waittime);
760
761	for (;;) {
762		if (v == MTX_UNOWNED) {
763			if (_mtx_obtain_lock_fetch(m, &v, tid))
764				break;
765			continue;
766		}
767		/* Give interrupts a chance while we spin. */
768		spinlock_exit();
769		do {
770			if (__predict_true(lda.spin_cnt < 10000000)) {
771				lock_delay(&lda);
772			} else {
773				_mtx_lock_indefinite_check(m, &lda);
774			}
775			v = MTX_READ_VALUE(m);
776		} while (v != MTX_UNOWNED);
777		spinlock_enter();
778	}
779
780	if (LOCK_LOG_TEST(&m->lock_object, opts))
781		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
782	KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
783	    "running");
784
785#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
786	if (__predict_true(!doing_lockprof))
787		return;
788#endif
789#ifdef KDTRACE_HOOKS
790	spin_time += lockstat_nsecs(&m->lock_object);
791	if (lda.spin_cnt != 0)
792		LOCKSTAT_RECORD1(spin__spin, m, spin_time);
793out_lockstat:
794#endif
795	LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, m,
796	    contested, waittime, file, line);
797}
798#endif /* SMP */
799
800#ifdef INVARIANTS
801static void
802thread_lock_validate(struct mtx *m, int opts, const char *file, int line)
803{
804
805	KASSERT(m->mtx_lock != MTX_DESTROYED,
806	    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
807	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
808	    ("thread_lock() of sleep mutex %s @ %s:%d",
809	    m->lock_object.lo_name, file, line));
810	KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) == 0,
811	    ("thread_lock: got a recursive mutex %s @ %s:%d\n",
812	    m->lock_object.lo_name, file, line));
813	WITNESS_CHECKORDER(&m->lock_object,
814	    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
815}
816#else
817#define thread_lock_validate(m, opts, file, line) do { } while (0)
818#endif
819
820#ifndef LOCK_PROFILING
821#if LOCK_DEBUG > 0
822void
823_thread_lock(struct thread *td, int opts, const char *file, int line)
824#else
825void
826_thread_lock(struct thread *td)
827#endif
828{
829	struct mtx *m;
830	uintptr_t tid;
831
832	tid = (uintptr_t)curthread;
833
834	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire)))
835		goto slowpath_noirq;
836	spinlock_enter();
837	m = td->td_lock;
838	thread_lock_validate(m, 0, file, line);
839	if (__predict_false(m == &blocked_lock))
840		goto slowpath_unlocked;
841	if (__predict_false(!_mtx_obtain_lock(m, tid)))
842		goto slowpath_unlocked;
843	if (__predict_true(m == td->td_lock)) {
844		WITNESS_LOCK(&m->lock_object, LOP_EXCLUSIVE, file, line);
845		return;
846	}
847	_mtx_release_lock_quick(m);
848slowpath_unlocked:
849	spinlock_exit();
850slowpath_noirq:
851#if LOCK_DEBUG > 0
852	thread_lock_flags_(td, opts, file, line);
853#else
854	thread_lock_flags_(td, 0, 0, 0);
855#endif
856}
857#endif
858
859void
860thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
861{
862	struct mtx *m;
863	uintptr_t tid, v;
864	struct lock_delay_arg lda;
865#ifdef LOCK_PROFILING
866	int contested = 0;
867	uint64_t waittime = 0;
868#endif
869#ifdef KDTRACE_HOOKS
870	int64_t spin_time = 0;
871#endif
872#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
873	int doing_lockprof = 1;
874#endif
875
876	tid = (uintptr_t)curthread;
877
878	if (SCHEDULER_STOPPED()) {
879		/*
880		 * Ensure that spinlock sections are balanced even when the
881		 * scheduler is stopped, since we may otherwise inadvertently
882		 * re-enable interrupts while dumping core.
883		 */
884		spinlock_enter();
885		return;
886	}
887
888	lock_delay_arg_init(&lda, &mtx_spin_delay);
889
890#ifdef HWPMC_HOOKS
891	PMC_SOFT_CALL( , , lock, failed);
892#endif
893
894#ifdef LOCK_PROFILING
895	doing_lockprof = 1;
896#elif defined(KDTRACE_HOOKS)
897	doing_lockprof = lockstat_enabled;
898	if (__predict_false(doing_lockprof))
899		spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
900#endif
901	spinlock_enter();
902
903	for (;;) {
904retry:
905		m = td->td_lock;
906		thread_lock_validate(m, opts, file, line);
907		v = MTX_READ_VALUE(m);
908		for (;;) {
909			if (v == MTX_UNOWNED) {
910				if (_mtx_obtain_lock_fetch(m, &v, tid))
911					break;
912				continue;
913			}
914			MPASS(v != tid);
915			lock_profile_obtain_lock_failed(&m->lock_object, true,
916			    &contested, &waittime);
917			/* Give interrupts a chance while we spin. */
918			spinlock_exit();
919			do {
920				if (__predict_true(lda.spin_cnt < 10000000)) {
921					lock_delay(&lda);
922				} else {
923					_mtx_lock_indefinite_check(m, &lda);
924				}
925				if (m != td->td_lock) {
926					spinlock_enter();
927					goto retry;
928				}
929				v = MTX_READ_VALUE(m);
930			} while (v != MTX_UNOWNED);
931			spinlock_enter();
932		}
933		if (m == td->td_lock)
934			break;
935		_mtx_release_lock_quick(m);
936	}
937	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
938	    line);
939	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
940
941#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
942	if (__predict_true(!doing_lockprof))
943		return;
944#endif
945#ifdef KDTRACE_HOOKS
946	spin_time += lockstat_nsecs(&m->lock_object);
947#endif
948	LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, m, contested,
949	    waittime, file, line);
950#ifdef KDTRACE_HOOKS
951	if (lda.spin_cnt != 0)
952		LOCKSTAT_RECORD1(thread__spin, m, spin_time);
953#endif
954}
955
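/*
 * Helpers for handing off a thread's lock pointer (td_lock).  While the
 * scheduler migrates a thread it points td_lock at blocked_lock, which is
 * never released, so concurrent thread_lock() callers keep spinning until
 * thread_lock_unblock() publishes the new lock with a release store.
 */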
956struct mtx *
957thread_lock_block(struct thread *td)
958{
959	struct mtx *lock;
960
961	lock = td->td_lock;
962	mtx_assert(lock, MA_OWNED);
963	td->td_lock = &blocked_lock;
964
965	return (lock);
966}
967
968void
969thread_lock_unblock(struct thread *td, struct mtx *new)
970{
971
972	mtx_assert(new, MA_OWNED);
973	KASSERT(td->td_lock == &blocked_lock,
974	    ("thread %p lock %p not blocked_lock %p",
975	    td, td->td_lock, &blocked_lock));
976	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
977}
978
979void
980thread_lock_block_wait(struct thread *td)
981{
982
983	while (td->td_lock == &blocked_lock)
984		cpu_spinwait();
985
986	/* Acquire fence to be certain that all thread state is visible. */
987	atomic_thread_fence_acq();
988}
989
990void
991thread_lock_set(struct thread *td, struct mtx *new)
992{
993	struct mtx *lock;
994
995	mtx_assert(new, MA_OWNED);
996	lock = td->td_lock;
997	mtx_assert(lock, MA_OWNED);
998	td->td_lock = new;
999	mtx_unlock_spin(lock);
1000}
1001
1002/*
1003 * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
1004 *
 * We are only called here if the lock is recursed, contested (i.e. we
 * need to wake up a blocked thread), or a lockstat probe is active.
1007 */
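/*
 * In outline: undo one level of recursion if the lock is recursed;
 * otherwise release the lock word and, if the lock was contested, wake
 * every waiter on the turnstile's exclusive queue.
 */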
1008#if LOCK_DEBUG > 0
1009void
1010__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
1011    const char *file, int line)
1012#else
1013void
1014__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v)
1015#endif
1016{
1017	struct mtx *m;
1018	struct turnstile *ts;
1019	uintptr_t tid;
1020
1021	if (SCHEDULER_STOPPED())
1022		return;
1023
1024	tid = (uintptr_t)curthread;
1025	m = mtxlock2mtx(c);
1026
1027	if (__predict_false(v == tid))
1028		v = MTX_READ_VALUE(m);
1029
1030	if (__predict_false(v & MTX_RECURSED)) {
1031		if (--(m->mtx_recurse) == 0)
1032			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
1033		if (LOCK_LOG_TEST(&m->lock_object, opts))
1034			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
1035		return;
1036	}
1037
1038	LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m);
1039	if (v == tid && _mtx_release_lock(m, tid))
1040		return;
1041
1042	/*
1043	 * We have to lock the chain before the turnstile so this turnstile
1044	 * can be removed from the hash list if it is empty.
1045	 */
1046	turnstile_chain_lock(&m->lock_object);
1047	_mtx_release_lock_quick(m);
1048	ts = turnstile_lookup(&m->lock_object);
1049	MPASS(ts != NULL);
1050	if (LOCK_LOG_TEST(&m->lock_object, opts))
1051		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
1052	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
1053
1054	/*
1055	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
1057	 */
1058	turnstile_unpend(ts);
1059	turnstile_chain_unlock(&m->lock_object);
1060}
1061
1062/*
1063 * All the unlocking of MTX_SPIN locks is done inline.
1064 * See the __mtx_unlock_spin() macro for the details.
1065 */
1066
1067/*
1068 * The backing function for the INVARIANTS-enabled mtx_assert()
1069 */
1070#ifdef INVARIANT_SUPPORT
1071void
1072__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
1073{
1074	const struct mtx *m;
1075
1076	if (KERNEL_PANICKED() || dumping || SCHEDULER_STOPPED())
1077		return;
1078
1079	m = mtxlock2mtx(c);
1080
1081	switch (what) {
1082	case MA_OWNED:
1083	case MA_OWNED | MA_RECURSED:
1084	case MA_OWNED | MA_NOTRECURSED:
1085		if (!mtx_owned(m))
1086			panic("mutex %s not owned at %s:%d",
1087			    m->lock_object.lo_name, file, line);
1088		if (mtx_recursed(m)) {
1089			if ((what & MA_NOTRECURSED) != 0)
1090				panic("mutex %s recursed at %s:%d",
1091				    m->lock_object.lo_name, file, line);
1092		} else if ((what & MA_RECURSED) != 0) {
1093			panic("mutex %s unrecursed at %s:%d",
1094			    m->lock_object.lo_name, file, line);
1095		}
1096		break;
1097	case MA_NOTOWNED:
1098		if (mtx_owned(m))
1099			panic("mutex %s owned at %s:%d",
1100			    m->lock_object.lo_name, file, line);
1101		break;
1102	default:
1103		panic("unknown mtx_assert at %s:%d", file, line);
1104	}
1105}
1106#endif
1107
1108/*
1109 * General init routine used by the MTX_SYSINIT() macro.
1110 */
1111void
1112mtx_sysinit(void *arg)
1113{
1114	struct mtx_args *margs = arg;
1115
1116	mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
1117	    margs->ma_opts);
1118}
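
/*
 * For example (hypothetical names), a statically declared mutex can be set
 * up at boot with:
 *
 *	static struct mtx foo_lock;
 *	MTX_SYSINIT(foo_lock, &foo_lock, "foo lock", MTX_DEF);
 *
 * which arranges for mtx_sysinit() to run via SYSINIT(9) during startup.
 */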
1119
/*
 * Mutex initialization routine; initialize lock `m', whose kind (spin or
 * sleep) and options are taken from `opts', with name `name.'  The optional
 * lock type `type' is used as a general lock category name for use with
 * witness.
 */
1126void
1127_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
1128{
1129	struct mtx *m;
1130	struct lock_class *class;
1131	int flags;
1132
1133	m = mtxlock2mtx(c);
1134
1135	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
1136	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0);
1137	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
1138	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
1139	    &m->mtx_lock));
1140
1141	/* Determine lock class and lock flags. */
1142	if (opts & MTX_SPIN)
1143		class = &lock_class_mtx_spin;
1144	else
1145		class = &lock_class_mtx_sleep;
1146	flags = 0;
1147	if (opts & MTX_QUIET)
1148		flags |= LO_QUIET;
1149	if (opts & MTX_RECURSE)
1150		flags |= LO_RECURSABLE;
1151	if ((opts & MTX_NOWITNESS) == 0)
1152		flags |= LO_WITNESS;
1153	if (opts & MTX_DUPOK)
1154		flags |= LO_DUPOK;
1155	if (opts & MTX_NOPROFILE)
1156		flags |= LO_NOPROFILE;
1157	if (opts & MTX_NEW)
1158		flags |= LO_NEW;
1159
1160	/* Initialize mutex. */
1161	lock_init(&m->lock_object, class, name, type, flags);
1162
1163	m->mtx_lock = MTX_UNOWNED;
1164	m->mtx_recurse = 0;
1165}
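
/*
 * An initialization sketch (hypothetical names): giving several mutexes the
 * same witness type groups them under one lock order entry, e.g.
 *
 *	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "foo driver", MTX_DEF);
 *
 * while passing NULL for `type' makes witness use `name' as the type.
 */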
1166
/*
 * Destroy lock `m' so that it may no longer be used.  We don't allow
 * MTX_QUIET to be passed in as a flag here because if the corresponding
 * mtx_init() was called with MTX_QUIET set, then it will already be set
 * in the mutex's flags.
 */
1173void
1174_mtx_destroy(volatile uintptr_t *c)
1175{
1176	struct mtx *m;
1177
1178	m = mtxlock2mtx(c);
1179
1180	if (!mtx_owned(m))
1181		MPASS(mtx_unowned(m));
1182	else {
1183		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
1184
1185		/* Perform the non-mtx related part of mtx_unlock_spin(). */
1186		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin) {
1187			lock_profile_release_lock(&m->lock_object, true);
1188			spinlock_exit();
1189		} else {
1190			TD_LOCKS_DEC(curthread);
1191			lock_profile_release_lock(&m->lock_object, false);
1192		}
1193
1194		/* Tell witness this isn't locked to make it happy. */
1195		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
1196		    __LINE__);
1197	}
1198
1199	m->mtx_lock = MTX_DESTROYED;
1200	lock_destroy(&m->lock_object);
1201}
1202
/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
1208void
1209mutex_init(void)
1210{
1211
	/* Set up turnstiles so that sleep mutexes work. */
1213	init_turnstiles();
1214
1215	/*
1216	 * Initialize mutexes.
1217	 */
1218	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
1219	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
1220	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
1221	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
1222	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN);
1223	mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN);
1224	mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN);
1225	mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN);
1226	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
1227	mtx_lock(&Giant);
1228}
1229
1230static void __noinline
1231_mtx_lock_indefinite_check(struct mtx *m, struct lock_delay_arg *ldap)
1232{
1233	struct thread *td;
1234
1235	ldap->spin_cnt++;
1236	if (ldap->spin_cnt < 60000000 || kdb_active || KERNEL_PANICKED())
1237		cpu_lock_delay();
1238	else {
1239		td = mtx_owner(m);
1240
1241		/* If the mutex is unlocked, try again. */
1242		if (td == NULL)
1243			return;
1244
		printf("spin lock %p (%s) held by %p (tid %d) too long\n",
1246		    m, m->lock_object.lo_name, td, td->td_tid);
1247#ifdef WITNESS
1248		witness_display_spinlock(&m->lock_object, td, printf);
1249#endif
1250		panic("spin lock held too long");
1251	}
1252	cpu_spinwait();
1253}
1254
1255void
1256mtx_spin_wait_unlocked(struct mtx *m)
1257{
1258	struct lock_delay_arg lda;
1259
1260	KASSERT(m->mtx_lock != MTX_DESTROYED,
1261	    ("%s() of destroyed mutex %p", __func__, m));
1262	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
1263	    ("%s() of sleep mutex %p (%s)", __func__, m,
1264	    m->lock_object.lo_name));
1265	KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)", __func__, m,
1266	    m->lock_object.lo_name));
1267
1268	lda.spin_cnt = 0;
1269
1270	while (atomic_load_acq_ptr(&m->mtx_lock) != MTX_UNOWNED) {
1271		if (__predict_true(lda.spin_cnt < 10000000)) {
1272			cpu_spinwait();
1273			lda.spin_cnt++;
1274		} else {
1275			_mtx_lock_indefinite_check(m, &lda);
1276		}
1277	}
1278}
1279
1280void
1281mtx_wait_unlocked(struct mtx *m)
1282{
1283	struct thread *owner;
1284	uintptr_t v;
1285
1286	KASSERT(m->mtx_lock != MTX_DESTROYED,
1287	    ("%s() of destroyed mutex %p", __func__, m));
1288	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
1289	    ("%s() not a sleep mutex %p (%s)", __func__, m,
1290	    m->lock_object.lo_name));
1291	KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)", __func__, m,
1292	    m->lock_object.lo_name));
1293
1294	for (;;) {
1295		v = atomic_load_acq_ptr(&m->mtx_lock);
1296		if (v == MTX_UNOWNED) {
1297			break;
1298		}
1299		owner = lv_mtx_owner(v);
1300		if (!TD_IS_RUNNING(owner)) {
1301			mtx_lock(m);
1302			mtx_unlock(m);
1303			break;
1304		}
1305		cpu_spinwait();
1306	}
1307}
1308
1309#ifdef DDB
1310void
1311db_show_mtx(const struct lock_object *lock)
1312{
1313	struct thread *td;
1314	const struct mtx *m;
1315
1316	m = (const struct mtx *)lock;
1317
1318	db_printf(" flags: {");
1319	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
1320		db_printf("SPIN");
1321	else
1322		db_printf("DEF");
1323	if (m->lock_object.lo_flags & LO_RECURSABLE)
1324		db_printf(", RECURSE");
1325	if (m->lock_object.lo_flags & LO_DUPOK)
1326		db_printf(", DUPOK");
1327	db_printf("}\n");
1328	db_printf(" state: {");
1329	if (mtx_unowned(m))
1330		db_printf("UNOWNED");
1331	else if (mtx_destroyed(m))
1332		db_printf("DESTROYED");
1333	else {
1334		db_printf("OWNED");
1335		if (m->mtx_lock & MTX_CONTESTED)
1336			db_printf(", CONTESTED");
1337		if (m->mtx_lock & MTX_RECURSED)
1338			db_printf(", RECURSED");
1339	}
1340	db_printf("}\n");
1341	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
1342		td = mtx_owner(m);
1343		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
1344		    td->td_tid, td->td_proc->p_pid, td->td_name);
1345		if (mtx_recursed(m))
1346			db_printf(" recursed: %d\n", m->mtx_recurse);
1347	}
1348}
1349#endif
1350