1251881Speter/*-
2251881Speter * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
3251881Speter *
4251881Speter * Redistribution and use in source and binary forms, with or without
5251881Speter * modification, are permitted provided that the following conditions
6251881Speter * are met:
7251881Speter * 1. Redistributions of source code must retain the above copyright
8251881Speter *    notice, this list of conditions and the following disclaimer.
9251881Speter * 2. Redistributions in binary form must reproduce the above copyright
10251881Speter *    notice, this list of conditions and the following disclaimer in the
11251881Speter *    documentation and/or other materials provided with the distribution.
12251881Speter * 3. Berkeley Software Design Inc's name may not be used to endorse or
13251881Speter *    promote products derived from this software without specific prior
14251881Speter *    written permission.
15251881Speter *
16251881Speter * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
17251881Speter * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18251881Speter * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19251881Speter * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
20251881Speter * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21251881Speter * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22251881Speter * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23251881Speter * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24251881Speter * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25251881Speter * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26251881Speter * SUCH DAMAGE.
27251881Speter *
28251881Speter *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
29251881Speter *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
30251881Speter */
31251881Speter
32251881Speter/*
33251881Speter * Machine independent bits of mutex implementation.
34251881Speter */
35251881Speter
36251881Speter#include <sys/cdefs.h>
37251881Speter__FBSDID("$FreeBSD: stable/10/sys/kern/kern_mutex.c 323870 2017-09-21 19:24:11Z marius $");
38251881Speter
39251881Speter#include "opt_adaptive_mutexes.h"
40251881Speter#include "opt_ddb.h"
41251881Speter#include "opt_global.h"
42251881Speter#include "opt_hwpmc_hooks.h"
43251881Speter#include "opt_kdtrace.h"
44251881Speter#include "opt_sched.h"
45251881Speter
46251881Speter#include <sys/param.h>
47251881Speter#include <sys/systm.h>
48251881Speter#include <sys/bus.h>
49251881Speter#include <sys/conf.h>
50251881Speter#include <sys/kdb.h>
51251881Speter#include <sys/kernel.h>
52251881Speter#include <sys/ktr.h>
53251881Speter#include <sys/lock.h>
54251881Speter#include <sys/malloc.h>
55251881Speter#include <sys/mutex.h>
56251881Speter#include <sys/proc.h>
57251881Speter#include <sys/resourcevar.h>
58251881Speter#include <sys/sched.h>
59251881Speter#include <sys/sbuf.h>
60251881Speter#include <sys/smp.h>
61251881Speter#include <sys/sysctl.h>
62251881Speter#include <sys/turnstile.h>
63251881Speter#include <sys/vmmeter.h>
64251881Speter#include <sys/lock_profile.h>
65251881Speter
66251881Speter#include <machine/atomic.h>
67251881Speter#include <machine/bus.h>
68251881Speter#include <machine/cpu.h>
69251881Speter
70251881Speter#include <ddb/ddb.h>
71251881Speter
72251881Speter#include <fs/devfs/devfs_int.h>
73251881Speter
74251881Speter#include <vm/vm.h>
75251881Speter#include <vm/vm_extern.h>
76251881Speter
/*
 * Adaptive spinning on contested sleep mutexes is enabled on SMP kernels
 * unless explicitly disabled with the NO_ADAPTIVE_MUTEXES option.
 */
#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
/* Soft PMC probe; fired below whenever a lock acquisition is contested. */
PMC_SOFT_DEFINE( , , lock, failed);
#endif
85251881Speter
/*
 * Return the mutex address when the lock cookie address is provided.
 * This functionality assumes that struct mtx* have a member named mtx_lock.
 */
#define	mtxlock2mtx(c)	(__containerof(c, struct mtx, mtx_lock))

/*
 * Internal utility macros.
 */
/* True when the lock word holds MTX_UNOWNED, i.e. nobody owns the mutex. */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

/* True when the mutex has been destroyed (lock word is MTX_DESTROYED). */
#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

/*
 * Owning thread pointer, recovered by masking the flag bits out of the
 * lock word; NULL when the mutex is unowned.
 */
#define	mtx_owner(m)	((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))
100251881Speter
/*
 * Forward declarations for the lock class methods installed in the
 * lock_class_mtx_sleep and lock_class_mtx_spin tables below.
 */
static void	assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(const struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, uintptr_t how);
static void	lock_spin(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_mtx(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_mtx(struct lock_object *lock);
static uintptr_t unlock_spin(struct lock_object *lock);
113251881Speter
/*
 * Lock classes for sleep and spin mutexes.
 */
/* Sleep mutexes: recursable sleep locks. */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
/* Spin mutexes: recursable spin locks; lc_lock/lc_unlock panic (see below). */
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
143251881Speter
#ifdef ADAPTIVE_MUTEXES
static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD, NULL, "mtx debugging");

/*
 * Tunables controlling the adaptive-spin backoff used by lock_delay()
 * while waiting for a running lock owner.  The static values are
 * placeholders; they are rescaled by CPU count at boot (see below).
 */
static struct lock_delay_config mtx_delay = {
	.initial	= 1000,
	.step		= 500,
	.min		= 100,
	.max		= 5000,
};

/* Runtime-writable knobs under debug.mtx.* for each delay parameter. */
SYSCTL_INT(_debug_mtx, OID_AUTO, delay_initial, CTLFLAG_RW, &mtx_delay.initial,
    0, "");
SYSCTL_INT(_debug_mtx, OID_AUTO, delay_step, CTLFLAG_RW, &mtx_delay.step,
    0, "");
SYSCTL_INT(_debug_mtx, OID_AUTO, delay_min, CTLFLAG_RW, &mtx_delay.min,
    0, "");
SYSCTL_INT(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max,
    0, "");

/*
 * Boot-time hook: scale the delay parameters by the number of CPUs,
 * overriding the static initializers above.
 */
static void
mtx_delay_sysinit(void *dummy)
{

	mtx_delay.initial = mp_ncpus * 25;
	mtx_delay.step = (mp_ncpus * 25) / 2;
	mtx_delay.min = mp_ncpus * 5;
	mtx_delay.max = mp_ncpus * 25 * 10;
}
LOCK_DELAY_SYSINIT(mtx_delay_sysinit);
#endif
174251881Speter
/*
 * System-wide mutexes
 */
/*
 * blocked_lock is installed as a thread's td_lock while the thread is
 * blocked and its real lock is in flux (see thread_lock_block() below).
 */
struct mtx blocked_lock;
struct mtx Giant;
180251881Speter
/*
 * Lock class lc_assert method: forward the assertion to mtx_assert()
 * on the underlying mutex.
 */
void
assert_mtx(const struct lock_object *lock, int what)
{
	const struct mtx *m;

	m = (const struct mtx *)lock;
	mtx_assert(m, what);
}
187251881Speter
188251881Spetervoid
189251881Speterlock_mtx(struct lock_object *lock, uintptr_t how)
190251881Speter{
191251881Speter
192251881Speter	mtx_lock((struct mtx *)lock);
193251881Speter}
194251881Speter
/*
 * Lock class lc_lock method for spin mutexes.  Sleeping while holding a
 * spin mutex is not supported through this interface; callers must use
 * msleep_spin() instead, so this always panics.
 */
void
lock_spin(struct lock_object *lock, uintptr_t how)
{

	panic("spin locks can only use msleep_spin");
}
201251881Speter
202251881Speteruintptr_t
203251881Speterunlock_mtx(struct lock_object *lock)
204251881Speter{
205251881Speter	struct mtx *m;
206251881Speter
207251881Speter	m = (struct mtx *)lock;
208251881Speter	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
209251881Speter	mtx_unlock(m);
210251881Speter	return (0);
211251881Speter}
212251881Speter
/*
 * Lock class lc_unlock method for spin mutexes.  As with lock_spin(),
 * this path is unsupported (use msleep_spin()), so it always panics.
 */
uintptr_t
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}
219251881Speter
#ifdef KDTRACE_HOOKS
/*
 * Lock class lc_owner method: report the owning thread (NULL if none)
 * and return non-zero iff the mutex is currently owned.
 */
int
owner_mtx(const struct lock_object *lock, struct thread **owner)
{
	const struct mtx *m;

	m = (const struct mtx *)lock;
	*owner = mtx_owner(m);
	return (!mtx_unowned(m));
}
#endif
230251881Speter
/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
/*
 * Acquire a sleep mutex, with full KASSERT/WITNESS/KTR instrumentation.
 * "c" is the lock-cookie (address of the mtx_lock word); opts may carry
 * MTX_RECURSE, which is stripped before the witness calls.
 */
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	/* After a panic/stop the scheduler locking becomes a no-op. */
	if (SCHEDULER_STOPPED())
		return;

	/* Recover the mutex from the lock-cookie address. */
	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	/* Order is checked before the acquire, and recorded after it. */
	WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
	    LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);

	__mtx_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
	    file, line);
	curthread->td_locks++;
}
263251881Speter
/*
 * Release a sleep mutex acquired with __mtx_lock_flags(), unwinding the
 * witness/log bookkeeping in the reverse order of the lock path.
 */
void
__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	/* Only the final (non-recursed) release counts for lockstat. */
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_UNLOCK_RELEASE, m);
	__mtx_unlock(m, curthread, opts, file, line);
	curthread->td_locks--;
}
289251881Speter
/*
 * Acquire a spin mutex.  Recursion is only permitted when the lock is
 * LO_RECURSABLE or the caller passed MTX_RECURSE; the flag is stripped
 * from opts before the witness/lock calls.
 */
void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	opts &= ~MTX_RECURSE;
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
	__mtx_lock_spin(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}
319251881Speter
/*
 * Try to acquire a spin mutex without blocking.  Returns 1 on success
 * and 0 on failure.  MTX_RECURSE is not supported here; recursion on an
 * already-owned lock fails the trylock.
 */
int
__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	/* Pretend success once the scheduler is stopped. */
	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	KASSERT((opts & MTX_RECURSE) == 0,
	    ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n",
	    m->lock_object.lo_name, file, line));
	if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
		LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
		return (1);
	}
	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
	return (0);
}
347251881Speter
/*
 * Release a spin mutex acquired with __mtx_lock_spin_flags(),
 * unwinding the witness/log bookkeeping before the actual release.
 */
void
__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}
371251881Speter
/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 *
 * Returns 1 when the lock was obtained (or recursed), 0 otherwise.
 */
int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	/*
	 * Recurse if we already own the lock and recursion is allowed,
	 * otherwise race for the lock word with an atomic compare-set.
	 */
	if (mtx_owned(m) && ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
	    (opts & MTX_RECURSE) != 0)) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _mtx_obtain_lock(m, (uintptr_t)curthread);
	opts &= ~MTX_RECURSE;

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
		/* Only a first-level acquire counts for lockstat. */
		if (m->mtx_recurse == 0)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE,
			    m, contested, waittime, file, line);

	}

	return (rval);
}
423251881Speter
/*
 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
    const char *file, int line)
{
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t v;
#ifdef ADAPTIVE_MUTEXES
	volatile struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

#if defined(ADAPTIVE_MUTEXES)
	lock_delay_arg_init(&lda, &mtx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
	m = mtxlock2mtx(c);

	/* Recursion path: bump the count and mark the lock word. */
	if (mtx_owned(m)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
		opts &= ~MTX_RECURSE;
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}
	opts &= ~MTX_RECURSE;

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object,
		    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&m->lock_object);
#endif

	/*
	 * Main acquisition loop: try the fast atomic grab, otherwise
	 * adaptively spin on a running owner, and finally block on the
	 * turnstile.
	 */
	for (;;) {
		if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
			break;
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		v = m->mtx_lock;
		if (v != MTX_UNOWNED) {
			owner = (struct thread *)(v & ~MTX_FLAGMASK);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&m->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, m, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname((struct thread *)tid),
				    "spinning", "lockname:\"%s\"",
				    m->lock_object.lo_name);
				while (mtx_owner(m) == owner &&
				    TD_IS_RUNNING(owner))
					lock_delay(&lda);
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname((struct thread *)tid),
				    "running");
				continue;
			}
		}
#endif

		ts = turnstile_trywait(&m->lock_object);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		owner = (struct thread *)(v & ~MTX_FLAGMASK);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_cancel(ts);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		/* Log the first time we block on this lock only. */
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->lock_object.lo_name,
			    WITNESS_FILE(&m->lock_object),
			    WITNESS_LINE(&m->lock_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&m->lock_object);
#endif
		turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&m->lock_object);
		sleep_cnt++;
#endif
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&m->lock_object);
#endif
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->lock_object.lo_name, (void *)tid, file, line);
	}
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, m, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(LS_MTX_LOCK_BLOCK, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping.
	 */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, m, (all_time - sleep_time));
#endif
}
616251881Speter
617251881Speterstatic void
618251881Speter_mtx_lock_spin_failed(struct mtx *m)
619251881Speter{
620251881Speter	struct thread *td;
621251881Speter
622251881Speter	td = mtx_owner(m);
623251881Speter
624251881Speter	/* If the mutex is unlocked, try again. */
625251881Speter	if (td == NULL)
626251881Speter		return;
627251881Speter
628251881Speter	printf( "spin lock %p (%s) held by %p (tid %d) too long\n",
629251881Speter	    m, m->lock_object.lo_name, td, td->td_tid);
630299742Sdim#ifdef WITNESS
631299742Sdim	witness_display_spinlock(&m->lock_object, td, printf);
632299742Sdim#endif
633299742Sdim	panic("spin lock held too long");
634299742Sdim}
635299742Sdim
#ifdef SMP
/*
 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
    const char *file, int line)
{
	struct mtx *m;
	int i = 0;	/* total spin iterations, used for the panic limit */
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
#ifdef KDTRACE_HOOKS
	spin_time -= lockstat_nsecs(&m->lock_object);
#endif
	for (;;) {
		if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
			break;
		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			/* Busy-wait first, then back off with DELAY(),
			 * and finally report a stuck lock. */
			if (i++ < 10000000) {
				cpu_spinwait();
				continue;
			}
			if (i < 60000000 || kdb_active || panicstr != NULL)
				DELAY(1);
			else
				_mtx_lock_spin_failed(m);
			cpu_spinwait();
		}
		spinlock_enter();
	}
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "running");

	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
	    contested, waittime, (file), (line));
#ifdef KDTRACE_HOOKS
	if (spin_time != 0)
		LOCKSTAT_RECORD1(LS_MTX_SPIN_LOCK_SPIN, m, spin_time);
#endif
}
#endif /* SMP */
709251881Speter
/*
 * Acquire the spin mutex currently backing td->td_lock.  Because the
 * backing lock can be migrated (see thread_lock_block()/set()) while we
 * spin, the acquisition is re-checked against td->td_lock and retried
 * until the lock we hold is still the thread's lock.
 */
void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid;
	int i;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif

	i = 0;
	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED()) {
		/*
		 * Ensure that spinlock sections are balanced even when the
		 * scheduler is stopped, since we may otherwise inadvertently
		 * re-enable interrupts while dumping core.
		 */
		spinlock_enter();
		return;
	}

#ifdef KDTRACE_HOOKS
	spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
	for (;;) {
retry:
		spinlock_enter();
		/* Re-sample the backing lock; it may have changed. */
		m = td->td_lock;
		KASSERT(m->mtx_lock != MTX_DESTROYED,
		    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
		KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
		    ("thread_lock() of sleep mutex %s @ %s:%d",
		    m->lock_object.lo_name, file, line));
		if (mtx_owned(m))
			KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
			    m->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&m->lock_object,
		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
		for (;;) {
			if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
				break;
			/* Already ours: recurse. */
			if (m->mtx_lock == tid) {
				m->mtx_recurse++;
				break;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			while (m->mtx_lock != MTX_UNOWNED) {
				if (i++ < 10000000)
					cpu_spinwait();
				else if (i < 60000000 ||
				    kdb_active || panicstr != NULL)
					DELAY(1);
				else
					_mtx_lock_spin_failed(m);
				cpu_spinwait();
				/* The thread's lock moved: start over. */
				if (m != td->td_lock)
					goto retry;
			}
			spinlock_enter();
		}
		/* Keep the lock only if it is still td's backing lock. */
		if (m == td->td_lock)
			break;
		__mtx_unlock_spin(m);	/* does spinlock_exit() */
	}
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE,
		    m, contested, waittime, (file), (line));
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_time);
}
798251881Speter
799251881Speterstruct mtx *
800251881Speterthread_lock_block(struct thread *td)
801251881Speter{
802251881Speter	struct mtx *lock;
803251881Speter
804251881Speter	THREAD_LOCK_ASSERT(td, MA_OWNED);
805251881Speter	lock = td->td_lock;
806251881Speter	td->td_lock = &blocked_lock;
807251881Speter	mtx_unlock_spin(lock);
808251881Speter
809251881Speter	return (lock);
810251881Speter}
811251881Speter
812251881Spetervoid
813251881Speterthread_lock_unblock(struct thread *td, struct mtx *new)
814251881Speter{
815251881Speter	mtx_assert(new, MA_OWNED);
816251881Speter	MPASS(td->td_lock == &blocked_lock);
817251881Speter	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
818251881Speter}
819251881Speter
820251881Spetervoid
821251881Speterthread_lock_set(struct thread *td, struct mtx *new)
822251881Speter{
823251881Speter	struct mtx *lock;
824251881Speter
825251881Speter	mtx_assert(new, MA_OWNED);
826251881Speter	THREAD_LOCK_ASSERT(td, MA_OWNED);
827251881Speter	lock = td->td_lock;
828251881Speter	td->td_lock = new;
829251881Speter	mtx_unlock_spin(lock);
830251881Speter}
831251881Speter
/*
 * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	struct turnstile *ts;

	/* Once the scheduler is stopped (panic path), skip all lock work. */
	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	/* Recursed: peel off one level; clear MTX_RECURSED on the last. */
	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	ts = turnstile_lookup(&m->lock_object);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	/* Contested and not recursed implies waiters, hence a turnstile. */
	MPASS(ts != NULL);
	/* Wake every exclusive-queue waiter, then clear the lock word. */
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	_mtx_release_lock_quick(m);

	/*
	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&m->lock_object);
}
876251881Speter
877251881Speter/*
878251881Speter * All the unlocking of MTX_SPIN locks is done inline.
879251881Speter * See the __mtx_unlock_spin() macro for the details.
880251881Speter */
881251881Speter
882251881Speter/*
883251881Speter * The backing function for the INVARIANTS-enabled mtx_assert()
884251881Speter */
885251881Speter#ifdef INVARIANT_SUPPORT
886251881Spetervoid
887251881Speter__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
888251881Speter{
889251881Speter	const struct mtx *m;
890251881Speter
891251881Speter	if (panicstr != NULL || dumping || SCHEDULER_STOPPED())
892251881Speter		return;
893251881Speter
894251881Speter	m = mtxlock2mtx(c);
895251881Speter
896251881Speter	switch (what) {
897251881Speter	case MA_OWNED:
898251881Speter	case MA_OWNED | MA_RECURSED:
899251881Speter	case MA_OWNED | MA_NOTRECURSED:
900251881Speter		if (!mtx_owned(m))
901251881Speter			panic("mutex %s not owned at %s:%d",
902251881Speter			    m->lock_object.lo_name, file, line);
903251881Speter		if (mtx_recursed(m)) {
904251881Speter			if ((what & MA_NOTRECURSED) != 0)
905251881Speter				panic("mutex %s recursed at %s:%d",
906251881Speter				    m->lock_object.lo_name, file, line);
907251881Speter		} else if ((what & MA_RECURSED) != 0) {
908251881Speter			panic("mutex %s unrecursed at %s:%d",
909251881Speter			    m->lock_object.lo_name, file, line);
910251881Speter		}
911251881Speter		break;
912251881Speter	case MA_NOTOWNED:
913251881Speter		if (mtx_owned(m))
914251881Speter			panic("mutex %s owned at %s:%d",
915251881Speter			    m->lock_object.lo_name, file, line);
916251881Speter		break;
917251881Speter	default:
918251881Speter		panic("unknown mtx_assert at %s:%d", file, line);
919251881Speter	}
920251881Speter}
921251881Speter#endif
922251881Speter
923251881Speter/*
924251881Speter * The MUTEX_DEBUG-enabled mtx_validate()
925251881Speter *
926251881Speter * Most of these checks have been moved off into the LO_INITIALIZED flag
927251881Speter * maintained by the witness code.
928251881Speter */
929251881Speter#ifdef MUTEX_DEBUG
930251881Speter
931251881Spetervoid	mtx_validate(struct mtx *);
932251881Speter
/* Sanity-check mutex storage; currently a no-op (see XXX below). */
void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	/*
	 * NOTE(review): sizeof(m) below is the size of the pointer, not of
	 * the mutex — looks like it should be sizeof(*m); confirm before
	 * ever re-enabling this block.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
952251881Speter#endif
953251881Speter
954251881Speter/*
955251881Speter * General init routine used by the MTX_SYSINIT() macro.
956251881Speter */
957251881Spetervoid
958251881Spetermtx_sysinit(void *arg)
959251881Speter{
960251881Speter	struct mtx_args *margs = arg;
961251881Speter
962251881Speter	mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
963251881Speter	    margs->ma_opts);
964251881Speter}
965251881Speter
966251881Speter/*
967251881Speter * Mutex initialization routine; initialize lock `m' of type contained in
968251881Speter * `opts' with options contained in `opts' and name `name.'  The optional
969251881Speter * lock type `type' is used as a general lock category name for use with
970251881Speter * witness.
971251881Speter */
972251881Spetervoid
973251881Speter_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
974251881Speter{
975251881Speter	struct mtx *m;
976251881Speter	struct lock_class *class;
977251881Speter	int flags;
978251881Speter
979251881Speter	m = mtxlock2mtx(c);
980251881Speter
981251881Speter	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
982251881Speter	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0);
983251881Speter	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
984251881Speter	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
985251881Speter	    &m->mtx_lock));
986251881Speter
987251881Speter#ifdef MUTEX_DEBUG
988251881Speter	/* Diagnostic and error correction */
989251881Speter	mtx_validate(m);
990251881Speter#endif
991251881Speter
992251881Speter	/* Determine lock class and lock flags. */
993251881Speter	if (opts & MTX_SPIN)
994251881Speter		class = &lock_class_mtx_spin;
995251881Speter	else
996251881Speter		class = &lock_class_mtx_sleep;
997251881Speter	flags = 0;
998251881Speter	if (opts & MTX_QUIET)
999251881Speter		flags |= LO_QUIET;
1000251881Speter	if (opts & MTX_RECURSE)
1001251881Speter		flags |= LO_RECURSABLE;
1002251881Speter	if ((opts & MTX_NOWITNESS) == 0)
1003251881Speter		flags |= LO_WITNESS;
1004251881Speter	if (opts & MTX_DUPOK)
1005251881Speter		flags |= LO_DUPOK;
1006251881Speter	if (opts & MTX_NOPROFILE)
1007251881Speter		flags |= LO_NOPROFILE;
1008251881Speter	if (opts & MTX_NEW)
1009251881Speter		flags |= LO_NEW;
1010251881Speter
1011251881Speter	/* Initialize mutex. */
1012251881Speter	lock_init(&m->lock_object, class, name, type, flags);
1013251881Speter
1014251881Speter	m->mtx_lock = MTX_UNOWNED;
1015251881Speter	m->mtx_recurse = 0;
1016251881Speter}
1017251881Speter
1018251881Speter/*
1019251881Speter * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
1020251881Speter * passed in as a flag here because if the corresponding mtx_init() was
1021251881Speter * called with MTX_QUIET set, then it will already be set in the mutex's
1022251881Speter * flags.
1023251881Speter */
1024251881Spetervoid
1025251881Speter_mtx_destroy(volatile uintptr_t *c)
1026251881Speter{
1027251881Speter	struct mtx *m;
1028251881Speter
1029251881Speter	m = mtxlock2mtx(c);
1030251881Speter
1031251881Speter	if (!mtx_owned(m))
1032251881Speter		MPASS(mtx_unowned(m));
1033251881Speter	else {
1034251881Speter		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
1035251881Speter
1036251881Speter		/* Perform the non-mtx related part of mtx_unlock_spin(). */
1037251881Speter		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
1038251881Speter			spinlock_exit();
1039251881Speter		else
1040251881Speter			curthread->td_locks--;
1041251881Speter
1042251881Speter		lock_profile_release_lock(&m->lock_object);
1043251881Speter		/* Tell witness this isn't locked to make it happy. */
1044251881Speter		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
1045251881Speter		    __LINE__);
1046251881Speter	}
1047251881Speter
1048251881Speter	m->mtx_lock = MTX_DESTROYED;
1049251881Speter	lock_destroy(&m->lock_object);
1050251881Speter}
1051251881Speter
/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * setup before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	/* A lock word that is neither MTX_UNOWNED nor any thread pointer. */
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	/* Leave Giant held for the remainder of early boot. */
	mtx_lock(&Giant);
}
1075251881Speter
1076251881Speter#ifdef DDB
1077251881Spetervoid
1078251881Speterdb_show_mtx(const struct lock_object *lock)
1079251881Speter{
1080251881Speter	struct thread *td;
1081251881Speter	const struct mtx *m;
1082251881Speter
1083251881Speter	m = (const struct mtx *)lock;
1084251881Speter
1085251881Speter	db_printf(" flags: {");
1086251881Speter	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
1087251881Speter		db_printf("SPIN");
1088251881Speter	else
1089251881Speter		db_printf("DEF");
1090251881Speter	if (m->lock_object.lo_flags & LO_RECURSABLE)
1091251881Speter		db_printf(", RECURSE");
1092251881Speter	if (m->lock_object.lo_flags & LO_DUPOK)
1093251881Speter		db_printf(", DUPOK");
1094251881Speter	db_printf("}\n");
1095251881Speter	db_printf(" state: {");
1096251881Speter	if (mtx_unowned(m))
1097251881Speter		db_printf("UNOWNED");
1098251881Speter	else if (mtx_destroyed(m))
1099251881Speter		db_printf("DESTROYED");
1100251881Speter	else {
1101251881Speter		db_printf("OWNED");
1102251881Speter		if (m->mtx_lock & MTX_CONTESTED)
1103251881Speter			db_printf(", CONTESTED");
1104251881Speter		if (m->mtx_lock & MTX_RECURSED)
1105251881Speter			db_printf(", RECURSED");
1106251881Speter	}
1107251881Speter	db_printf("}\n");
1108251881Speter	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
1109251881Speter		td = mtx_owner(m);
1110251881Speter		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
1111251881Speter		    td->td_tid, td->td_proc->p_pid, td->td_name);
1112251881Speter		if (mtx_recursed(m))
1113251881Speter			db_printf(" recursed: %d\n", m->mtx_recurse);
1114251881Speter	}
1115251881Speter}
1116251881Speter#endif
1117251881Speter