/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_mutex.c 167368 2007-03-09 16:27:11Z jhb $");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_global.h"
#include "opt_mutex_wake_all.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
/*
 * Force MUTEX_WAKE_ALL for now.
 * Single-thread wakeup needs fixes to avoid race conditions with
 * priority inheritance.
 */
#ifndef MUTEX_WAKE_ALL
#define MUTEX_WAKE_ALL
#endif

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)
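
/*
 * The lock word itself encodes the owner: mtx_lock holds the owning
 * thread pointer (stored as a uintptr_t by _obtain_lock()), with the
 * pointer's low alignment bits doubling as the MTX_* state flags, so
 * masking with ~MTX_FLAGMASK recovers the owner.
 */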
#define	mtx_owner(m)	((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))

#ifdef DDB
static void	db_show_mtx(struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, int how);
static void	lock_spin(struct lock_object *lock, int how);
static int	unlock_mtx(struct lock_object *lock);
static int	unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
};
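
/*
 * These classes let generic code (e.g. the sleep code) drop and
 * reacquire an interlock without knowing its type.  A minimal sketch
 * of that pattern, assuming `lock' is a struct lock_object pointer
 * such as the interlock passed to msleep():
 */
#if 0
	struct lock_class *class;
	int how;

	class = LOCK_CLASS(lock);
	how = class->lc_unlock(lock);	/* drop the interlock */
	/* ... go to sleep ... */
	class->lc_lock(lock, how);	/* reacquire it afterwards */
#endif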

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

#ifdef LOCK_PROFILING
static inline void
lock_profile_init(void)
{
	int i;

	/* Initialize the mutex profiling locks */
	for (i = 0; i < LPROF_LOCK_SIZE; i++) {
		mtx_init(&lprof_locks[i], "mprof lock",
		    NULL, MTX_SPIN|MTX_QUIET|MTX_NOPROFILE);
	}
}
#else
static inline void
lock_profile_init(void)
{
}
#endif

void
lock_mtx(struct lock_object *lock, int how)
{

	mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, int how)
{

	panic("spin locks can only use msleep_spin");
}

int
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

int
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line);

	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	curthread->td_locks--;
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	lock_profile_release_lock(&m->mtx_object);
	_rel_sleep_lock(m, curthread, opts, file, line);
}
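
/*
 * For reference, typical use of the sleep mutex interface by a driver
 * or module looks like the sketch below ("example_softc" and
 * "example_intr" are hypothetical names, not part of this file):
 */
#if 0
struct example_softc {
	struct mtx	sc_mtx;		/* protects sc_ticks */
	int		sc_ticks;
};

static void
example_intr(void *arg)
{
	struct example_softc *sc = arg;

	mtx_lock(&sc->sc_mtx);
	sc->sc_ticks++;
	mtx_unlock(&sc->sc_mtx);
}
#endif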

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line);
	_get_spin_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	lock_profile_release_lock(&m->mtx_object);
	_rel_spin_lock(m);
}

/*
 * The important part of mtx_trylock{,_flags}().
 * Tries to acquire lock `m.'  If the mutex is already held by the
 * current thread and was initialized with MTX_RECURSE, the lock is
 * acquired recursively; otherwise an attempt on an owned mutex fails.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval, contested = 0;
	uint64_t waittime = 0;

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));

	if (mtx_owned(m) && (m->mtx_object.lo_flags & LO_RECURSABLE) != 0) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _obtain_lock(m, (uintptr_t)curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
		if (m->mtx_recurse == 0)
			lock_profile_obtain_lock_success(&m->mtx_object, contested,
			    waittime, file, line);
	}

	return (rval);
}
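
/*
 * mtx_trylock() suits opportunistic code paths that must not block,
 * e.g. when taking the lock in the usual order is impossible.  A
 * minimal sketch (the lock and helper names are hypothetical):
 */
#if 0
	if (mtx_trylock(&example_mtx)) {
		/* Fast path: got the lock without blocking. */
		example_do_work();
		mtx_unlock(&example_mtx);
	} else {
		/* Back off and retry later rather than risk a reversal. */
		example_defer_work();
	}
#endif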

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
	volatile struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif
	uintptr_t v;

	if (mtx_owned(m)) {
		KASSERT((m->mtx_object.lo_flags & LO_RECURSABLE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->mtx_object.lo_name, file, line));
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, tid)) {
		turnstile_lock(&m->mtx_object);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_release(&m->mtx_object);
			cpu_spinwait();
			continue;
		}

#ifdef MUTEX_WAKE_ALL
		MPASS(v != MTX_CONTESTED);
#else
		/*
		 * The mutex was marked contested on release. This means that
		 * there are other threads blocked on it.  Grab ownership of
		 * it and propagate its priority to the current thread if
		 * necessary.
		 */
		if (v == MTX_CONTESTED) {
			m->mtx_lock = tid | MTX_CONTESTED;
			turnstile_claim(&m->mtx_object);
			break;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_release(&m->mtx_object);
			cpu_spinwait();
			continue;
		}

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, spin instead of blocking.
		 */
		owner = (struct thread *)(v & ~MTX_FLAGMASK);
#ifdef ADAPTIVE_GIANT
		if (TD_IS_RUNNING(owner))
#else
		if (m != &Giant && TD_IS_RUNNING(owner))
#endif
		{
			turnstile_release(&m->mtx_object);
			while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
				cpu_spinwait();
			}
			continue;
		}
#endif	/* SMP && !NO_ADAPTIVE_MUTEXES */

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->mtx_object.lo_name,
			    WITNESS_FILE(&m->mtx_object),
			    WITNESS_LINE(&m->mtx_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
		turnstile_wait(&m->mtx_object, mtx_owner(m),
		    TS_EXCLUSIVE_QUEUE);
	}
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->mtx_object.lo_name, (void *)tid, file, line);
	}
#endif
	return;
}

#ifdef SMP
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
	int i = 0;
	struct thread *td;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	while (!_obtain_lock(m, tid)) {

		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
				cpu_spinwait();
				continue;
			}
			if (i < 60000000 || kdb_active || panicstr != NULL)
				DELAY(1);
			else {
				td = mtx_owner(m);

				/* If the mutex is unlocked, try again. */
				if (td == NULL)
					continue;
				printf(
			"spin lock %p (%s) held by %p (tid %d) too long\n",
				    m, m->mtx_object.lo_name, td, td->td_tid);
#ifdef WITNESS
				witness_display_spinlock(&m->mtx_object, td);
#endif
				panic("spin lock held too long");
			}
			cpu_spinwait();
		}
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}
#endif /* SMP */
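
/*
 * Spin mutexes are for code that may not sleep (e.g. the scheduler
 * and low-level interrupt handling); the inline acquire path enters a
 * spinlock section via spinlock_enter(), which blocks interrupts on
 * the local CPU.  Typical usage (hypothetical lock name):
 */
#if 0
	mtx_lock_spin(&example_spin_mtx);
	/* ... short critical section; no sleeping allowed ... */
	mtx_unlock_spin(&example_spin_mtx);
#endif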

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct turnstile *ts;
#ifndef PREEMPTION
	struct thread *td, *td1;
#endif

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	turnstile_lock(&m->mtx_object);
	ts = turnstile_lookup(&m->mtx_object);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
	if (ts == NULL) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
		turnstile_release(&m->mtx_object);
		return;
	}
#else
	MPASS(ts != NULL);
#endif
#ifndef PREEMPTION
	/* XXX */
	td1 = turnstile_head(ts, TS_EXCLUSIVE_QUEUE);
#endif
#ifdef MUTEX_WAKE_ALL
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	_release_lock_quick(m);
#else
	if (turnstile_signal(ts, TS_EXCLUSIVE_QUEUE)) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else {
		m->mtx_lock = MTX_CONTESTED;
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested",
			    m);
	}
#endif
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);

#ifndef PREEMPTION
	/*
	 * XXX: This is just a hack until preemption is done.  However,
	 * once preemption is done we need to either wrap the
	 * turnstile_signal() and release of the actual lock in an
	 * extra critical section or change the preemption code to
	 * always just set a flag and never do instant-preempts.
	 */
	td = curthread;
	if (td->td_critnest > 0 || td1->td_priority >= td->td_priority)
		return;
	mtx_lock_spin(&sched_lock);
	if (!TD_IS_RUNNING(td1)) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		mi_switch(SW_INVOL, NULL);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}
	mtx_unlock_spin(&sched_lock);
#endif

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL || dumping)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
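
/*
 * Code paths that rely on a caller-held mutex conventionally assert it
 * on entry, for example (a hypothetical function, reusing the
 * hypothetical example_softc from the sketch above):
 */
#if 0
static void
example_dequeue(struct example_softc *sc)
{

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	/* ... manipulate state protected by sc_mtx ... */
}
#endif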

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}
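
/*
 * MTX_SYSINIT() registers a SYSINIT that calls mtx_sysinit() during
 * boot, so a file-scope mutex is initialized before first use.  A
 * minimal sketch (hypothetical lock):
 */
#if 0
static struct mtx example_mtx;
MTX_SYSINIT(example_mtx, &example_mtx, "example lock", MTX_DEF);
#endif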

/*
 * Mutex initialization routine; initialize lock `m' with name `name'
 * and the lock class and options contained in `opts.'  The optional
 * lock type `type' is used as a general lock category name for use
 * with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_class *class;
	int flags;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;

	/* Initialize mutex. */
	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;

	lock_profile_object_init(&m->mtx_object, class, name);
	lock_init(&m->mtx_object, class, name, type, flags);
}
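
/*
 * A mutex's lifetime runs from mtx_init() to mtx_destroy(), typically
 * bracketing the lifetime of the object it protects, e.g. across a
 * device's attach and detach (hypothetical sketch):
 */
#if 0
	mtx_init(&sc->sc_mtx, "example softc", NULL, MTX_DEF);
	/* ... use the lock while the device is alive ... */
	mtx_destroy(&sc->sc_mtx);
#endif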

/*
 * Destroy lock `m'.  We don't allow MTX_QUIET to be passed in as a
 * flag here because if the corresponding mtx_init() was called with
 * MTX_QUIET set, then it will already be set in the mutex's flags.
 */
void
mtx_destroy(struct mtx *m)
{

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			curthread->td_locks--;

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_profile_object_destroy(&m->mtx_object);
	lock_destroy(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);

	lock_profile_init();
}

#ifdef DDB
void
db_show_mtx(struct lock_object *lock)
{
	struct thread *td;
	struct mtx *m;

	m = (struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->mtx_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->mtx_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif