kern_mutex.c revision 93273
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD: head/sys/kern/kern_mutex.c 93273 2002-03-27 09:23:41Z jeff $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <sys/ktr.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))
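
/*
 * Illustrative note (addresses hypothetical): the lock word holds the
 * owning thread pointer with its low bits doubling as flag bits, so for
 * an owner at 0xc1234560 with the contested bit set:
 *
 *	m->mtx_lock == 0xc1234560 | MTX_CONTESTED
 *	mtx_owner(m) == (struct thread *)0xc1234560
 *
 * ANDing with MTX_FLAGMASK clears the flag bits to recover the pointer.
 */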

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * Prototypes for non-exported routines.
 */
static void	propagate_priority(struct thread *);

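/*
 * Lend the priority of the blocked thread td to the chain of lock
 * owners it is waiting behind: starting from the mutex td is blocked
 * on, bump each owner whose priority is lower and re-sort its position
 * on any blocked queue it sits on, repeating down the chain (priority
 * propagation).
 */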
static void
propagate_priority(struct thread *td)
{
	int pri = td->td_priority;
	struct mtx *m = td->td_blocked;

	mtx_assert(&sched_lock, MA_OWNED);
	for (;;) {
		struct thread *td1;

		td = mtx_owner(m);

		if (td == NULL) {
			/*
			 * This really isn't quite right; we really ought
			 * to bump the priority of the thread that next
			 * acquires the mutex.
			 */
			MPASS(m->mtx_lock == MTX_CONTESTED);
			return;
		}

		MPASS(td->td_proc->p_magic == P_MAGIC);
		KASSERT(td->td_proc->p_stat != SSLEEP,
		    ("sleeping thread owns a mutex"));
		if (td->td_priority <= pri) /* lower is higher priority */
			return;

		/*
		 * Bump this thread's priority.
		 */
		td->td_priority = pri;

		/*
		 * If the lock holder is actually running, just bump its
		 * priority.
		 */
		/* XXXKSE this test is not sufficient */
		if (td->td_kse && (td->td_kse->ke_oncpu != NOCPU)) {
			MPASS(td->td_proc->p_stat == SRUN
			|| td->td_proc->p_stat == SZOMB
			|| td->td_proc->p_stat == SSTOP);
			return;
		}

#ifndef SMP
		/*
		 * On UP, check whether td is curthread; this should never
		 * happen, since it would mean we are deadlocked.
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If td is on a run queue, move it to the new run queue
		 * and quit.
		 * XXXKSE this gets a lot more complicated under threads
		 * but try anyhow.
		 */
		if (td->td_proc->p_stat == SRUN) {
			MPASS(td->td_blocked == NULL);
			remrunqueue(td);
			setrunqueue(td);
			return;
		}

		/*
		 * If td isn't blocked on a mutex, it should be.
		 */
		KASSERT(td->td_proc->p_stat == SMTX, (
		    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_proc->p_stat,
		    m->mtx_object.lo_name));

		/*
		 * Pick up the mutex that td is blocked on.
		 */
		m = td->td_blocked;
		MPASS(m != NULL);

		/*
		 * Check whether the thread needs to be moved up on
		 * the blocked chain.
		 */
		if (td == TAILQ_FIRST(&m->mtx_blocked)) {
			continue;
		}

		td1 = TAILQ_PREV(td, threadqueue, td_blkq);
		if (td1->td_priority <= pri) {
			continue;
		}

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved up to.  Since we know that td1 has
		 * a lower priority than td, we know that at least one
		 * thread in the chain has a lower priority and that
		 * td1 will thus not be NULL after the loop.
		 */
		TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
		TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > pri)
				break;
		}

		MPASS(td1 != NULL);
		TAILQ_INSERT_BEFORE(td1, td, td_blkq);
		CTR4(KTR_LOCK,
		    "propagate_priority: p %p moved before %p on [%p] %s",
		    td, td1, m, m->mtx_object.lo_name);
	}
}

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
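/*
 * Illustrative sketch (hypothetical consumer; `sc' is an assumed driver
 * softc with an embedded mutex sc_mtx): in module builds the
 * mtx_lock()/mtx_unlock() macros resolve to these functions rather than
 * the inlines, passing the caller's file and line for lock logging and
 * witness checking:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	... critical section ...
 *	mtx_unlock(&sc->sc_mtx);
 */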
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	mtx_assert(m, MA_OWNED);
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	_rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	_get_spin_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	mtx_assert(m, MA_OWNED);
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	_rel_spin_lock(m);
}

/*
 * The important part of mtx_trylock{,_flags}().
 * Tries to acquire lock `m.'  We do NOT handle recursion here; we assume
 * that if we're called, it's because we know we don't already own this
 * lock.
 */
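/*
 * Illustrative usage sketch (hypothetical caller with a mutex foo_mtx):
 * because recursion is not handled, mtx_trylock() is only used on locks
 * the caller does not already hold, typically to avoid blocking on an
 * optional fast path:
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		... do the work ...
 *		mtx_unlock(&foo_mtx);
 *	} else {
 *		... back off or defer ...
 *	}
 */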
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);

	rval = _obtain_lock(m, curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval) {
		/*
		 * We do not handle recursion in _mtx_trylock; see the
		 * note at the top of the routine.
		 */
		KASSERT(!mtx_recursed(m),
		    ("mtx_trylock() called on a recursed mutex"));
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	}

	return (rval);
}

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
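/*
 * Sketch of the lock-word states handled below (names from sys/mutex.h):
 *
 *	MTX_UNOWNED		- free; _obtain_lock() can take it
 *	owner | MTX_CONTESTED	- held, with threads queued on mtx_blocked
 *	MTX_CONTESTED (bare)	- released while waiters remain queued; the
 *				  next thread through here claims ownership
 *				  directly, keeping the contested bit set
 */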
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td = curthread;

	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, td)) {
		uintptr_t v;
		struct thread *td1;

		mtx_lock_spin(&sched_lock);
		/*
		 * Check if the lock has been released while spinning for
		 * the sched_lock.
		 */
		if ((v = m->mtx_lock) == MTX_UNOWNED) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * The mutex was marked contested on release. This means that
		 * there are threads blocked on it.
		 */
		if (v == MTX_CONTESTED) {
			td1 = TAILQ_FIRST(&m->mtx_blocked);
			MPASS(td1 != NULL);
			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;

			if (td1->td_priority < td->td_priority)
				td->td_priority = td1->td_priority;
			mtx_unlock_spin(&sched_lock);
			return;
		}

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
			(void *)(v | MTX_CONTESTED))) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef notyet
		/*
		 * If we're borrowing an interrupted thread's VM context, we
		 * must clean up before going to sleep.
		 */
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_lock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif

		/*
		 * Put us on the list of threads blocked on this mutex.
		 */
		if (TAILQ_EMPTY(&m->mtx_blocked)) {
			td1 = mtx_owner(m);
			LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
			TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		} else {
			TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq)
				if (td1->td_priority > td->td_priority)
					break;
			if (td1)
				TAILQ_INSERT_BEFORE(td1, td, td_blkq);
			else
				TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		}

		/*
		 * Save who we're blocked on.
		 */
		td->td_blocked = m;
		td->td_mtxname = m->mtx_object.lo_name;
		td->td_proc->p_stat = SMTX;
		propagate_priority(td);

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
			    m->mtx_object.lo_name);

		td->td_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch();

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			  "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
			  td, m, m->mtx_object.lo_name);

		mtx_unlock_spin(&sched_lock);
	}

	return;
}

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
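/*
 * Note on the wait loop below: it escalates in stages: spin bare for
 * roughly the first 10 million iterations, then spin with DELAY(1) up
 * to the 60 million mark, then panic on the assumption that the lock
 * has been held too long (the "> 5 seconds" in the message is a rough
 * heuristic, not a timeout calibrated to CPU speed).
 */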
void
_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	for (;;) {
		if (_obtain_lock(m, curthread))
			break;

		/* Give interrupts a chance while we spin. */
		critical_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000)
				continue;
			if (i++ < 60000000)
				DELAY(1);
#ifdef DDB
			else if (!db_active)
#else
			else
#endif
			panic("spin lock %s held by %p for > 5 seconds",
			    m->mtx_object.lo_name, (void *)m->mtx_lock);
		}
		critical_enter();
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
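/*
 * Sketch of the handoff below: the highest-priority waiter is removed
 * from mtx_blocked and made runnable.  If the queue is then empty the
 * lock word is cleared outright; otherwise it is left as bare
 * MTX_CONTESTED so that the next acquirer in _mtx_lock_sleep() sees the
 * remaining waiters and claims ownership directly.
 */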
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td, *td1;
	struct mtx *m1;
	int pri;

	td = curthread;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	mtx_lock_spin(&sched_lock);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

	td1 = TAILQ_FIRST(&m->mtx_blocked);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	MPASS(td1->td_proc->p_magic == P_MAGIC);

	TAILQ_REMOVE(&m->mtx_blocked, td1, td_blkq);

	if (TAILQ_EMPTY(&m->mtx_blocked)) {
		LIST_REMOVE(m, mtx_contested);
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else
		atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);

	pri = PRI_MAX;
	LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority;
		if (cp < pri)
			pri = cp;
	}

	if (pri > td->td_base_pri)
		pri = td->td_base_pri;
	td->td_priority = pri;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
		    m, td1);

	td1->td_blocked = NULL;
	td1->td_proc->p_stat = SRUN;
	setrunqueue(td1);

	if (td->td_critnest == 1 && td1->td_priority < pri) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		setrunqueue(td);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		td->td_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}

	mtx_unlock_spin(&sched_lock);

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert().
 */
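/*
 * Illustrative usage sketch (hypothetical caller with a mutex foo_mtx):
 *
 *	mtx_assert(&foo_mtx, MA_OWNED);			- must be held
 *	mtx_assert(&foo_mtx, MA_OWNED | MA_NOTRECURSED);
 *	mtx_assert(&foo_mtx, MA_NOTOWNED);
 *
 * The mtx_assert() macro compiles away unless INVARIANTS is defined, so
 * these checks cost nothing in production kernels.
 */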
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * Mutex initialization routine; initialize lock `m' with the type and
 * options contained in `opts' and description `description.'
 */
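/*
 * Illustrative usage sketch (hypothetical driver code):
 *
 *	static struct mtx foo_mtx;
 *
 *	mtx_init(&foo_mtx, "foo state", MTX_DEF);
 *	...
 *	mtx_destroy(&foo_mtx);
 *
 * MTX_DEF (zero at this revision) yields a sleep mutex; pass MTX_SPIN
 * for a spin mutex, optionally ORed with the other MTX_* option flags
 * checked below.
 */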
void
mtx_init(struct mtx *m, const char *description, int opts)
{
	struct lock_object *lock;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_SLEEPABLE | MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	lock = &m->mtx_object;
	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
	    ("mutex %s %p already initialized", description, m));
	bzero(m, sizeof(*m));
	if (opts & MTX_SPIN)
		lock->lo_class = &lock_class_mtx_spin;
	else
		lock->lo_class = &lock_class_mtx_sleep;
	lock->lo_name = description;
	if (opts & MTX_QUIET)
		lock->lo_flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		lock->lo_flags |= LO_RECURSABLE;
	if (opts & MTX_SLEEPABLE)
		lock->lo_flags |= LO_SLEEPABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		lock->lo_flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		lock->lo_flags |= LO_DUPOK;

	m->mtx_lock = MTX_UNOWNED;
	TAILQ_INIT(&m->mtx_blocked);

	LOCK_LOG_INIT(lock, opts);

	WITNESS_INIT(lock);
}

/*
 * Destroy lock `m'.  We don't allow MTX_QUIET to be passed in as a flag
 * here because if the corresponding mtx_init() was called with MTX_QUIET
 * set, then it will already be set in the mutex's flags.
 */
void
mtx_destroy(struct mtx *m)
{

	LOCK_LOG_DESTROY(&m->mtx_object, 0);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	WITNESS_DESTROY(&m->mtx_object);
}

/*
 * Encapsulated Giant mutex routines.  These routines provide encapsulation
 * control for the Giant mutex, allowing sysctls to be used to turn on and
 * off Giant around certain subsystems.  The default values for the sysctls
 * are set to what developers believe is stable and working in regards to
 * the Giant pushdown.  Developers should not turn off Giant via these
 * sysctls unless they know what they are doing.
 *
 * Callers of mtx_lock_giant() are expected to pass the return value to an
 * accompanying mtx_unlock_giant() later on.  If multiple subsystems are
 * affected by a Giant wrap, all related sysctl variables must be zero for
 * the subsystem call to operate without Giant (as determined by the caller).
 */
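/*
 * Illustrative usage sketch (hypothetical subsystem code):
 *
 *	int s;
 *
 *	s = mtx_lock_giant(kern_giant_file);
 *	... file/filedesc work that may still need Giant ...
 *	mtx_unlock_giant(s);
 *
 * With kern.giant.file and kern.giant.all both zero, the pair becomes a
 * no-op and the wrapped code runs without Giant.
 */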

SYSCTL_NODE(_kern, OID_AUTO, giant, CTLFLAG_RD, NULL,
    "Giant mutex manipulation");

static int kern_giant_all = 0;
SYSCTL_INT(_kern_giant, OID_AUTO, all, CTLFLAG_RW, &kern_giant_all, 0, "");

int kern_giant_proc = 1;	/* Giant around PROC locks */
int kern_giant_file = 1;	/* Giant around struct file & filedesc */
int kern_giant_ucred = 1;	/* Giant around ucred */
SYSCTL_INT(_kern_giant, OID_AUTO, proc, CTLFLAG_RW, &kern_giant_proc, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, file, CTLFLAG_RW, &kern_giant_file, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, ucred, CTLFLAG_RW, &kern_giant_ucred, 0, "");

int
mtx_lock_giant(int sysctlvar)
{
	if (sysctlvar || kern_giant_all) {
		mtx_lock(&Giant);
		return (1);
	}
	return (0);
}

void
mtx_unlock_giant(int s)
{
	if (s)
		mtx_unlock(&Giant);
}