/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD: head/sys/kern/kern_mutex.c 67548 2000-10-25 04:37:54Z jhb $
 */

/*
 *	Main Entry: witness
 *	Pronunciation: 'wit-n&s
 *	Function: noun
 *	Etymology: Middle English witnesse, from Old English witnes knowledge,
 *	    testimony, witness, from 2wit
 *	Date: before 12th century
 *	1 : attestation of a fact or event : TESTIMONY
 *	2 : one that gives evidence; specifically : one who testifies in
 *	    a cause or before a judicial tribunal
 *	3 : one asked to be present at a transaction so as to be able to
 *	    testify to its having taken place
 *	4 : one who has personal knowledge of something
 *	5 a : something serving as evidence or proof : SIGN
 *	  b : public affirmation by word or example of usually
 *	      religious faith or conviction <the heroic witness to divine
 *	      life -- Pilot>
 *	6 capitalized : a member of the Jehovah's Witnesses
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <sys/ktr.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#define _KERN_MUTEX_C_		/* Cause non-inlined mtx_*() to be compiled. */
#include <sys/mutex.h>

/*
 * Machine independent bits of the mutex implementation
 */
/* All mutexes in system (used for debug/panic) */
#ifdef MUTEX_DEBUG
static struct mtx_debug all_mtx_debug = { NULL, {NULL, NULL}, NULL, 0,
	"All mutexes queue head" };
static struct mtx all_mtx = { MTX_UNOWNED, 0, 0, &all_mtx_debug,
	TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
	{ NULL, NULL }, &all_mtx, &all_mtx };
#else	/* MUTEX_DEBUG */
static struct mtx all_mtx = { MTX_UNOWNED, 0, 0, "All mutexes queue head",
	TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
	{ NULL, NULL }, &all_mtx, &all_mtx };
#endif	/* MUTEX_DEBUG */

static int	mtx_cur_cnt;
static int	mtx_max_cnt;

void	_mtx_enter_giant_def(void);
void	_mtx_exit_giant_def(void);
static void propagate_priority(struct proc *) __unused;

#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)
#define	mtx_owner(m)	(mtx_unowned(m) ? NULL \
			    : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))

#define RETIP(x)		*(((uintptr_t *)(&x)) - 1)
#define	SET_PRIO(p, pri)	(p)->p_priority = (pri)

/*
 * XXX Temporary, for use from assembly language
 */

void
_mtx_enter_giant_def(void)
{

	mtx_enter(&Giant, MTX_DEF);
}

void
_mtx_exit_giant_def(void)
{

	mtx_exit(&Giant, MTX_DEF);
}

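/*
 * Propagate the priority of a blocked process to the owner of the mutex it
 * is blocked on, walking up the chain of blocked lock owners.  A running
 * owner simply has its priority bumped, a runnable owner is moved to the
 * run queue for its new priority, and an owner that is itself blocked is
 * repositioned on that mutex's priority-sorted blocked queue before the
 * walk continues.  (Currently compiled but unused; the call below is under
 * "#if 0".)
 */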
static void
propagate_priority(struct proc *p)
{
	int pri = p->p_priority;
	struct mtx *m = p->p_blocked;

	for (;;) {
		struct proc *p1;

		p = mtx_owner(m);

		if (p == NULL) {
			/*
			 * This really isn't quite right. Really
			 * ought to bump priority of process that
			 * next acquires the mutex.
			 */
			MPASS(m->mtx_lock == MTX_CONTESTED);
			return;
		}
		MPASS(p->p_magic == P_MAGIC);
		if (p->p_priority <= pri)
			return;
		/*
		 * If lock holder is actually running, just bump priority.
		 */
		if (TAILQ_NEXT(p, p_procq) == NULL) {
			MPASS(p->p_stat == SRUN || p->p_stat == SZOMB);
			SET_PRIO(p, pri);
			return;
		}
		/*
		 * If on run queue move to new run queue, and
		 * quit.
		 */
		if (p->p_stat == SRUN) {
			MPASS(p->p_blocked == NULL);
			remrunqueue(p);
			SET_PRIO(p, pri);
			setrunqueue(p);
			return;
		}

		/*
		 * If we aren't blocked on a mutex, give up and quit.
		 */
		if (p->p_stat != SMTX) {
			printf(
	"XXX: process %d(%s):%d holds %s but isn't blocked on a mutex\n",
			    p->p_pid, p->p_comm, p->p_stat, m->mtx_description);
			return;
		}

		/*
		 * Pick up the mutex that p is blocked on.
		 */
		m = p->p_blocked;
		MPASS(m != NULL);

		printf("XXX: process %d(%s) is blocked on %s\n", p->p_pid,
		    p->p_comm, m->mtx_description);
		/*
		 * Check if the proc needs to be moved up on
		 * the blocked chain
		 */
		if ((p1 = TAILQ_PREV(p, rq, p_procq)) == NULL ||
		    p1->p_priority <= pri) {
			if (p1)
				printf(
	"XXX: previous process %d(%s) has higher priority\n",
				    p1->p_pid, p1->p_comm);
			else
				printf("XXX: process at head of run queue\n");
			continue;
		}

		/*
		 * Remove proc from blocked chain
		 */
		TAILQ_REMOVE(&m->mtx_blocked, p, p_procq);
		TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq) {
			MPASS(p1->p_magic == P_MAGIC);
			if (p1->p_priority > pri)
				break;
		}
		if (p1)
			TAILQ_INSERT_BEFORE(p1, p, p_procq);
		else
			TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
		CTR4(KTR_LOCK,
		    "propagate priority: p 0x%p moved before 0x%p on [0x%p] %s",
		    p, p1, m, m->mtx_description);
	}
}

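/*
 * Slow path for mtx_enter(), called once the inline attempt to obtain the
 * lock has failed.  For a MTX_DEF mutex the current process is queued on
 * the mutex's priority-sorted blocked list and switched out until the lock
 * is handed over; for a MTX_SPIN mutex we spin (panicking if the holder
 * does not release it within a few seconds) and then record the saved
 * interrupt state.
 */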
void
mtx_enter_hard(struct mtx *m, int type, int saveintr)
{
	struct proc *p = CURPROC;
	struct timeval new_switchtime;

	KASSERT(p != NULL, ("curproc is NULL in mutex"));

	switch (type) {
	case MTX_DEF:
		if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) {
			m->mtx_recurse++;
			atomic_set_ptr(&m->mtx_lock, MTX_RECURSE);
			CTR1(KTR_LOCK, "mtx_enter: 0x%p recurse", m);
			return;
		}
		CTR3(KTR_LOCK, "mtx_enter: 0x%p contested (lock=%p) [0x%p]",
		    m, (void *)m->mtx_lock, (void *)RETIP(m));
		while (!_obtain_lock(m, p)) {
			uintptr_t v;
			struct proc *p1;

			mtx_enter(&sched_lock, MTX_SPIN | MTX_RLIKELY);
			/*
			 * check if the lock has been released while
			 * waiting for the schedlock.
			 */
			if ((v = m->mtx_lock) == MTX_UNOWNED) {
				mtx_exit(&sched_lock, MTX_SPIN);
				continue;
			}
			/*
			 * The mutex was marked contested on release. This
			 * means that there are processes blocked on it.
			 */
			if (v == MTX_CONTESTED) {
				p1 = TAILQ_FIRST(&m->mtx_blocked);
				KASSERT(p1 != NULL, ("contested mutex has no contesters"));
				KASSERT(p != NULL, ("curproc is NULL for contested mutex"));
				m->mtx_lock = (uintptr_t)p | MTX_CONTESTED;
				if (p1->p_priority < p->p_priority) {
					SET_PRIO(p, p1->p_priority);
				}
				mtx_exit(&sched_lock, MTX_SPIN);
				return;
			}
			/*
			 * If the mutex isn't already contested and a failure
			 * occurs setting the contested bit, the mutex was
			 * either released or the state of the RECURSION bit
			 * changed.
			 */
			if ((v & MTX_CONTESTED) == 0 &&
			    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
				               (void *)(v | MTX_CONTESTED))) {
				mtx_exit(&sched_lock, MTX_SPIN);
				continue;
			}

			/* We definitely have to sleep for this lock */
			mtx_assert(m, MA_NOTOWNED);

#ifdef notyet
			/*
			 * If we're borrowing an interrupted thread's VM
			 * context, we must clean up before going to sleep.
			 */
			if (p->p_flag & (P_ITHD | P_SITHD)) {
				ithd_t *it = (ithd_t *)p;

				if (it->it_interrupted) {
					CTR2(KTR_LOCK,
					    "mtx_enter: 0x%x interrupted 0x%x",
					    it, it->it_interrupted);
					intr_thd_fixup(it);
				}
			}
#endif

			/* Put us on the list of procs blocked on this mutex */
			if (TAILQ_EMPTY(&m->mtx_blocked)) {
				p1 = (struct proc *)(m->mtx_lock &
						     MTX_FLAGMASK);
				LIST_INSERT_HEAD(&p1->p_contested, m,
						 mtx_contested);
				TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
			} else {
				TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
					if (p1->p_priority > p->p_priority)
						break;
				if (p1)
					TAILQ_INSERT_BEFORE(p1, p, p_procq);
				else
					TAILQ_INSERT_TAIL(&m->mtx_blocked, p,
							  p_procq);
			}

			p->p_blocked = m;	/* Who we're blocked on */
			p->p_stat = SMTX;
#if 0
			propagate_priority(p);
#endif
			CTR3(KTR_LOCK, "mtx_enter: p 0x%p blocked on [0x%p] %s",
			    p, m, m->mtx_description);
			/*
			 * Blatantly copied from mi_switch nearly verbatim.
			 * When Giant goes away and we stop dinking with it
			 * in mi_switch, we can go back to calling mi_switch
			 * directly here.
			 */

			/*
			 * Compute the amount of time during which the current
			 * process was running, and add that to its total so
			 * far.
			 */
			microuptime(&new_switchtime);
			if (timevalcmp(&new_switchtime, &switchtime, <)) {
				printf(
		    "microuptime() went backwards (%ld.%06ld -> %ld.%06ld)\n",
				    switchtime.tv_sec, switchtime.tv_usec,
				    new_switchtime.tv_sec,
				    new_switchtime.tv_usec);
				new_switchtime = switchtime;
			} else {
				p->p_runtime += (new_switchtime.tv_usec -
				    switchtime.tv_usec) +
				    (new_switchtime.tv_sec - switchtime.tv_sec) *
				    (int64_t)1000000;
			}

			/*
			 * Pick a new current process and record its start time.
			 */
			cnt.v_swtch++;
			switchtime = new_switchtime;
			cpu_switch();
			if (switchtime.tv_sec == 0)
				microuptime(&switchtime);
			switchticks = ticks;
			CTR3(KTR_LOCK,
			    "mtx_enter: p 0x%p free from blocked on [0x%p] %s",
			    p, m, m->mtx_description);
			mtx_exit(&sched_lock, MTX_SPIN);
		}
		return;
	case MTX_SPIN:
	case MTX_SPIN | MTX_FIRST:
	case MTX_SPIN | MTX_TOPHALF:
	    {
		int i = 0;

		if (m->mtx_lock == (uintptr_t)p) {
			m->mtx_recurse++;
			return;
		}
		CTR1(KTR_LOCK, "mtx_enter: %p spinning", m);
		for (;;) {
			if (_obtain_lock(m, p))
				break;
			while (m->mtx_lock != MTX_UNOWNED) {
				if (i++ < 1000000)
					continue;
				if (i++ < 6000000)
					DELAY (1);
#ifdef DDB
				else if (!db_active)
#else
				else
#endif
					panic(
				"spin lock %s held by 0x%p for > 5 seconds",
					    m->mtx_description,
					    (void *)m->mtx_lock);
			}
		}

#ifdef MUTEX_DEBUG
		if (type != MTX_SPIN)
			m->mtx_saveintr = 0xbeefface;
		else
#endif
			m->mtx_saveintr = saveintr;
		CTR1(KTR_LOCK, "mtx_enter: 0x%p spin done", m);
		return;
	    }
	}
}

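/*
 * Slow path for mtx_exit(), called for recursed or contested mutexes.  For
 * a MTX_DEF mutex the highest-priority waiter is removed from the blocked
 * list and made runnable, and the releasing process drops back to the
 * highest priority still justified by the locks it holds; for MTX_SPIN
 * mutexes the lock is released and interrupts are restored (or simply
 * re-enabled for MTX_FIRST; MTX_TOPHALF leaves the interrupt state alone).
 */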
void
mtx_exit_hard(struct mtx *m, int type)
{
	struct proc *p, *p1;
	struct mtx *m1;
	int pri;

	p = CURPROC;
	switch (type) {
	case MTX_DEF:
	case MTX_DEF | MTX_NOSWITCH:
		if (m->mtx_recurse != 0) {
			if (--(m->mtx_recurse) == 0)
				atomic_clear_ptr(&m->mtx_lock, MTX_RECURSE);
			CTR1(KTR_LOCK, "mtx_exit: 0x%p unrecurse", m);
			return;
		}
		mtx_enter(&sched_lock, MTX_SPIN);
		CTR1(KTR_LOCK, "mtx_exit: 0x%p contested", m);
		p1 = TAILQ_FIRST(&m->mtx_blocked);
		MPASS(p->p_magic == P_MAGIC);
		MPASS(p1->p_magic == P_MAGIC);
		TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
		if (TAILQ_EMPTY(&m->mtx_blocked)) {
			LIST_REMOVE(m, mtx_contested);
			_release_lock_quick(m);
			CTR1(KTR_LOCK, "mtx_exit: 0x%p not held", m);
		} else
			m->mtx_lock = MTX_CONTESTED;
		pri = MAXPRI;
		LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
			int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
			if (cp < pri)
				pri = cp;
		}
		if (pri > p->p_nativepri)
			pri = p->p_nativepri;
		SET_PRIO(p, pri);
		CTR2(KTR_LOCK, "mtx_exit: 0x%p contested setrunqueue 0x%p",
		    m, p1);
		p1->p_blocked = NULL;
		p1->p_stat = SRUN;
		setrunqueue(p1);
		if ((type & MTX_NOSWITCH) == 0 && p1->p_priority < pri) {
#ifdef notyet
			if (p->p_flag & (P_ITHD | P_SITHD)) {
				ithd_t *it = (ithd_t *)p;

				if (it->it_interrupted) {
					CTR2(KTR_LOCK,
					    "mtx_exit: 0x%x interrupted 0x%x",
					    it, it->it_interrupted);
					intr_thd_fixup(it);
				}
			}
#endif
			setrunqueue(p);
			CTR2(KTR_LOCK, "mtx_exit: 0x%p switching out lock=0x%p",
			    m, (void *)m->mtx_lock);
			mi_switch();
			CTR2(KTR_LOCK, "mtx_exit: 0x%p resuming lock=0x%p",
			    m, (void *)m->mtx_lock);
		}
		mtx_exit(&sched_lock, MTX_SPIN);
		break;
	case MTX_SPIN:
	case MTX_SPIN | MTX_FIRST:
		if (m->mtx_recurse != 0) {
			m->mtx_recurse--;
			return;
		}
		MPASS(mtx_owned(m));
		_release_lock_quick(m);
		if (type & MTX_FIRST)
			enable_intr();	/* XXX is this kosher? */
		else {
			MPASS(m->mtx_saveintr != 0xbeefface);
			restore_intr(m->mtx_saveintr);
		}
		break;
	case MTX_SPIN | MTX_TOPHALF:
		if (m->mtx_recurse != 0) {
			m->mtx_recurse--;
			return;
		}
		MPASS(mtx_owned(m));
		_release_lock_quick(m);
		break;
	default:
		panic("mtx_exit_hard: unsupported type 0x%x\n", type);
	}
}

#define MV_DESTROY	0	/* validate before destroy */
#define MV_INIT		1	/* validate before init */

#ifdef MUTEX_DEBUG

int mtx_validate __P((struct mtx *, int));

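/*
 * Consistency check for the global list of mutexes: walk the all_mtx chain,
 * verifying the forward links and the mutex count, and check that the given
 * mutex is (for MV_DESTROY) or is not (for MV_INIT) already on the list.
 * Returns non-zero if an MV_INIT validation finds the mutex already present.
 */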
int
mtx_validate(struct mtx *m, int when)
{
	struct mtx *mp;
	int i;
	int retval = 0;

	if (m == &all_mtx || cold)
		return 0;

	mtx_enter(&all_mtx, MTX_DEF);
/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	MPASS(kernacc((caddr_t)all_mtx.mtx_next, sizeof(uintptr_t),
	    VM_PROT_READ) == 1);
#endif
	MPASS(all_mtx.mtx_next->mtx_prev == &all_mtx);
	for (i = 0, mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next) {
#ifndef __alpha__
		if (kernacc((caddr_t)mp->mtx_next, sizeof(uintptr_t),
		    VM_PROT_READ) != 1) {
			panic("mtx_validate: mp=%p mp->mtx_next=%p",
			    mp, mp->mtx_next);
		}
#endif
		i++;
		if (i > mtx_cur_cnt) {
			panic("mtx_validate: too many in chain, known=%d\n",
			    mtx_cur_cnt);
		}
	}
	MPASS(i == mtx_cur_cnt);
	switch (when) {
	case MV_DESTROY:
		for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next)
			if (mp == m)
				break;
		MPASS(mp == m);
		break;
	case MV_INIT:
		for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next)
			if (mp == m) {
				/*
				 * Not good. This mutex already exists.
				 */
				printf("re-initing existing mutex %s\n",
				    m->mtx_description);
				MPASS(m->mtx_lock == MTX_UNOWNED);
				retval = 1;
			}
	}
	mtx_exit(&all_mtx, MTX_DEF);
	return (retval);
}
#endif

void
mtx_init(struct mtx *m, const char *t, int flag)
{
#ifdef MUTEX_DEBUG
	struct mtx_debug *debug;
#endif

	CTR2(KTR_LOCK, "mtx_init 0x%p (%s)", m, t);
#ifdef MUTEX_DEBUG
	if (mtx_validate(m, MV_INIT))	/* diagnostic and error correction */
		return;
	if (flag & MTX_COLD)
		debug = m->mtx_debug;
	else
		debug = NULL;
	if (debug == NULL) {
#ifdef DIAGNOSTIC
		if (cold && bootverbose)
			printf("malloc'ing mtx_debug while cold for %s\n", t);
#endif

		/* XXX - should not use DEVBUF */
		debug = malloc(sizeof(struct mtx_debug), M_DEVBUF, M_NOWAIT);
		MPASS(debug != NULL);
		bzero(debug, sizeof(struct mtx_debug));
	}
#endif
	bzero((void *)m, sizeof *m);
	TAILQ_INIT(&m->mtx_blocked);
#ifdef MUTEX_DEBUG
	m->mtx_debug = debug;
#endif
	m->mtx_description = t;
	m->mtx_lock = MTX_UNOWNED;
	/* Put on all mutex queue */
	mtx_enter(&all_mtx, MTX_DEF);
	m->mtx_next = &all_mtx;
	m->mtx_prev = all_mtx.mtx_prev;
	m->mtx_prev->mtx_next = m;
	all_mtx.mtx_prev = m;
	if (++mtx_cur_cnt > mtx_max_cnt)
		mtx_max_cnt = mtx_cur_cnt;
	mtx_exit(&all_mtx, MTX_DEF);
	witness_init(m, flag);
}

void
mtx_destroy(struct mtx *m)
{

	CTR2(KTR_LOCK, "mtx_destroy 0x%p (%s)", m, m->mtx_description);
#ifdef MUTEX_DEBUG
	if (m->mtx_next == NULL)
		panic("mtx_destroy: %p (%s) already destroyed",
		    m, m->mtx_description);

	if (!mtx_owned(m)) {
		MPASS(m->mtx_lock == MTX_UNOWNED);
	} else {
		MPASS((m->mtx_lock & (MTX_RECURSE|MTX_CONTESTED)) == 0);
	}
	mtx_validate(m, MV_DESTROY);		/* diagnostic */
#endif

#ifdef WITNESS
	if (m->mtx_witness)
		witness_destroy(m);
#endif /* WITNESS */

	/* Remove from the all mutex queue */
	mtx_enter(&all_mtx, MTX_DEF);
	m->mtx_next->mtx_prev = m->mtx_prev;
	m->mtx_prev->mtx_next = m->mtx_next;
#ifdef MUTEX_DEBUG
	m->mtx_next = m->mtx_prev = NULL;
	free(m->mtx_debug, M_DEVBUF);
	m->mtx_debug = NULL;
#endif
	mtx_cur_cnt--;
	mtx_exit(&all_mtx, MTX_DEF);
}

/*
 * The non-inlined versions of the mtx_*() functions are always built (above),
 * but the witness code depends on the MUTEX_DEBUG and WITNESS kernel options
 * being specified.
 */
#if (defined(MUTEX_DEBUG) && defined(WITNESS))

#define WITNESS_COUNT 200
#define	WITNESS_NCHILDREN 2

#ifndef SMP
extern int witness_spin_check;
#endif

int witness_watch = 1;

struct witness {
	struct witness	*w_next;
	const char	*w_description;
	const char	*w_file;
	int		 w_line;
	struct witness	*w_morechildren;
	u_char		 w_childcnt;
	u_char		 w_Giant_squawked:1;
	u_char		 w_other_squawked:1;
	u_char		 w_same_squawked:1;
	u_char		 w_sleep:1;
	u_char		 w_spin:1;	/* this is a spin mutex */
	u_int		 w_level;
	struct witness	*w_children[WITNESS_NCHILDREN];
};

struct witness_blessed {
	char	*b_lock1;
	char	*b_lock2;
};

#ifdef KDEBUG
/*
 * When WITNESS_KDEBUG is set to 1, it will cause the system to
 * drop into kdebug() when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
#ifndef WITNESS_KDEBUG
#define WITNESS_KDEBUG 0
#endif
int	witness_kdebug = WITNESS_KDEBUG;
#endif /* KDEBUG */

#ifndef WITNESS_SKIPSPIN
#define WITNESS_SKIPSPIN 0
#endif
int	witness_skipspin = WITNESS_SKIPSPIN;


static struct mtx	 w_mtx;
static struct witness	*w_free;
static struct witness	*w_all;
static int		 w_inited;
static int		 witness_dead;	/* fatal error, probably no memory */

static struct witness	 w_data[WITNESS_COUNT];

static struct witness	 *enroll __P((const char *description, int flag));
static int itismychild __P((struct witness *parent, struct witness *child));
static void removechild __P((struct witness *parent, struct witness *child));
static int isitmychild __P((struct witness *parent, struct witness *child));
static int isitmydescendant __P((struct witness *parent, struct witness *child));
static int dup_ok __P((struct witness *));
static int blessed __P((struct witness *, struct witness *));
static void witness_displaydescendants
    __P((void(*)(const char *fmt, ...), struct witness *));
static void witness_leveldescendents __P((struct witness *parent, int level));
static void witness_levelall __P((void));
static struct witness * witness_get __P((void));
static void witness_free __P((struct witness *m));


static char *ignore_list[] = {
	"witness lock",
	"Kdebug",		/* breaks rules and may or may not work */
	"Page Alias",		/* sparc only, witness lock won't block intr */
	NULL
};

static char *spin_order_list[] = {
	"sched lock",
	"log mtx",
	"zslock",	/* sparc only above log, this one is a real hack */
	"time lock",	/* above callout */
	"callout mtx",	/* above wayout */
	/*
	 * leaf locks
	 */
	"wayout mtx",
	"kernel_pmap",  /* sparc only, logically equal "pmap" below */
	"pmap",		/* sparc only */
	NULL
};

static char *order_list[] = {
	"tcb", "inp", "so_snd", "so_rcv", "Giant lock", NULL,
	"udb", "inp", NULL,
	"unp head", "unp", "so_snd", NULL,
	"de0", "Giant lock", NULL,
	"ifnet", "Giant lock", NULL,
	"fifo", "so_snd", NULL,
	"hme0", "Giant lock", NULL,
	"esp0", "Giant lock", NULL,
	"hfa0", "Giant lock", NULL,
	"so_rcv", "atm_global", NULL,
	"so_snd", "atm_global", NULL,
	"NFS", "Giant lock", NULL,
	NULL
};

static char *dup_list[] = {
	"inp",
	"process group",
	"session",
	"unp",
	"rtentry",
	"rawcb",
	NULL
};

static char *sleep_list[] = {
	"Giant lock",
	NULL
};

/*
 * Pairs of locks which have been blessed
 * Don't complain about order problems with blessed locks
 */
static struct witness_blessed blessed_list[] = {
};
static int blessed_count = sizeof(blessed_list) / sizeof(struct witness_blessed);

void
witness_init(struct mtx *m, int flag)
{
	m->mtx_witness = enroll(m->mtx_description, flag);
}

void
witness_destroy(struct mtx *m)
{
	struct mtx *m1;
	struct proc *p;
	p = CURPROC;
	for ((m1 = LIST_FIRST(&p->p_heldmtx)); m1 != NULL;
		m1 = LIST_NEXT(m1, mtx_held)) {
		if (m1 == m) {
			LIST_REMOVE(m, mtx_held);
			break;
		}
	}
	return;
}

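/*
 * Witness bookkeeping for mtx_enter().  For spin mutexes the per-CPU
 * witness_spin_check bitmask is used to enforce the fixed spin lock order;
 * for sleep (MTX_DEF) mutexes the set of locks already held by the current
 * process is compared against the witness graph, and a "lock order reversal"
 * is reported if this acquisition contradicts a previously recorded order.
 */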
void
witness_enter(struct mtx *m, int flags, const char *file, int line)
{
	struct witness *w, *w1;
	struct mtx *m1;
	struct proc *p;
	int i;
#ifdef KDEBUG
	int go_into_kdebug = 0;
#endif /* KDEBUG */

	w = m->mtx_witness;
	p = CURPROC;

	if (flags & MTX_SPIN) {
		if (!w->w_spin)
			panic("mutex_enter: MTX_SPIN on MTX_DEF mutex %s @"
			    " %s:%d", m->mtx_description, file, line);
		if (m->mtx_recurse != 0)
			return;
		mtx_enter(&w_mtx, MTX_SPIN);
		i = witness_spin_check;
		if (i != 0 && w->w_level < i) {
			mtx_exit(&w_mtx, MTX_SPIN);
			panic("mutex_enter(%s:%x, MTX_SPIN) out of order @"
			    " %s:%d already holding %s:%x",
			    m->mtx_description, w->w_level, file, line,
			    spin_order_list[ffs(i)-1], i);
		}
		PCPU_SET(witness_spin_check, i | w->w_level);
		mtx_exit(&w_mtx, MTX_SPIN);
		return;
	}
	if (w->w_spin)
		panic("mutex_enter: MTX_DEF on MTX_SPIN mutex %s @ %s:%d",
		    m->mtx_description, file, line);

	if (m->mtx_recurse != 0)
		return;
	if (witness_dead)
		goto out;
	if (cold)
		goto out;

	if (!mtx_legal2block())
		panic("blockable mtx_enter() of %s when not legal @ %s:%d",
			    m->mtx_description, file, line);
	/*
	 * Is this the first mutex acquired
	 */
	if ((m1 = LIST_FIRST(&p->p_heldmtx)) == NULL)
		goto out;

	if ((w1 = m1->mtx_witness) == w) {
		if (w->w_same_squawked || dup_ok(w))
			goto out;
		w->w_same_squawked = 1;
		printf("acquiring duplicate lock of same type: \"%s\"\n",
			m->mtx_description);
		printf(" 1st @ %s:%d\n", w->w_file, w->w_line);
		printf(" 2nd @ %s:%d\n", file, line);
#ifdef KDEBUG
		go_into_kdebug = 1;
#endif /* KDEBUG */
		goto out;
	}
	MPASS(!mtx_owned(&w_mtx));
	mtx_enter(&w_mtx, MTX_SPIN);
	/*
	 * If we have a known higher number just say ok
	 */
	if (witness_watch > 1 && w->w_level > w1->w_level) {
		mtx_exit(&w_mtx, MTX_SPIN);
		goto out;
	}
	if (isitmydescendant(m1->mtx_witness, w)) {
		mtx_exit(&w_mtx, MTX_SPIN);
		goto out;
	}
	for (i = 0; m1 != NULL; m1 = LIST_NEXT(m1, mtx_held), i++) {

		MPASS(i < 200);
		w1 = m1->mtx_witness;
		if (isitmydescendant(w, w1)) {
			mtx_exit(&w_mtx, MTX_SPIN);
			if (blessed(w, w1))
				goto out;
			if (m1 == &Giant) {
				if (w1->w_Giant_squawked)
					goto out;
				else
					w1->w_Giant_squawked = 1;
			} else {
				if (w1->w_other_squawked)
					goto out;
				else
					w1->w_other_squawked = 1;
			}
			printf("lock order reversal\n");
			printf(" 1st %s last acquired @ %s:%d\n",
			    w->w_description, w->w_file, w->w_line);
			printf(" 2nd %p %s @ %s:%d\n",
			    m1, w1->w_description, w1->w_file, w1->w_line);
			printf(" 3rd %p %s @ %s:%d\n",
			    m, w->w_description, file, line);
#ifdef KDEBUG
			go_into_kdebug = 1;
#endif /* KDEBUG */
			goto out;
		}
	}
	m1 = LIST_FIRST(&p->p_heldmtx);
	if (!itismychild(m1->mtx_witness, w))
		mtx_exit(&w_mtx, MTX_SPIN);

out:
#ifdef KDEBUG
	if (witness_kdebug && go_into_kdebug)
		kdebug();
#endif /* KDEBUG */
	w->w_file = file;
	w->w_line = line;
	m->mtx_line = line;
	m->mtx_file = file;

	/*
	 * If this pays off it likely means that a mutex being witnessed
	 * is acquired in hardclock. Put it in the ignore list. It is
	 * likely not the mutex this assert fails on.
	 */
	MPASS(m->mtx_held.le_prev == NULL);
	LIST_INSERT_HEAD(&p->p_heldmtx, (struct mtx*)m, mtx_held);
}

void
witness_exit(struct mtx *m, int flags, const char *file, int line)
{
	struct witness *w;

	w = m->mtx_witness;

	if (flags & MTX_SPIN) {
		if (!w->w_spin)
			panic("mutex_exit: MTX_SPIN on MTX_DEF mutex %s @"
			    " %s:%d", m->mtx_description, file, line);
		if (m->mtx_recurse != 0)
			return;
		mtx_enter(&w_mtx, MTX_SPIN);
		PCPU_SET(witness_spin_check, witness_spin_check & ~w->w_level);
		mtx_exit(&w_mtx, MTX_SPIN);
		return;
	}
	if (w->w_spin)
		panic("mutex_exit: MTX_DEF on MTX_SPIN mutex %s @ %s:%d",
		    m->mtx_description, file, line);

	if (m->mtx_recurse != 0)
		return;

	if ((flags & MTX_NOSWITCH) == 0 && !mtx_legal2block() && !cold)
		panic("switchable mtx_exit() of %s when not legal @ %s:%d",
			    m->mtx_description, file, line);
	LIST_REMOVE(m, mtx_held);
	m->mtx_held.le_prev = NULL;
}

void
witness_try_enter(struct mtx *m, int flags, const char *file, int line)
{
	struct proc *p;
	struct witness *w = m->mtx_witness;

	if (flags & MTX_SPIN) {
		if (!w->w_spin)
			panic("mutex_try_enter: "
			    "MTX_SPIN on MTX_DEF mutex %s @ %s:%d",
			    m->mtx_description, file, line);
		if (m->mtx_recurse != 0)
			return;
		mtx_enter(&w_mtx, MTX_SPIN);
		PCPU_SET(witness_spin_check, witness_spin_check | w->w_level);
		mtx_exit(&w_mtx, MTX_SPIN);
		return;
	}

	if (w->w_spin)
		panic("mutex_try_enter: MTX_DEF on MTX_SPIN mutex %s @ %s:%d",
		    m->mtx_description, file, line);

	if (m->mtx_recurse != 0)
		return;

	w->w_file = file;
	w->w_line = line;
	m->mtx_line = line;
	m->mtx_file = file;
	p = CURPROC;
	MPASS(m->mtx_held.le_prev == NULL);
	LIST_INSERT_HEAD(&p->p_heldmtx, (struct mtx*)m, mtx_held);
}

void
witness_display(void(*prnt)(const char *fmt, ...))
{
	struct witness *w, *w1;

	witness_levelall();

	for (w = w_all; w; w = w->w_next) {
		if (w->w_file == NULL)
			continue;
		for (w1 = w_all; w1; w1 = w1->w_next) {
			if (isitmychild(w1, w))
				break;
		}
		if (w1 != NULL)
			continue;
		/*
		 * This lock has no ancestors, display its descendants.
		 */
		witness_displaydescendants(prnt, w);
	}
	prnt("\nMutexes which were never acquired\n");
	for (w = w_all; w; w = w->w_next) {
		if (w->w_file != NULL)
			continue;
		prnt("%s\n", w->w_description);
	}
}

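/*
 * Check whether the current process is about to sleep (or, with check_only,
 * could sleep) while holding any mutex other than those on the sleep_list;
 * report each offender and return the number found.
 */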
int
witness_sleep(int check_only, struct mtx *mtx, const char *file, int line)
{
	struct mtx *m;
	struct proc *p;
	char **sleep;
	int n = 0;

	p = CURPROC;
	for ((m = LIST_FIRST(&p->p_heldmtx)); m != NULL;
	    m = LIST_NEXT(m, mtx_held)) {
		if (m == mtx)
			continue;
		for (sleep = sleep_list; *sleep != NULL; sleep++)
			if (strcmp(m->mtx_description, *sleep) == 0)
				goto next;
		printf("%s:%d: %s with \"%s\" locked from %s:%d\n",
			file, line, check_only ? "could sleep" : "sleeping",
			m->mtx_description,
			m->mtx_witness->w_file, m->mtx_witness->w_line);
		n++;
	next:
		;
	}
#ifdef KDEBUG
	if (witness_kdebug && n)
		kdebug();
#endif /* KDEBUG */
	return (n);
}

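/*
 * Look up (or create) the witness structure for a lock name.  The first call
 * also initializes the witness lock, the free list of witness structures and
 * the static ordering given by order_list; spin locks are assigned a level
 * bit from their position in spin_order_list.  Locks on the ignore_list, all
 * locks when witness_watch is off, and spin locks when witness_skipspin is
 * set are not tracked and get a NULL witness.
 */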
static struct witness *
enroll(const char *description, int flag)
{
	int i;
	struct witness *w, *w1;
	char **ignore;
	char **order;

	if (!witness_watch)
		return (NULL);
	for (ignore = ignore_list; *ignore != NULL; ignore++)
		if (strcmp(description, *ignore) == 0)
			return (NULL);

	if (w_inited == 0) {
		mtx_init(&w_mtx, "witness lock", MTX_DEF);
		for (i = 0; i < WITNESS_COUNT; i++) {
			w = &w_data[i];
			witness_free(w);
		}
		w_inited = 1;
		for (order = order_list; *order != NULL; order++) {
			w = enroll(*order, MTX_DEF);
			w->w_file = "order list";
			for (order++; *order != NULL; order++) {
				w1 = enroll(*order, MTX_DEF);
				w1->w_file = "order list";
				itismychild(w, w1);
				w = w1;
			}
		}
	}
	if ((flag & MTX_SPIN) && witness_skipspin)
		return (NULL);
	mtx_enter(&w_mtx, MTX_SPIN);
	for (w = w_all; w; w = w->w_next) {
		if (strcmp(description, w->w_description) == 0) {
			mtx_exit(&w_mtx, MTX_SPIN);
			return (w);
		}
	}
	if ((w = witness_get()) == NULL)
		return (NULL);
	w->w_next = w_all;
	w_all = w;
	w->w_description = description;
	mtx_exit(&w_mtx, MTX_SPIN);
	if (flag & MTX_SPIN) {
		w->w_spin = 1;

		i = 1;
		for (order = spin_order_list; *order != NULL; order++) {
			if (strcmp(description, *order) == 0)
				break;
			i <<= 1;
		}
		if (*order == NULL)
			panic("spin lock %s not in order list", description);
		w->w_level = i;
	}
	return (w);
}

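/*
 * Record "child" as a direct child of "parent" in the witness graph,
 * allocating an overflow node if the parent's child array is full.  The
 * whole graph is then pruned of direct edges that are already implied
 * transitively, and the levels are recomputed.  Returns non-zero if no
 * witness structure could be allocated.
 */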
static int
itismychild(struct witness *parent, struct witness *child)
{
	static int recursed;

	/*
	 * Insert "child" after "parent"
	 */
	while (parent->w_morechildren)
		parent = parent->w_morechildren;

	if (parent->w_childcnt == WITNESS_NCHILDREN) {
		if ((parent->w_morechildren = witness_get()) == NULL)
			return (1);
		parent = parent->w_morechildren;
	}
	MPASS(child != NULL);
	parent->w_children[parent->w_childcnt++] = child;
	/*
	 * now prune whole tree
	 */
	if (recursed)
		return (0);
	recursed = 1;
	for (child = w_all; child != NULL; child = child->w_next) {
		for (parent = w_all; parent != NULL;
		    parent = parent->w_next) {
			if (!isitmychild(parent, child))
				continue;
			removechild(parent, child);
			if (isitmydescendant(parent, child))
				continue;
			itismychild(parent, child);
		}
	}
	recursed = 0;
	witness_levelall();
	return (0);
}

static void
removechild(struct witness *parent, struct witness *child)
{
	struct witness *w, *w1;
	int i;

	for (w = parent; w != NULL; w = w->w_morechildren)
		for (i = 0; i < w->w_childcnt; i++)
			if (w->w_children[i] == child)
				goto found;
	return;
found:
	for (w1 = w; w1->w_morechildren != NULL; w1 = w1->w_morechildren)
		continue;
	w->w_children[i] = w1->w_children[--w1->w_childcnt];
	MPASS(w->w_children[i] != NULL);

	if (w1->w_childcnt != 0)
		return;

	if (w1 == parent)
		return;
	for (w = parent; w->w_morechildren != w1; w = w->w_morechildren)
		continue;
	w->w_morechildren = 0;
	witness_free(w1);
}

static int
isitmychild(struct witness *parent, struct witness *child)
{
	struct witness *w;
	int i;

	for (w = parent; w != NULL; w = w->w_morechildren) {
		for (i = 0; i < w->w_childcnt; i++) {
			if (w->w_children[i] == child)
				return (1);
		}
	}
	return (0);
}

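/*
 * Returns non-zero if "child" is a direct or indirect descendant of
 * "parent" in the witness graph.
 */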
static int
isitmydescendant(struct witness *parent, struct witness *child)
{
	struct witness *w;
	int i;
	int j;

	for (j = 0, w = parent; w != NULL; w = w->w_morechildren, j++) {
		MPASS(j < 1000);
		for (i = 0; i < w->w_childcnt; i++) {
			if (w->w_children[i] == child)
				return (1);
		}
		for (i = 0; i < w->w_childcnt; i++) {
			if (isitmydescendant(w->w_children[i], child))
				return (1);
		}
	}
	return (0);
}

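/*
 * Recompute the w_level of every sleep-mutex witness: witnesses with no
 * parents start at level 0 and each descendant gets the depth of the
 * longest path leading to it from such a root.
 */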
void
witness_levelall (void)
{
	struct witness *w, *w1;

	for (w = w_all; w; w = w->w_next)
		if (!w->w_spin)
			w->w_level = 0;
	for (w = w_all; w; w = w->w_next) {
		if (w->w_spin)
			continue;
		for (w1 = w_all; w1; w1 = w1->w_next) {
			if (isitmychild(w1, w))
				break;
		}
		if (w1 != NULL)
			continue;
		witness_leveldescendents(w, 0);
	}
}

static void
witness_leveldescendents(struct witness *parent, int level)
{
	int i;
	struct witness *w;

	if (parent->w_level < level)
		parent->w_level = level;
	level++;
	for (w = parent; w != NULL; w = w->w_morechildren)
		for (i = 0; i < w->w_childcnt; i++)
			witness_leveldescendents(w->w_children[i], level);
}

static void
witness_displaydescendants(void(*prnt)(const char *fmt, ...),
			   struct witness *parent)
{
	struct witness *w;
	int i;
	int level = parent->w_level;

	prnt("%d", level);
	if (level < 10)
		prnt(" ");
	for (i = 0; i < level; i++)
		prnt(" ");
	prnt("%s", parent->w_description);
	if (parent->w_file != NULL) {
		prnt(" -- last acquired @ %s", parent->w_file);
#ifndef W_USE_WHERE
		prnt(":%d", parent->w_line);
#endif
		prnt("\n");
	}

	for (w = parent; w != NULL; w = w->w_morechildren)
		for (i = 0; i < w->w_childcnt; i++)
			witness_displaydescendants(prnt, w->w_children[i]);
}

static int
dup_ok(struct witness *w)
{
	char **dup;

	for (dup = dup_list; *dup != NULL; dup++)
		if (strcmp(w->w_description, *dup) == 0)
			return (1);
	return (0);
}

static int
blessed(struct witness *w1, struct witness *w2)
{
	int i;
	struct witness_blessed *b;

	for (i = 0; i < blessed_count; i++) {
		b = &blessed_list[i];
		if (strcmp(w1->w_description, b->b_lock1) == 0) {
			if (strcmp(w2->w_description, b->b_lock2) == 0)
				return (1);
			continue;
		}
		if (strcmp(w1->w_description, b->b_lock2) == 0)
			if (strcmp(w2->w_description, b->b_lock1) == 0)
				return (1);
	}
	return (0);
}

static struct witness *
witness_get()
{
	struct witness *w;

	if ((w = w_free) == NULL) {
		witness_dead = 1;
		mtx_exit(&w_mtx, MTX_SPIN);
		printf("witness exhausted\n");
		return (NULL);
	}
	w_free = w->w_next;
	bzero(w, sizeof(*w));
	return (w);
}

static void
witness_free(struct witness *w)
{
	w->w_next = w_free;
	w_free = w;
}

void
witness_list(struct proc *p)
{
	struct mtx *m;

	for ((m = LIST_FIRST(&p->p_heldmtx)); m != NULL;
	    m = LIST_NEXT(m, mtx_held)) {
		printf("\t\"%s\" (%p) locked at %s:%d\n",
		    m->mtx_description, m,
		    m->mtx_witness->w_file, m->mtx_witness->w_line);
	}
}

void
witness_save(struct mtx *m, const char **filep, int *linep)
{
	*filep = m->mtx_witness->w_file;
	*linep = m->mtx_witness->w_line;
}

void
witness_restore(struct mtx *m, const char *file, int line)
{
	m->mtx_witness->w_file = file;
	m->mtx_witness->w_line = line;
}

#endif	/* (defined(MUTEX_DEBUG) && defined(WITNESS)) */