/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_mutex.c 136437 2004-10-12 16:28:18Z ups $");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_mprof.h"
#include "opt_mutex_wake_all.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Force MUTEX_WAKE_ALL for now.
 * Single-thread wakeup needs fixes to avoid race conditions with
 * priority inheritance.
 */
#ifndef MUTEX_WAKE_ALL
#define MUTEX_WAKE_ALL
#endif

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))

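/*
 * Illustrative sketch (comment only, not compiled): mtx_lock holds the
 * owning thread pointer with the MTX_* flag bits folded into its low bits,
 * so the macros above recover the owner by masking, e.g.:
 *
 *	uintptr_t v = m->mtx_lock;
 *	struct thread *owner = (v == MTX_UNOWNED) ? NULL :
 *	    (struct thread *)(v & MTX_FLAGMASK);
 */
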
/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");

struct mutex_prof {
	const char	*name;
	const char	*file;
	int		line;
	uintmax_t	cnt_max;
	uintmax_t	cnt_tot;
	uintmax_t	cnt_cur;
	uintmax_t	cnt_contest_holding;
	uintmax_t	cnt_contest_locking;
	struct mutex_prof *next;
};

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#ifdef MPROF_BUFFERS
#define NUM_MPROF_BUFFERS	MPROF_BUFFERS
#else
#define	NUM_MPROF_BUFFERS	1000
#endif
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#ifndef MPROF_HASH_SIZE
#define	MPROF_HASH_SIZE		1009
#endif
#if NUM_MPROF_BUFFERS >= MPROF_HASH_SIZE
#error MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE
#endif
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];
/* SWAG: sbuf size = avg stat. line size * number of locks */
#define MPROF_SBUF_SIZE		(256 * 400)

static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");

/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);

static u_int64_t
nanoseconds(void)
{
	struct timespec tv;

	nanotime(&tv);
	return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error, i;
	static int multiplier = 1;

	if (first_free_mprof_buf == 0)
		return (SYSCTL_OUT(req, "No locking recorded",
		    sizeof("No locking recorded")));

retry_sbufops:
	sb = sbuf_new(NULL, NULL, MPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
	sbuf_printf(sb, "%6s %12s %11s %5s %12s %12s %s\n",
	    "max", "total", "count", "avg", "cnt_hold", "cnt_lock", "name");
	/*
	 * XXX this spinlock seems to be by far the largest perpetrator
	 * of spinlock latency (1.6 msec on an Athlon1600 was recorded
	 * even before I pessimized it further by moving the average
	 * computation here).
	 */
	mtx_lock_spin(&mprof_mtx);
	for (i = 0; i < first_free_mprof_buf; ++i) {
		sbuf_printf(sb, "%6ju %12ju %11ju %5ju %12ju %12ju %s:%d (%s)\n",
		    mprof_buf[i].cnt_max / 1000,
		    mprof_buf[i].cnt_tot / 1000,
		    mprof_buf[i].cnt_cur,
		    mprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
			mprof_buf[i].cnt_tot / (mprof_buf[i].cnt_cur * 1000),
		    mprof_buf[i].cnt_contest_holding,
		    mprof_buf[i].cnt_contest_locking,
		    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
		if (sbuf_overflowed(sb)) {
			mtx_unlock_spin(&mprof_mtx);
			sbuf_delete(sb);
			multiplier++;
			goto retry_sbufops;
		}
	}
	mtx_unlock_spin(&mprof_mtx);
	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");

static int
reset_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	if (first_free_mprof_buf == 0)
		return (0);

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);

	mtx_lock_spin(&mprof_mtx);
	bzero(mprof_buf, sizeof(*mprof_buf) * first_free_mprof_buf);
	bzero(mprof_hash, sizeof(struct mutex_prof *) * MPROF_HASH_SIZE);
	first_free_mprof_buf = 0;
	mtx_unlock_spin(&mprof_mtx);
	return (0);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_mutex_prof_stats, "I", "Reset mutex profiling statistics");
#endif

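/*
 * Illustrative usage from userland (assuming a kernel built with
 * MUTEX_PROFILING): the sysctl nodes defined above are driven with
 * sysctl(8), e.g.:
 *
 *	sysctl debug.mutex.prof.enable=1	(start collecting)
 *	sysctl debug.mutex.prof.stats		(dump the records)
 *	sysctl debug.mutex.prof.reset=1		(clear the records)
 */
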
/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line);
	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
	/* don't reset the timer when/if recursing */
	if (m->mtx_acqtime == 0) {
		m->mtx_filename = file;
		m->mtx_lineno = line;
		m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
		++mutex_prof_acquisitions;
	}
#endif
}

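/*
 * Illustrative caller (comment only, not compiled; "sc" is a hypothetical
 * softc): the mtx_lock()/mtx_unlock() macros expand to these functions or
 * their inline equivalents, passing __FILE__ and __LINE__ for WITNESS and
 * the lock logging above:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	sc->sc_count++;
 *	mtx_unlock(&sc->sc_mtx);
 */
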
void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
	if (m->mtx_acqtime != 0) {
		static const char *unknown = "(unknown)";
		struct mutex_prof *mpp;
		u_int64_t acqtime, now;
		const char *p, *q;
		volatile u_int hash;

		now = nanoseconds();
		acqtime = m->mtx_acqtime;
		m->mtx_acqtime = 0;
		if (now <= acqtime)
			goto out;
		for (p = m->mtx_filename;
		    p != NULL && strncmp(p, "../", 3) == 0; p += 3)
			/* nothing */ ;
		if (p == NULL || *p == '\0')
			p = unknown;
		for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
			hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
		mtx_lock_spin(&mprof_mtx);
		for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
			if (mpp->line == m->mtx_lineno &&
			    strcmp(mpp->file, p) == 0)
				break;
		if (mpp == NULL) {
			/* Just exit if we cannot get a trace buffer */
			if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
				++mutex_prof_rejected;
				goto unlock;
			}
			mpp = &mprof_buf[first_free_mprof_buf++];
			mpp->name = mtx_name(m);
			mpp->file = p;
			mpp->line = m->mtx_lineno;
			mpp->next = mprof_hash[hash];
			if (mprof_hash[hash] != NULL)
				++mutex_prof_collisions;
			mprof_hash[hash] = mpp;
			++mutex_prof_records;
		}
		/*
		 * Record if the mutex has been held longer now than ever
		 * before.
		 */
		if (now - acqtime > mpp->cnt_max)
			mpp->cnt_max = now - acqtime;
		mpp->cnt_tot += now - acqtime;
		mpp->cnt_cur++;
		/*
		 * There's a small race, really we should cmpxchg
		 * 0 with the current value, but that would bill
		 * the contention to the wrong lock instance if
		 * it followed this also.
		 */
		mpp->cnt_contest_holding += m->mtx_contest_holding;
		m->mtx_contest_holding = 0;
		mpp->cnt_contest_locking += m->mtx_contest_locking;
		m->mtx_contest_locking = 0;
unlock:
		mtx_unlock_spin(&mprof_mtx);
	}
out:
#endif
	_rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line);
#if defined(SMP) || LOCK_DEBUG > 0 || 1
	_get_spin_lock(m, curthread, opts, file, line);
#else
	critical_enter();
#endif
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
#if defined(SMP) || LOCK_DEBUG > 0 || 1
	_rel_spin_lock(m);
#else
	critical_exit();
#endif
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);

	if (mtx_owned(m) && (m->mtx_object.lo_flags & LO_RECURSABLE) != 0) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _obtain_lock(m, curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval)
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);

	return (rval);
}

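/*
 * Illustrative caller (comment only, not compiled; "foo_mtx" and
 * "foo_poll" are hypothetical): a path that must not block can probe the
 * lock and skip the work when it is busy:
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		foo_poll();
 *		mtx_unlock(&foo_mtx);
 *	}
 */
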
/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, struct thread *td, int opts, const char *file,
    int line)
{
	struct turnstile *ts;
#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
	struct thread *owner;
#endif
	uintptr_t v;
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef MUTEX_PROFILING
	int contested;
#endif

	if (mtx_owned(m)) {
		KASSERT((m->mtx_object.lo_flags & LO_RECURSABLE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->mtx_object.lo_name, file, line));
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

#ifdef MUTEX_PROFILING
	contested = 0;
#endif
	while (!_obtain_lock(m, td)) {
#ifdef MUTEX_PROFILING
		contested = 1;
		atomic_add_int(&m->mtx_contest_holding, 1);
#endif
		ts = turnstile_lookup(&m->mtx_object);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_release(&m->mtx_object);
			cpu_spinwait();
			continue;
		}

#ifdef MUTEX_WAKE_ALL
		MPASS(v != MTX_CONTESTED);
#else
		/*
		 * The mutex was marked contested on release. This means that
		 * there are other threads blocked on it.  Grab ownership of
		 * it and propagate its priority to the current thread if
		 * necessary.
		 */
		if (v == MTX_CONTESTED) {
			MPASS(ts != NULL);
			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;
			turnstile_claim(ts);
			break;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
			(void *)(v | MTX_CONTESTED))) {
			turnstile_release(&m->mtx_object);
			cpu_spinwait();
			continue;
		}

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, spin instead of blocking.
		 */
		owner = (struct thread *)(v & MTX_FLAGMASK);
#ifdef ADAPTIVE_GIANT
		if (TD_IS_RUNNING(owner)) {
#else
		if (m != &Giant && TD_IS_RUNNING(owner)) {
#endif
			turnstile_release(&m->mtx_object);
			while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
				cpu_spinwait();
			}
			continue;
		}
#endif	/* SMP && !NO_ADAPTIVE_MUTEXES */

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    td, file, line, m->mtx_object.lo_name,
			    WITNESS_FILE(&m->mtx_object),
			    WITNESS_LINE(&m->mtx_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
		turnstile_wait(ts, &m->mtx_object, mtx_owner(m));
	}

#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->mtx_object.lo_name, td, file, line);
	}
#endif
#ifdef MUTEX_PROFILING
	if (contested)
		m->mtx_contest_locking++;
	m->mtx_contest_holding = 0;
#endif
	return;
}

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, struct thread *td, int opts, const char *file,
    int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	for (;;) {
		if (_obtain_lock(m, td))
			break;

		/* Give interrupts a chance while we spin. */
		critical_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
				cpu_spinwait();
				continue;
			}
			if (i < 60000000)
				DELAY(1);
			else if (!kdb_active) {
				printf("spin lock %s held by %p for > 5 seconds\n",
				    m->mtx_object.lo_name, (void *)m->mtx_lock);
#ifdef WITNESS
				witness_display_spinlock(&m->mtx_object,
				    mtx_owner(m));
#endif
				panic("spin lock held too long");
			}
			cpu_spinwait();
		}
		critical_enter();
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct turnstile *ts;
#ifndef PREEMPTION
	struct thread *td, *td1;
#endif

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	ts = turnstile_lookup(&m->mtx_object);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
	if (ts == NULL) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
		turnstile_release(&m->mtx_object);
		return;
	}
#else
	MPASS(ts != NULL);
#endif
#ifndef PREEMPTION
	/* XXX */
	td1 = turnstile_head(ts);
#endif
#ifdef MUTEX_WAKE_ALL
	turnstile_broadcast(ts);
	_release_lock_quick(m);
#else
	if (turnstile_signal(ts)) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else {
		m->mtx_lock = MTX_CONTESTED;
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested",
			    m);
	}
#endif
	turnstile_unpend(ts);

#ifndef PREEMPTION
	/*
	 * XXX: This is just a hack until preemption is done.  However,
	 * once preemption is done we need to either wrap the
	 * turnstile_signal() and release of the actual lock in an
	 * extra critical section or change the preemption code to
	 * always just set a flag and never do instant-preempts.
	 */
	td = curthread;
	if (td->td_critnest > 0 || td1->td_priority >= td->td_priority)
		return;
	mtx_lock_spin(&sched_lock);
	if (!TD_IS_RUNNING(td1)) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		mi_switch(SW_INVOL, NULL);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}
	mtx_unlock_spin(&sched_lock);
#endif

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

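/*
 * Illustrative caller (comment only, not compiled; "sc" is hypothetical):
 * code that requires its caller to hold a mutex typically documents and
 * enforces that with mtx_assert(), which is backed by the function above
 * under INVARIANTS:
 *
 *	mtx_assert(&sc->sc_mtx, MA_OWNED);
 */
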
/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}

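/*
 * Illustrative usage (comment only, not compiled; the "foo" names are
 * hypothetical): MTX_SYSINIT() arranges for mtx_sysinit() to run at boot
 * with a struct mtx_args describing the mutex, as mprof_mtx does above:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo, &foo_mtx, "foo lock", MTX_DEF);
 */
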
/*
 * Mutex initialization routine; initialize lock `m' with the class (spin or
 * sleep) and options contained in `opts' and with name `name'.  The optional
 * lock type `type' is used as a general lock category name for use with
 * witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_object *lock;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	lock = &m->mtx_object;
	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
	    ("mutex \"%s\" %p already initialized", name, m));
	bzero(m, sizeof(*m));
	if (opts & MTX_SPIN)
		lock->lo_class = &lock_class_mtx_spin;
	else
		lock->lo_class = &lock_class_mtx_sleep;
	lock->lo_name = name;
	lock->lo_type = type != NULL ? type : name;
	if (opts & MTX_QUIET)
		lock->lo_flags = LO_QUIET;
	if (opts & MTX_RECURSE)
		lock->lo_flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		lock->lo_flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		lock->lo_flags |= LO_DUPOK;

	m->mtx_lock = MTX_UNOWNED;

	LOCK_LOG_INIT(lock, opts);

	WITNESS_INIT(lock);
}

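/*
 * Illustrative usage (comment only, not compiled; the "foo" names are
 * hypothetical): a typical consumer initializes a sleep mutex once, uses it
 * to protect its data, and destroys it on teardown with mtx_destroy() below:
 *
 *	struct foo_softc {
 *		struct mtx	sc_mtx;
 *		int		sc_busy;
 *	};
 *
 *	mtx_init(&sc->sc_mtx, "foo softc", NULL, MTX_DEF);
 *	mtx_lock(&sc->sc_mtx);
 *	sc->sc_busy = 1;
 *	mtx_unlock(&sc->sc_mtx);
 *	...
 *	mtx_destroy(&sc->sc_mtx);
 */
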
/*
 * Destroy lock `m'.  We don't allow MTX_QUIET to be passed in as a flag
 * here because if the corresponding mtx_init() was called with MTX_QUIET
 * set, then it will already be set in the mutex's flags.
 */
void
mtx_destroy(struct mtx *m)
{

	LOCK_LOG_DESTROY(&m->mtx_object, 0);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	WITNESS_DESTROY(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup thread0 so that mutexes work. */
	LIST_INIT(&thread0.td_contested);

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_lock(&Giant);
}