/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_mutex.c 151450 2005-10-18 18:27:44Z jhb $");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_mprof.h"
#include "opt_mutex_wake_all.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Force MUTEX_WAKE_ALL for now.
 * Single-thread wakeup needs fixes to avoid race conditions with
 * priority inheritance.
 */
#ifndef MUTEX_WAKE_ALL
#define MUTEX_WAKE_ALL
#endif

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))
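
/*
 * For reference, the lock word encoding these macros rely on packs the
 * owner and state into a single pointer-sized field: mtx_lock is
 * MTX_UNOWNED when the mutex is free, and otherwise holds the owning
 * thread pointer with the low MTX_FLAGMASK bits borrowed for the
 * MTX_RECURSED and MTX_CONTESTED state flags.  A sketch:
 *
 *	mtx_lock == MTX_UNOWNED				free
 *	mtx_lock == (uintptr_t)td			owned by thread td
 *	mtx_lock == (uintptr_t)td | MTX_CONTESTED	owned, with waiters
 *
 * mtx_owner() simply masks the flag bits back off to recover td.
 */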

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");

struct mutex_prof {
	const char	*name;
	const char	*file;
	int		line;
	uintmax_t	cnt_max;
	uintmax_t	cnt_tot;
	uintmax_t	cnt_cur;
	uintmax_t	cnt_contest_holding;
	uintmax_t	cnt_contest_locking;
	struct mutex_prof *next;
};

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#ifdef MPROF_BUFFERS
#define NUM_MPROF_BUFFERS	MPROF_BUFFERS
#else
#define	NUM_MPROF_BUFFERS	1000
#endif
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#ifndef MPROF_HASH_SIZE
#define	MPROF_HASH_SIZE		1009
#endif
#if NUM_MPROF_BUFFERS >= MPROF_HASH_SIZE
#error NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE
#endif
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];
/* SWAG: sbuf size = avg stat. line size * number of locks */
#define MPROF_SBUF_SIZE		(256 * 400)

static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");

/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);

static u_int64_t
nanoseconds(void)
{
	struct timespec tv;

	nanotime(&tv);
	return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error, i;
	static int multiplier = 1;

	if (first_free_mprof_buf == 0)
		return (SYSCTL_OUT(req, "No locking recorded",
		    sizeof("No locking recorded")));

retry_sbufops:
	sb = sbuf_new(NULL, NULL, MPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
	sbuf_printf(sb, "\n%6s %12s %11s %5s %12s %12s %s\n",
	    "max", "total", "count", "avg", "cnt_hold", "cnt_lock", "name");
	/*
	 * XXX this spinlock seems to be by far the largest perpetrator
	 * of spinlock latency (1.6 msec on an Athlon1600 was recorded
	 * even before I pessimized it further by moving the average
	 * computation here).
	 */
	mtx_lock_spin(&mprof_mtx);
	for (i = 0; i < first_free_mprof_buf; ++i) {
		sbuf_printf(sb, "%6ju %12ju %11ju %5ju %12ju %12ju %s:%d (%s)\n",
		    mprof_buf[i].cnt_max / 1000,
		    mprof_buf[i].cnt_tot / 1000,
		    mprof_buf[i].cnt_cur,
		    mprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
			mprof_buf[i].cnt_tot / (mprof_buf[i].cnt_cur * 1000),
		    mprof_buf[i].cnt_contest_holding,
		    mprof_buf[i].cnt_contest_locking,
		    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
		if (sbuf_overflowed(sb)) {
			mtx_unlock_spin(&mprof_mtx);
			sbuf_delete(sb);
			multiplier++;
			goto retry_sbufops;
		}
	}
	mtx_unlock_spin(&mprof_mtx);
	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");

static int
reset_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	if (first_free_mprof_buf == 0)
		return (0);

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);

	mtx_lock_spin(&mprof_mtx);
	bzero(mprof_buf, sizeof(*mprof_buf) * first_free_mprof_buf);
	bzero(mprof_hash, sizeof(mprof_hash));
	first_free_mprof_buf = 0;
	mtx_unlock_spin(&mprof_mtx);
	return (0);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_mutex_prof_stats, "I", "Reset mutex profiling statistics");
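
/*
 * Usage sketch (assuming a kernel built with MUTEX_PROFILING): the knobs
 * above are reached through sysctl(8) from userland, e.g.
 *
 *	sysctl debug.mutex.prof.enable=1	# start collecting
 *	sysctl debug.mutex.prof.stats		# dump the table
 *	sysctl debug.mutex.prof.reset=1		# clear the records
 *
 * The exact OID names follow from the SYSCTL_* declarations above.
 */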
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
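
/*
 * For orientation, a minimal (hypothetical) consumer of this API is the
 * usual lock/unlock bracket; the function versions below are what such a
 * module call site actually resolves to:
 *
 *	static struct mtx foo_mtx;	(hypothetical lock)
 *	static int foo_count;		(protected by foo_mtx)
 *
 *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *	...
 *	mtx_lock(&foo_mtx);
 *	foo_count++;
 *	mtx_unlock(&foo_mtx);
 */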
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line);
	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
	/* don't reset the timer when/if recursing */
	if (m->mtx_acqtime == 0) {
		m->mtx_filename = file;
		m->mtx_lineno = line;
		m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
		++mutex_prof_acquisitions;
	}
#endif
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
	if (m->mtx_acqtime != 0) {
		static const char *unknown = "(unknown)";
		struct mutex_prof *mpp;
		u_int64_t acqtime, now;
		const char *p, *q;
		volatile u_int hash;

		now = nanoseconds();
		acqtime = m->mtx_acqtime;
		m->mtx_acqtime = 0;
		if (now <= acqtime)
			goto out;
		for (p = m->mtx_filename;
		    p != NULL && strncmp(p, "../", 3) == 0; p += 3)
			/* nothing */ ;
		if (p == NULL || *p == '\0')
			p = unknown;
		for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
			hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
		mtx_lock_spin(&mprof_mtx);
		for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
			if (mpp->line == m->mtx_lineno &&
			    strcmp(mpp->file, p) == 0)
				break;
		if (mpp == NULL) {
			/* Just exit if we cannot get a trace buffer */
			if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
				++mutex_prof_rejected;
				goto unlock;
			}
			mpp = &mprof_buf[first_free_mprof_buf++];
			mpp->name = mtx_name(m);
			mpp->file = p;
			mpp->line = m->mtx_lineno;
			mpp->next = mprof_hash[hash];
			if (mprof_hash[hash] != NULL)
				++mutex_prof_collisions;
			mprof_hash[hash] = mpp;
			++mutex_prof_records;
		}
		/*
		 * Record if the mutex has been held longer now than ever
		 * before.
		 */
		if (now - acqtime > mpp->cnt_max)
			mpp->cnt_max = now - acqtime;
		mpp->cnt_tot += now - acqtime;
		mpp->cnt_cur++;
		/*
		 * There's a small race here: ideally we would cmpxchg
		 * the current value with 0, but even that could bill
		 * contention that arrives between the read and the
		 * clear to the wrong lock instance.
		 */
		mpp->cnt_contest_holding += m->mtx_contest_holding;
		m->mtx_contest_holding = 0;
		mpp->cnt_contest_locking += m->mtx_contest_locking;
		m->mtx_contest_locking = 0;
unlock:
		mtx_unlock_spin(&mprof_mtx);
	}
out:
#endif
	_rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line);
	_get_spin_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
	_rel_spin_lock(m);
}

/*
 * The important part of mtx_trylock{,_flags}().
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock if the mutex was
 * initialized with MTX_RECURSE; otherwise the attempt fails.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));

	if (mtx_owned(m) && (m->mtx_object.lo_flags & LO_RECURSABLE) != 0) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _obtain_lock(m, (uintptr_t)curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval)
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);

	return (rval);
}
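
/*
 * A minimal (hypothetical) call-site sketch: mtx_trylock() returns
 * nonzero on success, so the caller must handle the failure path itself
 * instead of blocking:
 *
 *	if (mtx_trylock(&foo_mtx)) {	(foo_mtx is hypothetical)
 *		foo_count++;		(got the lock)
 *		mtx_unlock(&foo_mtx);
 *	} else {
 *		(lock busy; defer or fall back)
 *	}
 */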

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
	struct thread *owner;
#endif
	uintptr_t v;
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef MUTEX_PROFILING
	int contested;
#endif

	if (mtx_owned(m)) {
		KASSERT((m->mtx_object.lo_flags & LO_RECURSABLE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->mtx_object.lo_name, file, line));
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

#ifdef MUTEX_PROFILING
	contested = 0;
#endif
	while (!_obtain_lock(m, tid)) {
#ifdef MUTEX_PROFILING
		contested = 1;
		atomic_add_int(&m->mtx_contest_holding, 1);
#endif
		turnstile_lock(&m->mtx_object);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_release(&m->mtx_object);
			cpu_spinwait();
			continue;
		}

#ifdef MUTEX_WAKE_ALL
		MPASS(v != MTX_CONTESTED);
#else
		/*
		 * The mutex was marked contested on release. This means that
		 * there are other threads blocked on it.  Grab ownership of
		 * it and propagate its priority to the current thread if
		 * necessary.
		 */
		if (v == MTX_CONTESTED) {
			m->mtx_lock = tid | MTX_CONTESTED;
			turnstile_claim(&m->mtx_object);
			break;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_release(&m->mtx_object);
			cpu_spinwait();
			continue;
		}

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, spin instead of blocking.
		 */
		owner = (struct thread *)(v & MTX_FLAGMASK);
#ifdef ADAPTIVE_GIANT
		if (TD_IS_RUNNING(owner)) {
#else
		if (m != &Giant && TD_IS_RUNNING(owner)) {
#endif
			turnstile_release(&m->mtx_object);
			while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
				cpu_spinwait();
			}
			continue;
		}
#endif	/* SMP && !NO_ADAPTIVE_MUTEXES */

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->mtx_object.lo_name,
			    WITNESS_FILE(&m->mtx_object),
			    WITNESS_LINE(&m->mtx_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
		turnstile_wait(&m->mtx_object, mtx_owner(m));
	}

#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->mtx_object.lo_name, (void *)tid, file, line);
	}
#endif
#ifdef MUTEX_PROFILING
	if (contested)
		m->mtx_contest_locking++;
	m->mtx_contest_holding = 0;
#endif
	return;
}

#ifdef SMP
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	for (;;) {
		if (_obtain_lock(m, tid))
			break;

		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
				cpu_spinwait();
				continue;
			}
			if (i < 60000000)
				DELAY(1);
			else if (!kdb_active && !panicstr) {
				printf("spin lock %s held by %p for > 5 seconds\n",
				    m->mtx_object.lo_name, (void *)m->mtx_lock);
#ifdef WITNESS
				witness_display_spinlock(&m->mtx_object,
				    mtx_owner(m));
#endif
				panic("spin lock held too long");
			}
			cpu_spinwait();
		}
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}
#endif /* SMP */
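
/*
 * A short (hypothetical) sketch of the spin-mutex pattern served by the
 * code above: the spin lock macros disable interrupts on the local CPU
 * via spinlock_enter()/spinlock_exit(), so critical sections must stay
 * short and must never sleep:
 *
 *	static struct mtx bar_mtx;	(hypothetical lock)
 *
 *	mtx_init(&bar_mtx, "bar", NULL, MTX_SPIN);
 *	...
 *	mtx_lock_spin(&bar_mtx);
 *	(brief, non-sleeping work only)
 *	mtx_unlock_spin(&bar_mtx);
 */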

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct turnstile *ts;
#ifndef PREEMPTION
	struct thread *td, *td1;
#endif

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	turnstile_lock(&m->mtx_object);
	ts = turnstile_lookup(&m->mtx_object);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
	if (ts == NULL) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
		turnstile_release(&m->mtx_object);
		return;
	}
#else
	MPASS(ts != NULL);
#endif
#ifndef PREEMPTION
	/* XXX */
	td1 = turnstile_head(ts);
#endif
#ifdef MUTEX_WAKE_ALL
	turnstile_broadcast(ts);
	_release_lock_quick(m);
#else
	if (turnstile_signal(ts)) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else {
		m->mtx_lock = MTX_CONTESTED;
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested",
			    m);
	}
#endif
	turnstile_unpend(ts);

#ifndef PREEMPTION
	/*
	 * XXX: This is just a hack until preemption is done.  However,
	 * once preemption is done we need to either wrap the
	 * turnstile_signal() and release of the actual lock in an
	 * extra critical section or change the preemption code to
	 * always just set a flag and never do instant-preempts.
	 */
	td = curthread;
	if (td->td_critnest > 0 || td1->td_priority >= td->td_priority)
		return;
	mtx_lock_spin(&sched_lock);
	if (!TD_IS_RUNNING(td1)) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		mi_switch(SW_INVOL, NULL);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}
	mtx_unlock_spin(&sched_lock);
#endif

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL || dumping)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}

/*
 * Mutex initialization routine; initialize lock `m' with name `name' and
 * the options contained in `opts.'  The optional lock type `type' is used
 * as a general lock category name for use with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_object *lock;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	lock = &m->mtx_object;
	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
	    ("mutex \"%s\" %p already initialized", name, m));
	bzero(m, sizeof(*m));
	if (opts & MTX_SPIN)
		lock->lo_class = &lock_class_mtx_spin;
	else
		lock->lo_class = &lock_class_mtx_sleep;
	lock->lo_name = name;
	lock->lo_type = type != NULL ? type : name;
	if (opts & MTX_QUIET)
		lock->lo_flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		lock->lo_flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		lock->lo_flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		lock->lo_flags |= LO_DUPOK;

	m->mtx_lock = MTX_UNOWNED;

	LOCK_LOG_INIT(lock, opts);

	WITNESS_INIT(lock);
}
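
/*
 * A minimal (hypothetical) initialization sketch: static locks are
 * usually declared once and handed to mtx_sysinit() via the MTX_SYSINIT()
 * macro, as done for mprof_mtx above, so they come up automatically
 * during boot:
 *
 *	static struct mtx baz_mtx;	(hypothetical lock)
 *	MTX_SYSINIT(baz, &baz_mtx, "baz", MTX_DEF);
 *
 * which is equivalent to calling mtx_init(&baz_mtx, "baz", NULL, MTX_DEF)
 * at SI_SUB_LOCK time.
 */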

/*
 * Destroy lock `m.'  We don't allow MTX_QUIET to be passed in as a flag
 * here because if the corresponding mtx_init() was called with MTX_QUIET
 * set, then it will already be set in the mutex's flags.
 */
void
mtx_destroy(struct mtx *m)
{

	LOCK_LOG_DESTROY(&m->mtx_object, 0);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	WITNESS_DESTROY(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup thread0 so that mutexes work. */
	LIST_INIT(&thread0.td_contested);

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}
908