/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD: head/sys/kern/kern_mutex.c 100754 2002-07-27 16:54:23Z jhb $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sbuf.h>
#include <sys/stdint.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))

/* XXXKSE This test will change. */
#define	thread_running(td)						\
	((td)->td_kse != NULL && (td)->td_kse->ke_oncpu != NOCPU)

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

/*
 * Prototypes for non-exported routines.
 */
static void	propagate_priority(struct thread *);

static void
propagate_priority(struct thread *td)
{
	int pri = td->td_priority;
	struct mtx *m = td->td_blocked;

	mtx_assert(&sched_lock, MA_OWNED);
	for (;;) {
		struct thread *td1;

		td = mtx_owner(m);

		if (td == NULL) {
			/*
			 * This really isn't quite right. Really
			 * ought to bump priority of thread that
			 * next acquires the mutex.
			 */
			MPASS(m->mtx_lock == MTX_CONTESTED);
			return;
		}

		KASSERT(td->td_state != TDS_SURPLUS, ("Mutex owner SURPLUS"));
		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);
		KASSERT(td->td_state != TDS_SLP,
		    ("sleeping thread owns a mutex"));
		if (td->td_priority <= pri) /* lower is higher priority */
			return;


		/*
		 * If lock holder is actually running, just bump priority.
		 */
		if (td->td_state == TDS_RUNNING) {
			td->td_priority = pri;
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check to see if td is curthread (this shouldn't
		 * ever happen however as it would mean we are in a deadlock.)
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If on run queue move to new run queue, and quit.
		 * XXXKSE this gets a lot more complicated under threads
		 * but try anyhow.
		 * We should have a special call to do this more efficiently.
		 */
		if (td->td_state == TDS_RUNQ) {
			MPASS(td->td_blocked == NULL);
			remrunqueue(td);
			td->td_priority = pri;
			setrunqueue(td);
			return;
		}
		/*
		 * Adjust for any other cases.
		 */
		td->td_priority = pri;

		/*
		 * If we aren't blocked on a mutex, we should be.
		 */
		KASSERT(td->td_state == TDS_MTX, (
		    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_state,
		    m->mtx_object.lo_name));

		/*
		 * Pick up the mutex that td is blocked on.
		 */
		m = td->td_blocked;
		MPASS(m != NULL);

		/*
		 * Check if the thread needs to be moved up on
		 * the blocked chain
		 */
		if (td == TAILQ_FIRST(&m->mtx_blocked)) {
			continue;
		}

		td1 = TAILQ_PREV(td, threadqueue, td_blkq);
		if (td1->td_priority <= pri) {
			continue;
		}

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved up to.  Since we know that td1 has
		 * a lower priority than td, we know that at least one
		 * thread in the chain has a lower priority and that
		 * td1 will thus not be NULL after the loop.
		 */
		TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
		TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > pri)
				break;
		}

		MPASS(td1 != NULL);
		TAILQ_INSERT_BEFORE(td1, td, td_blkq);
		CTR4(KTR_LOCK,
		    "propagate_priority: p %p moved before %p on [%p] %s",
		    td, td1, m, m->mtx_object.lo_name);
	}
}

#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");

struct mutex_prof {
	const char *name;
	const char *file;
	int line;
#define MPROF_MAX 0
#define MPROF_TOT 1
#define MPROF_CNT 2
#define MPROF_AVG 3
	uintmax_t counter[4];
	struct mutex_prof *next;
};

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#define NUM_MPROF_BUFFERS 1000
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#define MPROF_HASH_SIZE 1009
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];

static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");

/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);

static u_int64_t
nanoseconds(void)
{
	struct timespec tv;

	nanotime(&tv);
	return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error, i;

	if (first_free_mprof_buf == 0)
		return SYSCTL_OUT(req, "No locking recorded",
		    sizeof("No locking recorded"));

	sb = sbuf_new(NULL, NULL, 1024, SBUF_AUTOEXTEND);
	sbuf_printf(sb, "%12s %12s %12s %12s %s\n",
	    "max", "total", "count", "average", "name");
	mtx_lock_spin(&mprof_mtx);
	for (i = 0; i < first_free_mprof_buf; ++i)
		sbuf_printf(sb, "%12ju %12ju %12ju %12ju %s:%d (%s)\n",
		    mprof_buf[i].counter[MPROF_MAX] / 1000,
		    mprof_buf[i].counter[MPROF_TOT] / 1000,
		    mprof_buf[i].counter[MPROF_CNT],
		    mprof_buf[i].counter[MPROF_AVG] / 1000,
		    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
	mtx_unlock_spin(&mprof_mtx);
	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");
#endif
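
/*
 * Example (illustrative only, not part of this file): with MUTEX_PROFILING
 * compiled in, the report produced by dump_mutex_prof_stats() above can be
 * read from userland through the debug.mutex.prof.stats sysctl once
 * debug.mutex.prof.enable has been set to a non-zero value.  The sketch
 * below is a minimal userland reader using sysctlbyname(3); it is kept
 * under "#if 0" so it is never compiled into the kernel.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	char *buf;
	size_t len;

	/* Ask the kernel how large the formatted report is. */
	if (sysctlbyname("debug.mutex.prof.stats", NULL, &len, NULL, 0) == -1)
		return (1);
	if ((buf = malloc(len)) == NULL)
		return (1);
	/* Fetch and print the report produced by dump_mutex_prof_stats(). */
	if (sysctlbyname("debug.mutex.prof.stats", buf, &len, NULL, 0) == -1)
		return (1);
	printf("%s", buf);
	free(buf);
	return (0);
}
#endif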

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
	/* don't reset the timer when/if recursing */
	if (m->mtx_acqtime == 0) {
		m->mtx_filename = file;
		m->mtx_lineno = line;
		m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
		++mutex_prof_acquisitions;
	}
#endif
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
	if (m->mtx_acqtime != 0) {
		static const char *unknown = "(unknown)";
		struct mutex_prof *mpp;
		u_int64_t acqtime, now;
		const char *p, *q;
		volatile u_int hash;

		now = nanoseconds();
		acqtime = m->mtx_acqtime;
		m->mtx_acqtime = 0;
		if (now <= acqtime)
			goto out;
		for (p = m->mtx_filename; strncmp(p, "../", 3) == 0; p += 3)
			/* nothing */ ;
		if (p == NULL || *p == '\0')
			p = unknown;
		for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
			hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
		mtx_lock_spin(&mprof_mtx);
		for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
			if (mpp->line == m->mtx_lineno &&
			    strcmp(mpp->file, p) == 0)
				break;
		if (mpp == NULL) {
			/* Just exit if we cannot get a trace buffer */
			if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
				++mutex_prof_rejected;
				goto unlock;
			}
			mpp = &mprof_buf[first_free_mprof_buf++];
			mpp->name = mtx_name(m);
			mpp->file = p;
			mpp->line = m->mtx_lineno;
			mpp->next = mprof_hash[hash];
			if (mprof_hash[hash] != NULL)
				++mutex_prof_collisions;
			mprof_hash[hash] = mpp;
			++mutex_prof_records;
		}
		/*
		 * Record if the mutex has been held longer now than ever
		 * before
		 */
		if ((now - acqtime) > mpp->counter[MPROF_MAX])
			mpp->counter[MPROF_MAX] = now - acqtime;
		mpp->counter[MPROF_TOT] += now - acqtime;
		mpp->counter[MPROF_CNT] += 1;
		mpp->counter[MPROF_AVG] =
		    mpp->counter[MPROF_TOT] / mpp->counter[MPROF_CNT];
unlock:
		mtx_unlock_spin(&mprof_mtx);
	}
out:
#endif
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	_rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
#if defined(SMP) || LOCK_DEBUG > 0 || 1
	_get_spin_lock(m, curthread, opts, file, line);
#else
	critical_enter();
#endif
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	mtx_assert(m, MA_OWNED);
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
#if defined(SMP) || LOCK_DEBUG > 0 || 1
	_rel_spin_lock(m);
#else
	critical_exit();
#endif
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.' We do NOT handle recursion here; we assume that
 * if we're called, it's because we know we don't already own this lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);

	rval = _obtain_lock(m, curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval) {
		/*
		 * We do not handle recursion in _mtx_trylock; see the
		 * note at the top of the routine.
		 */
		KASSERT(!mtx_recursed(m),
		    ("mtx_trylock() called on a recursed mutex"));
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	}

	return (rval);
}
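
/*
 * Example (illustrative sketch, not part of this file): a typical
 * mtx_trylock() caller avoids blocking by falling back to other work when
 * the lock is busy.  The "foo" names below are hypothetical.
 */
#if 0
static struct mtx foo_mtx;
static int foo_deferred;

static void
foo_poll(void)
{

	if (mtx_trylock(&foo_mtx)) {
		/* We got the lock without sleeping; do the protected work. */
		foo_deferred = 0;
		mtx_unlock(&foo_mtx);
	} else {
		/* Lock was busy; remember to retry later. */
		foo_deferred = 1;
	}
}
#endif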

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td = curthread;
#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
	struct thread *owner;
#endif

	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, td)) {
		uintptr_t v;
		struct thread *td1;

		mtx_lock_spin(&sched_lock);
		/*
		 * Check if the lock has been released while spinning for
		 * the sched_lock.
		 */
		if ((v = m->mtx_lock) == MTX_UNOWNED) {
			mtx_unlock_spin(&sched_lock);
#ifdef __i386__
			ia32_pause();
#endif
			continue;
		}

		/*
		 * The mutex was marked contested on release. This means that
		 * there are threads blocked on it.
		 */
		if (v == MTX_CONTESTED) {
			td1 = TAILQ_FIRST(&m->mtx_blocked);
			MPASS(td1 != NULL);
			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;

			if (td1->td_priority < td->td_priority)
				td->td_priority = td1->td_priority;
			mtx_unlock_spin(&sched_lock);
			return;
		}

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
			(void *)(v | MTX_CONTESTED))) {
			mtx_unlock_spin(&sched_lock);
#ifdef __i386__
			ia32_pause();
#endif
			continue;
		}

#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, spin instead of blocking.
		 */
		owner = (struct thread *)(v & MTX_FLAGMASK);
		if (m != &Giant && thread_running(owner)) {
			mtx_unlock_spin(&sched_lock);
			while (mtx_owner(m) == owner && thread_running(owner)) {
#ifdef __i386__
				ia32_pause();
#endif
			}
			continue;
		}
#endif	/* SMP && ADAPTIVE_MUTEXES */

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef notyet
		/*
		 * If we're borrowing an interrupted thread's VM context, we
		 * must clean up before going to sleep.
		 */
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_lock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif

		/*
		 * Put us on the list of threads blocked on this mutex.
		 */
		if (TAILQ_EMPTY(&m->mtx_blocked)) {
			td1 = mtx_owner(m);
			LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
			TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		} else {
			TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq)
				if (td1->td_priority > td->td_priority)
					break;
			if (td1)
				TAILQ_INSERT_BEFORE(td1, td, td_blkq);
			else
				TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		}

		/*
		 * Save who we're blocked on.
		 */
		td->td_blocked = m;
		td->td_mtxname = m->mtx_object.lo_name;
		td->td_state = TDS_MTX;
		propagate_priority(td);

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
			    m->mtx_object.lo_name);

		td->td_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch();

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			  "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
			  td, m, m->mtx_object.lo_name);

		mtx_unlock_spin(&sched_lock);
	}

	return;
}

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	for (;;) {
		if (_obtain_lock(m, curthread))
			break;

		/* Give interrupts a chance while we spin. */
		critical_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
#ifdef __i386__
				ia32_pause();
#endif
				continue;
			}
			if (i < 60000000)
				DELAY(1);
#ifdef DDB
			else if (!db_active)
#else
			else
#endif
				panic("spin lock %s held by %p for > 5 seconds",
				    m->mtx_object.lo_name, (void *)m->mtx_lock);
#ifdef __i386__
			ia32_pause();
#endif
		}
		critical_enter();
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td, *td1;
	struct mtx *m1;
	int pri;

	td = curthread;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	mtx_lock_spin(&sched_lock);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

	td1 = TAILQ_FIRST(&m->mtx_blocked);
#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
	if (td1 == NULL) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
		mtx_unlock_spin(&sched_lock);
		return;
	}
#endif
	MPASS(td->td_proc->p_magic == P_MAGIC);
	MPASS(td1->td_proc->p_magic == P_MAGIC);

	TAILQ_REMOVE(&m->mtx_blocked, td1, td_blkq);

	if (TAILQ_EMPTY(&m->mtx_blocked)) {
		LIST_REMOVE(m, mtx_contested);
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else
		atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);

	pri = PRI_MAX;
	LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority;
		if (cp < pri)
			pri = cp;
	}

	if (pri > td->td_base_pri)
		pri = td->td_base_pri;
	td->td_priority = pri;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
		    m, td1);

	td1->td_blocked = NULL;
	setrunqueue(td1);

	if (td->td_critnest == 1 && td1->td_priority < pri) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		td->td_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}

	mtx_unlock_spin(&sched_lock);

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
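
/*
 * Example (illustrative sketch, not part of this file): with INVARIANTS
 * enabled, a routine can document and enforce its locking contract by
 * asserting that the caller holds the mutex protecting the data it touches.
 * The "foo" names below are hypothetical.
 */
#if 0
struct foo_softc {
	struct mtx	sc_mtx;
	int		sc_count;
};

static void
foo_bump(struct foo_softc *sc)
{

	/* Panic (under INVARIANTS) if the caller forgot to lock sc_mtx. */
	mtx_assert(&sc->sc_mtx, MA_OWNED);
	sc->sc_count++;
}
#endif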

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}
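
/*
 * Example (illustrative sketch, not part of this file): MTX_SYSINIT()
 * arranges for mtx_sysinit() to initialize a statically declared mutex
 * during boot, as is done for mprof_mtx above.  The "foo" names below are
 * hypothetical.
 */
#if 0
static struct mtx foo_list_mtx;
MTX_SYSINIT(foo_list, &foo_list_mtx, "foo list lock", MTX_DEF);
#endif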

/*
 * Mutex initialization routine; initialize lock `m' of type contained in
 * `opts' with options contained in `opts' and name `name.'  The optional
 * lock type `type' is used as a general lock category name for use with
 * witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_object *lock;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_SLEEPABLE | MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	lock = &m->mtx_object;
	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
	    ("mutex %s %p already initialized", name, m));
	bzero(m, sizeof(*m));
	if (opts & MTX_SPIN)
		lock->lo_class = &lock_class_mtx_spin;
	else
		lock->lo_class = &lock_class_mtx_sleep;
	lock->lo_name = name;
	lock->lo_type = type != NULL ? type : name;
	if (opts & MTX_QUIET)
		lock->lo_flags = LO_QUIET;
	if (opts & MTX_RECURSE)
		lock->lo_flags |= LO_RECURSABLE;
	if (opts & MTX_SLEEPABLE)
		lock->lo_flags |= LO_SLEEPABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		lock->lo_flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		lock->lo_flags |= LO_DUPOK;

	m->mtx_lock = MTX_UNOWNED;
	TAILQ_INIT(&m->mtx_blocked);

	LOCK_LOG_INIT(lock, opts);

	WITNESS_INIT(lock);
}
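
/*
 * Example (illustrative sketch, not part of this file): the usual lifetime
 * of a dynamically initialized sleep mutex.  The "foo" names below are
 * hypothetical; the "foo" type string groups related locks for witness.
 */
#if 0
static struct mtx foo_mtx;
static int foo_count;

static void
foo_attach(void)
{

	mtx_init(&foo_mtx, "foo lock", "foo", MTX_DEF);
}

static void
foo_use(void)
{

	mtx_lock(&foo_mtx);
	foo_count++;
	mtx_unlock(&foo_mtx);
}

static void
foo_detach(void)
{

	mtx_destroy(&foo_mtx);
}
#endif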

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

	LOCK_LOG_DESTROY(&m->mtx_object, 0);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	WITNESS_DESTROY(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * setup before this is called.
 */
void
mutex_init(void)
{

	/* Setup thread0 so that mutexes work. */
	LIST_INIT(&thread0.td_contested);

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_lock(&Giant);
}

/*
 * Encapsulated Giant mutex routines.  These routines provide encapsulation
 * control for the Giant mutex, allowing sysctls to be used to turn on and
 * off Giant around certain subsystems.  The default values for the sysctls
 * are set to what developers believe is stable and working in regards to
 * the Giant pushdown.  Developers should not turn off Giant via these
 * sysctls unless they know what they are doing.
 *
 * Callers of mtx_lock_giant() are expected to pass the return value to an
 * accompanying mtx_unlock_giant() later on.  If multiple subsystems are
 * affected by a Giant wrap, all related sysctl variables must be zero for
 * the subsystem call to operate without Giant (as determined by the caller).
 */

SYSCTL_NODE(_kern, OID_AUTO, giant, CTLFLAG_RD, NULL, "Giant mutex manipulation");

static int kern_giant_all = 0;
SYSCTL_INT(_kern_giant, OID_AUTO, all, CTLFLAG_RW, &kern_giant_all, 0, "");

int kern_giant_proc = 1;	/* Giant around PROC locks */
int kern_giant_file = 1;	/* Giant around struct file & filedesc */
int kern_giant_ucred = 1;	/* Giant around ucred */
SYSCTL_INT(_kern_giant, OID_AUTO, proc, CTLFLAG_RW, &kern_giant_proc, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, file, CTLFLAG_RW, &kern_giant_file, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, ucred, CTLFLAG_RW, &kern_giant_ucred, 0, "");

int
mtx_lock_giant(int sysctlvar)
{
	if (sysctlvar || kern_giant_all) {
		mtx_lock(&Giant);
		return(1);
	}
	return(0);
}

void
mtx_unlock_giant(int s)
{
	if (s)
		mtx_unlock(&Giant);
}
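
/*
 * Example (illustrative sketch, not part of this file): the expected
 * calling pattern is to save the return value of mtx_lock_giant() and pass
 * it to the matching mtx_unlock_giant().  kern_giant_proc is one of the
 * sysctl-controlled variables defined above; foo_proc_op() is hypothetical.
 */
#if 0
static void
foo_proc_op(void)
{
	int s;

	s = mtx_lock_giant(kern_giant_proc);
	/* ... work that may still need Giant, depending on the sysctl ... */
	mtx_unlock_giant(s);
}
#endif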