kern_mutex.c revision 105919
1/*-
2 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 * 3. Berkeley Software Design Inc's name may not be used to endorse or
13 *    promote products derived from this software without specific prior
14 *    written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
29 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
30 * $FreeBSD: head/sys/kern/kern_mutex.c 105919 2002-10-25 08:40:20Z phk $
31 */
32
33/*
34 * Machine independent bits of mutex implementation.
35 */
36
37#include "opt_adaptive_mutexes.h"
38#include "opt_ddb.h"
39
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/bus.h>
43#include <sys/kernel.h>
44#include <sys/ktr.h>
45#include <sys/lock.h>
46#include <sys/malloc.h>
47#include <sys/mutex.h>
48#include <sys/proc.h>
49#include <sys/resourcevar.h>
50#include <sys/sched.h>
51#include <sys/sbuf.h>
52#include <sys/stdint.h>
53#include <sys/sysctl.h>
54#include <sys/vmmeter.h>
55
56#include <machine/atomic.h>
57#include <machine/bus.h>
58#include <machine/clock.h>
59#include <machine/cpu.h>
60
61#include <ddb/ddb.h>
62
63#include <vm/vm.h>
64#include <vm/vm_extern.h>
65
66/*
67 * Internal utility macros.
68 */
69#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)
70
71#define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
72	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))
73
74/* XXXKSE This test will change. */
75#define	thread_running(td)						\
76	((td)->td_kse != NULL && (td)->td_kse->ke_oncpu != NOCPU)
77
78/*
79 * Lock classes for sleep and spin mutexes.
80 */
81struct lock_class lock_class_mtx_sleep = {
82	"sleep mutex",
83	LC_SLEEPLOCK | LC_RECURSABLE
84};
85struct lock_class lock_class_mtx_spin = {
86	"spin mutex",
87	LC_SPINLOCK | LC_RECURSABLE
88};
89
90/*
91 * System-wide mutexes
92 */
93struct mtx sched_lock;
94struct mtx Giant;
95
96/*
97 * Prototypes for non-exported routines.
98 */
99static void	propagate_priority(struct thread *);
100
101static void
102propagate_priority(struct thread *td)
103{
104	int pri = td->td_priority;
105	struct mtx *m = td->td_blocked;
106
107	mtx_assert(&sched_lock, MA_OWNED);
108	for (;;) {
109		struct thread *td1;
110
111		td = mtx_owner(m);
112
113		if (td == NULL) {
114			/*
115			 * This really isn't quite right; we really
116			 * ought to bump the priority of the thread that
117			 * next acquires the mutex.
118			 */
119			MPASS(m->mtx_lock == MTX_CONTESTED);
120			return;
121		}
122
123		MPASS(td->td_proc != NULL);
124		MPASS(td->td_proc->p_magic == P_MAGIC);
125		KASSERT(!TD_IS_SLEEPING(td), ("sleeping thread owns a mutex"));
126		if (td->td_priority <= pri) /* lower is higher priority */
127			return;
128
129
130		/*
131		 * If lock holder is actually running, just bump priority.
132		 */
133		if (TD_IS_RUNNING(td)) {
134			td->td_priority = pri;
135			return;
136		}
137
138#ifndef SMP
139		/*
140		 * For UP, we check to see if td is curthread (this should
141		 * never happen, however, as it would mean we are deadlocked.)
142		 */
143		KASSERT(td != curthread, ("Deadlock detected"));
144#endif
145
146		/*
147		 * If the thread is on a run queue, move it to the new run
148		 * queue and quit.  XXXKSE this gets a lot more complicated
149		 * under threads, but try anyhow.
150		 */
151		if (TD_ON_RUNQ(td)) {
152			MPASS(td->td_blocked == NULL);
153			sched_prio(td, pri);
154			return;
155		}
156		/*
157		 * Adjust for any other cases.
158		 */
159		td->td_priority = pri;
160
161		/*
162		 * If we aren't blocked on a mutex, we should be.
163		 */
164		KASSERT(TD_ON_LOCK(td), (
165		    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
166		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_state,
167		    m->mtx_object.lo_name));
168
169		/*
170		 * Pick up the mutex that td is blocked on.
171		 */
172		m = td->td_blocked;
173		MPASS(m != NULL);
174
175		/*
176		 * Check if the thread needs to be moved up on
177		 * the blocked chain.
178		 */
179		if (td == TAILQ_FIRST(&m->mtx_blocked)) {
180			continue;
181		}
182
183		td1 = TAILQ_PREV(td, threadqueue, td_lockq);
184		if (td1->td_priority <= pri) {
185			continue;
186		}
187
188		/*
189		 * Remove thread from blocked chain and determine where
190		 * it should be moved up to.  Since we know that td1 has
191		 * a lower priority than td, we know that at least one
192		 * thread in the chain has a lower priority and that
193		 * td1 will thus not be NULL after the loop.
194		 */
195		TAILQ_REMOVE(&m->mtx_blocked, td, td_lockq);
196		TAILQ_FOREACH(td1, &m->mtx_blocked, td_lockq) {
197			MPASS(td1->td_proc->p_magic == P_MAGIC);
198			if (td1->td_priority > pri)
199				break;
200		}
201
202		MPASS(td1 != NULL);
203		TAILQ_INSERT_BEFORE(td1, td, td_lockq);
204		CTR4(KTR_LOCK,
205		    "propagate_priority: p %p moved before %p on [%p] %s",
206		    td, td1, m, m->mtx_object.lo_name);
207	}
208}
209
210#ifdef MUTEX_PROFILING
211SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
212SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
213static int mutex_prof_enable = 0;
214SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
215    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");
216
217struct mutex_prof {
218	const char	*name;
219	const char	*file;
220	int		line;
221	struct {
222		uintmax_t	max;
223		uintmax_t	tot;
224		uintmax_t	cur;
225	} cnt;
226	struct mutex_prof *next;
227};
228
229/*
230 * mprof_buf is a static pool of profiling records to avoid possible
231 * reentrance of the memory allocation functions.
232 *
233 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
234 */
235#define	NUM_MPROF_BUFFERS	1000
236static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
237static int first_free_mprof_buf;
238#define	MPROF_HASH_SIZE		1009
239static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];
240
241static int mutex_prof_acquisitions;
242SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
243    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
244static int mutex_prof_records;
245SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
246    &mutex_prof_records, 0, "Number of profiling records");
247static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
248SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
249    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
250static int mutex_prof_rejected;
251SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
252    &mutex_prof_rejected, 0, "Number of rejected profiling records");
253static int mutex_prof_hashsize = MPROF_HASH_SIZE;
254SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
255    &mutex_prof_hashsize, 0, "Hash size");
256static int mutex_prof_collisions = 0;
257SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
258    &mutex_prof_collisions, 0, "Number of hash collisions");
259
260/*
261 * mprof_mtx protects the profiling buffers and the hash.
262 */
263static struct mtx mprof_mtx;
264MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);
265
266static u_int64_t
267nanoseconds(void)
268{
269	struct timespec tv;
270
271	nanotime(&tv);
272	return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
273}
274
275static int
276dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
277{
278	struct sbuf *sb;
279	int error, i;
280
281	if (first_free_mprof_buf == 0)
282		return (SYSCTL_OUT(req, "No locking recorded",
283		    sizeof("No locking recorded")));
284
285	sb = sbuf_new(NULL, NULL, 1024, SBUF_AUTOEXTEND);
286	sbuf_printf(sb, "%6s %12s %11s %5s %s\n",
287	    "max", "total", "count", "avg", "name");
288	/*
289	 * XXX this spinlock seems to be by far the largest perpetrator
290	 * of spinlock latency (1.6 msec on an Athlon1600 was recorded
291	 * even before I pessimized it further by moving the average
292	 * computation here).
293	 */
294	mtx_lock_spin(&mprof_mtx);
295	for (i = 0; i < first_free_mprof_buf; ++i)
296		sbuf_printf(sb, "%6ju %12ju %11ju %5ju %s:%d (%s)\n",
297		    mprof_buf[i].cnt.max / 1000,
298		    mprof_buf[i].cnt.tot / 1000,
299		    mprof_buf[i].cnt.cur,
300		    mprof_buf[i].cnt.cur == 0 ? (uintmax_t)0 :
301			mprof_buf[i].cnt.tot / (mprof_buf[i].cnt.cur * 1000),
302		    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
303	mtx_unlock_spin(&mprof_mtx);
304	sbuf_finish(sb);
305	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
306	sbuf_delete(sb);
307	return (error);
308}
309SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
310    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");
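/*
 * Illustrative usage sketch (not part of the interface proper): with
 * MUTEX_PROFILING compiled in, the records above are exported through the
 * sysctl tree declared in this file and can be driven from userland:
 *
 *	sysctl debug.mutex.prof.enable=1	# start recording hold times
 *	...exercise the system...
 *	sysctl debug.mutex.prof.stats		# dump max/total/count/avg per site
 *	sysctl debug.mutex.prof.collisions	# check hash chain quality
 */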
311#endif
312
313/*
314 * Function versions of the inlined __mtx_* macros.  These are used by
315 * modules and can also be called from assembly language if needed.
316 */
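/*
 * Illustrative only: consumers normally go through the mtx_lock()/
 * mtx_unlock() interface rather than calling these functions directly.
 * A minimal sketch with a hypothetical driver softc:
 *
 *	struct foo_softc {
 *		struct mtx	sc_mtx;
 *		int		sc_count;
 *	};
 *
 *	static void
 *	foo_bump(struct foo_softc *sc)
 *	{
 *
 *		mtx_lock(&sc->sc_mtx);
 *		sc->sc_count++;
 *		mtx_unlock(&sc->sc_mtx);
 *	}
 */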
317void
318_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
319{
320
321	MPASS(curthread != NULL);
322	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
323	    ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
324	    file, line));
325	_get_sleep_lock(m, curthread, opts, file, line);
326	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
327	    line);
328	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
329#ifdef MUTEX_PROFILING
330	/* don't reset the timer when/if recursing */
331	if (m->mtx_acqtime == 0) {
332		m->mtx_filename = file;
333		m->mtx_lineno = line;
334		m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
335		++mutex_prof_acquisitions;
336	}
337#endif
338}
339
340void
341_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
342{
343
344	MPASS(curthread != NULL);
345	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
346	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
347	    file, line));
348	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
349	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
350	    line);
351	mtx_assert(m, MA_OWNED);
352#ifdef MUTEX_PROFILING
353	if (m->mtx_acqtime != 0) {
354		static const char *unknown = "(unknown)";
355		struct mutex_prof *mpp;
356		u_int64_t acqtime, now;
357		const char *p, *q;
358		volatile u_int hash;
359
360		now = nanoseconds();
361		acqtime = m->mtx_acqtime;
362		m->mtx_acqtime = 0;
363		if (now <= acqtime)
364			goto out;
365		for (p = m->mtx_filename; p != NULL &&
366		    strncmp(p, "../", 3) == 0; p += 3)  /* nothing */ ;
367		if (p == NULL || *p == '\0')
368			p = unknown;
369		for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
370			hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
371		mtx_lock_spin(&mprof_mtx);
372		for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
373			if (mpp->line == m->mtx_lineno &&
374			    strcmp(mpp->file, p) == 0)
375				break;
376		if (mpp == NULL) {
377			/* Just exit if we cannot get a trace buffer */
378			if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
379				++mutex_prof_rejected;
380				goto unlock;
381			}
382			mpp = &mprof_buf[first_free_mprof_buf++];
383			mpp->name = mtx_name(m);
384			mpp->file = p;
385			mpp->line = m->mtx_lineno;
386			mpp->next = mprof_hash[hash];
387			if (mprof_hash[hash] != NULL)
388				++mutex_prof_collisions;
389			mprof_hash[hash] = mpp;
390			++mutex_prof_records;
391		}
392		/*
393		 * Record if the mutex has been held longer now than ever
394		 * before.
395		 */
396		if (now - acqtime > mpp->cnt.max)
397			mpp->cnt.max = now - acqtime;
398		mpp->cnt.tot += now - acqtime;
399		mpp->cnt.cur++;
400unlock:
401		mtx_unlock_spin(&mprof_mtx);
402	}
403out:
404#endif
405	_rel_sleep_lock(m, curthread, opts, file, line);
406}
407
408void
409_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
410{
411
412	MPASS(curthread != NULL);
413	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
414	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
415	    m->mtx_object.lo_name, file, line));
416#if defined(SMP) || LOCK_DEBUG > 0 || 1
417	_get_spin_lock(m, curthread, opts, file, line);
418#else
419	critical_enter();
420#endif
421	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
422	    line);
423	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
424}
425
426void
427_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
428{
429
430	MPASS(curthread != NULL);
431	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
432	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
433	    m->mtx_object.lo_name, file, line));
434	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
435	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
436	    line);
437	mtx_assert(m, MA_OWNED);
438#if defined(SMP) || LOCK_DEBUG > 0 || 1
439	_rel_spin_lock(m);
440#else
441	critical_exit();
442#endif
443}
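/*
 * Illustrative sketch of the spin mutex interface backed by the two
 * functions above; the mutex shown is hypothetical (sched_lock in this
 * file is a real example of a spin mutex):
 *
 *	mtx_lock_spin(&foo_spin_mtx);
 *	... short, non-sleeping critical section ...
 *	mtx_unlock_spin(&foo_spin_mtx);
 */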
444
445/*
446 * The important part of mtx_trylock{,_flags}(): try to acquire lock `m.'
447 * We do NOT handle recursion here; we assume that if we're called, it's
448 * because we know we don't already own this lock.
449 */
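/*
 * Illustrative sketch of the intended mtx_trylock() calling pattern
 * (the lock and the work are hypothetical):
 *
 *	if (mtx_trylock(&sc->sc_mtx)) {
 *		... we now own the lock, do the work ...
 *		mtx_unlock(&sc->sc_mtx);
 *	} else {
 *		... lock is busy; defer or fall back ...
 *	}
 */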
450int
451_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
452{
453	int rval;
454
455	MPASS(curthread != NULL);
456
457	rval = _obtain_lock(m, curthread);
458
459	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
460	if (rval) {
461		/*
462		 * We do not handle recursion in _mtx_trylock; see the
463		 * note at the top of the routine.
464		 */
465		KASSERT(!mtx_recursed(m),
466		    ("mtx_trylock() called on a recursed mutex"));
467		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
468		    file, line);
469	}
470
471	return (rval);
472}
473
474/*
475 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
476 *
477 * We call this if the lock is either contested (i.e. we need to go to
478 * sleep waiting for it), or if we need to recurse on it.
479 */
480void
481_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
482{
483	struct thread *td = curthread;
484#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
485	struct thread *owner;
486#endif
487#ifdef KTR
488	int cont_logged = 0;
489#endif
490
491	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
492		m->mtx_recurse++;
493		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
494		if (LOCK_LOG_TEST(&m->mtx_object, opts))
495			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
496		return;
497	}
498
499	if (LOCK_LOG_TEST(&m->mtx_object, opts))
500		CTR4(KTR_LOCK,
501		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
502		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);
503
504	while (!_obtain_lock(m, td)) {
505		uintptr_t v;
506		struct thread *td1;
507
508		mtx_lock_spin(&sched_lock);
509		/*
510		 * Check if the lock has been released while spinning for
511		 * the sched_lock.
512		 */
513		if ((v = m->mtx_lock) == MTX_UNOWNED) {
514			mtx_unlock_spin(&sched_lock);
515#ifdef __i386__
516			ia32_pause();
517#endif
518			continue;
519		}
520
521		/*
522		 * The mutex was marked contested on release. This means that
523		 * there are threads blocked on it.
524		 */
525		if (v == MTX_CONTESTED) {
526			td1 = TAILQ_FIRST(&m->mtx_blocked);
527			MPASS(td1 != NULL);
528			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;
529
530			if (td1->td_priority < td->td_priority)
531				td->td_priority = td1->td_priority;
532			mtx_unlock_spin(&sched_lock);
533			return;
534		}
535
536		/*
537		 * If the mutex isn't already contested and a failure occurs
538		 * setting the contested bit, the mutex was either released
539		 * or the state of the MTX_RECURSED bit changed.
540		 */
541		if ((v & MTX_CONTESTED) == 0 &&
542		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
543			(void *)(v | MTX_CONTESTED))) {
544			mtx_unlock_spin(&sched_lock);
545#ifdef __i386__
546			ia32_pause();
547#endif
548			continue;
549		}
550
551#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
552		/*
553		 * If the current owner of the lock is executing on another
554		 * CPU, spin instead of blocking.
555		 */
556		owner = (struct thread *)(v & MTX_FLAGMASK);
557		if (m != &Giant && thread_running(owner)) {
558			mtx_unlock_spin(&sched_lock);
559			while (mtx_owner(m) == owner && thread_running(owner)) {
560#ifdef __i386__
561				ia32_pause();
562#endif
563			}
564			continue;
565		}
566#endif	/* SMP && ADAPTIVE_MUTEXES */
567
568		/*
569		 * We definitely must sleep for this lock.
570		 */
571		mtx_assert(m, MA_NOTOWNED);
572
573#ifdef notyet
574		/*
575		 * If we're borrowing an interrupted thread's VM context, we
576		 * must clean up before going to sleep.
577		 */
578		if (td->td_ithd != NULL) {
579			struct ithd *it = td->td_ithd;
580
581			if (it->it_interrupted) {
582				if (LOCK_LOG_TEST(&m->mtx_object, opts))
583					CTR2(KTR_LOCK,
584				    "_mtx_lock_sleep: %p interrupted %p",
585					    it, it->it_interrupted);
586				intr_thd_fixup(it);
587			}
588		}
589#endif
590
591		/*
592		 * Put us on the list of threads blocked on this mutex.
593		 */
594		if (TAILQ_EMPTY(&m->mtx_blocked)) {
595			td1 = mtx_owner(m);
596			LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
597			TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_lockq);
598		} else {
599			TAILQ_FOREACH(td1, &m->mtx_blocked, td_lockq)
600				if (td1->td_priority > td->td_priority)
601					break;
602			if (td1)
603				TAILQ_INSERT_BEFORE(td1, td, td_lockq);
604			else
605				TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_lockq);
606		}
607#ifdef KTR
608		if (!cont_logged) {
609			CTR6(KTR_CONTENTION,
610			    "contention: %p at %s:%d wants %s, taken by %s:%d",
611			    td, file, line, m->mtx_object.lo_name,
612			    WITNESS_FILE(&m->mtx_object),
613			    WITNESS_LINE(&m->mtx_object));
614			cont_logged = 1;
615		}
616#endif
617
618		/*
619		 * Save who we're blocked on.
620		 */
621		td->td_blocked = m;
622		td->td_lockname = m->mtx_object.lo_name;
623		TD_SET_LOCK(td);
624		propagate_priority(td);
625
626		if (LOCK_LOG_TEST(&m->mtx_object, opts))
627			CTR3(KTR_LOCK,
628			    "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
629			    m->mtx_object.lo_name);
630
631		td->td_proc->p_stats->p_ru.ru_nvcsw++;
632		mi_switch();
633
634		if (LOCK_LOG_TEST(&m->mtx_object, opts))
635			CTR3(KTR_LOCK,
636			  "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
637			  td, m, m->mtx_object.lo_name);
638
639		mtx_unlock_spin(&sched_lock);
640	}
641
642#ifdef KTR
643	if (cont_logged) {
644		CTR4(KTR_CONTENTION,
645		    "contention end: %s acquired by %p at %s:%d",
646		    m->mtx_object.lo_name, td, file, line);
647	}
648#endif
649	return;
650}
651
652/*
653 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
654 *
655 * This is only called if we need to actually spin for the lock. Recursion
656 * is handled inline.
657 */
658void
659_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
660{
661	int i = 0;
662
663	if (LOCK_LOG_TEST(&m->mtx_object, opts))
664		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
665
666	for (;;) {
667		if (_obtain_lock(m, curthread))
668			break;
669
670		/* Give interrupts a chance while we spin. */
671		critical_exit();
672		while (m->mtx_lock != MTX_UNOWNED) {
673			if (i++ < 10000000) {
674#ifdef __i386__
675				ia32_pause();
676#endif
677				continue;
678			}
679			if (i < 60000000)
680				DELAY(1);
681#ifdef DDB
682			else if (!db_active)
683#else
684			else
685#endif
686				panic("spin lock %s held by %p for > 5 seconds",
687				    m->mtx_object.lo_name, (void *)m->mtx_lock);
688#ifdef __i386__
689			ia32_pause();
690#endif
691		}
692		critical_enter();
693	}
694
695	if (LOCK_LOG_TEST(&m->mtx_object, opts))
696		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
697
698	return;
699}
700
701/*
702 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
703 *
704 * We are only called here if the lock is recursed or contested (i.e. we
705 * need to wake up a blocked thread).
706 */
707void
708_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
709{
710	struct thread *td, *td1;
711	struct mtx *m1;
712	int pri;
713
714	td = curthread;
715
716	if (mtx_recursed(m)) {
717		if (--(m->mtx_recurse) == 0)
718			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
719		if (LOCK_LOG_TEST(&m->mtx_object, opts))
720			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
721		return;
722	}
723
724	mtx_lock_spin(&sched_lock);
725	if (LOCK_LOG_TEST(&m->mtx_object, opts))
726		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
727
728	td1 = TAILQ_FIRST(&m->mtx_blocked);
729#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
730	if (td1 == NULL) {
731		_release_lock_quick(m);
732		if (LOCK_LOG_TEST(&m->mtx_object, opts))
733			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
734		mtx_unlock_spin(&sched_lock);
735		return;
736	}
737#endif
738	MPASS(td->td_proc->p_magic == P_MAGIC);
739	MPASS(td1->td_proc->p_magic == P_MAGIC);
740
741	TAILQ_REMOVE(&m->mtx_blocked, td1, td_lockq);
742
743	if (TAILQ_EMPTY(&m->mtx_blocked)) {
744		LIST_REMOVE(m, mtx_contested);
745		_release_lock_quick(m);
746		if (LOCK_LOG_TEST(&m->mtx_object, opts))
747			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
748	} else
749		atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);
750
751	pri = PRI_MAX;
752	LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
753		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority;
754		if (cp < pri)
755			pri = cp;
756	}
757
758	if (pri > td->td_base_pri)
759		pri = td->td_base_pri;
760	td->td_priority = pri;
761
762	if (LOCK_LOG_TEST(&m->mtx_object, opts))
763		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
764		    m, td1);
765
766	td1->td_blocked = NULL;
767	TD_CLR_LOCK(td1);
768	if (!TD_CAN_RUN(td1)) {
769		mtx_unlock_spin(&sched_lock);
770		return;
771	}
772	setrunqueue(td1);
773
774	if (td->td_critnest == 1 && td1->td_priority < pri) {
775#ifdef notyet
776		if (td->td_ithd != NULL) {
777			struct ithd *it = td->td_ithd;
778
779			if (it->it_interrupted) {
780				if (LOCK_LOG_TEST(&m->mtx_object, opts))
781					CTR2(KTR_LOCK,
782				    "_mtx_unlock_sleep: %p interrupted %p",
783					    it, it->it_interrupted);
784				intr_thd_fixup(it);
785			}
786		}
787#endif
788		if (LOCK_LOG_TEST(&m->mtx_object, opts))
789			CTR2(KTR_LOCK,
790			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
791			    (void *)m->mtx_lock);
792
793		td->td_proc->p_stats->p_ru.ru_nivcsw++;
794		mi_switch();
795		if (LOCK_LOG_TEST(&m->mtx_object, opts))
796			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
797			    m, (void *)m->mtx_lock);
798	}
799
800	mtx_unlock_spin(&sched_lock);
801
802	return;
803}
804
805/*
806 * All the unlocking of MTX_SPIN locks is done inline.
807 * See the _rel_spin_lock() macro for the details.
808 */
809
810/*
811 * The backing function for the INVARIANTS-enabled mtx_assert()
812 */
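/*
 * Illustrative only: callers use the mtx_assert() macro to document and
 * enforce locking preconditions, e.g. (hypothetical softc lock):
 *
 *	static void
 *	foo_modify_locked(struct foo_softc *sc)
 *	{
 *
 *		mtx_assert(&sc->sc_mtx, MA_OWNED);
 *		sc->sc_dirty = 1;
 *	}
 *
 * MA_OWNED may be combined with MA_RECURSED or MA_NOTRECURSED, and
 * MA_NOTOWNED asserts the opposite, as handled below.
 */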
813#ifdef INVARIANT_SUPPORT
814void
815_mtx_assert(struct mtx *m, int what, const char *file, int line)
816{
817
818	if (panicstr != NULL)
819		return;
820	switch (what) {
821	case MA_OWNED:
822	case MA_OWNED | MA_RECURSED:
823	case MA_OWNED | MA_NOTRECURSED:
824		if (!mtx_owned(m))
825			panic("mutex %s not owned at %s:%d",
826			    m->mtx_object.lo_name, file, line);
827		if (mtx_recursed(m)) {
828			if ((what & MA_NOTRECURSED) != 0)
829				panic("mutex %s recursed at %s:%d",
830				    m->mtx_object.lo_name, file, line);
831		} else if ((what & MA_RECURSED) != 0) {
832			panic("mutex %s unrecursed at %s:%d",
833			    m->mtx_object.lo_name, file, line);
834		}
835		break;
836	case MA_NOTOWNED:
837		if (mtx_owned(m))
838			panic("mutex %s owned at %s:%d",
839			    m->mtx_object.lo_name, file, line);
840		break;
841	default:
842		panic("unknown mtx_assert at %s:%d", file, line);
843	}
844}
845#endif
846
847/*
848 * The MUTEX_DEBUG-enabled mtx_validate()
849 *
850 * Most of these checks have been moved off into the LO_INITIALIZED flag
851 * maintained by the witness code.
852 */
853#ifdef MUTEX_DEBUG
854
855void	mtx_validate(struct mtx *);
856
857void
858mtx_validate(struct mtx *m)
859{
860
861/*
862 * XXX: When kernacc() does not require Giant we can reenable this check
863 */
864#ifdef notyet
865/*
866 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
867 * we can re-enable the kernacc() checks.
868 */
869#ifndef __alpha__
870	/*
871	 * Can't call kernacc() from early init386(), especially when
872	 * initializing Giant mutex, because some stuff in kernacc()
873	 * requires Giant itself.
874	 */
875	if (!cold)
876		if (!kernacc((caddr_t)m, sizeof(m),
877		    VM_PROT_READ | VM_PROT_WRITE))
878			panic("Can't read and write to mutex %p", m);
879#endif
880#endif
881}
882#endif
883
884/*
885 * General init routine used by the MTX_SYSINIT() macro.
886 */
887void
888mtx_sysinit(void *arg)
889{
890	struct mtx_args *margs = arg;
891
892	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
893}
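/*
 * Illustrative sketch of arranging for a mutex to be initialized at boot
 * via MTX_SYSINIT(); mprof_mtx above is set up the same way (the mutex
 * shown here is hypothetical):
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo, &foo_mtx, "foo global lock", MTX_DEF);
 */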
894
895/*
896 * Mutex initialization routine; initialize lock `m' with the options and
897 * type contained in `opts' and name `name.'  The optional
898 * lock type `type' is used as a general lock category name for use with
899 * witness.
900 */
901void
902mtx_init(struct mtx *m, const char *name, const char *type, int opts)
903{
904	struct lock_object *lock;
905
906	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
907	    MTX_SLEEPABLE | MTX_NOWITNESS | MTX_DUPOK)) == 0);
908
909#ifdef MUTEX_DEBUG
910	/* Diagnostic and error correction */
911	mtx_validate(m);
912#endif
913
914	lock = &m->mtx_object;
915	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
916	    ("mutex %s %p already initialized", name, m));
917	bzero(m, sizeof(*m));
918	if (opts & MTX_SPIN)
919		lock->lo_class = &lock_class_mtx_spin;
920	else
921		lock->lo_class = &lock_class_mtx_sleep;
922	lock->lo_name = name;
923	lock->lo_type = type != NULL ? type : name;
924	if (opts & MTX_QUIET)
925		lock->lo_flags = LO_QUIET;
926	if (opts & MTX_RECURSE)
927		lock->lo_flags |= LO_RECURSABLE;
928	if (opts & MTX_SLEEPABLE)
929		lock->lo_flags |= LO_SLEEPABLE;
930	if ((opts & MTX_NOWITNESS) == 0)
931		lock->lo_flags |= LO_WITNESS;
932	if (opts & MTX_DUPOK)
933		lock->lo_flags |= LO_DUPOK;
934
935	m->mtx_lock = MTX_UNOWNED;
936	TAILQ_INIT(&m->mtx_blocked);
937
938	LOCK_LOG_INIT(lock, opts);
939
940	WITNESS_INIT(lock);
941}
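/*
 * Illustrative sketch of direct mtx_init()/mtx_destroy() use for a lock
 * whose lifetime matches some dynamically managed object (names are
 * hypothetical):
 *
 *	mtx_init(&sc->sc_mtx, "foo softc lock", NULL, MTX_DEF);
 *	...
 *	mtx_destroy(&sc->sc_mtx);
 */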
942
943/*
944 * Destroy lock `m'.  We don't allow MTX_QUIET to be
945 * passed in as a flag here because if the corresponding mtx_init() was
946 * called with MTX_QUIET set, then it will already be set in the mutex's
947 * flags.
948 */
949void
950mtx_destroy(struct mtx *m)
951{
952
953	LOCK_LOG_DESTROY(&m->mtx_object, 0);
954
955	if (!mtx_owned(m))
956		MPASS(mtx_unowned(m));
957	else {
958		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
959
960		/* Tell witness this isn't locked to make it happy. */
961		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
962		    __LINE__);
963	}
964
965	WITNESS_DESTROY(&m->mtx_object);
966}
967
968/*
969 * Initialize the mutex code and system mutexes.  This is called from the MD
970 * startup code prior to mi_startup().  The per-CPU data space needs to be
971 * set up before this is called.
972 */
973void
974mutex_init(void)
975{
976
977	/* Setup thread0 so that mutexes work. */
978	LIST_INIT(&thread0.td_contested);
979
980	/*
981	 * Initialize mutexes.
982	 */
983	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
984	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
985	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
986	mtx_lock(&Giant);
987}
988
989/*
990 * Encapsulated Giant mutex routines.  These routines provide encapsulation
991 * control for the Giant mutex, allowing sysctls to be used to turn on and
992 * off Giant around certain subsystems.  The default values for the sysctls
993 * are set to what developers believe is stable and working with regard to
994 * the Giant pushdown.  Developers should not turn off Giant via these
995 * sysctls unless they know what they are doing.
996 *
997 * Callers of mtx_lock_giant() are expected to pass the return value to an
998 * accompanying mtx_unlock_giant() later on.  If multiple subsystems are
999 * affected by a Giant wrap, all related sysctl variables must be zero for
1000 * the subsystem call to operate without Giant (as determined by the caller).
1001 */
1002
1003SYSCTL_NODE(_kern, OID_AUTO, giant, CTLFLAG_RD, NULL, "Giant mutex manipulation");
1004
1005static int kern_giant_all = 0;
1006SYSCTL_INT(_kern_giant, OID_AUTO, all, CTLFLAG_RW, &kern_giant_all, 0, "");
1007
1008int kern_giant_proc = 1;	/* Giant around PROC locks */
1009int kern_giant_file = 1;	/* Giant around struct file & filedesc */
1010int kern_giant_ucred = 1;	/* Giant around ucred */
1011SYSCTL_INT(_kern_giant, OID_AUTO, proc, CTLFLAG_RW, &kern_giant_proc, 0, "");
1012SYSCTL_INT(_kern_giant, OID_AUTO, file, CTLFLAG_RW, &kern_giant_file, 0, "");
1013SYSCTL_INT(_kern_giant, OID_AUTO, ucred, CTLFLAG_RW, &kern_giant_ucred, 0, "");
1014
1015int
1016mtx_lock_giant(int sysctlvar)
1017{
1018	if (sysctlvar || kern_giant_all) {
1019		mtx_lock(&Giant);
1020		return(1);
1021	}
1022	return(0);
1023}
1024
1025void
1026mtx_unlock_giant(int s)
1027{
1028	if (s)
1029		mtx_unlock(&Giant);
1030}
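/*
 * Illustrative only: the mtx_lock_giant()/mtx_unlock_giant() calling
 * pattern described above, using one of the real sysctl variables
 * (the surrounding code is hypothetical):
 *
 *	int s;
 *
 *	s = mtx_lock_giant(kern_giant_proc);
 *	... code whose need for Giant is controlled by kern.giant.proc ...
 *	mtx_unlock_giant(s);
 */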
1031