/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_mutex.c 117168 2003-07-02 16:14:09Z jhb $");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)  ((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)    (mtx_unowned((m)) ? NULL \
        : (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))

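/*
 * Illustrative sketch (not part of the original file): the mtx_lock word
 * holds either MTX_UNOWNED or the owning thread pointer OR'd with flag
 * bits (MTX_RECURSED, MTX_CONTESTED), which is why mtx_owner() recovers
 * the owner by masking with MTX_FLAGMASK.  A reader might decode the word
 * as follows; "decode_lock_word" is an invented name for exposition only.
 */
#if 0
static void
decode_lock_word(struct mtx *m)
{
        uintptr_t v = m->mtx_lock;

        if (v == MTX_UNOWNED)
                printf("%s: unowned\n", mtx_name(m));
        else
                printf("%s: owner %p%s%s\n", mtx_name(m),
                    (struct thread *)(v & MTX_FLAGMASK),
                    (v & MTX_RECURSED) ? " (recursed)" : "",
                    (v & MTX_CONTESTED) ? " (contested)" : "");
}
#endif
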
/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
        "sleep mutex",
        LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
        "spin mutex",
        LC_SPINLOCK | LC_RECURSABLE
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

/*
 * Prototypes for non-exported routines.
 */
static void     propagate_priority(struct thread *);

static void
propagate_priority(struct thread *td)
{
        int pri = td->td_priority;
        struct mtx *m = td->td_blocked;

        mtx_assert(&sched_lock, MA_OWNED);
        for (;;) {
                struct thread *td1;

                td = mtx_owner(m);

                if (td == NULL) {
                        /*
                         * This really isn't quite right.  Really
                         * ought to bump priority of thread that
                         * next acquires the mutex.
                         */
                        MPASS(m->mtx_lock == MTX_CONTESTED);
                        return;
                }

                MPASS(td->td_proc != NULL);
                MPASS(td->td_proc->p_magic == P_MAGIC);
                KASSERT(!TD_IS_SLEEPING(td), ("sleeping thread owns a mutex"));
                if (td->td_priority <= pri) /* lower is higher priority */
                        return;

                /*
                 * If lock holder is actually running, just bump priority.
                 */
                if (TD_IS_RUNNING(td)) {
                        td->td_priority = pri;
                        return;
                }

#ifndef SMP
                /*
                 * For UP, we check to see if td is curthread (this shouldn't
                 * ever happen however as it would mean we are in a deadlock.)
                 */
                KASSERT(td != curthread, ("Deadlock detected"));
#endif

                /*
                 * If on run queue move to new run queue, and quit.
                 * XXXKSE this gets a lot more complicated under threads
                 * but try anyhow.
                 */
                if (TD_ON_RUNQ(td)) {
                        MPASS(td->td_blocked == NULL);
                        sched_prio(td, pri);
                        return;
                }
                /*
                 * Adjust for any other cases.
                 */
                td->td_priority = pri;

                /*
                 * If we aren't blocked on a mutex, we should be.
                 */
                KASSERT(TD_ON_LOCK(td), (
                    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
                    td->td_proc->p_pid, td->td_proc->p_comm, td->td_state,
                    m->mtx_object.lo_name));

                /*
                 * Pick up the mutex that td is blocked on.
                 */
                m = td->td_blocked;
                MPASS(m != NULL);

                /*
                 * Check if the thread needs to be moved up on
                 * the blocked chain
                 */
                if (td == TAILQ_FIRST(&m->mtx_blocked)) {
                        continue;
                }

                td1 = TAILQ_PREV(td, threadqueue, td_lockq);
                if (td1->td_priority <= pri) {
                        continue;
                }

                /*
                 * Remove thread from blocked chain and determine where
                 * it should be moved up to.  Since we know that td1 has
                 * a lower priority than td, we know that at least one
                 * thread in the chain has a lower priority and that
                 * td1 will thus not be NULL after the loop.
                 */
                TAILQ_REMOVE(&m->mtx_blocked, td, td_lockq);
                TAILQ_FOREACH(td1, &m->mtx_blocked, td_lockq) {
                        MPASS(td1->td_proc->p_magic == P_MAGIC);
                        if (td1->td_priority > pri)
                                break;
                }

                MPASS(td1 != NULL);
                TAILQ_INSERT_BEFORE(td1, td, td_lockq);
                CTR4(KTR_LOCK,
                    "propagate_priority: p %p moved before %p on [%p] %s",
                    td, td1, m, m->mtx_object.lo_name);
        }
}

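/*
 * Worked scenario (not in the original source): the classic priority
 * inversion that propagate_priority() addresses.  Thread names and
 * priority values are invented for the example; recall that lower
 * numeric values mean higher priority.
 *
 * 1. Low-priority thread L (pri 200) acquires mutex M and is preempted.
 * 2. High-priority thread H (pri 100) blocks on M; without help, H would
 *    effectively wait behind any medium-priority thread starving L.
 * 3. propagate_priority(H) walks the chain: it finds L = mtx_owner(M)
 *    and lends it H's priority (td->td_priority = 100), continuing along
 *    td_blocked links if L is itself blocked on another mutex.
 * 4. When L releases M, _mtx_unlock_sleep() recomputes L's priority from
 *    its remaining contested mutexes and its td_base_pri.
 */
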
#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");

struct mutex_prof {
        const char      *name;
        const char      *file;
        int             line;
        uintmax_t       cnt_max;
        uintmax_t       cnt_tot;
        uintmax_t       cnt_cur;
        struct mutex_prof *next;
};

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#define NUM_MPROF_BUFFERS       1000
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#define MPROF_HASH_SIZE         1009
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];
/* SWAG: sbuf size = avg stat. line size * number of locks */
#define MPROF_SBUF_SIZE         256 * 400

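/*
 * Illustrative sketch (not in the original file): profiling records are
 * keyed by (file, line) using the simple multiplicative hash that
 * _mtx_unlock_flags() computes inline below; collisions chain through
 * each record's "next" pointer.  "mprof_hashkey" is an invented helper
 * name for exposition only.
 */
#if 0
static u_int
mprof_hashkey(const char *file, int line)
{
        u_int hash;
        const char *q;

        for (hash = line, q = file; *q != '\0'; ++q)
                hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
        return (hash);
}
#endif
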
static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");

/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);

static u_int64_t
nanoseconds(void)
{
        struct timespec tv;

        nanotime(&tv);
        return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
        struct sbuf *sb;
        int error, i;
        static int multiplier = 1;

        if (first_free_mprof_buf == 0)
                return (SYSCTL_OUT(req, "No locking recorded",
                    sizeof("No locking recorded")));

retry_sbufops:
        sb = sbuf_new(NULL, NULL, MPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
        sbuf_printf(sb, "%6s %12s %11s %5s %s\n",
            "max", "total", "count", "avg", "name");
        /*
         * XXX this spinlock seems to be by far the largest perpetrator
         * of spinlock latency (1.6 msec on an Athlon1600 was recorded
         * even before I pessimized it further by moving the average
         * computation here).
         */
        mtx_lock_spin(&mprof_mtx);
        for (i = 0; i < first_free_mprof_buf; ++i) {
                sbuf_printf(sb, "%6ju %12ju %11ju %5ju %s:%d (%s)\n",
                    mprof_buf[i].cnt_max / 1000,
                    mprof_buf[i].cnt_tot / 1000,
                    mprof_buf[i].cnt_cur,
                    mprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
                        mprof_buf[i].cnt_tot / (mprof_buf[i].cnt_cur * 1000),
                    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
                if (sbuf_overflowed(sb)) {
                        mtx_unlock_spin(&mprof_mtx);
                        sbuf_delete(sb);
                        multiplier++;
                        goto retry_sbufops;
                }
        }
        mtx_unlock_spin(&mprof_mtx);
        sbuf_finish(sb);
        error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
        sbuf_delete(sb);
        return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");
#endif

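/*
 * Usage sketch (not part of the original file): on a kernel built with
 * MUTEX_PROFILING, the knobs declared above are reachable through
 * sysctl(8) under debug.mutex.prof, e.g.:
 *
 *      sysctl debug.mutex.prof.enable=1        # start recording holdtimes
 *      sysctl debug.mutex.prof.stats           # dump the per-site table
 */
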
/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
            ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
            file, line));
        _get_sleep_lock(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
        /* don't reset the timer when/if recursing */
        if (m->mtx_acqtime == 0) {
                m->mtx_filename = file;
                m->mtx_lineno = line;
                m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
                ++mutex_prof_acquisitions;
        }
#endif
}

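/*
 * Usage sketch (not part of the original file): consumers normally go
 * through the mtx_lock()/mtx_unlock() macros, which — roughly, and
 * depending on whether the fast inline path is compiled in — bottom out
 * in the _mtx_*_flags() functions here, passing the caller's file and
 * line for witness and profiling:
 *
 *      mtx_lock(&Giant);    ~>  _mtx_lock_flags(&Giant, 0, __FILE__, __LINE__)
 *      mtx_unlock(&Giant);  ~>  _mtx_unlock_flags(&Giant, 0, __FILE__, __LINE__)
 */
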
void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
            ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
            file, line));
        WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
        if (m->mtx_acqtime != 0) {
                static const char *unknown = "(unknown)";
                struct mutex_prof *mpp;
                u_int64_t acqtime, now;
                const char *p, *q;
                volatile u_int hash;

                now = nanoseconds();
                acqtime = m->mtx_acqtime;
                m->mtx_acqtime = 0;
                if (now <= acqtime)
                        goto out;
                for (p = m->mtx_filename;
                    p != NULL && strncmp(p, "../", 3) == 0; p += 3)
                        /* nothing */ ;
                if (p == NULL || *p == '\0')
                        p = unknown;
                for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
                        hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
                mtx_lock_spin(&mprof_mtx);
                for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
                        if (mpp->line == m->mtx_lineno &&
                            strcmp(mpp->file, p) == 0)
                                break;
                if (mpp == NULL) {
                        /* Just exit if we cannot get a trace buffer */
                        if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
                                ++mutex_prof_rejected;
                                goto unlock;
                        }
                        mpp = &mprof_buf[first_free_mprof_buf++];
                        mpp->name = mtx_name(m);
                        mpp->file = p;
                        mpp->line = m->mtx_lineno;
                        mpp->next = mprof_hash[hash];
                        if (mprof_hash[hash] != NULL)
                                ++mutex_prof_collisions;
                        mprof_hash[hash] = mpp;
                        ++mutex_prof_records;
                }
                /*
                 * Record if the mutex has been held longer now than ever
                 * before.
                 */
                if (now - acqtime > mpp->cnt_max)
                        mpp->cnt_max = now - acqtime;
                mpp->cnt_tot += now - acqtime;
                mpp->cnt_cur++;
unlock:
                mtx_unlock_spin(&mprof_mtx);
        }
out:
#endif
        _rel_sleep_lock(m, curthread, opts, file, line);
}

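/*
 * Worked example (not in the original file): holdtimes are recorded in
 * nanoseconds and divided by 1000 for the report, so a record with
 * cnt_tot = 3000000 and cnt_cur = 3 prints a total of 3000 (usec) and
 * an average of cnt_tot / (cnt_cur * 1000) = 1000 (usec); the numbers
 * here are invented for illustration.
 */
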
void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
            ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
            m->mtx_object.lo_name, file, line));
#if defined(SMP) || LOCK_DEBUG > 0 || 1
        _get_spin_lock(m, curthread, opts, file, line);
#else
        critical_enter();
#endif
        LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
            ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
            m->mtx_object.lo_name, file, line));
        WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        mtx_assert(m, MA_OWNED);
#if defined(SMP) || LOCK_DEBUG > 0 || 1
        _rel_spin_lock(m);
#else
        critical_exit();
#endif
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  We do NOT handle recursion here.  If this
 * function is called on a recursed mutex, it will return failure and
 * will not recursively acquire the lock.  You are expected to know what
 * you are doing.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
        int rval;

        MPASS(curthread != NULL);

        rval = _obtain_lock(m, curthread);

        LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
        if (rval)
                WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);

        return (rval);
}

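/*
 * Usage sketch (not part of the original file): since, per the comment
 * above, a trylock on an already-owned mutex simply fails rather than
 * recursing, callers must always handle failure — typically to dodge a
 * lock-order reversal.  "foo_mtx" is an invented name.
 */
#if 0
        if (mtx_trylock(&foo_mtx)) {
                /* ... work while holding foo_mtx ... */
                mtx_unlock(&foo_mtx);
        } else {
                /* back off and retry in a safe lock order */
        }
#endif
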
/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
        struct thread *td = curthread;
        struct thread *td1;
#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
        struct thread *owner;
#endif
        uintptr_t v;
#ifdef KTR
        int cont_logged = 0;
#endif

        if (mtx_owned(m)) {
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
                return;
        }

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR4(KTR_LOCK,
                    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
                    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

        while (!_obtain_lock(m, td)) {

                mtx_lock_spin(&sched_lock);
                v = m->mtx_lock;

                /*
                 * Check if the lock has been released while spinning for
                 * the sched_lock.
                 */
                if (v == MTX_UNOWNED) {
                        mtx_unlock_spin(&sched_lock);
#ifdef __i386__
                        ia32_pause();
#endif
                        continue;
                }

                /*
                 * The mutex was marked contested on release. This means that
                 * there are other threads blocked on it.  Grab ownership of
                 * it and propagate its priority to the current thread if
                 * necessary.
                 */
                if (v == MTX_CONTESTED) {
                        td1 = TAILQ_FIRST(&m->mtx_blocked);
                        MPASS(td1 != NULL);
                        m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;
                        LIST_INSERT_HEAD(&td->td_contested, m, mtx_contested);

                        if (td1->td_priority < td->td_priority)
                                td->td_priority = td1->td_priority;
                        mtx_unlock_spin(&sched_lock);
                        return;
                }

                /*
                 * If the mutex isn't already contested and a failure occurs
                 * setting the contested bit, the mutex was either released
                 * or the state of the MTX_RECURSED bit changed.
                 */
                if ((v & MTX_CONTESTED) == 0 &&
                    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
                    (void *)(v | MTX_CONTESTED))) {
                        mtx_unlock_spin(&sched_lock);
#ifdef __i386__
                        ia32_pause();
#endif
                        continue;
                }

#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
                /*
                 * If the current owner of the lock is executing on another
                 * CPU, spin instead of blocking.
                 */
                owner = (struct thread *)(v & MTX_FLAGMASK);
                if (m != &Giant && TD_IS_RUNNING(owner)) {
                        mtx_unlock_spin(&sched_lock);
                        while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
#ifdef __i386__
                                ia32_pause();
#endif
                        }
                        continue;
                }
#endif  /* SMP && ADAPTIVE_MUTEXES */

                /*
                 * We definitely must sleep for this lock.
                 */
                mtx_assert(m, MA_NOTOWNED);

#ifdef notyet
                /*
                 * If we're borrowing an interrupted thread's VM context, we
                 * must clean up before going to sleep.
                 */
                if (td->td_ithd != NULL) {
                        struct ithd *it = td->td_ithd;

                        if (it->it_interrupted) {
                                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                                        CTR2(KTR_LOCK,
                                            "_mtx_lock_sleep: %p interrupted %p",
                                            it, it->it_interrupted);
                                intr_thd_fixup(it);
                        }
                }
#endif

                /*
                 * Put us on the list of threads blocked on this mutex
                 * and add this mutex to the owning thread's list of
                 * contested mutexes if needed.
                 */
                if (TAILQ_EMPTY(&m->mtx_blocked)) {
                        td1 = mtx_owner(m);
                        LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
                        TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_lockq);
                } else {
                        TAILQ_FOREACH(td1, &m->mtx_blocked, td_lockq)
                                if (td1->td_priority > td->td_priority)
                                        break;
                        if (td1)
                                TAILQ_INSERT_BEFORE(td1, td, td_lockq);
                        else
                                TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_lockq);
                }
#ifdef KTR
                if (!cont_logged) {
                        CTR6(KTR_CONTENTION,
                            "contention: %p at %s:%d wants %s, taken by %s:%d",
                            td, file, line, m->mtx_object.lo_name,
                            WITNESS_FILE(&m->mtx_object),
                            WITNESS_LINE(&m->mtx_object));
                        cont_logged = 1;
                }
#endif

                /*
                 * Save who we're blocked on.
                 */
                td->td_blocked = m;
                td->td_lockname = m->mtx_object.lo_name;
                TD_SET_LOCK(td);
                propagate_priority(td);

                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR3(KTR_LOCK,
                            "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
                            m->mtx_object.lo_name);

                td->td_proc->p_stats->p_ru.ru_nvcsw++;
                mi_switch();

                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR3(KTR_LOCK,
                            "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
                            td, m, m->mtx_object.lo_name);

                mtx_unlock_spin(&sched_lock);
        }

#ifdef KTR
        if (cont_logged) {
                CTR4(KTR_CONTENTION,
                    "contention end: %s acquired by %p at %s:%d",
                    m->mtx_object.lo_name, td, file, line);
        }
#endif
        return;
}

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
{
        int i = 0;

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

        for (;;) {
                if (_obtain_lock(m, curthread))
                        break;

                /* Give interrupts a chance while we spin. */
                critical_exit();
                while (m->mtx_lock != MTX_UNOWNED) {
                        if (i++ < 10000000) {
#ifdef __i386__
                                ia32_pause();
#endif
                                continue;
                        }
                        if (i < 60000000)
                                DELAY(1);
#ifdef DDB
                        else if (!db_active)
#else
                        else
#endif
                                panic("spin lock %s held by %p for > 5 seconds",
                                    m->mtx_object.lo_name, (void *)m->mtx_lock);
#ifdef __i386__
                        ia32_pause();
#endif
                }
                critical_enter();
        }

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

        return;
}

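/*
 * Sketch of the back-off schedule above (not in the original file):
 * iterations 0..9,999,999 just issue a CPU pause; iterations up to
 * 59,999,999 add a DELAY(1) microsecond back-off; past that the lock is
 * presumed wedged and the kernel panics, unless the DDB debugger is
 * active.  Interrupts stay enabled while spinning because the loop runs
 * outside the critical section.
 */
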
/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
        struct thread *td, *td1;
        struct mtx *m1;
        int pri;

        td = curthread;

        if (mtx_recursed(m)) {
                if (--(m->mtx_recurse) == 0)
                        atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
                return;
        }

        mtx_lock_spin(&sched_lock);
        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

        td1 = TAILQ_FIRST(&m->mtx_blocked);
#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
        if (td1 == NULL) {
                _release_lock_quick(m);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
                mtx_unlock_spin(&sched_lock);
                return;
        }
#endif
        MPASS(td->td_proc->p_magic == P_MAGIC);
        MPASS(td1->td_proc->p_magic == P_MAGIC);

        TAILQ_REMOVE(&m->mtx_blocked, td1, td_lockq);

        LIST_REMOVE(m, mtx_contested);
        if (TAILQ_EMPTY(&m->mtx_blocked)) {
                _release_lock_quick(m);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
        } else
                m->mtx_lock = MTX_CONTESTED;

        pri = PRI_MAX;
        LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
                int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority;
                if (cp < pri)
                        pri = cp;
        }

        if (pri > td->td_base_pri)
                pri = td->td_base_pri;
        td->td_priority = pri;

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
                    m, td1);

        td1->td_blocked = NULL;
        TD_CLR_LOCK(td1);
        if (!TD_CAN_RUN(td1)) {
                mtx_unlock_spin(&sched_lock);
                return;
        }
        setrunqueue(td1);

        if (td->td_critnest == 1 && td1->td_priority < pri) {
#ifdef notyet
                if (td->td_ithd != NULL) {
                        struct ithd *it = td->td_ithd;

                        if (it->it_interrupted) {
                                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                                        CTR2(KTR_LOCK,
                                            "_mtx_unlock_sleep: %p interrupted %p",
                                            it, it->it_interrupted);
                                intr_thd_fixup(it);
                        }
                }
#endif
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR2(KTR_LOCK,
                            "_mtx_unlock_sleep: %p switching out lock=%p", m,
                            (void *)m->mtx_lock);

                td->td_proc->p_stats->p_ru.ru_nivcsw++;
                mi_switch();
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
                            m, (void *)m->mtx_lock);
        }

        mtx_unlock_spin(&sched_lock);

        return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

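/*
 * Worked example (not in the original file): on release, the former
 * owner's priority is recomputed in _mtx_unlock_sleep() as the best
 * (numerically lowest) td_priority among the front waiters of the
 * mutexes it still holds contested, clamped at td_base_pri.  With an
 * invented td_base_pri of 140 and remaining front-waiter priorities
 * {120, 130}, the owner keeps the lent priority 120; with {150, 160}
 * it reverts to its base priority of 140.
 */
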
/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

        if (panicstr != NULL)
                return;
        switch (what) {
        case MA_OWNED:
        case MA_OWNED | MA_RECURSED:
        case MA_OWNED | MA_NOTRECURSED:
                if (!mtx_owned(m))
                        panic("mutex %s not owned at %s:%d",
                            m->mtx_object.lo_name, file, line);
                if (mtx_recursed(m)) {
                        if ((what & MA_NOTRECURSED) != 0)
                                panic("mutex %s recursed at %s:%d",
                                    m->mtx_object.lo_name, file, line);
                } else if ((what & MA_RECURSED) != 0) {
                        panic("mutex %s unrecursed at %s:%d",
                            m->mtx_object.lo_name, file, line);
                }
                break;
        case MA_NOTOWNED:
                if (mtx_owned(m))
                        panic("mutex %s owned at %s:%d",
                            m->mtx_object.lo_name, file, line);
                break;
        default:
                panic("unknown mtx_assert at %s:%d", file, line);
        }
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void    mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
        /*
         * Can't call kernacc() from early init386(), especially when
         * initializing Giant mutex, because some stuff in kernacc()
         * requires Giant itself.
         */
        if (!cold)
                if (!kernacc((caddr_t)m, sizeof(m),
                    VM_PROT_READ | VM_PROT_WRITE))
                        panic("Can't read and write to mutex %p", m);
#endif
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
        struct mtx_args *margs = arg;

        mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}

/*
 * Mutex initialization routine; initialize lock `m' of type contained in
 * `opts' with options contained in `opts' and name `name.'  The optional
 * lock type `type' is used as a general lock category name for use with
 * witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
        struct lock_object *lock;

        MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
            MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
        /* Diagnostic and error correction */
        mtx_validate(m);
#endif

        lock = &m->mtx_object;
        KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
            ("mutex \"%s\" %p already initialized", name, m));
        bzero(m, sizeof(*m));
        if (opts & MTX_SPIN)
                lock->lo_class = &lock_class_mtx_spin;
        else
                lock->lo_class = &lock_class_mtx_sleep;
        lock->lo_name = name;
        lock->lo_type = type != NULL ? type : name;
        if (opts & MTX_QUIET)
                lock->lo_flags = LO_QUIET;
        if (opts & MTX_RECURSE)
                lock->lo_flags |= LO_RECURSABLE;
        if ((opts & MTX_NOWITNESS) == 0)
                lock->lo_flags |= LO_WITNESS;
        if (opts & MTX_DUPOK)
                lock->lo_flags |= LO_DUPOK;

        m->mtx_lock = MTX_UNOWNED;
        TAILQ_INIT(&m->mtx_blocked);

        LOCK_LOG_INIT(lock, opts);

        WITNESS_INIT(lock);
}

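/*
 * Usage sketch (not part of the original file): a typical consumer
 * initializes a mutex once, brackets its critical sections with
 * lock/unlock, and destroys the mutex on teardown.  "foo_lock",
 * "foo_attach", and "foo_detach" are invented names.
 */
#if 0
static struct mtx foo_lock;

static void
foo_attach(void)
{
        mtx_init(&foo_lock, "foo", NULL, MTX_DEF);
        mtx_lock(&foo_lock);
        /* ... manipulate foo's shared state ... */
        mtx_unlock(&foo_lock);
}

static void
foo_detach(void)
{
        mtx_destroy(&foo_lock);
}
#endif
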
/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

        LOCK_LOG_DESTROY(&m->mtx_object, 0);

        if (!mtx_owned(m))
                MPASS(mtx_unowned(m));
        else {
                MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

                /* Tell witness this isn't locked to make it happy. */
                WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
                    __LINE__);
        }

        WITNESS_DESTROY(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * setup before this is called.
 */
void
mutex_init(void)
{

        /* Setup thread0 so that mutexes work. */
        LIST_INIT(&thread0.td_contested);

        /*
         * Initialize mutexes.
         */
        mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
        mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
        mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
        mtx_lock(&Giant);
}