/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD: head/sys/kern/kern_mutex.c 97839 2002-06-04 22:36:24Z jhb $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sbuf.h>
#include <sys/stdint.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))
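
/*
 * Note on the encoding (informal): mtx_lock holds MTX_UNOWNED when the
 * mutex is free; otherwise it holds the owning thread pointer with the
 * MTX_RECURSED/MTX_CONTESTED flag bits OR'd into the low bits.  Thread
 * structures are aligned well past those bits, so mtx_owner() recovers
 * the owner simply by masking the flag bits off with MTX_FLAGMASK.
 */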

/* XXXKSE This test will change. */
#define	thread_running(td)	\
	((td)->td_kse != NULL && (td)->td_kse->ke_oncpu != NOCPU)

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

/*
 * Prototypes for non-exported routines.
 */
static void	propagate_priority(struct thread *);

static void
propagate_priority(struct thread *td)
{
	int pri = td->td_priority;
	struct mtx *m = td->td_blocked;

	mtx_assert(&sched_lock, MA_OWNED);
	for (;;) {
		struct thread *td1;

		td = mtx_owner(m);

		if (td == NULL) {
			/*
			 * This really isn't quite right.  Really
			 * ought to bump priority of thread that
			 * next acquires the mutex.
			 */
			MPASS(m->mtx_lock == MTX_CONTESTED);
			return;
		}

		MPASS(td->td_proc->p_magic == P_MAGIC);
		KASSERT(td->td_proc->p_stat != SSLEEP,
		    ("sleeping thread owns a mutex"));
		if (td->td_priority <= pri) /* lower is higher priority */
			return;

		/*
		 * Bump this thread's priority.
		 */
		td->td_priority = pri;

		/*
		 * If lock holder is actually running, just bump priority.
		 */
		if (thread_running(td)) {
			MPASS(td->td_proc->p_stat == SRUN
			    || td->td_proc->p_stat == SZOMB
			    || td->td_proc->p_stat == SSTOP);
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check to see if td is curthread (this shouldn't
		 * ever happen, as it would mean we are in a deadlock.)
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If on run queue move to new run queue, and quit.
		 * XXXKSE this gets a lot more complicated under threads
		 * but try anyhow.
		 */
		if (td->td_proc->p_stat == SRUN) {
			MPASS(td->td_blocked == NULL);
			remrunqueue(td);
			setrunqueue(td);
			return;
		}

		/*
		 * If we aren't blocked on a mutex, we should be.
		 */
		KASSERT(td->td_proc->p_stat == SMTX, (
		    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_proc->p_stat,
		    m->mtx_object.lo_name));
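
		/*
		 * At this point the owner is itself blocked on another
		 * mutex (p_stat == SMTX), so keep walking: lend the new
		 * priority down the chain of blocked threads until we
		 * reach a running owner or a free lock.
		 */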

		/*
		 * Pick up the mutex that td is blocked on.
		 */
		m = td->td_blocked;
		MPASS(m != NULL);

		/*
		 * Check if the thread needs to be moved up on
		 * the blocked chain.
		 */
		if (td == TAILQ_FIRST(&m->mtx_blocked)) {
			continue;
		}

		td1 = TAILQ_PREV(td, threadqueue, td_blkq);
		if (td1->td_priority <= pri) {
			continue;
		}

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved up to.  Since we know that td1 has
		 * a lower priority than td, we know that at least one
		 * thread in the chain has a lower priority and that
		 * td1 will thus not be NULL after the loop.
		 */
		TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
		TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > pri)
				break;
		}

		MPASS(td1 != NULL);
		TAILQ_INSERT_BEFORE(td1, td, td_blkq);
		CTR4(KTR_LOCK,
		    "propagate_priority: p %p moved before %p on [%p] %s",
		    td, td1, m, m->mtx_object.lo_name);
	}
}
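
/*
 * Worked example (illustrative, not from the code above): if thread A
 * runs at priority 40 and holds mutex M, and thread B at priority 20
 * blocks on M (numerically lower is better), propagate_priority() lends
 * B's priority to A, so A runs at 20 until it releases M.  On release,
 * _mtx_unlock_sleep() below recomputes A's priority from td_base_pri
 * and the best waiter on any mutex A still holds contested.
 */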

#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");

struct mutex_prof {
	const char *name;
	const char *file;
	int line;
#define	MPROF_MAX 0
#define	MPROF_TOT 1
#define	MPROF_CNT 2
#define	MPROF_AVG 3
	uintmax_t counter[4];
	struct mutex_prof *next;
};

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#define	NUM_MPROF_BUFFERS 1000
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#define	MPROF_HASH_SIZE 1009
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];

static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");

/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);

static u_int64_t
nanoseconds(void)
{
	struct timespec tv;

	nanotime(&tv);
	return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error, i;

	if (first_free_mprof_buf == 0)
		return (SYSCTL_OUT(req, "No locking recorded",
		    sizeof("No locking recorded")));

	sb = sbuf_new(NULL, NULL, 1024, SBUF_AUTOEXTEND);
	sbuf_printf(sb, "%12s %12s %12s %12s %s\n",
	    "max", "total", "count", "average", "name");
	mtx_lock_spin(&mprof_mtx);
	for (i = 0; i < first_free_mprof_buf; ++i)
		sbuf_printf(sb, "%12ju %12ju %12ju %12ju %s:%d (%s)\n",
		    mprof_buf[i].counter[MPROF_MAX] / 1000,
		    mprof_buf[i].counter[MPROF_TOT] / 1000,
		    mprof_buf[i].counter[MPROF_CNT],
		    mprof_buf[i].counter[MPROF_AVG] / 1000,
		    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
	mtx_unlock_spin(&mprof_mtx);
	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
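
/*
 * Usage sketch (illustrative; "foo_mtx" is a hypothetical consumer, not
 * part of this file):
 *
 *	static struct mtx foo_mtx;
 *
 *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *	mtx_lock(&foo_mtx);
 *	... access data protected by foo_mtx ...
 *	mtx_unlock(&foo_mtx);
 *	mtx_destroy(&foo_mtx);
 *
 * In a module, the mtx_lock() macro ends up calling _mtx_lock_flags()
 * below with the caller's file and line for witness and KTR logging.
 */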
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
	/* don't reset the timer when/if recursing */
	if (m->acqtime == 0) {
		m->file = file;
		m->line = line;
		m->acqtime = mutex_prof_enable ? nanoseconds() : 0;
		++mutex_prof_acquisitions;
	}
#endif
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
	if (m->acqtime != 0) {
		static const char *unknown = "(unknown)";
		struct mutex_prof *mpp;
		u_int64_t acqtime, now;
		const char *p, *q;
		volatile u_int hash;

		now = nanoseconds();
		acqtime = m->acqtime;
		m->acqtime = 0;
		if (now <= acqtime)
			goto out;
		for (p = file; strncmp(p, "../", 3) == 0; p += 3)
			/* nothing */ ;
		if (p == NULL || *p == '\0')
			p = unknown;
		for (hash = line, q = p; *q != '\0'; ++q)
			hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
		mtx_lock_spin(&mprof_mtx);
		for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
			if (mpp->line == line && strcmp(mpp->file, p) == 0)
				break;
		if (mpp == NULL) {
			/* Just exit if we cannot get a trace buffer */
			if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
				++mutex_prof_rejected;
				goto unlock;
			}
			mpp = &mprof_buf[first_free_mprof_buf++];
			mpp->name = mtx_name(m);
			mpp->file = p;
			mpp->line = line;
			mpp->next = mprof_hash[hash];
			if (mprof_hash[hash] != NULL)
				++mutex_prof_collisions;
			mprof_hash[hash] = mpp;
			++mutex_prof_records;
		}
		/*
		 * Record if the mutex has been held longer now than ever
		 * before.
		 */
		if ((now - acqtime) > mpp->counter[MPROF_MAX])
			mpp->counter[MPROF_MAX] = now - acqtime;
		mpp->counter[MPROF_TOT] += now - acqtime;
		mpp->counter[MPROF_CNT] += 1;
		mpp->counter[MPROF_AVG] =
		    mpp->counter[MPROF_TOT] / mpp->counter[MPROF_CNT];
unlock:
		mtx_unlock_spin(&mprof_mtx);
	}
out:
#endif
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	_rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
#if defined(SMP) || LOCK_DEBUG > 0
	_get_spin_lock(m, curthread, opts, file, line);
#else
	critical_enter();
#endif
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}
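
/*
 * On UP kernels without lock debugging there is no other CPU to spin
 * against, so the spin lock paths above and below collapse into bare
 * critical_enter()/critical_exit() sections; no atomic operation on
 * mtx_lock is needed.
 */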

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	mtx_assert(m, MA_OWNED);
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
#if defined(SMP) || LOCK_DEBUG > 0
	_rel_spin_lock(m);
#else
	critical_exit();
#endif
}

/*
 * The important part of mtx_trylock{,_flags}().
 * Tries to acquire lock `m.'  We do NOT handle recursion here; we assume that
 * if we're called, it's because we know we don't already own this lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);

	rval = _obtain_lock(m, curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval) {
		/*
		 * We do not handle recursion in _mtx_trylock; see the
		 * note at the top of the routine.
		 */
		KASSERT(!mtx_recursed(m),
		    ("mtx_trylock() called on a recursed mutex"));
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	}

	return (rval);
}
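
/*
 * Caller sketch (illustrative, hypothetical lock name): mtx_trylock()
 * is typically used where blocking is not an option, e.g. to avoid a
 * lock-order reversal:
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		... do the work ...
 *		mtx_unlock(&foo_mtx);
 *	} else {
 *		... back off and retry in the safe order ...
 *	}
 */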

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td = curthread;
#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
	struct thread *owner;
#endif

	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, td)) {
		uintptr_t v;
		struct thread *td1;

		mtx_lock_spin(&sched_lock);
		/*
		 * Check if the lock has been released while spinning for
		 * the sched_lock.
		 */
		if ((v = m->mtx_lock) == MTX_UNOWNED) {
			mtx_unlock_spin(&sched_lock);
#ifdef __i386__
			ia32_pause();
#endif
			continue;
		}

		/*
		 * The mutex was marked contested on release.  This means that
		 * there are threads blocked on it.
		 */
		if (v == MTX_CONTESTED) {
			td1 = TAILQ_FIRST(&m->mtx_blocked);
			MPASS(td1 != NULL);
			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;

			if (td1->td_priority < td->td_priority)
				td->td_priority = td1->td_priority;
			mtx_unlock_spin(&sched_lock);
			return;
		}

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
		    (void *)(v | MTX_CONTESTED))) {
			mtx_unlock_spin(&sched_lock);
#ifdef __i386__
			ia32_pause();
#endif
			continue;
		}

#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, spin instead of blocking.
		 */
		owner = (struct thread *)(v & MTX_FLAGMASK);
		if (m != &Giant && thread_running(owner)) {
			mtx_unlock_spin(&sched_lock);
			while (mtx_owner(m) == owner && thread_running(owner)) {
#ifdef __i386__
				ia32_pause();
#endif
			}
			continue;
		}
#endif	/* SMP && ADAPTIVE_MUTEXES */

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef notyet
		/*
		 * If we're borrowing an interrupted thread's VM context, we
		 * must clean up before going to sleep.
		 */
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
					    "_mtx_lock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif

		/*
		 * Put us on the list of threads blocked on this mutex.
		 */
		if (TAILQ_EMPTY(&m->mtx_blocked)) {
			td1 = mtx_owner(m);
			LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
			TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		} else {
			TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq)
				if (td1->td_priority > td->td_priority)
					break;
			if (td1)
				TAILQ_INSERT_BEFORE(td1, td, td_blkq);
			else
				TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		}

		/*
		 * Save who we're blocked on.
		 */
		td->td_blocked = m;
		td->td_mtxname = m->mtx_object.lo_name;
		td->td_proc->p_stat = SMTX;
		propagate_priority(td);

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
			    m->mtx_object.lo_name);

		td->td_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch();

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
			    td, m, m->mtx_object.lo_name);

		mtx_unlock_spin(&sched_lock);
	}

	return;
}

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	for (;;) {
		if (_obtain_lock(m, curthread))
			break;

		/* Give interrupts a chance while we spin. */
		critical_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
#ifdef __i386__
				ia32_pause();
#endif
				continue;
			}
			if (i < 60000000)
				DELAY(1);
#ifdef DDB
			else if (!db_active)
#else
			else
#endif
				panic("spin lock %s held by %p for > 5 seconds",
				    m->mtx_object.lo_name, (void *)m->mtx_lock);
#ifdef __i386__
			ia32_pause();
#endif
		}
		critical_enter();
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}
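
/*
 * Spin mutex sketch (illustrative, hypothetical name): spin mutexes
 * protect data shared with code that cannot context switch, so the
 * critical section must stay short:
 *
 *	static struct mtx intr_mtx;
 *
 *	mtx_init(&intr_mtx, "intr table", NULL, MTX_SPIN);
 *	mtx_lock_spin(&intr_mtx);
 *	... a few instructions ...
 *	mtx_unlock_spin(&intr_mtx);
 */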

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td, *td1;
	struct mtx *m1;
	int pri;

	td = curthread;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	mtx_lock_spin(&sched_lock);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

	td1 = TAILQ_FIRST(&m->mtx_blocked);
#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
	if (td1 == NULL) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
		mtx_unlock_spin(&sched_lock);
		return;
	}
#endif
	MPASS(td->td_proc->p_magic == P_MAGIC);
	MPASS(td1->td_proc->p_magic == P_MAGIC);

	TAILQ_REMOVE(&m->mtx_blocked, td1, td_blkq);

	if (TAILQ_EMPTY(&m->mtx_blocked)) {
		LIST_REMOVE(m, mtx_contested);
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else
		atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);

	pri = PRI_MAX;
	LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority;
		if (cp < pri)
			pri = cp;
	}

	if (pri > td->td_base_pri)
		pri = td->td_base_pri;
	td->td_priority = pri;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
		    m, td1);

	td1->td_blocked = NULL;
	td1->td_proc->p_stat = SRUN;
	setrunqueue(td1);

	if (td->td_critnest == 1 && td1->td_priority < pri) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
					    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		setrunqueue(td);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		td->td_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}

	mtx_unlock_spin(&sched_lock);

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert().
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
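
/*
 * Typical use of the assertion above (illustrative; sched_lock is
 * checked this way at the top of propagate_priority()): a function
 * documents its locking contract on entry with
 *
 *	mtx_assert(&foo_mtx, MA_OWNED);
 *
 * which panics under INVARIANTS if the caller does not hold the lock.
 */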

/*
 * The MUTEX_DEBUG-enabled mtx_validate().
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}

/*
 * Mutex initialization routine; initialize lock `m' with options contained
 * in `opts' and name `name.'  The optional lock type `type' is used as a
 * general lock category name for use with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_object *lock;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_SLEEPABLE | MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	lock = &m->mtx_object;
	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
	    ("mutex %s %p already initialized", name, m));
	bzero(m, sizeof(*m));
	if (opts & MTX_SPIN)
		lock->lo_class = &lock_class_mtx_spin;
	else
		lock->lo_class = &lock_class_mtx_sleep;
	lock->lo_name = name;
	lock->lo_type = type != NULL ? type : name;
	if (opts & MTX_QUIET)
		lock->lo_flags = LO_QUIET;
	if (opts & MTX_RECURSE)
		lock->lo_flags |= LO_RECURSABLE;
	if (opts & MTX_SLEEPABLE)
		lock->lo_flags |= LO_SLEEPABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		lock->lo_flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		lock->lo_flags |= LO_DUPOK;

	m->mtx_lock = MTX_UNOWNED;
	TAILQ_INIT(&m->mtx_blocked);

	LOCK_LOG_INIT(lock, opts);

	WITNESS_INIT(lock);
}
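
/*
 * Sketch (illustrative, hypothetical names): MTX_SYSINIT() feeds
 * mtx_sysinit() above through a SYSINIT, so a mutex is ready before its
 * subsystem runs without an explicit call from that subsystem's own
 * initialization path, as done for mprof_mtx earlier in this file:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo, &foo_mtx, "foo", MTX_DEF);
 */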

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

	LOCK_LOG_DESTROY(&m->mtx_object, 0);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED | MTX_CONTESTED)) == 0);

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	WITNESS_DESTROY(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Set up thread0 so that mutexes work. */
	LIST_INIT(&thread0.td_contested);

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_lock(&Giant);
}

/*
 * Encapsulated Giant mutex routines.  These routines provide encapsulation
 * control for the Giant mutex, allowing sysctls to be used to turn Giant
 * on and off around certain subsystems.  The default values for the sysctls
 * are set to what developers believe is stable and working in regards to
 * the Giant pushdown.  Developers should not turn off Giant via these
 * sysctls unless they know what they are doing.
 *
 * Callers of mtx_lock_giant() are expected to pass the return value to an
 * accompanying mtx_unlock_giant() later on.  If multiple subsystems are
 * affected by a Giant wrap, all related sysctl variables must be zero for
 * the subsystem call to operate without Giant (as determined by the caller).
 */

SYSCTL_NODE(_kern, OID_AUTO, giant, CTLFLAG_RD, NULL,
    "Giant mutex manipulation");

static int kern_giant_all = 0;
SYSCTL_INT(_kern_giant, OID_AUTO, all, CTLFLAG_RW, &kern_giant_all, 0, "");

int kern_giant_proc = 1;	/* Giant around PROC locks */
int kern_giant_file = 1;	/* Giant around struct file & filedesc */
int kern_giant_ucred = 1;	/* Giant around ucred */
SYSCTL_INT(_kern_giant, OID_AUTO, proc, CTLFLAG_RW, &kern_giant_proc, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, file, CTLFLAG_RW, &kern_giant_file, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, ucred, CTLFLAG_RW, &kern_giant_ucred, 0, "");

int
mtx_lock_giant(int sysctlvar)
{

	if (sysctlvar || kern_giant_all) {
		mtx_lock(&Giant);
		return (1);
	}
	return (0);
}

void
mtx_unlock_giant(int s)
{

	if (s)
		mtx_unlock(&Giant);
}
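
/*
 * Usage sketch for the Giant wrappers above (illustrative): a subsystem
 * that may or may not still need Giant brackets the call path with the
 * pair, passing the matching sysctl variable:
 *
 *	int s;
 *
 *	s = mtx_lock_giant(kern_giant_ucred);
 *	... ucred manipulation ...
 *	mtx_unlock_giant(s);
 */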