kern_mutex.c revision 97081
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD: head/sys/kern/kern_mutex.c 97081 2002-05-21 20:47:11Z jhb $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
        "sleep mutex",
        LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
        "spin mutex",
        LC_SPINLOCK | LC_RECURSABLE
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

/*
 * Prototypes for non-exported routines.
 */
static void	propagate_priority(struct thread *);

static void
propagate_priority(struct thread *td)
{
        int pri = td->td_priority;
        struct mtx *m = td->td_blocked;

        mtx_assert(&sched_lock, MA_OWNED);
        for (;;) {
                struct thread *td1;

                td = mtx_owner(m);

                if (td == NULL) {
                        /*
                         * This really isn't quite right. Really
                         * ought to bump priority of thread that
                         * next acquires the mutex.
                         */
                        MPASS(m->mtx_lock == MTX_CONTESTED);
                        return;
                }

                MPASS(td->td_proc->p_magic == P_MAGIC);
                KASSERT(td->td_proc->p_stat != SSLEEP, ("sleeping thread owns a mutex"));
                if (td->td_priority <= pri) /* lower is higher priority */
                        return;

                /*
                 * Bump this thread's priority.
                 */
                td->td_priority = pri;

                /*
                 * If lock holder is actually running, just bump priority.
                 */
                /* XXXKSE this test is not sufficient */
                if (td->td_kse && (td->td_kse->ke_oncpu != NOCPU)) {
                        MPASS(td->td_proc->p_stat == SRUN
                            || td->td_proc->p_stat == SZOMB
                            || td->td_proc->p_stat == SSTOP);
                        return;
                }

#ifndef SMP
                /*
                 * For UP, we check to see if td is curthread (this shouldn't
                 * ever happen however as it would mean we are in a deadlock.)
                 */
                KASSERT(td != curthread, ("Deadlock detected"));
#endif

                /*
                 * If on run queue move to new run queue, and quit.
                 * XXXKSE this gets a lot more complicated under threads
                 * but try anyhow.
                 */
                if (td->td_proc->p_stat == SRUN) {
                        MPASS(td->td_blocked == NULL);
                        remrunqueue(td);
                        setrunqueue(td);
                        return;
                }

                /*
                 * If we aren't blocked on a mutex, we should be.
                 */
                KASSERT(td->td_proc->p_stat == SMTX, (
                    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
                    td->td_proc->p_pid, td->td_proc->p_comm, td->td_proc->p_stat,
                    m->mtx_object.lo_name));

                /*
                 * Pick up the mutex that td is blocked on.
                 */
                m = td->td_blocked;
                MPASS(m != NULL);

                /*
                 * Check if the thread needs to be moved up on
                 * the blocked chain
                 */
                if (td == TAILQ_FIRST(&m->mtx_blocked)) {
                        continue;
                }

                td1 = TAILQ_PREV(td, threadqueue, td_blkq);
                if (td1->td_priority <= pri) {
                        continue;
                }

                /*
                 * Remove thread from blocked chain and determine where
                 * it should be moved up to.  Since we know that td1 has
                 * a lower priority than td, we know that at least one
                 * thread in the chain has a lower priority and that
                 * td1 will thus not be NULL after the loop.
                 */
                TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
                TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
                        MPASS(td1->td_proc->p_magic == P_MAGIC);
                        if (td1->td_priority > pri)
                                break;
                }

                MPASS(td1 != NULL);
                TAILQ_INSERT_BEFORE(td1, td, td_blkq);
                CTR4(KTR_LOCK,
                    "propagate_priority: p %p moved before %p on [%p] %s",
                    td, td1, m, m->mtx_object.lo_name);
        }
}

#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");

struct mutex_prof {
        const char *name;
        const char *file;
        int line;
#define MPROF_MAX 0
#define MPROF_TOT 1
#define MPROF_CNT 2
#define MPROF_AVG 3
        u_int64_t counter[4];
        struct mutex_prof *next;
};

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#define NUM_MPROF_BUFFERS 1000
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#define MPROF_HASH_SIZE 1009
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];

static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");

/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);

static u_int64_t
nanoseconds(void)
{
        struct timespec tv;

        nanotime(&tv);
        return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
        struct sbuf *sb;
        int error, i;

        if (first_free_mprof_buf == 0)
                return SYSCTL_OUT(req, "No locking recorded",
                    sizeof("No locking recorded"));

        sb = sbuf_new(NULL, NULL, 1024, SBUF_AUTOEXTEND);
        sbuf_printf(sb, "%12s %12s %12s %12s %s\n",
            "max", "total", "count", "average", "name");
        mtx_lock_spin(&mprof_mtx);
        for (i = 0; i < first_free_mprof_buf; ++i)
                sbuf_printf(sb, "%12llu %12llu %12llu %12llu %s:%d (%s)\n",
                    mprof_buf[i].counter[MPROF_MAX] / 1000,
                    mprof_buf[i].counter[MPROF_TOT] / 1000,
                    mprof_buf[i].counter[MPROF_CNT],
                    mprof_buf[i].counter[MPROF_AVG] / 1000,
                    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
        mtx_unlock_spin(&mprof_mtx);
        sbuf_finish(sb);
        error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
        sbuf_delete(sb);
        return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
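
/*
 * Illustrative use of the sleep mutex interface that these functions
 * back.  This is a minimal sketch only; "foo_mtx" and foo_count are
 * hypothetical names, not part of this file.  It follows the same
 * pattern used for Giant in mutex_init() below:
 *
 *	static struct mtx foo_mtx;
 *	static int foo_count;
 *
 *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *	...
 *	mtx_lock(&foo_mtx);
 *	foo_count++;
 *	mtx_unlock(&foo_mtx);
 */
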
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        _get_sleep_lock(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
        /* don't reset the timer when/if recursing */
        if (m->acqtime == 0) {
                m->file = file;
                m->line = line;
                m->acqtime = mutex_prof_enable ? nanoseconds() : 0;
                ++mutex_prof_acquisitions;
        }
#endif
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
        if (m->acqtime != 0) {
                static const char *unknown = "(unknown)";
                struct mutex_prof *mpp;
                u_int64_t acqtime, now;
                const char *p, *q;
                volatile u_int hash;

                now = nanoseconds();
                acqtime = m->acqtime;
                m->acqtime = 0;
                if (now <= acqtime)
                        goto out;
                for (p = file; strncmp(p, "../", 3) == 0; p += 3)
                        /* nothing */ ;
                if (p == NULL || *p == '\0')
                        p = unknown;
                for (hash = line, q = p; *q != '\0'; ++q)
                        hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
                mtx_lock_spin(&mprof_mtx);
                for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
                        if (mpp->line == line && strcmp(mpp->file, p) == 0)
                                break;
                if (mpp == NULL) {
                        /* Just exit if we cannot get a trace buffer */
                        if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
                                ++mutex_prof_rejected;
                                goto unlock;
                        }
                        mpp = &mprof_buf[first_free_mprof_buf++];
                        mpp->name = mtx_name(m);
                        mpp->file = p;
                        mpp->line = line;
                        mpp->next = mprof_hash[hash];
                        if (mprof_hash[hash] != NULL)
                                ++mutex_prof_collisions;
                        mprof_hash[hash] = mpp;
                        ++mutex_prof_records;
                }
                /*
                 * Record if the mutex has been held longer now than ever
                 * before
                 */
                if ((now - acqtime) > mpp->counter[MPROF_MAX])
                        mpp->counter[MPROF_MAX] = now - acqtime;
                mpp->counter[MPROF_TOT] += now - acqtime;
                mpp->counter[MPROF_CNT] += 1;
                mpp->counter[MPROF_AVG] =
                    mpp->counter[MPROF_TOT] / mpp->counter[MPROF_CNT];
unlock:
                mtx_unlock_spin(&mprof_mtx);
        }
out:
#endif
        WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        _rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
#if defined(SMP) || LOCK_DEBUG > 0
        _get_spin_lock(m, curthread, opts, file, line);
#else
        critical_enter();
#endif
        LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        mtx_assert(m, MA_OWNED);
        WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
#if defined(SMP) || LOCK_DEBUG > 0
        _rel_spin_lock(m);
#else
        critical_exit();
#endif
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  We do NOT handle recursion here; we assume that
 * if we're called, it's because we know we don't already own this lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
        int rval;

        MPASS(curthread != NULL);

        rval = _obtain_lock(m, curthread);

        LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
        if (rval) {
                /*
                 * We do not handle recursion in _mtx_trylock; see the
                 * note at the top of the routine.
                 */
                KASSERT(!mtx_recursed(m),
                    ("mtx_trylock() called on a recursed mutex"));
                WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
        }

        return (rval);
}

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
        struct thread *td = curthread;
#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
        struct thread *owner;
#endif

        if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
                return;
        }

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR4(KTR_LOCK,
                    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
                    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

        while (!_obtain_lock(m, td)) {
                uintptr_t v;
                struct thread *td1;

                mtx_lock_spin(&sched_lock);
                /*
                 * Check if the lock has been released while spinning for
                 * the sched_lock.
                 */
                if ((v = m->mtx_lock) == MTX_UNOWNED) {
                        mtx_unlock_spin(&sched_lock);
                        continue;
                }

                /*
                 * The mutex was marked contested on release. This means that
                 * there are threads blocked on it.
                 */
                if (v == MTX_CONTESTED) {
                        td1 = TAILQ_FIRST(&m->mtx_blocked);
                        MPASS(td1 != NULL);
                        m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;

                        if (td1->td_priority < td->td_priority)
                                td->td_priority = td1->td_priority;
                        mtx_unlock_spin(&sched_lock);
                        return;
                }

                /*
                 * If the mutex isn't already contested and a failure occurs
                 * setting the contested bit, the mutex was either released
                 * or the state of the MTX_RECURSED bit changed.
                 */
                if ((v & MTX_CONTESTED) == 0 &&
                    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
                    (void *)(v | MTX_CONTESTED))) {
                        mtx_unlock_spin(&sched_lock);
                        continue;
                }

#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
                /*
                 * If the current owner of the lock is executing on another
                 * CPU, spin instead of blocking.
                 */
                owner = (struct thread *)(v & MTX_FLAGMASK);
                if (m != &Giant && owner->td_kse != NULL &&
                    owner->td_kse->ke_oncpu != NOCPU) {
                        mtx_unlock_spin(&sched_lock);
                        continue;
                }
#endif	/* SMP && ADAPTIVE_MUTEXES */

                /*
                 * We definitely must sleep for this lock.
                 */
                mtx_assert(m, MA_NOTOWNED);

#ifdef notyet
                /*
                 * If we're borrowing an interrupted thread's VM context, we
                 * must clean up before going to sleep.
                 */
                if (td->td_ithd != NULL) {
                        struct ithd *it = td->td_ithd;

                        if (it->it_interrupted) {
                                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                                        CTR2(KTR_LOCK,
                                            "_mtx_lock_sleep: %p interrupted %p",
                                            it, it->it_interrupted);
                                intr_thd_fixup(it);
                        }
                }
#endif

                /*
                 * Put us on the list of threads blocked on this mutex.
                 */
                if (TAILQ_EMPTY(&m->mtx_blocked)) {
                        td1 = mtx_owner(m);
                        LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
                        TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
                } else {
                        TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq)
                                if (td1->td_priority > td->td_priority)
                                        break;
                        if (td1)
                                TAILQ_INSERT_BEFORE(td1, td, td_blkq);
                        else
                                TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
                }

                /*
                 * Save who we're blocked on.
                 */
                td->td_blocked = m;
                td->td_mtxname = m->mtx_object.lo_name;
                td->td_proc->p_stat = SMTX;
                propagate_priority(td);

                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR3(KTR_LOCK,
                            "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
                            m->mtx_object.lo_name);

                td->td_proc->p_stats->p_ru.ru_nvcsw++;
                mi_switch();

                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR3(KTR_LOCK,
                            "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
                            td, m, m->mtx_object.lo_name);

                mtx_unlock_spin(&sched_lock);
        }

        return;
}

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
{
        int i = 0;

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

        for (;;) {
                if (_obtain_lock(m, curthread))
                        break;

                /* Give interrupts a chance while we spin. */
                critical_exit();
                while (m->mtx_lock != MTX_UNOWNED) {
                        if (i++ < 10000000)
                                continue;
                        if (i++ < 60000000)
                                DELAY(1);
#ifdef DDB
                        else if (!db_active)
#else
                        else
#endif
                                panic("spin lock %s held by %p for > 5 seconds",
                                    m->mtx_object.lo_name, (void *)m->mtx_lock);
                }
                critical_enter();
        }

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

        return;
}

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
        struct thread *td, *td1;
        struct mtx *m1;
        int pri;

        td = curthread;

        if (mtx_recursed(m)) {
                if (--(m->mtx_recurse) == 0)
                        atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
                return;
        }

        mtx_lock_spin(&sched_lock);
        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

        td1 = TAILQ_FIRST(&m->mtx_blocked);
#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
        if (td1 == NULL) {
                _release_lock_quick(m);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
                mtx_unlock_spin(&sched_lock);
                return;
        }
#endif
        MPASS(td->td_proc->p_magic == P_MAGIC);
        MPASS(td1->td_proc->p_magic == P_MAGIC);

        TAILQ_REMOVE(&m->mtx_blocked, td1, td_blkq);

        if (TAILQ_EMPTY(&m->mtx_blocked)) {
                LIST_REMOVE(m, mtx_contested);
                _release_lock_quick(m);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
        } else
                atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);

        pri = PRI_MAX;
        LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
                int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority;
                if (cp < pri)
                        pri = cp;
        }

        if (pri > td->td_base_pri)
                pri = td->td_base_pri;
        td->td_priority = pri;

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
                    m, td1);

        td1->td_blocked = NULL;
        td1->td_proc->p_stat = SRUN;
        setrunqueue(td1);

        if (td->td_critnest == 1 && td1->td_priority < pri) {
#ifdef notyet
                if (td->td_ithd != NULL) {
                        struct ithd *it = td->td_ithd;

                        if (it->it_interrupted) {
                                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                                        CTR2(KTR_LOCK,
                                            "_mtx_unlock_sleep: %p interrupted %p",
                                            it, it->it_interrupted);
                                intr_thd_fixup(it);
                        }
                }
#endif
                setrunqueue(td);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR2(KTR_LOCK,
                            "_mtx_unlock_sleep: %p switching out lock=%p", m,
                            (void *)m->mtx_lock);

                td->td_proc->p_stats->p_ru.ru_nivcsw++;
                mi_switch();
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
                            m, (void *)m->mtx_lock);
        }

        mtx_unlock_spin(&sched_lock);

        return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */
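
/*
 * Illustrative spin mutex usage, mirroring the mprof_mtx pattern earlier
 * in this file.  This is a sketch only; "baz_mtx" is a hypothetical name:
 *
 *	static struct mtx baz_mtx;
 *
 *	mtx_init(&baz_mtx, "baz lock", NULL, MTX_SPIN);
 *	...
 *	mtx_lock_spin(&baz_mtx);
 *	... short critical section that never sleeps ...
 *	mtx_unlock_spin(&baz_mtx);
 */
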
/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

        if (panicstr != NULL)
                return;
        switch (what) {
        case MA_OWNED:
        case MA_OWNED | MA_RECURSED:
        case MA_OWNED | MA_NOTRECURSED:
                if (!mtx_owned(m))
                        panic("mutex %s not owned at %s:%d",
                            m->mtx_object.lo_name, file, line);
                if (mtx_recursed(m)) {
                        if ((what & MA_NOTRECURSED) != 0)
                                panic("mutex %s recursed at %s:%d",
                                    m->mtx_object.lo_name, file, line);
                } else if ((what & MA_RECURSED) != 0) {
                        panic("mutex %s unrecursed at %s:%d",
                            m->mtx_object.lo_name, file, line);
                }
                break;
        case MA_NOTOWNED:
                if (mtx_owned(m))
                        panic("mutex %s owned at %s:%d",
                            m->mtx_object.lo_name, file, line);
                break;
        default:
                panic("unknown mtx_assert at %s:%d", file, line);
        }
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
        /*
         * Can't call kernacc() from early init386(), especially when
         * initializing Giant mutex, because some stuff in kernacc()
         * requires Giant itself.
         */
        if (!cold)
                if (!kernacc((caddr_t)m, sizeof(m),
                    VM_PROT_READ | VM_PROT_WRITE))
                        panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
        struct mtx_args *margs = arg;

        mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}

/*
 * Mutex initialization routine; initialize lock `m' of type contained in
 * `opts' with options contained in `opts' and name `name.'  The optional
 * lock type `type' is used as a general lock category name for use with
 * witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
        struct lock_object *lock;

        MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
            MTX_SLEEPABLE | MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
        /* Diagnostic and error correction */
        mtx_validate(m);
#endif

        lock = &m->mtx_object;
        KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
            ("mutex %s %p already initialized", name, m));
        bzero(m, sizeof(*m));
        if (opts & MTX_SPIN)
                lock->lo_class = &lock_class_mtx_spin;
        else
                lock->lo_class = &lock_class_mtx_sleep;
        lock->lo_name = name;
        lock->lo_type = type != NULL ? type : name;
        if (opts & MTX_QUIET)
                lock->lo_flags = LO_QUIET;
        if (opts & MTX_RECURSE)
                lock->lo_flags |= LO_RECURSABLE;
        if (opts & MTX_SLEEPABLE)
                lock->lo_flags |= LO_SLEEPABLE;
        if ((opts & MTX_NOWITNESS) == 0)
                lock->lo_flags |= LO_WITNESS;
        if (opts & MTX_DUPOK)
                lock->lo_flags |= LO_DUPOK;

        m->mtx_lock = MTX_UNOWNED;
        TAILQ_INIT(&m->mtx_blocked);

        LOCK_LOG_INIT(lock, opts);

        WITNESS_INIT(lock);
}
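
/*
 * Illustrative MTX_SYSINIT() declaration, following the mprof_mtx example
 * earlier in this file; it arranges for mtx_sysinit() above to run
 * mtx_init() during boot.  A sketch only; "qux_mtx" is a hypothetical name:
 *
 *	static struct mtx qux_mtx;
 *	MTX_SYSINIT(qux, &qux_mtx, "qux lock", MTX_DEF);
 */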

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

        LOCK_LOG_DESTROY(&m->mtx_object, 0);

        if (!mtx_owned(m))
                MPASS(mtx_unowned(m));
        else {
                MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

                /* Tell witness this isn't locked to make it happy. */
                WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
                    __LINE__);
        }

        WITNESS_DESTROY(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * setup before this is called.
 */
void
mutex_init(void)
{

        /* Setup thread0 so that mutexes work. */
        LIST_INIT(&thread0.td_contested);

        /*
         * Initialize mutexes.
         */
        mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
        mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
        mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
        mtx_lock(&Giant);
}

/*
 * Encapsulated Giant mutex routines.  These routines provide encapsulation
 * control for the Giant mutex, allowing sysctls to be used to turn on and
 * off Giant around certain subsystems.  The default values for the sysctls
 * are set to what developers believe is stable and working in regards to
 * the Giant pushdown.  Developers should not turn off Giant via these
 * sysctls unless they know what they are doing.
 *
 * Callers of mtx_lock_giant() are expected to pass the return value to an
 * accompanying mtx_unlock_giant() later on.  If multiple subsystems are
 * affected by a Giant wrap, all related sysctl variables must be zero for
 * the subsystem call to operate without Giant (as determined by the caller).
 */

SYSCTL_NODE(_kern, OID_AUTO, giant, CTLFLAG_RD, NULL, "Giant mutex manipulation");

static int kern_giant_all = 0;
SYSCTL_INT(_kern_giant, OID_AUTO, all, CTLFLAG_RW, &kern_giant_all, 0, "");

int kern_giant_proc = 1;	/* Giant around PROC locks */
int kern_giant_file = 1;	/* Giant around struct file & filedesc */
int kern_giant_ucred = 1;	/* Giant around ucred */
SYSCTL_INT(_kern_giant, OID_AUTO, proc, CTLFLAG_RW, &kern_giant_proc, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, file, CTLFLAG_RW, &kern_giant_file, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, ucred, CTLFLAG_RW, &kern_giant_ucred, 0, "");

int
mtx_lock_giant(int sysctlvar)
{
        if (sysctlvar || kern_giant_all) {
                mtx_lock(&Giant);
                return(1);
        }
        return(0);
}

void
mtx_unlock_giant(int s)
{
        if (s)
                mtx_unlock(&Giant);
}
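
/*
 * Illustrative use of the Giant wrappers described above, following the
 * calling convention in their comment.  A sketch only; the file/filedesc
 * work shown is hypothetical:
 *
 *	int gotgiant;
 *
 *	gotgiant = mtx_lock_giant(kern_giant_file);
 *	... operate on struct file / filedesc state ...
 *	mtx_unlock_giant(gotgiant);
 */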