/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_mutex.c 167054 2007-02-27 06:42:05Z kmacy $");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_global.h"
#include "opt_mutex_wake_all.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Force MUTEX_WAKE_ALL for now.
 * Single-thread wakeup needs fixes to avoid race conditions with
 * priority inheritance.
 */
#ifndef MUTEX_WAKE_ALL
#define	MUTEX_WAKE_ALL
#endif

/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_owner(m)	((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))

#ifdef DDB
static void	db_show_mtx(struct lock_object *lock);
#endif
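/*
 * The lock word serves as both owner pointer and state: the low bits of
 * mtx_lock carry flags such as MTX_CONTESTED and MTX_RECURSED, which is
 * why mtx_owner() masks them off with ~MTX_FLAGMASK.  A minimal sketch
 * of decoding the lock word, assuming only the definitions from
 * <sys/mutex.h>:
 *
 *	uintptr_t v = m->mtx_lock;
 *	struct thread *owner;
 *
 *	if (v == MTX_UNOWNED)
 *		owner = NULL;		(lock is free)
 *	else
 *		owner = (struct thread *)(v & ~MTX_FLAGMASK);
 */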
/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE,
#ifdef DDB
	db_show_mtx
#endif
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE,
#ifdef DDB
	db_show_mtx
#endif
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

#ifdef LOCK_PROFILING
static inline void lock_profile_init(void)
{
	int i;

	/* Initialize the mutex profiling locks */
	for (i = 0; i < LPROF_LOCK_SIZE; i++) {
		mtx_init(&lprof_locks[i], "mprof lock",
		    NULL, MTX_SPIN|MTX_QUIET|MTX_NOPROFILE);
	}
}
#else
static inline void lock_profile_init(void) {;}
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line);

	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{
	struct lock_object lo;

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	curthread->td_locks--;
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
#ifdef LOCK_PROFILING
	memcpy(&lo, &m->mtx_object, sizeof(lo));
	m->mtx_object.lo_flags &= ~LO_CONTESTED;
#endif
	_rel_sleep_lock(m, curthread, opts, file, line);
	lock_profile_release_lock(&lo);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line);
	_get_spin_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{
	struct lock_object lo;

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
#ifdef LOCK_PROFILING
	memcpy(&lo, &m->mtx_object, sizeof(lo));
	m->mtx_object.lo_flags &= ~LO_CONTESTED;
#endif
	_rel_spin_lock(m);
	lock_profile_release_lock(&lo);
}
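/*
 * Normal code does not call the four functions above directly; the
 * mtx_lock(), mtx_unlock(), mtx_lock_spin() and mtx_unlock_spin()
 * macros in <sys/mutex.h> expand to them (possibly through inline fast
 * paths), filling in the file and line arguments automatically.  A
 * minimal usage sketch, with a hypothetical softc for illustration:
 *
 *	struct foo_softc {
 *		struct mtx	sc_mtx;
 *		int		sc_count;
 *	};
 *
 *	mtx_lock(&sc->sc_mtx);
 *	sc->sc_count++;
 *	mtx_unlock(&sc->sc_mtx);
 */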
/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval, contested = 0;
	uint64_t waittime = 0;

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));

	if (mtx_owned(m) && (m->mtx_object.lo_flags & LO_RECURSABLE) != 0) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _obtain_lock(m, (uintptr_t)curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
		if (m->mtx_recurse == 0)
			lock_profile_obtain_lock_success(&m->mtx_object,
			    contested, waittime, file, line);
	}

	return (rval);
}
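/*
 * mtx_trylock() returns nonzero on success and never sleeps, so it is
 * usable where blocking is not an option.  A minimal sketch of the
 * usual pattern (sc is a hypothetical softc as above):
 *
 *	if (mtx_trylock(&sc->sc_mtx)) {
 *		...			(we own the lock)
 *		mtx_unlock(&sc->sc_mtx);
 *	} else {
 *		...			(defer or retry; do not touch
 *					 the protected state)
 *	}
 */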
/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
	volatile struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif
	uintptr_t v;

	if (mtx_owned(m)) {
		KASSERT((m->mtx_object.lo_flags & LO_RECURSABLE) != 0,
		    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->mtx_object.lo_name, file, line));
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, tid)) {
		turnstile_lock(&m->mtx_object);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_release(&m->mtx_object);
			cpu_spinwait();
			continue;
		}

#ifdef MUTEX_WAKE_ALL
		MPASS(v != MTX_CONTESTED);
#else
		/*
		 * The mutex was marked contested on release.  This means that
		 * there are other threads blocked on it.  Grab ownership of
		 * it and propagate its priority to the current thread if
		 * necessary.
		 */
		if (v == MTX_CONTESTED) {
			m->mtx_lock = tid | MTX_CONTESTED;
			turnstile_claim(&m->mtx_object);
			break;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_release(&m->mtx_object);
			cpu_spinwait();
			continue;
		}

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, spin instead of blocking.
		 */
		owner = (struct thread *)(v & ~MTX_FLAGMASK);
#ifdef ADAPTIVE_GIANT
		if (TD_IS_RUNNING(owner))
#else
		if (m != &Giant && TD_IS_RUNNING(owner))
#endif
		{
			turnstile_release(&m->mtx_object);
			while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
				cpu_spinwait();
			}
			continue;
		}
#endif	/* SMP && !NO_ADAPTIVE_MUTEXES */

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->mtx_object.lo_name,
			    WITNESS_FILE(&m->mtx_object),
			    WITNESS_LINE(&m->mtx_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
		turnstile_wait(&m->mtx_object, mtx_owner(m),
		    TS_EXCLUSIVE_QUEUE);
	}
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->mtx_object.lo_name, (void *)tid, file, line);
	}
#endif
	return;
}

#ifdef SMP
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
	int i = 0;
	struct thread *td;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	while (!_obtain_lock(m, tid)) {

		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
				cpu_spinwait();
				continue;
			}
			if (i < 60000000 || kdb_active || panicstr != NULL)
				DELAY(1);
			else {
				td = mtx_owner(m);

				/* If the mutex is unlocked, try again. */
				if (td == NULL)
					continue;
				printf(
			"spin lock %p (%s) held by %p (tid %d) too long\n",
				    m, m->mtx_object.lo_name, td, td->td_tid);
#ifdef WITNESS
				witness_display_spinlock(&m->mtx_object, td);
#endif
				panic("spin lock held too long");
			}
			cpu_spinwait();
		}
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}
#endif /* SMP */
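/*
 * The spin loop in _mtx_lock_spin() above doubles as a crude watchdog:
 * roughly the first 10,000,000 iterations only pause via cpu_spinwait(),
 * the stretch up to 60,000,000 adds a DELAY(1) (about one microsecond)
 * per pass, and only after that does the kernel print the owner and
 * panic with "spin lock held too long".  While kdb_active or panicstr is
 * set, the panic is suppressed so the debugger and panic paths can keep
 * spinning instead of panicking recursively.
 */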
/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct turnstile *ts;
#ifndef PREEMPTION
	struct thread *td, *td1;
#endif

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	turnstile_lock(&m->mtx_object);
	ts = turnstile_lookup(&m->mtx_object);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
	if (ts == NULL) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
		turnstile_release(&m->mtx_object);
		return;
	}
#else
	MPASS(ts != NULL);
#endif
#ifndef PREEMPTION
	/* XXX */
	td1 = turnstile_head(ts, TS_EXCLUSIVE_QUEUE);
#endif
#ifdef MUTEX_WAKE_ALL
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	_release_lock_quick(m);
#else
	if (turnstile_signal(ts, TS_EXCLUSIVE_QUEUE)) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else {
		m->mtx_lock = MTX_CONTESTED;
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested",
			    m);
	}
#endif
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);

#ifndef PREEMPTION
	/*
	 * XXX: This is just a hack until preemption is done.  However,
	 * once preemption is done we need to either wrap the
	 * turnstile_signal() and release of the actual lock in an
	 * extra critical section or change the preemption code to
	 * always just set a flag and never do instant-preempts.
	 */
	td = curthread;
	if (td->td_critnest > 0 || td1->td_priority >= td->td_priority)
		return;
	mtx_lock_spin(&sched_lock);
	if (!TD_IS_RUNNING(td1)) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		mi_switch(SW_INVOL, NULL);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}
	mtx_unlock_spin(&sched_lock);
#endif

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */
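/*
 * A note on the wakeup policy in _mtx_unlock_sleep() above: with
 * MUTEX_WAKE_ALL (currently forced on at the top of this file),
 * turnstile_broadcast() makes every waiter runnable and the lock word is
 * cleared outright, so the woken threads simply re-contend.  The
 * disabled single-wakeup path instead leaves MTX_CONTESTED in the lock
 * word when waiters remain, letting the next acquirer claim the
 * turnstile directly; that is the code with the unresolved priority
 * inheritance races mentioned at the top of the file.
 */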
/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL || dumping)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}

/*
 * Mutex initialization routine; initialize lock `m' with the lock class
 * implied by `opts', the options contained in `opts', and name `name.'
 * The optional lock type `type' is used as a general lock category name
 * for use with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_class *class;
	int flags;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;

	/* Initialize mutex. */
	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;

	lock_profile_object_init(&m->mtx_object, class, name);
	lock_init(&m->mtx_object, class, name, type, flags);
}
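/*
 * A minimal sketch of initializing a mutex, either by hand or through
 * the MTX_SYSINIT() wrapper around mtx_sysinit() (the names here are
 * made up for illustration):
 *
 *	static struct mtx foo_mtx;
 *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *
 *	static struct mtx bar_mtx;
 *	MTX_SYSINIT(bar, &bar_mtx, "bar", MTX_DEF);
 *
 * MTX_RECURSE must be passed at init time if the lock will ever be
 * recursed; mtx_destroy() below asserts that a still-owned lock is
 * neither recursed nor contested.
 */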
/*
 * Destroy lock `m'.  We don't allow MTX_QUIET to be passed in as a flag
 * here because if the corresponding mtx_init() was called with MTX_QUIET
 * set, then it will already be set in the mutex's flags.
 */
void
mtx_destroy(struct mtx *m)
{

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			curthread->td_locks--;

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_profile_object_destroy(&m->mtx_object);
	lock_destroy(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * setup before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);

	lock_profile_init();
}

#ifdef DDB
void
db_show_mtx(struct lock_object *lock)
{
	struct thread *td;
	struct mtx *m;

	m = (struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->mtx_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->mtx_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif
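/*
 * db_show_mtx() is reached through the lock class db_show hook, e.g.
 * from DDB's "show lock" command applied to a mutex address.  Sample
 * (illustrative, not captured) output for an owned, recursed sleep
 * mutex:
 *
 *	 flags: {DEF, RECURSE}
 *	 state: {OWNED, RECURSED}
 *	 owner: 0xc23ae640 (tid 100042, pid 731, "foo")
 *	 recursed: 2
 */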