kern_mutex.c revision 170295
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of the mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_mutex.c 170295 2007-06-04 23:51:44Z jeff $");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_global.h"
#include "opt_mutex_wake_all.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Force MUTEX_WAKE_ALL for now.  Single-thread wakeup needs fixes to
 * avoid race conditions with priority inheritance.
 */
#ifndef MUTEX_WAKE_ALL
#define MUTEX_WAKE_ALL
#endif

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define ADAPTIVE_MUTEXES
#endif
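/*
 * Illustrative sketch (not part of this file): both policies above are
 * driven by kernel configuration options, which generate the opt_*.h
 * headers included earlier.  A custom kernel config could contain, for
 * example:
 *
 *	options 	MUTEX_WAKE_ALL		# wake every waiter on unlock
 *	options 	NO_ADAPTIVE_MUTEXES	# never spin on an owned mutex
 *
 * With neither option set, this file forces MUTEX_WAKE_ALL anyway (see
 * the comment above) and enables adaptive mutexes on SMP kernels.
 */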
/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

#define mtx_owner(m)	((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))

#ifdef DDB
static void	db_show_mtx(struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, int how);
static void	lock_spin(struct lock_object *lock, int how);
static int	unlock_mtx(struct lock_object *lock);
static int	unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
};

/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx sched_lock;
struct mtx Giant;

#ifdef LOCK_PROFILING
static inline void
lock_profile_init(void)
{
	int i;

	/* Initialize the mutex profiling locks */
	for (i = 0; i < LPROF_LOCK_SIZE; i++) {
		mtx_init(&lprof_locks[i], "mprof lock",
		    NULL, MTX_SPIN|MTX_QUIET|MTX_NOPROFILE);
	}
}
#else
static inline void lock_profile_init(void) {;}
#endif

void
lock_mtx(struct lock_object *lock, int how)
{

	mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, int how)
{

	panic("spin locks can only use msleep_spin");
}

int
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

int
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}
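/*
 * Illustrative sketch (not part of this file): the lc_lock/lc_unlock
 * methods let generic code drop and reacquire a lock without knowing its
 * concrete type, e.g. the way a sleep primitive might handle a caller's
 * interlock:
 *
 *	struct lock_class *class = LOCK_CLASS(lock);
 *	int how;
 *
 *	how = class->lc_unlock(lock);	(drop before sleeping)
 *	... sleep ...
 *	class->lc_lock(lock, how);	(reacquire afterwards)
 *
 * lock_spin()/unlock_spin() panic above because a spin mutex may not be
 * held across a sleep; msleep_spin() handles that case specially.
 */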
/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line);

	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	curthread->td_locks--;
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	if (m->mtx_recurse == 0)
		lock_profile_release_lock(&m->lock_object);
	_rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line);
	_get_spin_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	_rel_spin_lock(m);
}
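/*
 * Illustrative sketch (not part of this file): consumers normally reach
 * these functions through the mtx_lock()/mtx_unlock() macro family, which
 * supplies the caller's location so WITNESS and KTR can attribute the
 * operation.  The layering is roughly:
 *
 *	mtx_lock(m)
 *	  -> mtx_lock_flags((m), 0)
 *	    -> _mtx_lock_flags((m), (opts), LOCK_FILE, LOCK_LINE)
 *
 * The exact macro definitions live in sys/mutex.h (and may inline the
 * uncontested path); the sketch only shows why every function here takes
 * the (opts, file, line) triple.
 */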
/*
 * The important part of mtx_trylock{,_flags}().
 *
 * Tries to acquire lock `m'.  If this function is called on a mutex that
 * is already owned by the current thread, it will recursively acquire the
 * lock (the mutex must be marked recursable for this to succeed).
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval, contested = 0;
	uint64_t waittime = 0;

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	if (mtx_owned(m) && (m->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _obtain_lock(m, (uintptr_t)curthread);

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
		if (m->mtx_recurse == 0)
			lock_profile_obtain_lock_success(&m->lock_object,
			    contested, waittime, file, line);
	}

	return (rval);
}
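/*
 * Illustrative sketch (not part of this file): mtx_trylock() is commonly
 * used to take a second lock out of the documented order without risking
 * deadlock, backing off when it cannot be acquired:
 *
 *	mtx_lock(&a);
 *	if (mtx_trylock(&b) == 0) {
 *		mtx_unlock(&a);
 *		mtx_lock(&b);		(now in the documented order)
 *		mtx_lock(&a);
 *	}
 *
 * The names `a' and `b' are hypothetical; the point is that a zero return
 * means `b' was not acquired and the caller never blocked.
 */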
/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
	struct turnstile *ts;
#ifdef ADAPTIVE_MUTEXES
	volatile struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif
	int contested = 0;
	uint64_t waittime = 0;
	uintptr_t v;

	if (mtx_owned(m)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	lock_profile_obtain_lock_failed(&m->lock_object,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, tid)) {
		ts = turnstile_trywait(&m->lock_object);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			cpu_spinwait();
			continue;
		}

#ifdef MUTEX_WAKE_ALL
		MPASS(v != MTX_CONTESTED);
#else
		/*
		 * The mutex was marked contested on release.  This means that
		 * there are other threads blocked on it.  Grab ownership of
		 * it and propagate its priority to the current thread if
		 * necessary.
		 */
		if (v == MTX_CONTESTED) {
			m->mtx_lock = tid | MTX_CONTESTED;
			turnstile_claim(ts);
			break;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_cancel(ts);
			cpu_spinwait();
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, spin instead of blocking.
		 */
		owner = (struct thread *)(v & ~MTX_FLAGMASK);
#ifdef ADAPTIVE_GIANT
		if (TD_IS_RUNNING(owner))
#else
		if (m != &Giant && TD_IS_RUNNING(owner))
#endif
		{
			turnstile_cancel(ts);
			while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
				cpu_spinwait();
			}
			continue;
		}
#endif	/* ADAPTIVE_MUTEXES */

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->lock_object.lo_name,
			    WITNESS_FILE(&m->lock_object),
			    WITNESS_LINE(&m->lock_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
		turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
	}
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->lock_object.lo_name, (void *)tid, file, line);
	}
#endif
	lock_profile_obtain_lock_success(&m->lock_object, contested,
	    waittime, file, line);
}

static void
_mtx_lock_spin_failed(struct mtx *m)
{
	struct thread *td;

	td = mtx_owner(m);

	/* If the mutex is unlocked, try again. */
	if (td == NULL)
		return;
	printf("spin lock %p (%s) held by %p (tid %d) too long\n",
	    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
	witness_display_spinlock(&m->lock_object, td);
#endif
	panic("spin lock held too long");
}

#ifdef SMP
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
	int i = 0, contested = 0;
	uint64_t waittime = 0;

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	lock_profile_obtain_lock_failed(&m->lock_object, &contested,
	    &waittime);
	while (!_obtain_lock(m, tid)) {

		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
				cpu_spinwait();
				continue;
			}
			if (i < 60000000 || kdb_active || panicstr != NULL)
				DELAY(1);
			else
				_mtx_lock_spin_failed(m);
			cpu_spinwait();
		}
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	lock_profile_obtain_lock_success(&m->lock_object, contested,
	    waittime, file, line);
}
#endif /* SMP */
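/*
 * Illustrative sketch (not part of this file): spin mutexes protect data
 * shared with interrupt context, where sleeping is not allowed.  A
 * hypothetical driver might do:
 *
 *	static struct mtx sc_intr_mtx;
 *
 *	mtx_init(&sc_intr_mtx, "scintr", NULL, MTX_SPIN);
 *	...
 *	mtx_lock_spin(&sc_intr_mtx);	(enters a critical section and
 *	... touch shared state ...	 blocks interrupts via
 *	mtx_unlock_spin(&sc_intr_mtx);	 spinlock_enter())
 *
 * The hard iteration bounds above (10000000/60000000) exist so that a
 * spin lock which is never released panics with a diagnostic instead of
 * silently hanging the CPU.
 */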
void
_thread_lock_flags(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid;
	int i;

	i = 0;
	tid = (uintptr_t)curthread;
	for (;;) {
retry:
		spinlock_enter();
		m = __DEVOLATILE(struct mtx *, td->td_lock);
		WITNESS_CHECKORDER(&m->lock_object,
		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line);
		while (!_obtain_lock(m, tid)) {
			if (m->mtx_lock == tid) {
				m->mtx_recurse++;
				break;
			}
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			while (m->mtx_lock != MTX_UNOWNED) {
				if (i++ < 10000000)
					cpu_spinwait();
				else if (i < 60000000 ||
				    kdb_active || panicstr != NULL)
					DELAY(1);
				else
					_mtx_lock_spin_failed(m);
				cpu_spinwait();
				if (m != td->td_lock)
					goto retry;
			}
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		_rel_spin_lock(m);	/* does spinlock_exit() */
	}
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	spinlock_enter();
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = __DEVOLATILE(struct mtx *, td->td_lock);
	td->td_lock = &blocked_lock;
	mtx_unlock_spin(lock);

	return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{

	mtx_assert(new, MA_OWNED);
	MPASS(td->td_lock == &blocked_lock);
	atomic_store_rel_ptr((void *)&td->td_lock, (uintptr_t)new);
	spinlock_exit();
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = __DEVOLATILE(struct mtx *, td->td_lock);
	td->td_lock = new;
	mtx_unlock_spin(lock);
}
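/*
 * Illustrative sketch (not part of this file): a scheduler can detach a
 * thread from its current lock while the thread is being moved, then
 * attach it to a new one.  Roughly:
 *
 *	struct mtx *lock;
 *
 *	thread_lock(td);
 *	lock = thread_lock_block(td);	(td->td_lock now points at
 *					 blocked_lock, which is never
 *					 released, so contenders keep
 *					 spinning in _thread_lock_flags())
 *	... move td between run queues ...
 *	thread_lock_unblock(td, new);	(publish the new lock)
 *
 * Here `new' stands for a spin mutex the caller already owns; the names
 * are placeholders for whatever the scheduler uses.
 */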
/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct turnstile *ts;
#ifndef PREEMPTION
	struct thread *td, *td1;
#endif

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	ts = turnstile_lookup(&m->lock_object);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

#ifdef ADAPTIVE_MUTEXES
	if (ts == NULL) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
		turnstile_chain_unlock(&m->lock_object);
		return;
	}
#else
	MPASS(ts != NULL);
#endif
#ifndef PREEMPTION
	/* XXX */
	td1 = turnstile_head(ts, TS_EXCLUSIVE_QUEUE);
#endif
#ifdef MUTEX_WAKE_ALL
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	_release_lock_quick(m);
#else
	if (turnstile_signal(ts, TS_EXCLUSIVE_QUEUE)) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else {
		m->mtx_lock = MTX_CONTESTED;
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested",
			    m);
	}
#endif
	/*
	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&m->lock_object);

#ifndef PREEMPTION
	/*
	 * XXX: This is just a hack until preemption is done.  However,
	 * once preemption is done we need to either wrap the
	 * turnstile_signal() and release of the actual lock in an
	 * extra critical section or change the preemption code to
	 * always just set a flag and never do instant-preempts.
	 */
	td = curthread;
	if (td->td_critnest > 0 || td1->td_priority >= td->td_priority)
		return;

	thread_lock(td1);
	if (!TD_IS_RUNNING(td1)) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->lock_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		mi_switch(SW_INVOL, NULL);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}
	thread_unlock(td1);
#endif
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert().
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL || dumping)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate().
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check.
 */
#ifdef notyet
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}
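/*
 * Illustrative sketch (not part of this file): MTX_SYSINIT() arranges for
 * mtx_sysinit() to run during boot via SYSINIT(), so a subsystem can have
 * a mutex initialized without an explicit call in its own init path:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx, &foo_mtx, "foo global lock", MTX_DEF);
 *
 * `foo_mtx' and its description are hypothetical names for the example.
 */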
/*
 * Mutex initialization routine; initialize lock `m' with options contained
 * in `opts' and name `name'.  The optional lock type `type' is used as a
 * general lock category name for use with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_class *class;
	int flags;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;

	/* Initialize mutex. */
	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;

	lock_init(&m->lock_object, class, name, type, flags);
}

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			curthread->td_locks--;

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}
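/*
 * Illustrative sketch (not part of this file): the usual lifecycle in a
 * hypothetical driver's attach/detach pair:
 *
 *	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "foo softc",
 *	    MTX_DEF);
 *	...
 *	mtx_destroy(&sc->sc_mtx);
 *
 * Passing a shared `type' string lets WITNESS treat every instance of a
 * class of locks as one entry in its order graph; MTX_DUPOK covers the
 * related case of holding two locks of the same type at once.
 */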
/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Set up turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);

	lock_profile_init();
}

#ifdef DDB
void
db_show_mtx(struct lock_object *lock)
{
	struct thread *td;
	struct mtx *m;

	m = (struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif
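/*
 * Illustrative sketch (not part of this file): db_show_mtx() backs the
 * DDB "show lock" command through the lc_ddb_show hook set up in the lock
 * classes above.  For a held, recursed sleep mutex the db_printf() calls
 * here would produce output roughly like:
 *
 *	db> show lock 0xc1234567
 *	 flags: {DEF, RECURSE}
 *	 state: {OWNED, RECURSED}
 *	 owner: 0xc2345678 (tid 100001, pid 1, "init")
 *	 recursed: 2
 *
 * Any class/name header lines come from the generic "show lock" code
 * rather than this function; the addresses and IDs are invented for the
 * example.
 */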