kern_mutex.c revision 158651
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_mutex.c 158651 2006-05-16 14:37:58Z phk $");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_mprof.h"
#include "opt_mutex_wake_all.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Force MUTEX_WAKE_ALL for now.
 * single thread wakeup needs fixes to avoid race conditions with
 * priority inheritance.
 */
#ifndef MUTEX_WAKE_ALL
#define MUTEX_WAKE_ALL
#endif

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)  ((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)    (mtx_unowned((m)) ? NULL \
        : (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))

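/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): the lock word packs the owning thread pointer together with
 * the low flag bits, and masking with MTX_FLAGMASK clears the flag bits
 * so the owner pointer can be recovered, e.g.:
 *
 *      v = (uintptr_t)curthread | MTX_CONTESTED;      owned + contested
 *      owner = (struct thread *)(v & MTX_FLAGMASK);   back to curthread
 *
 * This only works because struct thread is aligned strongly enough that
 * its low bits are always zero.
 */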
#ifdef DDB
static void     db_show_mtx(struct lock_object *lock);
#endif

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
        "sleep mutex",
        LC_SLEEPLOCK | LC_RECURSABLE,
#ifdef DDB
        db_show_mtx
#endif
};
struct lock_class lock_class_mtx_spin = {
        "spin mutex",
        LC_SPINLOCK | LC_RECURSABLE,
#ifdef DDB
        db_show_mtx
#endif
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");

struct mutex_prof {
        const char      *name;
        const char      *file;
        int             line;
        uintmax_t       cnt_max;
        uintmax_t       cnt_tot;
        uintmax_t       cnt_cur;
        uintmax_t       cnt_contest_holding;
        uintmax_t       cnt_contest_locking;
        struct mutex_prof *next;
};

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#ifdef MPROF_BUFFERS
#define NUM_MPROF_BUFFERS       MPROF_BUFFERS
#else
#define NUM_MPROF_BUFFERS       1000
#endif
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#ifndef MPROF_HASH_SIZE
#define MPROF_HASH_SIZE         1009
#endif
#if NUM_MPROF_BUFFERS >= MPROF_HASH_SIZE
#error NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE
#endif
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];
/* SWAG: sbuf size = avg stat. line size * number of locks */
#define MPROF_SBUF_SIZE         (256 * 400)

static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");

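/*
 * Illustrative sketch (editor's addition): mprof_hash is indexed by a
 * simple multiplicative hash of the acquisition line number and file
 * name, mirroring the computation done in _mtx_unlock_flags() below:
 *
 *      hash = line;
 *      for (q = file; *q != '\0'; ++q)
 *              hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
 *
 * Keeping MPROF_HASH_SIZE prime (1009 by default) helps spread the
 * short, similar file name strings across the table.
 */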
/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);

static u_int64_t
nanoseconds(void)
{
        struct timespec tv;

        nanotime(&tv);
        return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
        struct sbuf *sb;
        int error, i;
        static int multiplier = 1;

        if (first_free_mprof_buf == 0)
                return (SYSCTL_OUT(req, "No locking recorded",
                    sizeof("No locking recorded")));

retry_sbufops:
        sb = sbuf_new(NULL, NULL, MPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
        sbuf_printf(sb, "\n%6s %12s %11s %5s %12s %12s %s\n",
            "max", "total", "count", "avg", "cnt_hold", "cnt_lock", "name");
        /*
         * XXX this spinlock seems to be by far the largest perpetrator
         * of spinlock latency (1.6 msec on an Athlon1600 was recorded
         * even before I pessimized it further by moving the average
         * computation here).
         */
        mtx_lock_spin(&mprof_mtx);
        for (i = 0; i < first_free_mprof_buf; ++i) {
                sbuf_printf(sb, "%6ju %12ju %11ju %5ju %12ju %12ju %s:%d (%s)\n",
                    mprof_buf[i].cnt_max / 1000,
                    mprof_buf[i].cnt_tot / 1000,
                    mprof_buf[i].cnt_cur,
                    mprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
                        mprof_buf[i].cnt_tot / (mprof_buf[i].cnt_cur * 1000),
                    mprof_buf[i].cnt_contest_holding,
                    mprof_buf[i].cnt_contest_locking,
                    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
                if (sbuf_overflowed(sb)) {
                        mtx_unlock_spin(&mprof_mtx);
                        sbuf_delete(sb);
                        multiplier++;
                        goto retry_sbufops;
                }
        }
        mtx_unlock_spin(&mprof_mtx);
        sbuf_finish(sb);
        error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
        sbuf_delete(sb);
        return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");

static int
reset_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
        int error, v;

        if (first_free_mprof_buf == 0)
                return (0);

        v = 0;
        error = sysctl_handle_int(oidp, &v, 0, req);
        if (error)
                return (error);
        if (req->newptr == NULL)
                return (error);
        if (v == 0)
                return (0);

        mtx_lock_spin(&mprof_mtx);
        bzero(mprof_buf, sizeof(*mprof_buf) * first_free_mprof_buf);
        bzero(mprof_hash, sizeof(struct mutex_prof *) * MPROF_HASH_SIZE);
        first_free_mprof_buf = 0;
        mtx_unlock_spin(&mprof_mtx);
        return (0);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_mutex_prof_stats, "I", "Reset mutex profiling statistics");
#endif

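/*
 * Editor's note (sketch, not original source): dump_mutex_prof_stats()
 * above grows its output buffer by retrying, because a fixed-length
 * sbuf cannot be resized while the mprof_mtx spin lock is held.  The
 * general shape of the idiom, with hypothetical names:
 *
 *      multiplier = 1;
 * retry:
 *      sb = sbuf_new(NULL, NULL, BASE_SIZE * multiplier, SBUF_FIXEDLEN);
 *      lock(); format all records into sb; unlock();
 *      if (sbuf_overflowed(sb)) {
 *              sbuf_delete(sb);
 *              multiplier++;
 *              goto retry;
 *      }
 */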
/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
            ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
            file, line));
        WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line);
        _get_sleep_lock(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
        /* don't reset the timer when/if recursing */
        if (m->mtx_acqtime == 0) {
                m->mtx_filename = file;
                m->mtx_lineno = line;
                m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
                ++mutex_prof_acquisitions;
        }
#endif
}

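/*
 * Usage sketch (editor's addition): these functions back the
 * mtx_lock_flags() family of macros.  A caller that wants to suppress
 * KTR logging of one particular acquisition of a hypothetical MTX_DEF
 * mutex foo_mtx might write:
 *
 *      mtx_lock_flags(&foo_mtx, MTX_QUIET);
 *      ...
 *      mtx_unlock_flags(&foo_mtx, MTX_QUIET);
 */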
void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
            ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
            file, line));
        WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
        if (m->mtx_acqtime != 0) {
                static const char *unknown = "(unknown)";
                struct mutex_prof *mpp;
                u_int64_t acqtime, now;
                const char *p, *q;
                volatile u_int hash;

                now = nanoseconds();
                acqtime = m->mtx_acqtime;
                m->mtx_acqtime = 0;
                if (now <= acqtime)
                        goto out;
                for (p = m->mtx_filename;
                    p != NULL && strncmp(p, "../", 3) == 0; p += 3)
                        /* nothing */ ;
                if (p == NULL || *p == '\0')
                        p = unknown;
                for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
                        hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
                mtx_lock_spin(&mprof_mtx);
                for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
                        if (mpp->line == m->mtx_lineno &&
                            strcmp(mpp->file, p) == 0)
                                break;
                if (mpp == NULL) {
                        /* Just exit if we cannot get a trace buffer */
                        if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
                                ++mutex_prof_rejected;
                                goto unlock;
                        }
                        mpp = &mprof_buf[first_free_mprof_buf++];
                        mpp->name = mtx_name(m);
                        mpp->file = p;
                        mpp->line = m->mtx_lineno;
                        mpp->next = mprof_hash[hash];
                        if (mprof_hash[hash] != NULL)
                                ++mutex_prof_collisions;
                        mprof_hash[hash] = mpp;
                        ++mutex_prof_records;
                }
                /*
                 * Record if the mutex has been held longer now than ever
                 * before.
                 */
                if (now - acqtime > mpp->cnt_max)
                        mpp->cnt_max = now - acqtime;
                mpp->cnt_tot += now - acqtime;
                mpp->cnt_cur++;
                /*
                 * There's a small race, really we should cmpxchg
                 * 0 with the current value, but that would bill
                 * the contention to the wrong lock instance if
                 * it followed this also.
                 */
                mpp->cnt_contest_holding += m->mtx_contest_holding;
                m->mtx_contest_holding = 0;
                mpp->cnt_contest_locking += m->mtx_contest_locking;
                m->mtx_contest_locking = 0;
unlock:
                mtx_unlock_spin(&mprof_mtx);
        }
out:
#endif
        _rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin,
            ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
            m->mtx_object.lo_name, file, line));
        WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line);
        _get_spin_lock(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin,
            ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
            m->mtx_object.lo_name, file, line));
        WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        mtx_assert(m, MA_OWNED);
        _rel_spin_lock(m);
}

/*
 * The important part of mtx_trylock{,_flags}().
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
        int rval;

        MPASS(curthread != NULL);
        KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
            ("mtx_trylock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
            file, line));

        if (mtx_owned(m) && (m->mtx_object.lo_flags & LO_RECURSABLE) != 0) {
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                rval = 1;
        } else
                rval = _obtain_lock(m, (uintptr_t)curthread);

        LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
        if (rval)
                WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);

        return (rval);
}

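/*
 * Usage sketch (editor's addition): mtx_trylock() never blocks, so the
 * caller must check the return value; foo_mtx is a hypothetical
 * MTX_DEF mutex:
 *
 *      if (mtx_trylock(&foo_mtx)) {
 *              ... critical section ...
 *              mtx_unlock(&foo_mtx);
 *      }
 *
 * On failure the caller takes a fallback path instead of sleeping.
 */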
/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
        volatile struct thread *owner;
#endif
        uintptr_t v;
#ifdef KTR
        int cont_logged = 0;
#endif
#ifdef MUTEX_PROFILING
        int contested;
#endif

        if (mtx_owned(m)) {
                KASSERT((m->mtx_object.lo_flags & LO_RECURSABLE) != 0,
            ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
                    m->mtx_object.lo_name, file, line));
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
                return;
        }

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR4(KTR_LOCK,
                    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
                    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

#ifdef MUTEX_PROFILING
        contested = 0;
#endif
        while (!_obtain_lock(m, tid)) {
#ifdef MUTEX_PROFILING
                contested = 1;
                atomic_add_int(&m->mtx_contest_holding, 1);
#endif
                turnstile_lock(&m->mtx_object);
                v = m->mtx_lock;

                /*
                 * Check if the lock has been released while spinning for
                 * the turnstile chain lock.
                 */
                if (v == MTX_UNOWNED) {
                        turnstile_release(&m->mtx_object);
                        cpu_spinwait();
                        continue;
                }

#ifdef MUTEX_WAKE_ALL
                MPASS(v != MTX_CONTESTED);
#else
                /*
                 * The mutex was marked contested on release.  This means that
                 * there are other threads blocked on it.  Grab ownership of
                 * it and propagate its priority to the current thread if
                 * necessary.
                 */
                if (v == MTX_CONTESTED) {
                        m->mtx_lock = tid | MTX_CONTESTED;
                        turnstile_claim(&m->mtx_object);
                        break;
                }
#endif

                /*
                 * If the mutex isn't already contested and a failure occurs
                 * setting the contested bit, the mutex was either released
                 * or the state of the MTX_RECURSED bit changed.
                 */
                if ((v & MTX_CONTESTED) == 0 &&
                    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
                        turnstile_release(&m->mtx_object);
                        cpu_spinwait();
                        continue;
                }

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
                /*
                 * If the current owner of the lock is executing on another
                 * CPU, spin instead of blocking.
                 */
                owner = (struct thread *)(v & MTX_FLAGMASK);
#ifdef ADAPTIVE_GIANT
                if (TD_IS_RUNNING(owner)) {
#else
                if (m != &Giant && TD_IS_RUNNING(owner)) {
#endif
                        turnstile_release(&m->mtx_object);
                        while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
                                cpu_spinwait();
                        }
                        continue;
                }
#endif  /* SMP && !NO_ADAPTIVE_MUTEXES */

                /*
                 * We definitely must sleep for this lock.
                 */
                mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
                if (!cont_logged) {
                        CTR6(KTR_CONTENTION,
                            "contention: %p at %s:%d wants %s, taken by %s:%d",
                            (void *)tid, file, line, m->mtx_object.lo_name,
                            WITNESS_FILE(&m->mtx_object),
                            WITNESS_LINE(&m->mtx_object));
                        cont_logged = 1;
                }
#endif

                /*
                 * Block on the turnstile.
                 */
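                /*
                 * Editor's note: turnstile_wait() puts this thread to
                 * sleep on the turnstile's exclusive queue and lends its
                 * priority to the owner, so a high-priority waiter is not
                 * starved by a low-priority lock holder (priority
                 * inheritance).
                 */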
                turnstile_wait(&m->mtx_object, mtx_owner(m),
                    TS_EXCLUSIVE_QUEUE);
        }

#ifdef KTR
        if (cont_logged) {
                CTR4(KTR_CONTENTION,
                    "contention end: %s acquired by %p at %s:%d",
                    m->mtx_object.lo_name, (void *)tid, file, line);
        }
#endif
#ifdef MUTEX_PROFILING
        if (contested)
                m->mtx_contest_locking++;
        m->mtx_contest_holding = 0;
#endif
        return;
}

#ifdef SMP
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
        int i = 0;

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

        for (;;) {
                if (_obtain_lock(m, tid))
                        break;

                /* Give interrupts a chance while we spin. */
                spinlock_exit();
                while (m->mtx_lock != MTX_UNOWNED) {
                        if (i++ < 10000000) {
                                cpu_spinwait();
                                continue;
                        }
                        if (i < 60000000)
                                DELAY(1);
                        else if (!kdb_active && !panicstr) {
                                printf("spin lock %s held by %p for > 5 seconds\n",
                                    m->mtx_object.lo_name, (void *)m->mtx_lock);
#ifdef WITNESS
                                witness_display_spinlock(&m->mtx_object,
                                    mtx_owner(m));
#endif
                                panic("spin lock held too long");
                        }
                        cpu_spinwait();
                }
                spinlock_enter();
        }

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

        return;
}
#endif /* SMP */

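/*
 * Usage sketch (editor's addition): spin mutexes are initialized with
 * MTX_SPIN and acquired with the _spin variants, which also disable
 * interrupts on the local CPU; foo_spin_mtx is a hypothetical lock:
 *
 *      mtx_init(&foo_spin_mtx, "foo spin", NULL, MTX_SPIN);
 *      mtx_lock_spin(&foo_spin_mtx);
 *      ... short, non-sleeping critical section ...
 *      mtx_unlock_spin(&foo_spin_mtx);
 */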
/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
        struct turnstile *ts;
#ifndef PREEMPTION
        struct thread *td, *td1;
#endif

        if (mtx_recursed(m)) {
                if (--(m->mtx_recurse) == 0)
                        atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
                return;
        }

        turnstile_lock(&m->mtx_object);
        ts = turnstile_lookup(&m->mtx_object);
        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
        if (ts == NULL) {
                _release_lock_quick(m);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
                turnstile_release(&m->mtx_object);
                return;
        }
#else
        MPASS(ts != NULL);
#endif
#ifndef PREEMPTION
        /* XXX */
        td1 = turnstile_head(ts, TS_EXCLUSIVE_QUEUE);
#endif
#ifdef MUTEX_WAKE_ALL
        turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
        _release_lock_quick(m);
#else
        if (turnstile_signal(ts, TS_EXCLUSIVE_QUEUE)) {
                _release_lock_quick(m);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
        } else {
                m->mtx_lock = MTX_CONTESTED;
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested",
                            m);
        }
#endif
        turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);

#ifndef PREEMPTION
        /*
         * XXX: This is just a hack until preemption is done.  However,
         * once preemption is done we need to either wrap the
         * turnstile_signal() and release of the actual lock in an
         * extra critical section or change the preemption code to
         * always just set a flag and never do instant-preempts.
         */
        td = curthread;
        if (td->td_critnest > 0 || td1->td_priority >= td->td_priority)
                return;
        mtx_lock_spin(&sched_lock);
        if (!TD_IS_RUNNING(td1)) {
#ifdef notyet
                if (td->td_ithd != NULL) {
                        struct ithd *it = td->td_ithd;

                        if (it->it_interrupted) {
                                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                                        CTR2(KTR_LOCK,
                                    "_mtx_unlock_sleep: %p interrupted %p",
                                            it, it->it_interrupted);
                                intr_thd_fixup(it);
                        }
                }
#endif
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR2(KTR_LOCK,
                            "_mtx_unlock_sleep: %p switching out lock=%p", m,
                            (void *)m->mtx_lock);

                mi_switch(SW_INVOL, NULL);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
                            m, (void *)m->mtx_lock);
        }
        mtx_unlock_spin(&sched_lock);
#endif

        return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert().
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

        if (panicstr != NULL || dumping)
                return;
        switch (what) {
        case MA_OWNED:
        case MA_OWNED | MA_RECURSED:
        case MA_OWNED | MA_NOTRECURSED:
                if (!mtx_owned(m))
                        panic("mutex %s not owned at %s:%d",
                            m->mtx_object.lo_name, file, line);
                if (mtx_recursed(m)) {
                        if ((what & MA_NOTRECURSED) != 0)
                                panic("mutex %s recursed at %s:%d",
                                    m->mtx_object.lo_name, file, line);
                } else if ((what & MA_RECURSED) != 0) {
                        panic("mutex %s unrecursed at %s:%d",
                            m->mtx_object.lo_name, file, line);
                }
                break;
        case MA_NOTOWNED:
                if (mtx_owned(m))
                        panic("mutex %s owned at %s:%d",
                            m->mtx_object.lo_name, file, line);
                break;
        default:
                panic("unknown mtx_assert at %s:%d", file, line);
        }
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate().
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void    mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
        /*
         * Can't call kernacc() from early init386(), especially when
         * initializing Giant mutex, because some stuff in kernacc()
         * requires Giant itself.
         */
        if (!cold)
                if (!kernacc((caddr_t)m, sizeof(*m),
                    VM_PROT_READ | VM_PROT_WRITE))
                        panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
        struct mtx_args *margs = arg;

        mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}

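/*
 * Usage sketch (editor's addition): MTX_SYSINIT() arranges for
 * mtx_sysinit() to run during boot, so a subsystem can declare a global
 * mutex without an explicit initialization call; foo_mtx is a
 * hypothetical lock:
 *
 *      static struct mtx foo_mtx;
 *      MTX_SYSINIT(foo, &foo_mtx, "foo lock", MTX_DEF);
 *
 * This is equivalent to calling mtx_init(&foo_mtx, "foo lock", NULL,
 * MTX_DEF) during startup.
 */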
/*
 * Mutex initialization routine; initialize lock `m' with options
 * contained in `opts' and name `name.'  The optional lock type `type'
 * is used as a general lock category name for use with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
        struct lock_class *class;
        int flags;

        MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
            MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
        /* Diagnostic and error correction */
        mtx_validate(m);
#endif

        /* Determine lock class and lock flags. */
        if (opts & MTX_SPIN)
                class = &lock_class_mtx_spin;
        else
                class = &lock_class_mtx_sleep;
        flags = 0;
        if (opts & MTX_QUIET)
                flags |= LO_QUIET;
        if (opts & MTX_RECURSE)
                flags |= LO_RECURSABLE;
        if ((opts & MTX_NOWITNESS) == 0)
                flags |= LO_WITNESS;
        if (opts & MTX_DUPOK)
                flags |= LO_DUPOK;

        /* Initialize mutex. */
        m->mtx_lock = MTX_UNOWNED;
        m->mtx_recurse = 0;
#ifdef MUTEX_PROFILING
        m->mtx_acqtime = 0;
        m->mtx_filename = NULL;
        m->mtx_lineno = 0;
        m->mtx_contest_holding = 0;
        m->mtx_contest_locking = 0;
#endif

        lock_init(&m->mtx_object, class, name, type, flags);
}

/*
 * Destroy lock `m'.  We don't allow MTX_QUIET to be passed in as a
 * flag here because if the corresponding mtx_init() was called with
 * MTX_QUIET set, then it will already be set in the mutex's flags.
 */
void
mtx_destroy(struct mtx *m)
{

        if (!mtx_owned(m))
                MPASS(mtx_unowned(m));
        else {
                MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

                /* Perform the non-mtx related part of mtx_unlock_spin(). */
                if (LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin)
                        spinlock_exit();

                /* Tell witness this isn't locked to make it happy. */
                WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
                    __LINE__);
        }

        lock_destroy(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

        /* Setup turnstiles so that sleep mutexes work. */
        init_turnstiles();

        /*
         * Initialize mutexes.
         */
        mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
        mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
        mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
        mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
        mtx_lock(&Giant);
}

#ifdef DDB
void
db_show_mtx(struct lock_object *lock)
{
        struct thread *td;
        struct mtx *m;

        m = (struct mtx *)lock;

        db_printf(" flags: {");
        if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
                db_printf("SPIN");
        else
                db_printf("DEF");
        if (m->mtx_object.lo_flags & LO_RECURSABLE)
                db_printf(", RECURSE");
        if (m->mtx_object.lo_flags & LO_DUPOK)
                db_printf(", DUPOK");
        db_printf("}\n");
        db_printf(" state: {");
        if (mtx_unowned(m))
                db_printf("UNOWNED");
        else {
                db_printf("OWNED");
                if (m->mtx_lock & MTX_CONTESTED)
                        db_printf(", CONTESTED");
                if (m->mtx_lock & MTX_RECURSED)
                        db_printf(", RECURSED");
        }
        db_printf("}\n");
        if (!mtx_unowned(m)) {
                td = mtx_owner(m);
                db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
                    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
                if (mtx_recursed(m))
                        db_printf(" recursed: %d\n", m->mtx_recurse);
        }
}
#endif