kern_mutex.c revision 160766
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_mutex.c 160766 2006-07-27 19:58:18Z jhb $");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_mprof.h"
#include "opt_mutex_wake_all.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Force MUTEX_WAKE_ALL for now.
 * Single thread wakeup needs fixes to avoid race conditions with
 * priority inheritance.
 */
#ifndef MUTEX_WAKE_ALL
#define MUTEX_WAKE_ALL
#endif

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)  ((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)    ((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))
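
/*
 * Note on the encoding (an illustrative summary of the macros above, not
 * from the original source): mtx_lock holds the owning thread pointer
 * OR'd with the MTX_RECURSED and MTX_CONTESTED flag bits, which fit in
 * the low bits because thread pointers are suitably aligned.  The owner
 * is therefore recovered by masking the flags off:
 *
 *	owner = (struct thread *)(m->mtx_lock & ~MTX_FLAGMASK);
 */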

#ifdef DDB
static void     db_show_mtx(struct lock_object *lock);
#endif

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
        "sleep mutex",
        LC_SLEEPLOCK | LC_RECURSABLE,
#ifdef DDB
        db_show_mtx
#endif
};
struct lock_class lock_class_mtx_spin = {
        "spin mutex",
        LC_SPINLOCK | LC_RECURSABLE,
#ifdef DDB
        db_show_mtx
#endif
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");

struct mutex_prof {
        const char      *name;
        const char      *file;
        int             line;
        uintmax_t       cnt_max;
        uintmax_t       cnt_tot;
        uintmax_t       cnt_cur;
        uintmax_t       cnt_contest_holding;
        uintmax_t       cnt_contest_locking;
        struct mutex_prof *next;
};

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#ifdef MPROF_BUFFERS
#define NUM_MPROF_BUFFERS       MPROF_BUFFERS
#else
#define NUM_MPROF_BUFFERS       1000
#endif
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#ifndef MPROF_HASH_SIZE
#define MPROF_HASH_SIZE         1009
#endif
#if NUM_MPROF_BUFFERS >= MPROF_HASH_SIZE
#error MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE
#endif
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];
/* SWAG: sbuf size = avg stat. line size * number of locks */
#define MPROF_SBUF_SIZE         (256 * 400)

static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");
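
/*
 * Example (illustrative; assumes a MUTEX_PROFILING kernel): the profiler
 * is driven from userland with sysctl(8), using the "stats" and "reset"
 * handlers defined below:
 *
 *	sysctl debug.mutex.prof.enable=1    # start recording hold times
 *	sysctl debug.mutex.prof.stats       # dump the per-site table
 *	sysctl debug.mutex.prof.reset=1     # discard all records
 */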

/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);

static u_int64_t
nanoseconds(void)
{
        struct timespec tv;

        nanotime(&tv);
        return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
        struct sbuf *sb;
        int error, i;
        static int multiplier = 1;

        if (first_free_mprof_buf == 0)
                return (SYSCTL_OUT(req, "No locking recorded",
                    sizeof("No locking recorded")));

retry_sbufops:
        sb = sbuf_new(NULL, NULL, MPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
        sbuf_printf(sb, "\n%6s %12s %11s %5s %12s %12s %s\n",
            "max", "total", "count", "avg", "cnt_hold", "cnt_lock", "name");
        /*
         * XXX this spinlock seems to be by far the largest perpetrator
         * of spinlock latency (1.6 msec on an Athlon1600 was recorded
         * even before I pessimized it further by moving the average
         * computation here).
         */
        mtx_lock_spin(&mprof_mtx);
        for (i = 0; i < first_free_mprof_buf; ++i) {
                sbuf_printf(sb, "%6ju %12ju %11ju %5ju %12ju %12ju %s:%d (%s)\n",
                    mprof_buf[i].cnt_max / 1000,
                    mprof_buf[i].cnt_tot / 1000,
                    mprof_buf[i].cnt_cur,
                    mprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
                        mprof_buf[i].cnt_tot / (mprof_buf[i].cnt_cur * 1000),
                    mprof_buf[i].cnt_contest_holding,
                    mprof_buf[i].cnt_contest_locking,
                    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
                if (sbuf_overflowed(sb)) {
                        mtx_unlock_spin(&mprof_mtx);
                        sbuf_delete(sb);
                        multiplier++;
                        goto retry_sbufops;
                }
        }
        mtx_unlock_spin(&mprof_mtx);
        sbuf_finish(sb);
        error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
        sbuf_delete(sb);
        return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");

static int
reset_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
        int error, v;

        if (first_free_mprof_buf == 0)
                return (0);

        v = 0;
        error = sysctl_handle_int(oidp, &v, 0, req);
        if (error)
                return (error);
        if (req->newptr == NULL)
                return (error);
        if (v == 0)
                return (0);

        mtx_lock_spin(&mprof_mtx);
        bzero(mprof_buf, sizeof(*mprof_buf) * first_free_mprof_buf);
        bzero(mprof_hash, sizeof(struct mutex_prof *) * MPROF_HASH_SIZE);
        first_free_mprof_buf = 0;
        mtx_unlock_spin(&mprof_mtx);
        return (0);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_mutex_prof_stats, "I", "Reset mutex profiling statistics");
#endif
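
/*
 * Typical use of the API implemented below (an illustrative sketch;
 * `example_mtx' and the surrounding code are hypothetical):
 *
 *	static struct mtx example_mtx;
 *
 *	mtx_init(&example_mtx, "example", NULL, MTX_DEF);
 *	mtx_lock(&example_mtx);
 *	... critical section ...
 *	mtx_unlock(&example_mtx);
 *	mtx_destroy(&example_mtx);
 */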

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
            ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
            file, line));
        WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line);
        _get_sleep_lock(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
        /* don't reset the timer when/if recursing */
        if (m->mtx_acqtime == 0) {
                m->mtx_filename = file;
                m->mtx_lineno = line;
                m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
                ++mutex_prof_acquisitions;
        }
#endif
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
            ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
            file, line));
        WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
        if (m->mtx_acqtime != 0) {
                static const char *unknown = "(unknown)";
                struct mutex_prof *mpp;
                u_int64_t acqtime, now;
                const char *p, *q;
                volatile u_int hash;

                now = nanoseconds();
                acqtime = m->mtx_acqtime;
                m->mtx_acqtime = 0;
                if (now <= acqtime)
                        goto out;
                /* Strip any leading "../" components from the filename. */
                for (p = m->mtx_filename;
                    p != NULL && strncmp(p, "../", 3) == 0; p += 3)
                        /* nothing */ ;
                if (p == NULL || *p == '\0')
                        p = unknown;
                for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
                        hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
                mtx_lock_spin(&mprof_mtx);
                for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
                        if (mpp->line == m->mtx_lineno &&
                            strcmp(mpp->file, p) == 0)
                                break;
                if (mpp == NULL) {
                        /* Just exit if we cannot get a trace buffer */
                        if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
                                ++mutex_prof_rejected;
                                goto unlock;
                        }
                        mpp = &mprof_buf[first_free_mprof_buf++];
                        mpp->name = mtx_name(m);
                        mpp->file = p;
                        mpp->line = m->mtx_lineno;
                        mpp->next = mprof_hash[hash];
                        if (mprof_hash[hash] != NULL)
                                ++mutex_prof_collisions;
                        mprof_hash[hash] = mpp;
                        ++mutex_prof_records;
                }
                /*
                 * Record if the mutex has been held longer now than ever
                 * before.
                 */
                if (now - acqtime > mpp->cnt_max)
                        mpp->cnt_max = now - acqtime;
                mpp->cnt_tot += now - acqtime;
                mpp->cnt_cur++;
                /*
                 * There's a small race, really we should cmpxchg
                 * 0 with the current value, but that would bill
                 * the contention to the wrong lock instance if
                 * it followed this also.
                 */
                mpp->cnt_contest_holding += m->mtx_contest_holding;
                m->mtx_contest_holding = 0;
                mpp->cnt_contest_locking += m->mtx_contest_locking;
                m->mtx_contest_locking = 0;
unlock:
                mtx_unlock_spin(&mprof_mtx);
        }
out:
#endif
        _rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin,
            ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
            m->mtx_object.lo_name, file, line));
        WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line);
        _get_spin_lock(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin,
            ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
            m->mtx_object.lo_name, file, line));
        WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        mtx_assert(m, MA_OWNED);
        _rel_spin_lock(m);
}
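
/*
 * Example (illustrative sketch, hypothetical caller): mtx_trylock()
 * returns nonzero on success and never sleeps, so it is usable where
 * blocking is not an option:
 *
 *	if (mtx_trylock(&example_mtx)) {
 *		... critical section ...
 *		mtx_unlock(&example_mtx);
 *	} else {
 *		... fall back without blocking ...
 *	}
 */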

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned and marked recursable, it will recursively acquire
 * the lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
        int rval;

        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
            ("mtx_trylock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
            file, line));

        if (mtx_owned(m) && (m->mtx_object.lo_flags & LO_RECURSABLE) != 0) {
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                rval = 1;
        } else
                rval = _obtain_lock(m, (uintptr_t)curthread);

        LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
        if (rval)
                WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);

        return (rval);
}
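
/*
 * Summary of the mtx_lock word states driving the sleep-lock slow paths
 * below (an illustrative sketch derived from this file; see sys/mutex.h
 * for the authoritative definitions):
 *
 *	MTX_UNOWNED           free; _obtain_lock() installs the new owner
 *	tid                   owned, no waiters
 *	tid | MTX_RECURSED    owned recursively (depth in mtx_recurse)
 *	tid | MTX_CONTESTED   owned, with waiters blocked on a turnstile
 */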

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
        volatile struct thread *owner;
#endif
        uintptr_t v;
#ifdef KTR
        int cont_logged = 0;
#endif
#ifdef MUTEX_PROFILING
        int contested;
#endif

        if (mtx_owned(m)) {
                KASSERT((m->mtx_object.lo_flags & LO_RECURSABLE) != 0,
            ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
                    m->mtx_object.lo_name, file, line));
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
                return;
        }

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR4(KTR_LOCK,
                    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
                    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

#ifdef MUTEX_PROFILING
        contested = 0;
#endif
        while (!_obtain_lock(m, tid)) {
#ifdef MUTEX_PROFILING
                contested = 1;
                atomic_add_int(&m->mtx_contest_holding, 1);
#endif
                turnstile_lock(&m->mtx_object);
                v = m->mtx_lock;

                /*
                 * Check if the lock has been released while spinning for
                 * the turnstile chain lock.
                 */
                if (v == MTX_UNOWNED) {
                        turnstile_release(&m->mtx_object);
                        cpu_spinwait();
                        continue;
                }

#ifdef MUTEX_WAKE_ALL
                MPASS(v != MTX_CONTESTED);
#else
                /*
                 * The mutex was marked contested on release. This means that
                 * there are other threads blocked on it.  Grab ownership of
                 * it and propagate its priority to the current thread if
                 * necessary.
                 */
                if (v == MTX_CONTESTED) {
                        m->mtx_lock = tid | MTX_CONTESTED;
                        turnstile_claim(&m->mtx_object);
                        break;
                }
#endif

                /*
                 * If the mutex isn't already contested and a failure occurs
                 * setting the contested bit, the mutex was either released
                 * or the state of the MTX_RECURSED bit changed.
                 */
                if ((v & MTX_CONTESTED) == 0 &&
                    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
                        turnstile_release(&m->mtx_object);
                        cpu_spinwait();
                        continue;
                }

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
                /*
                 * If the current owner of the lock is executing on another
                 * CPU, spin instead of blocking.
                 */
                owner = (struct thread *)(v & ~MTX_FLAGMASK);
#ifdef ADAPTIVE_GIANT
                if (TD_IS_RUNNING(owner)) {
#else
                if (m != &Giant && TD_IS_RUNNING(owner)) {
#endif
                        turnstile_release(&m->mtx_object);
                        while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
                                cpu_spinwait();
                        }
                        continue;
                }
#endif  /* SMP && !NO_ADAPTIVE_MUTEXES */

                /*
                 * We definitely must sleep for this lock.
                 */
                mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
                if (!cont_logged) {
                        CTR6(KTR_CONTENTION,
                            "contention: %p at %s:%d wants %s, taken by %s:%d",
                            (void *)tid, file, line, m->mtx_object.lo_name,
                            WITNESS_FILE(&m->mtx_object),
                            WITNESS_LINE(&m->mtx_object));
                        cont_logged = 1;
                }
#endif

                /*
                 * Block on the turnstile.
                 */
                turnstile_wait(&m->mtx_object, mtx_owner(m),
                    TS_EXCLUSIVE_QUEUE);
        }

#ifdef KTR
        if (cont_logged) {
                CTR4(KTR_CONTENTION,
                    "contention end: %s acquired by %p at %s:%d",
                    m->mtx_object.lo_name, (void *)tid, file, line);
        }
#endif
#ifdef MUTEX_PROFILING
        if (contested)
                m->mtx_contest_locking++;
        m->mtx_contest_holding = 0;
#endif
        return;
}
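
/*
 * Note on the spin loop below: an acquisition attempt passes through
 * three phases keyed off the iteration counter `i' -- busy-wait with
 * cpu_spinwait() for the first 10,000,000 iterations, back off with
 * DELAY(1) (roughly one microsecond each) until 60,000,000, and finally
 * print the apparently wedged owner and panic, unless the debugger is
 * active or the system is already panicking.
 */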

#ifdef SMP
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
        int i = 0;

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

        while (!_obtain_lock(m, tid)) {

                /* Give interrupts a chance while we spin. */
                spinlock_exit();
                while (m->mtx_lock != MTX_UNOWNED) {
                        if (i++ < 10000000) {
                                cpu_spinwait();
                                continue;
                        }
                        if (i < 60000000)
                                DELAY(1);
                        else if (!kdb_active && !panicstr) {
                                printf("spin lock %s held by %p for > 5 seconds\n",
                                    m->mtx_object.lo_name, (void *)m->mtx_lock);
#ifdef WITNESS
                                witness_display_spinlock(&m->mtx_object,
                                    mtx_owner(m));
#endif
                                panic("spin lock held too long");
                        }
                        cpu_spinwait();
                }
                spinlock_enter();
        }

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

        return;
}
#endif /* SMP */

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
        struct turnstile *ts;
#ifndef PREEMPTION
        struct thread *td, *td1;
#endif

        if (mtx_recursed(m)) {
                if (--(m->mtx_recurse) == 0)
                        atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
                return;
        }

        turnstile_lock(&m->mtx_object);
        ts = turnstile_lookup(&m->mtx_object);
        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
        if (ts == NULL) {
                _release_lock_quick(m);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
                turnstile_release(&m->mtx_object);
                return;
        }
#else
        MPASS(ts != NULL);
#endif
#ifndef PREEMPTION
        /* XXX */
        td1 = turnstile_head(ts, TS_EXCLUSIVE_QUEUE);
#endif
#ifdef MUTEX_WAKE_ALL
        turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
        _release_lock_quick(m);
#else
        if (turnstile_signal(ts, TS_EXCLUSIVE_QUEUE)) {
                _release_lock_quick(m);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
        } else {
                m->mtx_lock = MTX_CONTESTED;
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested",
                            m);
        }
#endif
        turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);

#ifndef PREEMPTION
        /*
         * XXX: This is just a hack until preemption is done.  However,
         * once preemption is done we need to either wrap the
         * turnstile_signal() and release of the actual lock in an
         * extra critical section or change the preemption code to
         * always just set a flag and never do instant-preempts.
         */
        td = curthread;
        if (td->td_critnest > 0 || td1->td_priority >= td->td_priority)
                return;
        mtx_lock_spin(&sched_lock);
        if (!TD_IS_RUNNING(td1)) {
#ifdef notyet
                if (td->td_ithd != NULL) {
                        struct ithd *it = td->td_ithd;

                        if (it->it_interrupted) {
                                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                                        CTR2(KTR_LOCK,
                                    "_mtx_unlock_sleep: %p interrupted %p",
                                            it, it->it_interrupted);
                                intr_thd_fixup(it);
                        }
                }
#endif
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR2(KTR_LOCK,
                            "_mtx_unlock_sleep: %p switching out lock=%p", m,
                            (void *)m->mtx_lock);

                mi_switch(SW_INVOL, NULL);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
                            m, (void *)m->mtx_lock);
        }
        mtx_unlock_spin(&sched_lock);
#endif

        return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */
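
/*
 * Example (illustrative, hypothetical callers): INVARIANTS kernels use
 * mtx_assert() to verify locking assumptions at run time, as this file
 * does internally:
 *
 *	mtx_assert(&example_mtx, MA_OWNED);      caller must hold the lock
 *	mtx_assert(&example_mtx, MA_NOTOWNED);   caller must not hold it
 */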

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

        if (panicstr != NULL || dumping)
                return;
        switch (what) {
        case MA_OWNED:
        case MA_OWNED | MA_RECURSED:
        case MA_OWNED | MA_NOTRECURSED:
                if (!mtx_owned(m))
                        panic("mutex %s not owned at %s:%d",
                            m->mtx_object.lo_name, file, line);
                if (mtx_recursed(m)) {
                        if ((what & MA_NOTRECURSED) != 0)
                                panic("mutex %s recursed at %s:%d",
                                    m->mtx_object.lo_name, file, line);
                } else if ((what & MA_RECURSED) != 0) {
                        panic("mutex %s unrecursed at %s:%d",
                            m->mtx_object.lo_name, file, line);
                }
                break;
        case MA_NOTOWNED:
                if (mtx_owned(m))
                        panic("mutex %s owned at %s:%d",
                            m->mtx_object.lo_name, file, line);
                break;
        default:
                panic("unknown mtx_assert at %s:%d", file, line);
        }
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void    mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
        /*
         * Can't call kernacc() from early init386(), especially when
         * initializing Giant mutex, because some stuff in kernacc()
         * requires Giant itself.
         */
        if (!cold)
                if (!kernacc((caddr_t)m, sizeof(m),
                    VM_PROT_READ | VM_PROT_WRITE))
                        panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
        struct mtx_args *margs = arg;

        mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}
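
/*
 * Example (illustrative; `foo_mtx' is hypothetical): MTX_SYSINIT()
 * arranges for mtx_sysinit() to run during boot, as mprof_mtx does
 * above:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo, &foo_mtx, "foo lock", MTX_DEF);
 */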

/*
 * Mutex initialization routine; initialize lock `m' with the type and
 * options contained in `opts' and name `name.'  The optional lock type
 * `type' is used as a general lock category name for use with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
        struct lock_class *class;
        int flags;

        MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
            MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
        /* Diagnostic and error correction */
        mtx_validate(m);
#endif

        /* Determine lock class and lock flags. */
        if (opts & MTX_SPIN)
                class = &lock_class_mtx_spin;
        else
                class = &lock_class_mtx_sleep;
        flags = 0;
        if (opts & MTX_QUIET)
                flags |= LO_QUIET;
        if (opts & MTX_RECURSE)
                flags |= LO_RECURSABLE;
        if ((opts & MTX_NOWITNESS) == 0)
                flags |= LO_WITNESS;
        if (opts & MTX_DUPOK)
                flags |= LO_DUPOK;

        /* Initialize mutex. */
        m->mtx_lock = MTX_UNOWNED;
        m->mtx_recurse = 0;
#ifdef MUTEX_PROFILING
        m->mtx_acqtime = 0;
        m->mtx_filename = NULL;
        m->mtx_lineno = 0;
        m->mtx_contest_holding = 0;
        m->mtx_contest_locking = 0;
#endif

        lock_init(&m->mtx_object, class, name, type, flags);
}

/*
 * Destroy lock `m'.  We don't allow MTX_QUIET to be passed in as a flag
 * here because if the corresponding mtx_init() was called with MTX_QUIET
 * set, then it will already be set in the mutex's flags.
 */
void
mtx_destroy(struct mtx *m)
{

        if (!mtx_owned(m))
                MPASS(mtx_unowned(m));
        else {
                MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

                /* Perform the non-mtx related part of mtx_unlock_spin(). */
                if (LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin)
                        spinlock_exit();

                /* Tell witness this isn't locked to make it happy. */
                WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
                    __LINE__);
        }

        m->mtx_lock = MTX_DESTROYED;
        lock_destroy(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

        /* Setup turnstiles so that sleep mutexes work. */
        init_turnstiles();

        /*
         * Initialize mutexes.
         */
        mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
        mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
        mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
        mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
        mtx_lock(&Giant);
}

#ifdef DDB
void
db_show_mtx(struct lock_object *lock)
{
        struct thread *td;
        struct mtx *m;

        m = (struct mtx *)lock;

        db_printf(" flags: {");
        if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
                db_printf("SPIN");
        else
                db_printf("DEF");
        if (m->mtx_object.lo_flags & LO_RECURSABLE)
                db_printf(", RECURSE");
        if (m->mtx_object.lo_flags & LO_DUPOK)
                db_printf(", DUPOK");
        db_printf("}\n");
        db_printf(" state: {");
        if (mtx_unowned(m))
                db_printf("UNOWNED");
        else {
                db_printf("OWNED");
                if (m->mtx_lock & MTX_CONTESTED)
                        db_printf(", CONTESTED");
                if (m->mtx_lock & MTX_RECURSED)
                        db_printf(", RECURSED");
        }
        db_printf("}\n");
        if (!mtx_unowned(m)) {
                td = mtx_owner(m);
                db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
                    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
                if (mtx_recursed(m))
                        db_printf(" recursed: %d\n", m->mtx_recurse);
        }
}
#endif