kern_mutex.c revision 133998
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_mutex.c 133998 2004-08-19 06:38:26Z jmg $");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_mprof.h"
#include "opt_mutex_wake_all.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)  ((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)    (mtx_unowned((m)) ? NULL \
        : (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))
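/*
 * Editor's note (illustrative, not part of the original file): the
 * mtx_lock word doubles as owner and state.  It holds either
 * MTX_UNOWNED or the owning thread pointer with the state flags OR'd
 * into its low bits, so masking with MTX_FLAGMASK strips the flags
 * and recovers the owner:
 *
 *	uintptr_t v = m->mtx_lock;
 *	struct thread *owner = (struct thread *)(v & MTX_FLAGMASK);
 *	int contested = (v & MTX_CONTESTED) != 0;
 */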
/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
        "sleep mutex",
        LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
        "spin mutex",
        LC_SPINLOCK | LC_RECURSABLE
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");

struct mutex_prof {
        const char      *name;
        const char      *file;
        int             line;
        uintmax_t       cnt_max;
        uintmax_t       cnt_tot;
        uintmax_t       cnt_cur;
        uintmax_t       cnt_contest_holding;
        uintmax_t       cnt_contest_locking;
        struct mutex_prof *next;
};

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#ifdef MPROF_BUFFERS
#define NUM_MPROF_BUFFERS       MPROF_BUFFERS
#else
#define NUM_MPROF_BUFFERS       1000
#endif
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#ifndef MPROF_HASH_SIZE
#define MPROF_HASH_SIZE         1009
#endif
#if NUM_MPROF_BUFFERS >= MPROF_HASH_SIZE
#error MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE
#endif
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];
/* SWAG: sbuf size = avg stat. line size * number of locks */
#define MPROF_SBUF_SIZE         (256 * 400)

static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");
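/*
 * Editor's usage sketch (not part of the original file): in a kernel
 * built with "options MUTEX_PROFILING", hold-time collection is driven
 * entirely through the sysctl tree declared above, e.g. from userland:
 *
 *	sysctl debug.mutex.prof.enable=1	(start recording)
 *	sysctl debug.mutex.prof.stats		(dump per-call-site records)
 *	sysctl debug.mutex.prof.reset=1		(clear the buffers)
 */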
/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);

static u_int64_t
nanoseconds(void)
{
        struct timespec tv;

        nanotime(&tv);
        return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
        struct sbuf *sb;
        int error, i;
        static int multiplier = 1;

        if (first_free_mprof_buf == 0)
                return (SYSCTL_OUT(req, "No locking recorded",
                    sizeof("No locking recorded")));

retry_sbufops:
        sb = sbuf_new(NULL, NULL, MPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
        sbuf_printf(sb, "%6s %12s %11s %5s %12s %12s %s\n",
            "max", "total", "count", "avg", "cnt_hold", "cnt_lock", "name");
        /*
         * XXX this spinlock seems to be by far the largest perpetrator
         * of spinlock latency (1.6 msec on an Athlon1600 was recorded
         * even before I pessimized it further by moving the average
         * computation here).
         */
        mtx_lock_spin(&mprof_mtx);
        for (i = 0; i < first_free_mprof_buf; ++i) {
                sbuf_printf(sb, "%6ju %12ju %11ju %5ju %12ju %12ju %s:%d (%s)\n",
                    mprof_buf[i].cnt_max / 1000,
                    mprof_buf[i].cnt_tot / 1000,
                    mprof_buf[i].cnt_cur,
                    mprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
                        mprof_buf[i].cnt_tot / (mprof_buf[i].cnt_cur * 1000),
                    mprof_buf[i].cnt_contest_holding,
                    mprof_buf[i].cnt_contest_locking,
                    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
                if (sbuf_overflowed(sb)) {
                        mtx_unlock_spin(&mprof_mtx);
                        sbuf_delete(sb);
                        multiplier++;
                        goto retry_sbufops;
                }
        }
        mtx_unlock_spin(&mprof_mtx);
        sbuf_finish(sb);
        error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
        sbuf_delete(sb);
        return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");

static int
reset_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
        int error, v;

        if (first_free_mprof_buf == 0)
                return (0);

        v = 0;
        error = sysctl_handle_int(oidp, &v, 0, req);
        if (error)
                return (error);
        if (req->newptr == NULL)
                return (error);
        if (v == 0)
                return (0);

        mtx_lock_spin(&mprof_mtx);
        bzero(mprof_buf, sizeof(*mprof_buf) * first_free_mprof_buf);
        bzero(mprof_hash, sizeof(struct mutex_prof *) * MPROF_HASH_SIZE);
        first_free_mprof_buf = 0;
        mtx_unlock_spin(&mprof_mtx);
        return (0);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_mutex_prof_stats, "I", "Reset mutex profiling statistics");
#endif
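/*
 * Editor's note (not part of the original file): nanoseconds() stamps
 * hold times in nanoseconds and dump_mutex_prof_stats() divides by
 * 1000 on output, so the "max", "total" and "avg" columns of
 * debug.mutex.prof.stats are microseconds, keyed by the file:line of
 * the acquiring call site.
 */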
/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
            ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
            file, line));
        WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line);
        _get_sleep_lock(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
        /* don't reset the timer when/if recursing */
        if (m->mtx_acqtime == 0) {
                m->mtx_filename = file;
                m->mtx_lineno = line;
                m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
                ++mutex_prof_acquisitions;
        }
#endif
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
            ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
            file, line));
        WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
        if (m->mtx_acqtime != 0) {
                static const char *unknown = "(unknown)";
                struct mutex_prof *mpp;
                u_int64_t acqtime, now;
                const char *p, *q;
                volatile u_int hash;

                now = nanoseconds();
                acqtime = m->mtx_acqtime;
                m->mtx_acqtime = 0;
                if (now <= acqtime)
                        goto out;
                for (p = m->mtx_filename;
                    p != NULL && strncmp(p, "../", 3) == 0; p += 3)
                        /* nothing */ ;
                if (p == NULL || *p == '\0')
                        p = unknown;
                for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
                        hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
                mtx_lock_spin(&mprof_mtx);
                for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
                        if (mpp->line == m->mtx_lineno &&
                            strcmp(mpp->file, p) == 0)
                                break;
                if (mpp == NULL) {
                        /* Just exit if we cannot get a trace buffer */
                        if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
                                ++mutex_prof_rejected;
                                goto unlock;
                        }
                        mpp = &mprof_buf[first_free_mprof_buf++];
                        mpp->name = mtx_name(m);
                        mpp->file = p;
                        mpp->line = m->mtx_lineno;
                        mpp->next = mprof_hash[hash];
                        if (mprof_hash[hash] != NULL)
                                ++mutex_prof_collisions;
                        mprof_hash[hash] = mpp;
                        ++mutex_prof_records;
                }
                /*
                 * Record if the mutex has been held longer now than ever
                 * before.
                 */
                if (now - acqtime > mpp->cnt_max)
                        mpp->cnt_max = now - acqtime;
                mpp->cnt_tot += now - acqtime;
                mpp->cnt_cur++;
                /*
                 * There's a small race; really we should cmpxchg
                 * 0 with the current value, but that would bill
                 * the contention to the wrong lock instance if
                 * it followed this also.
                 */
                mpp->cnt_contest_holding += m->mtx_contest_holding;
                m->mtx_contest_holding = 0;
                mpp->cnt_contest_locking += m->mtx_contest_locking;
                m->mtx_contest_locking = 0;
unlock:
                mtx_unlock_spin(&mprof_mtx);
        }
out:
#endif
        _rel_sleep_lock(m, curthread, opts, file, line);
}
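/*
 * Editor's sketch (not part of the original file): consumers do not
 * call the two functions above directly; the mtx_lock()/mtx_unlock()
 * macros expand to them (or to inline equivalents), passing the
 * caller's file and line for WITNESS and profiling.  Hypothetical use
 * with an invented "example_mtx":
 *
 *	static struct mtx example_mtx;
 *	MTX_SYSINIT(example, &example_mtx, "example lock", MTX_DEF);
 *
 *	mtx_lock(&example_mtx);
 *	... modify data protected by example_mtx ...
 *	mtx_unlock(&example_mtx);
 */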
void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
            ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
            m->mtx_object.lo_name, file, line));
        WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line);
#if defined(SMP) || LOCK_DEBUG > 0 || 1
        _get_spin_lock(m, curthread, opts, file, line);
#else
        critical_enter();
#endif
        LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
            ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
            m->mtx_object.lo_name, file, line));
        WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        mtx_assert(m, MA_OWNED);
#if defined(SMP) || LOCK_DEBUG > 0 || 1
        _rel_spin_lock(m);
#else
        critical_exit();
#endif
}

/*
 * The important part of mtx_trylock{,_flags}().
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
        int rval;

        MPASS(curthread != NULL);

        if (mtx_owned(m) && (m->mtx_object.lo_flags & LO_RECURSABLE) != 0) {
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                rval = 1;
        } else
                rval = _obtain_lock(m, curthread);

        LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
        if (rval)
                WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);

        return (rval);
}
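/*
 * Editor's sketch (not part of the original file): a typical
 * mtx_trylock() caller backs off instead of sleeping when the lock is
 * busy; "example_mtx" is again hypothetical:
 *
 *	if (mtx_trylock(&example_mtx) == 0)
 *		return (EBUSY);
 *	... short critical section ...
 *	mtx_unlock(&example_mtx);
 */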
/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, struct thread *td, int opts, const char *file,
    int line)
{
        struct turnstile *ts;
#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
        struct thread *owner;
#endif
        uintptr_t v;
#ifdef KTR
        int cont_logged = 0;
#endif
#ifdef MUTEX_PROFILING
        int contested;
#endif

        if (mtx_owned(m)) {
                KASSERT((m->mtx_object.lo_flags & LO_RECURSABLE) != 0,
            ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
                    m->mtx_object.lo_name, file, line));
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
                return;
        }

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR4(KTR_LOCK,
                    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
                    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

#ifdef MUTEX_PROFILING
        contested = 0;
#endif
        while (!_obtain_lock(m, td)) {
#ifdef MUTEX_PROFILING
                contested = 1;
                atomic_add_int(&m->mtx_contest_holding, 1);
#endif
                ts = turnstile_lookup(&m->mtx_object);
                v = m->mtx_lock;

                /*
                 * Check if the lock has been released while spinning for
                 * the turnstile chain lock.
                 */
                if (v == MTX_UNOWNED) {
                        turnstile_release(&m->mtx_object);
                        cpu_spinwait();
                        continue;
                }

#ifdef MUTEX_WAKE_ALL
                MPASS(v != MTX_CONTESTED);
#else
                /*
                 * The mutex was marked contested on release.  This means
                 * that there are other threads blocked on it.  Grab
                 * ownership of it and propagate its priority to the current
                 * thread if necessary.
                 */
                if (v == MTX_CONTESTED) {
                        MPASS(ts != NULL);
                        m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;
                        turnstile_claim(ts);
                        break;
                }
#endif

                /*
                 * If the mutex isn't already contested and a failure occurs
                 * setting the contested bit, the mutex was either released
                 * or the state of the MTX_RECURSED bit changed.
                 */
                if ((v & MTX_CONTESTED) == 0 &&
                    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
                    (void *)(v | MTX_CONTESTED))) {
                        turnstile_release(&m->mtx_object);
                        cpu_spinwait();
                        continue;
                }

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
                /*
                 * If the current owner of the lock is executing on another
                 * CPU, spin instead of blocking.
                 */
                owner = (struct thread *)(v & MTX_FLAGMASK);
#ifdef ADAPTIVE_GIANT
                if (TD_IS_RUNNING(owner)) {
#else
                if (m != &Giant && TD_IS_RUNNING(owner)) {
#endif
                        turnstile_release(&m->mtx_object);
                        while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
                                cpu_spinwait();
                        }
                        continue;
                }
#endif  /* SMP && !NO_ADAPTIVE_MUTEXES */

                /*
                 * We definitely must sleep for this lock.
                 */
                mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
                if (!cont_logged) {
                        CTR6(KTR_CONTENTION,
                            "contention: %p at %s:%d wants %s, taken by %s:%d",
                            td, file, line, m->mtx_object.lo_name,
                            WITNESS_FILE(&m->mtx_object),
                            WITNESS_LINE(&m->mtx_object));
                        cont_logged = 1;
                }
#endif

                /*
                 * Block on the turnstile.
                 */
                turnstile_wait(ts, &m->mtx_object, mtx_owner(m));
        }

#ifdef KTR
        if (cont_logged) {
                CTR4(KTR_CONTENTION,
                    "contention end: %s acquired by %p at %s:%d",
                    m->mtx_object.lo_name, td, file, line);
        }
#endif
#ifdef MUTEX_PROFILING
        if (contested)
                m->mtx_contest_locking++;
        m->mtx_contest_holding = 0;
#endif
        return;
}
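/*
 * Editor's note (not part of the original file): the loop above is the
 * contested-acquire protocol in miniature: look up the turnstile
 * (taking the turnstile chain lock), re-read mtx_lock, then either
 * retry if the lock was freed, claim it if it is merely marked
 * MTX_CONTESTED, spin adaptively while the owner runs on another CPU,
 * or finally block on the turnstile, which handles priority
 * propagation to the owner.
 */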
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, struct thread *td, int opts, const char *file,
    int line)
{
        int i = 0;

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

        for (;;) {
                if (_obtain_lock(m, td))
                        break;

                /* Give interrupts a chance while we spin. */
                critical_exit();
                while (m->mtx_lock != MTX_UNOWNED) {
                        if (i++ < 10000000) {
                                cpu_spinwait();
                                continue;
                        }
                        if (i < 60000000)
                                DELAY(1);
                        else if (!kdb_active) {
                                printf("spin lock %s held by %p for > 5 seconds\n",
                                    m->mtx_object.lo_name, (void *)m->mtx_lock);
#ifdef WITNESS
                                witness_display_spinlock(&m->mtx_object,
                                    mtx_owner(m));
#endif
                                panic("spin lock held too long");
                        }
                        cpu_spinwait();
                }
                critical_enter();
        }

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

        return;
}
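/*
 * Editor's sketch (not part of the original file): spin mutexes are
 * for code that may not sleep (e.g. scheduling and low-level interrupt
 * paths); acquiring one enters a critical section, which is why the
 * spin loop above briefly calls critical_exit() to let interrupts in.
 * Hypothetical use:
 *
 *	static struct mtx example_spin;
 *	MTX_SYSINIT(examplespin, &example_spin, "example spin", MTX_SPIN);
 *
 *	mtx_lock_spin(&example_spin);
 *	... brief critical section, no sleeping ...
 *	mtx_unlock_spin(&example_spin);
 */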
/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
        struct turnstile *ts;
#ifndef PREEMPTION
        struct thread *td, *td1;
#endif

        if (mtx_recursed(m)) {
                if (--(m->mtx_recurse) == 0)
                        atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
                return;
        }

        ts = turnstile_lookup(&m->mtx_object);
        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
        if (ts == NULL) {
                _release_lock_quick(m);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
                turnstile_release(&m->mtx_object);
                return;
        }
#else
        MPASS(ts != NULL);
#endif
#ifndef PREEMPTION
        /* XXX */
        td1 = turnstile_head(ts);
#endif
#ifdef MUTEX_WAKE_ALL
        turnstile_broadcast(ts);
        _release_lock_quick(m);
#else
        if (turnstile_signal(ts)) {
                _release_lock_quick(m);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
        } else {
                m->mtx_lock = MTX_CONTESTED;
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested",
                            m);
        }
#endif
        turnstile_unpend(ts);

#ifndef PREEMPTION
        /*
         * XXX: This is just a hack until preemption is done.  However,
         * once preemption is done we need to either wrap the
         * turnstile_signal() and release of the actual lock in an
         * extra critical section or change the preemption code to
         * always just set a flag and never do instant-preempts.
         */
        td = curthread;
        if (td->td_critnest > 0 || td1->td_priority >= td->td_priority)
                return;
        mtx_lock_spin(&sched_lock);
        if (!TD_IS_RUNNING(td1)) {
#ifdef notyet
                if (td->td_ithd != NULL) {
                        struct ithd *it = td->td_ithd;

                        if (it->it_interrupted) {
                                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                                        CTR2(KTR_LOCK,
                                    "_mtx_unlock_sleep: %p interrupted %p",
                                            it, it->it_interrupted);
                                intr_thd_fixup(it);
                        }
                }
#endif
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR2(KTR_LOCK,
                            "_mtx_unlock_sleep: %p switching out lock=%p", m,
                            (void *)m->mtx_lock);

                mi_switch(SW_INVOL, NULL);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
                            m, (void *)m->mtx_lock);
        }
        mtx_unlock_spin(&sched_lock);
#endif

        return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert().
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

        if (panicstr != NULL)
                return;
        switch (what) {
        case MA_OWNED:
        case MA_OWNED | MA_RECURSED:
        case MA_OWNED | MA_NOTRECURSED:
                if (!mtx_owned(m))
                        panic("mutex %s not owned at %s:%d",
                            m->mtx_object.lo_name, file, line);
                if (mtx_recursed(m)) {
                        if ((what & MA_NOTRECURSED) != 0)
                                panic("mutex %s recursed at %s:%d",
                                    m->mtx_object.lo_name, file, line);
                } else if ((what & MA_RECURSED) != 0) {
                        panic("mutex %s unrecursed at %s:%d",
                            m->mtx_object.lo_name, file, line);
                }
                break;
        case MA_NOTOWNED:
                if (mtx_owned(m))
                        panic("mutex %s owned at %s:%d",
                            m->mtx_object.lo_name, file, line);
                break;
        default:
                panic("unknown mtx_assert at %s:%d", file, line);
        }
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate().
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void    mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
        /*
         * Can't call kernacc() from early init386(), especially when
         * initializing Giant mutex, because some stuff in kernacc()
         * requires Giant itself.
         */
        if (!cold)
                if (!kernacc((caddr_t)m, sizeof(*m),
                    VM_PROT_READ | VM_PROT_WRITE))
                        panic("Can't read and write to mutex %p", m);
#endif
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
        struct mtx_args *margs = arg;

        mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}
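/*
 * Editor's sketch (not part of the original file): mtx_assert()
 * documents and, under INVARIANTS, enforces a locking protocol, e.g.
 * in a helper that must be entered with a (hypothetical) lock held:
 *
 *	static void
 *	example_locked_op(struct example_softc *sc)
 *	{
 *		mtx_assert(&sc->sc_mtx, MA_OWNED);
 *		...
 *	}
 */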
/*
 * Mutex initialization routine; initialize lock `m' with the type and
 * options contained in `opts' and name `name.'  The optional lock type
 * `type' is used as a general lock category name for use with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
        struct lock_object *lock;

        MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
            MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
        /* Diagnostic and error correction */
        mtx_validate(m);
#endif

        lock = &m->mtx_object;
        KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
            ("mutex \"%s\" %p already initialized", name, m));
        bzero(m, sizeof(*m));
        if (opts & MTX_SPIN)
                lock->lo_class = &lock_class_mtx_spin;
        else
                lock->lo_class = &lock_class_mtx_sleep;
        lock->lo_name = name;
        lock->lo_type = type != NULL ? type : name;
        if (opts & MTX_QUIET)
                lock->lo_flags |= LO_QUIET;
        if (opts & MTX_RECURSE)
                lock->lo_flags |= LO_RECURSABLE;
        if ((opts & MTX_NOWITNESS) == 0)
                lock->lo_flags |= LO_WITNESS;
        if (opts & MTX_DUPOK)
                lock->lo_flags |= LO_DUPOK;

        m->mtx_lock = MTX_UNOWNED;

        LOCK_LOG_INIT(lock, opts);

        WITNESS_INIT(lock);
}

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

        LOCK_LOG_DESTROY(&m->mtx_object, 0);

        if (!mtx_owned(m))
                MPASS(mtx_unowned(m));
        else {
                MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

                /* Tell witness this isn't locked to make it happy. */
                WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
                    __LINE__);
        }

        WITNESS_DESTROY(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

        /* Setup thread0 so that mutexes work. */
        LIST_INIT(&thread0.td_contested);

        /* Setup turnstiles so that sleep mutexes work. */
        init_turnstiles();

        /*
         * Initialize mutexes.
         */
        mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
        mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
        mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
        mtx_lock(&Giant);
}
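/*
 * Editor's sketch (not part of the original file): the full life cycle
 * of a dynamically initialized mutex, with a hypothetical softc:
 *
 *	mtx_init(&sc->sc_mtx, "example softc", NULL, MTX_DEF);
 *	...
 *	mtx_destroy(&sc->sc_mtx);
 *
 * Passing NULL for `type' makes the name double as the witness lock
 * type, exactly as mtx_init() above implements it.
 */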