/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_mutex.c 134649 2004-09-02 18:59:15Z scottl $");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_mprof.h"
#include "opt_mutex_wake_all.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))
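/*
 * Illustrative sketch (not from the original sources): the mtx_lock
 * word is MTX_UNOWNED when free; otherwise it holds the owning thread
 * pointer OR'd with the low-order MTX_RECURSED/MTX_CONTESTED flag
 * bits, which is why mtx_owner() masks with MTX_FLAGMASK.  The
 * MUTEX_EXAMPLES option is hypothetical and only keeps this compiled
 * out.
 */
#ifdef MUTEX_EXAMPLES
static int
example_mtx_is_contested(struct mtx *m)
{

	/* The flag bits share the lock word with the owner pointer. */
	return ((m->mtx_lock & MTX_CONTESTED) != 0);
}
#endif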
/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");

struct mutex_prof {
	const char	*name;
	const char	*file;
	int		line;
	uintmax_t	cnt_max;
	uintmax_t	cnt_tot;
	uintmax_t	cnt_cur;
	uintmax_t	cnt_contest_holding;
	uintmax_t	cnt_contest_locking;
	struct mutex_prof *next;
};

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#ifdef MPROF_BUFFERS
#define	NUM_MPROF_BUFFERS	MPROF_BUFFERS
#else
#define	NUM_MPROF_BUFFERS	1000
#endif
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#ifndef MPROF_HASH_SIZE
#define	MPROF_HASH_SIZE		1009
#endif
#if NUM_MPROF_BUFFERS >= MPROF_HASH_SIZE
#error MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE
#endif
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];
/* SWAG: sbuf size = avg stat. line size * number of locks */
#define	MPROF_SBUF_SIZE		(256 * 400)

static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");
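/*
 * Illustrative sketch (not from the original sources): profiling
 * records are chained through mpp->next and keyed by the (file, line)
 * hash computed in _mtx_unlock_flags() below.  MUTEX_EXAMPLES is a
 * hypothetical option that keeps this compiled out.
 */
#ifdef MUTEX_EXAMPLES
static struct mutex_prof *
example_mprof_lookup(const char *file, int line, u_int hash)
{
	struct mutex_prof *mpp;

	/* The caller must hold mprof_mtx, as _mtx_unlock_flags() does. */
	for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
		if (mpp->line == line && strcmp(mpp->file, file) == 0)
			break;
	return (mpp);
}
#endif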
/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);

static u_int64_t
nanoseconds(void)
{
	struct timespec tv;

	nanotime(&tv);
	return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error, i;
	static int multiplier = 1;

	if (first_free_mprof_buf == 0)
		return (SYSCTL_OUT(req, "No locking recorded",
		    sizeof("No locking recorded")));

retry_sbufops:
	sb = sbuf_new(NULL, NULL, MPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
	sbuf_printf(sb, "%6s %12s %11s %5s %12s %12s %s\n",
	    "max", "total", "count", "avg", "cnt_hold", "cnt_lock", "name");
	/*
	 * XXX this spinlock seems to be by far the largest perpetrator
	 * of spinlock latency (1.6 msec on an Athlon1600 was recorded
	 * even before I pessimized it further by moving the average
	 * computation here).
	 */
	mtx_lock_spin(&mprof_mtx);
	for (i = 0; i < first_free_mprof_buf; ++i) {
		sbuf_printf(sb, "%6ju %12ju %11ju %5ju %12ju %12ju %s:%d (%s)\n",
		    mprof_buf[i].cnt_max / 1000,
		    mprof_buf[i].cnt_tot / 1000,
		    mprof_buf[i].cnt_cur,
		    mprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
			mprof_buf[i].cnt_tot / (mprof_buf[i].cnt_cur * 1000),
		    mprof_buf[i].cnt_contest_holding,
		    mprof_buf[i].cnt_contest_locking,
		    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
		if (sbuf_overflowed(sb)) {
			mtx_unlock_spin(&mprof_mtx);
			sbuf_delete(sb);
			multiplier++;
			goto retry_sbufops;
		}
	}
	mtx_unlock_spin(&mprof_mtx);
	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");

static int
reset_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	if (first_free_mprof_buf == 0)
		return (0);

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);

	mtx_lock_spin(&mprof_mtx);
	bzero(mprof_buf, sizeof(*mprof_buf) * first_free_mprof_buf);
	bzero(mprof_hash, sizeof(struct mutex_prof *) * MPROF_HASH_SIZE);
	first_free_mprof_buf = 0;
	mtx_unlock_spin(&mprof_mtx);
	return (0);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_mutex_prof_stats, "I", "Reset mutex profiling statistics");
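/*
 * Usage sketch (informational): the profiler is driven entirely from
 * userland via the sysctls defined above, e.g.:
 *
 *	sysctl debug.mutex.prof.enable=1
 *	<run the workload of interest>
 *	sysctl debug.mutex.prof.stats
 *	sysctl debug.mutex.prof.reset=1
 */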
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line);
	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
	/* don't reset the timer when/if recursing */
	if (m->mtx_acqtime == 0) {
		m->mtx_filename = file;
		m->mtx_lineno = line;
		m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
		++mutex_prof_acquisitions;
	}
#endif
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
	if (m->mtx_acqtime != 0) {
		static const char *unknown = "(unknown)";
		struct mutex_prof *mpp;
		u_int64_t acqtime, now;
		const char *p, *q;
		volatile u_int hash;

		now = nanoseconds();
		acqtime = m->mtx_acqtime;
		m->mtx_acqtime = 0;
		if (now <= acqtime)
			goto out;
		for (p = m->mtx_filename;
		    p != NULL && strncmp(p, "../", 3) == 0; p += 3)
			/* nothing */ ;
		if (p == NULL || *p == '\0')
			p = unknown;
		for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
			hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
		mtx_lock_spin(&mprof_mtx);
		for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
			if (mpp->line == m->mtx_lineno &&
			    strcmp(mpp->file, p) == 0)
				break;
		if (mpp == NULL) {
			/* Just exit if we cannot get a trace buffer */
			if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
				++mutex_prof_rejected;
				goto unlock;
			}
			mpp = &mprof_buf[first_free_mprof_buf++];
			mpp->name = mtx_name(m);
			mpp->file = p;
			mpp->line = m->mtx_lineno;
			mpp->next = mprof_hash[hash];
			if (mprof_hash[hash] != NULL)
				++mutex_prof_collisions;
			mprof_hash[hash] = mpp;
			++mutex_prof_records;
		}
		/*
		 * Record if the mutex has been held longer now than ever
		 * before.
		 */
		if (now - acqtime > mpp->cnt_max)
			mpp->cnt_max = now - acqtime;
		mpp->cnt_tot += now - acqtime;
		mpp->cnt_cur++;
		/*
		 * There's a small race, really we should cmpxchg
		 * 0 with the current value, but that would bill
		 * the contention to the wrong lock instance if
		 * it followed this also.
		 */
		mpp->cnt_contest_holding += m->mtx_contest_holding;
		m->mtx_contest_holding = 0;
		mpp->cnt_contest_locking += m->mtx_contest_locking;
		m->mtx_contest_locking = 0;
unlock:
		mtx_unlock_spin(&mprof_mtx);
	}
out:
#endif
	_rel_sleep_lock(m, curthread, opts, file, line);
}
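/*
 * Usage sketch (illustrative): consumers normally call the mtx_lock()
 * and mtx_unlock() macros from <sys/mutex.h>, which pass the caller's
 * file and line down to the functions above.  MUTEX_EXAMPLES is a
 * hypothetical option that keeps this compiled out.
 */
#ifdef MUTEX_EXAMPLES
static void
example_lock_pair(struct mtx *m)
{

	mtx_lock(m);
	/* ... operate on the data protected by m ... */
	mtx_unlock(m);
}
#endif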
void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line);
	/* The "|| 1" term currently forces this path on UP kernels too. */
#if defined(SMP) || LOCK_DEBUG > 0 || 1
	_get_spin_lock(m, curthread, opts, file, line);
#else
	critical_enter();
#endif
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
#if defined(SMP) || LOCK_DEBUG > 0 || 1
	_rel_spin_lock(m);
#else
	critical_exit();
#endif
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);

	if (mtx_owned(m) && (m->mtx_object.lo_flags & LO_RECURSABLE) != 0) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _obtain_lock(m, curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval)
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);

	return (rval);
}
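/*
 * Usage sketch (illustrative): mtx_trylock() returns nonzero on
 * success, so a caller can back off instead of sleeping.  The
 * MUTEX_EXAMPLES option is hypothetical.
 */
#ifdef MUTEX_EXAMPLES
static int
example_try_or_defer(struct mtx *m)
{

	if (!mtx_trylock(m))
		return (0);		/* Busy; try again later. */
	/* ... short critical section ... */
	mtx_unlock(m);
	return (1);
}
#endif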
/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, struct thread *td, int opts, const char *file,
    int line)
{
	struct turnstile *ts;
#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
	struct thread *owner;
#endif
	uintptr_t v;
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef MUTEX_PROFILING
	int contested;
#endif

	if (mtx_owned(m)) {
		KASSERT((m->mtx_object.lo_flags & LO_RECURSABLE) != 0,
		    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->mtx_object.lo_name, file, line));
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

#ifdef MUTEX_PROFILING
	contested = 0;
#endif
	while (!_obtain_lock(m, td)) {
#ifdef MUTEX_PROFILING
		contested = 1;
		atomic_add_int(&m->mtx_contest_holding, 1);
#endif
		ts = turnstile_lookup(&m->mtx_object);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_release(&m->mtx_object);
			cpu_spinwait();
			continue;
		}

#ifdef MUTEX_WAKE_ALL
		MPASS(v != MTX_CONTESTED);
#else
		/*
		 * The mutex was marked contested on release. This means that
		 * there are other threads blocked on it.  Grab ownership of
		 * it and propagate its priority to the current thread if
		 * necessary.
		 */
		if (v == MTX_CONTESTED) {
			MPASS(ts != NULL);
			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;
			turnstile_claim(ts);
			break;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
		    (void *)(v | MTX_CONTESTED))) {
			turnstile_release(&m->mtx_object);
			cpu_spinwait();
			continue;
		}

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, spin instead of blocking.
		 */
		owner = (struct thread *)(v & MTX_FLAGMASK);
#ifdef ADAPTIVE_GIANT
		if (TD_IS_RUNNING(owner)) {
#else
		if (m != &Giant && TD_IS_RUNNING(owner)) {
#endif
			turnstile_release(&m->mtx_object);
			while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
				cpu_spinwait();
			}
			continue;
		}
#endif	/* SMP && !NO_ADAPTIVE_MUTEXES */

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    td, file, line, m->mtx_object.lo_name,
			    WITNESS_FILE(&m->mtx_object),
			    WITNESS_LINE(&m->mtx_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
		turnstile_wait(ts, &m->mtx_object, mtx_owner(m));
	}

#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->mtx_object.lo_name, td, file, line);
	}
#endif
#ifdef MUTEX_PROFILING
	if (contested)
		m->mtx_contest_locking++;
	m->mtx_contest_holding = 0;
#endif
	return;
}
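/*
 * Recap of the lock word states used above and in _mtx_unlock_sleep()
 * below: MTX_UNOWNED when the mutex is free, the owning thread pointer
 * once it is held, and owner | MTX_CONTESTED once a waiter has queued
 * on the turnstile, so that the release path knows it must issue a
 * wakeup.
 */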
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, struct thread *td, int opts, const char *file,
    int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	for (;;) {
		if (_obtain_lock(m, td))
			break;

		/* Give interrupts a chance while we spin. */
		critical_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
				cpu_spinwait();
				continue;
			}
			if (i < 60000000)
				DELAY(1);
			else if (!kdb_active) {
				printf("spin lock %s held by %p for > 5 seconds\n",
				    m->mtx_object.lo_name, (void *)m->mtx_lock);
#ifdef WITNESS
				witness_display_spinlock(&m->mtx_object,
				    mtx_owner(m));
#endif
				panic("spin lock held too long");
			}
			cpu_spinwait();
		}
		critical_enter();
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}
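/*
 * Usage sketch (illustrative): spin mutexes are held inside a critical
 * section, so the work done under them must stay short and must never
 * sleep.  MUTEX_EXAMPLES is a hypothetical option.
 */
#ifdef MUTEX_EXAMPLES
static void
example_spin_pair(struct mtx *m)
{

	mtx_lock_spin(m);
	/* ... brief, non-sleeping work ... */
	mtx_unlock_spin(m);
}
#endif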
/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct turnstile *ts;
#ifndef PREEMPTION
	struct thread *td, *td1;
#endif

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	ts = turnstile_lookup(&m->mtx_object);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
	if (ts == NULL) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
		turnstile_release(&m->mtx_object);
		return;
	}
#else
	MPASS(ts != NULL);
#endif
#ifndef PREEMPTION
	/* XXX */
	td1 = turnstile_head(ts);
#endif
#ifdef MUTEX_WAKE_ALL
	turnstile_broadcast(ts);
	_release_lock_quick(m);
#else
	if (turnstile_signal(ts)) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else {
		m->mtx_lock = MTX_CONTESTED;
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested",
			    m);
	}
#endif
	turnstile_unpend(ts);

#ifndef PREEMPTION
	/*
	 * XXX: This is just a hack until preemption is done.  However,
	 * once preemption is done we need to either wrap the
	 * turnstile_signal() and release of the actual lock in an
	 * extra critical section or change the preemption code to
	 * always just set a flag and never do instant-preempts.
	 */
	td = curthread;
	if (td->td_critnest > 0 || td1->td_priority >= td->td_priority)
		return;
	mtx_lock_spin(&sched_lock);
	if (!TD_IS_RUNNING(td1)) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
					    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		mi_switch(SW_INVOL, NULL);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}
	mtx_unlock_spin(&sched_lock);
#endif

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}
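/*
 * Usage sketch (illustrative): MTX_SYSINIT() registers a SYSINIT that
 * calls mtx_sysinit() above during boot, exactly as done for mprof_mtx
 * earlier in this file.  The mutex and the MUTEX_EXAMPLES option below
 * are hypothetical.
 */
#ifdef MUTEX_EXAMPLES
static struct mtx example_mtx;
MTX_SYSINIT(example_mtx, &example_mtx, "example mutex", MTX_DEF);
#endif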
/*
 * Mutex initialization routine; initialize lock `m' with options
 * contained in `opts' and name `name.'  The optional lock type `type'
 * is used as a general lock category name for use with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_object *lock;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	lock = &m->mtx_object;
	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
	    ("mutex \"%s\" %p already initialized", name, m));
	bzero(m, sizeof(*m));
	if (opts & MTX_SPIN)
		lock->lo_class = &lock_class_mtx_spin;
	else
		lock->lo_class = &lock_class_mtx_sleep;
	lock->lo_name = name;
	lock->lo_type = type != NULL ? type : name;
	if (opts & MTX_QUIET)
		lock->lo_flags = LO_QUIET;
	if (opts & MTX_RECURSE)
		lock->lo_flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		lock->lo_flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		lock->lo_flags |= LO_DUPOK;

	m->mtx_lock = MTX_UNOWNED;

	LOCK_LOG_INIT(lock, opts);

	WITNESS_INIT(lock);
}

/*
 * Destroy lock `m'.  We don't allow MTX_QUIET to be passed in as a
 * flag here because if the corresponding mtx_init() was called with
 * MTX_QUIET set, then it will already be set in the mutex's flags.
 */
void
mtx_destroy(struct mtx *m)
{

	LOCK_LOG_DESTROY(&m->mtx_object, 0);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	WITNESS_DESTROY(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * setup before this is called.
 */
void
mutex_init(void)
{

	/* Setup thread0 so that mutexes work. */
	LIST_INIT(&thread0.td_contested);

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_lock(&Giant);
}
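/*
 * Usage sketch (illustrative): the full life cycle of a dynamically
 * initialized sleep mutex, tying together mtx_init(), the lock and
 * assert paths, and mtx_destroy().  MUTEX_EXAMPLES is a hypothetical
 * option that keeps this compiled out.
 */
#ifdef MUTEX_EXAMPLES
static struct mtx example_dyn_mtx;

static void
example_lifecycle(void)
{

	mtx_init(&example_dyn_mtx, "example dyn", NULL, MTX_DEF);
	mtx_lock(&example_dyn_mtx);
	mtx_assert(&example_dyn_mtx, MA_OWNED);
	mtx_unlock(&example_dyn_mtx);
	mtx_destroy(&example_dyn_mtx);
}
#endif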