kern_mutex.c revision 111880
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD: head/sys/kern/kern_mutex.c 111880 2003-03-04 20:32:41Z jhb $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/stdint.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))

/* XXXKSE This test will change. */
#define thread_running(td) \
	((td)->td_kse != NULL && (td)->td_kse->ke_oncpu != NOCPU)

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

/*
 * Prototypes for non-exported routines.
 */
static void	propagate_priority(struct thread *);

static void
propagate_priority(struct thread *td)
{
	int pri = td->td_priority;
	struct mtx *m = td->td_blocked;

	mtx_assert(&sched_lock, MA_OWNED);
	for (;;) {
		struct thread *td1;

		td = mtx_owner(m);

		if (td == NULL) {
			/*
			 * This really isn't quite right. Really
			 * ought to bump priority of thread that
			 * next acquires the mutex.
			 */
			MPASS(m->mtx_lock == MTX_CONTESTED);
			return;
		}

		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);
		KASSERT(!TD_IS_SLEEPING(td), ("sleeping thread owns a mutex"));
		if (td->td_priority <= pri) /* lower is higher priority */
			return;

		/*
		 * If lock holder is actually running, just bump priority.
		 */
		if (TD_IS_RUNNING(td)) {
			td->td_priority = pri;
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check to see if td is curthread (this shouldn't
		 * ever happen however as it would mean we are in a deadlock.)
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If on run queue move to new run queue, and quit.
		 * XXXKSE this gets a lot more complicated under threads
		 * but try anyhow.
		 */
		if (TD_ON_RUNQ(td)) {
			MPASS(td->td_blocked == NULL);
			sched_prio(td, pri);
			return;
		}
		/*
		 * Adjust for any other cases.
		 */
		td->td_priority = pri;

		/*
		 * If we aren't blocked on a mutex, we should be.
		 */
		KASSERT(TD_ON_LOCK(td), (
		    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_state,
		    m->mtx_object.lo_name));

		/*
		 * Pick up the mutex that td is blocked on.
		 */
		m = td->td_blocked;
		MPASS(m != NULL);

		/*
		 * Check if the thread needs to be moved up on
		 * the blocked chain
		 */
		if (td == TAILQ_FIRST(&m->mtx_blocked)) {
			continue;
		}

		td1 = TAILQ_PREV(td, threadqueue, td_lockq);
		if (td1->td_priority <= pri) {
			continue;
		}

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved up to.  Since we know that td1 has
		 * a lower priority than td, we know that at least one
		 * thread in the chain has a lower priority and that
		 * td1 will thus not be NULL after the loop.
		 */
		TAILQ_REMOVE(&m->mtx_blocked, td, td_lockq);
		TAILQ_FOREACH(td1, &m->mtx_blocked, td_lockq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > pri)
				break;
		}

		MPASS(td1 != NULL);
		TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		CTR4(KTR_LOCK,
		    "propagate_priority: p %p moved before %p on [%p] %s",
		    td, td1, m, m->mtx_object.lo_name);
	}
}

#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");

struct mutex_prof {
	const char	*name;
	const char	*file;
	int		line;
	uintmax_t	cnt_max;
	uintmax_t	cnt_tot;
	uintmax_t	cnt_cur;
	struct mutex_prof *next;
};

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#define NUM_MPROF_BUFFERS	1000
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#define MPROF_HASH_SIZE		1009
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];
/* SWAG: sbuf size = avg stat. line size * number of locks */
#define MPROF_SBUF_SIZE		256 * 400

static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");

/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);

static u_int64_t
nanoseconds(void)
{
	struct timespec tv;

	nanotime(&tv);
	return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error, i;
	static int multiplier = 1;

	if (first_free_mprof_buf == 0)
		return (SYSCTL_OUT(req, "No locking recorded",
		    sizeof("No locking recorded")));

retry_sbufops:
	sb = sbuf_new(NULL, NULL, MPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
	sbuf_printf(sb, "%6s %12s %11s %5s %s\n",
	    "max", "total", "count", "avg", "name");
	/*
	 * XXX this spinlock seems to be by far the largest perpetrator
	 * of spinlock latency (1.6 msec on an Athlon1600 was recorded
	 * even before I pessimized it further by moving the average
	 * computation here).
	 */
	mtx_lock_spin(&mprof_mtx);
	for (i = 0; i < first_free_mprof_buf; ++i) {
		sbuf_printf(sb, "%6ju %12ju %11ju %5ju %s:%d (%s)\n",
		    mprof_buf[i].cnt_max / 1000,
		    mprof_buf[i].cnt_tot / 1000,
		    mprof_buf[i].cnt_cur,
		    mprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
			mprof_buf[i].cnt_tot / (mprof_buf[i].cnt_cur * 1000),
		    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
		if (sbuf_overflowed(sb)) {
			mtx_unlock_spin(&mprof_mtx);
			sbuf_delete(sb);
			multiplier++;
			goto retry_sbufops;
		}
	}
	mtx_unlock_spin(&mprof_mtx);
	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
	/* don't reset the timer when/if recursing */
	if (m->mtx_acqtime == 0) {
		m->mtx_filename = file;
		m->mtx_lineno = line;
		m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
		++mutex_prof_acquisitions;
	}
#endif
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
	if (m->mtx_acqtime != 0) {
		static const char *unknown = "(unknown)";
		struct mutex_prof *mpp;
		u_int64_t acqtime, now;
		const char *p, *q;
		volatile u_int hash;

		now = nanoseconds();
		acqtime = m->mtx_acqtime;
		m->mtx_acqtime = 0;
		if (now <= acqtime)
			goto out;
		for (p = m->mtx_filename;
		    p != NULL && strncmp(p, "../", 3) == 0; p += 3)
			/* nothing */ ;
		if (p == NULL || *p == '\0')
			p = unknown;
		for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
			hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
		mtx_lock_spin(&mprof_mtx);
		for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
			if (mpp->line == m->mtx_lineno &&
			    strcmp(mpp->file, p) == 0)
				break;
		if (mpp == NULL) {
			/* Just exit if we cannot get a trace buffer */
			if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
				++mutex_prof_rejected;
				goto unlock;
			}
			mpp = &mprof_buf[first_free_mprof_buf++];
			mpp->name = mtx_name(m);
			mpp->file = p;
			mpp->line = m->mtx_lineno;
			mpp->next = mprof_hash[hash];
			if (mprof_hash[hash] != NULL)
				++mutex_prof_collisions;
			mprof_hash[hash] = mpp;
			++mutex_prof_records;
		}
		/*
		 * Record if the mutex has been held longer now than ever
		 * before.
		 */
		if (now - acqtime > mpp->cnt_max)
			mpp->cnt_max = now - acqtime;
		mpp->cnt_tot += now - acqtime;
		mpp->cnt_cur++;
unlock:
		mtx_unlock_spin(&mprof_mtx);
	}
out:
#endif
	_rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
#if defined(SMP) || LOCK_DEBUG > 0 || 1
	_get_spin_lock(m, curthread, opts, file, line);
#else
	critical_enter();
#endif
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
#if defined(SMP) || LOCK_DEBUG > 0 || 1
	_rel_spin_lock(m);
#else
	critical_exit();
#endif
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  We do NOT handle recursion here; we assume that
 * if we're called, it's because we know we don't already own this lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);

	KASSERT(!mtx_owned(m),
	    ("mtx_trylock() called on a mutex already owned"));

	rval = _obtain_lock(m, curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval)
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);

	return (rval);
}

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td = curthread;
	struct thread *td1;
#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
	struct thread *owner;
#endif
	uintptr_t v;
#ifdef KTR
	int cont_logged = 0;
#endif

	if (mtx_owned(m)) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, td)) {

		mtx_lock_spin(&sched_lock);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the sched_lock.
		 */
		if (v == MTX_UNOWNED) {
			mtx_unlock_spin(&sched_lock);
#ifdef __i386__
			ia32_pause();
#endif
			continue;
		}

		/*
		 * The mutex was marked contested on release. This means that
		 * there are threads blocked on it.
		 */
		if (v == MTX_CONTESTED) {
			td1 = TAILQ_FIRST(&m->mtx_blocked);
			MPASS(td1 != NULL);
			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;

			if (td1->td_priority < td->td_priority)
				td->td_priority = td1->td_priority;
			mtx_unlock_spin(&sched_lock);
			return;
		}

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
			(void *)(v | MTX_CONTESTED))) {
			mtx_unlock_spin(&sched_lock);
#ifdef __i386__
			ia32_pause();
#endif
			continue;
		}

#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, spin instead of blocking.
		 */
		owner = (struct thread *)(v & MTX_FLAGMASK);
		if (m != &Giant && thread_running(owner)) {
			mtx_unlock_spin(&sched_lock);
			while (mtx_owner(m) == owner && thread_running(owner)) {
#ifdef __i386__
				ia32_pause();
#endif
			}
			continue;
		}
#endif	/* SMP && ADAPTIVE_MUTEXES */

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef notyet
		/*
		 * If we're borrowing an interrupted thread's VM context, we
		 * must clean up before going to sleep.
		 */
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
					    "_mtx_lock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif

		/*
		 * Put us on the list of threads blocked on this mutex.
		 */
		if (TAILQ_EMPTY(&m->mtx_blocked)) {
			td1 = mtx_owner(m);
			LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
			TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_lockq);
		} else {
			TAILQ_FOREACH(td1, &m->mtx_blocked, td_lockq)
				if (td1->td_priority > td->td_priority)
					break;
			if (td1)
				TAILQ_INSERT_BEFORE(td1, td, td_lockq);
			else
				TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_lockq);
		}
#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    td, file, line, m->mtx_object.lo_name,
			    WITNESS_FILE(&m->mtx_object),
			    WITNESS_LINE(&m->mtx_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Save who we're blocked on.
		 */
		td->td_blocked = m;
		td->td_lockname = m->mtx_object.lo_name;
		TD_SET_LOCK(td);
		propagate_priority(td);

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
			    m->mtx_object.lo_name);

		td->td_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch();

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
			    td, m, m->mtx_object.lo_name);

		mtx_unlock_spin(&sched_lock);
	}

#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->mtx_object.lo_name, td, file, line);
	}
#endif
	return;
}

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	for (;;) {
		if (_obtain_lock(m, curthread))
			break;

		/* Give interrupts a chance while we spin. */
		critical_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
#ifdef __i386__
				ia32_pause();
#endif
				continue;
			}
			if (i < 60000000)
				DELAY(1);
#ifdef DDB
			else if (!db_active)
#else
			else
#endif
				panic("spin lock %s held by %p for > 5 seconds",
				    m->mtx_object.lo_name, (void *)m->mtx_lock);
#ifdef __i386__
			ia32_pause();
#endif
		}
		critical_enter();
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td, *td1;
	struct mtx *m1;
	int pri;

	td = curthread;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	mtx_lock_spin(&sched_lock);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

	td1 = TAILQ_FIRST(&m->mtx_blocked);
#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
	if (td1 == NULL) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
		mtx_unlock_spin(&sched_lock);
		return;
	}
#endif
	MPASS(td->td_proc->p_magic == P_MAGIC);
	MPASS(td1->td_proc->p_magic == P_MAGIC);

	TAILQ_REMOVE(&m->mtx_blocked, td1, td_lockq);

	if (TAILQ_EMPTY(&m->mtx_blocked)) {
		LIST_REMOVE(m, mtx_contested);
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else
		atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);

	pri = PRI_MAX;
	LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority;
		if (cp < pri)
			pri = cp;
	}

	if (pri > td->td_base_pri)
		pri = td->td_base_pri;
	td->td_priority = pri;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
		    m, td1);

	td1->td_blocked = NULL;
	TD_CLR_LOCK(td1);
	if (!TD_CAN_RUN(td1)) {
		mtx_unlock_spin(&sched_lock);
		return;
	}
	setrunqueue(td1);

	if (td->td_critnest == 1 && td1->td_priority < pri) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
					    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		td->td_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}

	mtx_unlock_spin(&sched_lock);

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}

/*
 * Mutex initialization routine; initialize lock `m' with options contained
 * in `opts' and name `name.'  The optional lock type `type' is used as a
 * general lock category name for use with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_object *lock;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_SLEEPABLE | MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	lock = &m->mtx_object;
	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
	    ("mutex %s %p already initialized", name, m));
	bzero(m, sizeof(*m));
	if (opts & MTX_SPIN)
		lock->lo_class = &lock_class_mtx_spin;
	else
		lock->lo_class = &lock_class_mtx_sleep;
	lock->lo_name = name;
	lock->lo_type = type != NULL ? type : name;
	if (opts & MTX_QUIET)
		lock->lo_flags = LO_QUIET;
	if (opts & MTX_RECURSE)
		lock->lo_flags |= LO_RECURSABLE;
	if (opts & MTX_SLEEPABLE)
		lock->lo_flags |= LO_SLEEPABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		lock->lo_flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		lock->lo_flags |= LO_DUPOK;

	m->mtx_lock = MTX_UNOWNED;
	TAILQ_INIT(&m->mtx_blocked);

	LOCK_LOG_INIT(lock, opts);

	WITNESS_INIT(lock);
}

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

	LOCK_LOG_DESTROY(&m->mtx_object, 0);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	WITNESS_DESTROY(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup thread0 so that mutexes work. */
	LIST_INIT(&thread0.td_contested);

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_lock(&Giant);
}

/*
 * Encapsulated Giant mutex routines.  These routines provide encapsulation
 * control for the Giant mutex, allowing sysctls to be used to turn on and
 * off Giant around certain subsystems.  The default values for the sysctls
 * are set to what developers believe is stable and working with regard to
 * the Giant pushdown.  Developers should not turn off Giant via these
 * sysctls unless they know what they are doing.
 *
 * Callers of mtx_lock_giant() are expected to pass the return value to an
 * accompanying mtx_unlock_giant() later on.  If multiple subsystems are
 * affected by a Giant wrap, all related sysctl variables must be zero for
 * the subsystem call to operate without Giant (as determined by the caller).
 */

SYSCTL_NODE(_kern, OID_AUTO, giant, CTLFLAG_RD, NULL, "Giant mutex manipulation");

static int kern_giant_all = 0;
SYSCTL_INT(_kern_giant, OID_AUTO, all, CTLFLAG_RW, &kern_giant_all, 0, "");

int kern_giant_proc = 1;	/* Giant around PROC locks */
int kern_giant_file = 1;	/* Giant around struct file & filedesc */
int kern_giant_ucred = 1;	/* Giant around ucred */
SYSCTL_INT(_kern_giant, OID_AUTO, proc, CTLFLAG_RW, &kern_giant_proc, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, file, CTLFLAG_RW, &kern_giant_file, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, ucred, CTLFLAG_RW, &kern_giant_ucred, 0, "");

int
mtx_lock_giant(int sysctlvar)
{
	if (sysctlvar || kern_giant_all) {
		mtx_lock(&Giant);
		return (1);
	}
	return (0);
}

void
mtx_unlock_giant(int s)
{
	if (s)
		mtx_unlock(&Giant);
}
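
/*
 * A minimal usage sketch of the encapsulated Giant pattern described in the
 * comment block above: save the value returned by mtx_lock_giant() and hand
 * it back to mtx_unlock_giant().  The function example_giant_wrapped_op()
 * and its body are illustrative assumptions only and are not part of this
 * revision, so the sketch is kept under "notdef".
 */
#ifdef notdef
static void
example_giant_wrapped_op(struct proc *p)
{
	int gotgiant;

	/* Take Giant only if kern.giant.proc (or kern.giant.all) is set. */
	gotgiant = mtx_lock_giant(kern_giant_proc);
	PROC_LOCK(p);
	/* ... subsystem work that may still rely on Giant ... */
	PROC_UNLOCK(p);
	/* Drop Giant only if we actually acquired it above. */
	mtx_unlock_giant(gotgiant);
}
#endif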