kern_mutex.c revision 105644
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD: head/sys/kern/kern_mutex.c 105644 2002-10-21 18:48:28Z des $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/stdint.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))

/* XXXKSE This test will change. */
#define	thread_running(td)						\
	((td)->td_kse != NULL && (td)->td_kse->ke_oncpu != NOCPU)

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

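/*
 * Editor's note (added commentary, not part of revision 105644): the
 * mtx_lock word either holds MTX_UNOWNED or the owning thread pointer
 * with the low flag bits (MTX_RECURSED, MTX_CONTESTED) OR'd in, which
 * is why mtx_owner() above must mask with MTX_FLAGMASK before casting
 * the word back to a struct thread pointer.
 */
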
/*
 * Prototypes for non-exported routines.
 */
static void	propagate_priority(struct thread *);

static void
propagate_priority(struct thread *td)
{
	int pri = td->td_priority;
	struct mtx *m = td->td_blocked;

	mtx_assert(&sched_lock, MA_OWNED);
	for (;;) {
		struct thread *td1;

		td = mtx_owner(m);

		if (td == NULL) {
			/*
			 * This really isn't quite right. Really
			 * ought to bump priority of thread that
			 * next acquires the mutex.
			 */
			MPASS(m->mtx_lock == MTX_CONTESTED);
			return;
		}

		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);
		KASSERT(!TD_IS_SLEEPING(td), ("sleeping thread owns a mutex"));
		if (td->td_priority <= pri) /* lower is higher priority */
			return;

		/*
		 * If lock holder is actually running, just bump priority.
		 */
		if (TD_IS_RUNNING(td)) {
			td->td_priority = pri;
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check to see if td is curthread (this shouldn't
		 * ever happen however as it would mean we are in a deadlock.)
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If on run queue move to new run queue, and quit.
		 * XXXKSE this gets a lot more complicated under threads
		 * but try anyhow.
		 */
		if (TD_ON_RUNQ(td)) {
			MPASS(td->td_blocked == NULL);
			sched_prio(td, pri);
			return;
		}
		/*
		 * Adjust for any other cases.
		 */
		td->td_priority = pri;

		/*
		 * If we aren't blocked on a mutex, we should be.
		 */
		KASSERT(TD_ON_LOCK(td), (
		    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_state,
		    m->mtx_object.lo_name));

		/*
		 * Pick up the mutex that td is blocked on.
		 */
		m = td->td_blocked;
		MPASS(m != NULL);

		/*
		 * Check if the thread needs to be moved up on
		 * the blocked chain
		 */
		if (td == TAILQ_FIRST(&m->mtx_blocked)) {
			continue;
		}

		td1 = TAILQ_PREV(td, threadqueue, td_lockq);
		if (td1->td_priority <= pri) {
			continue;
		}

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved up to.  Since we know that td1 has
		 * a lower priority than td, we know that at least one
		 * thread in the chain has a lower priority and that
		 * td1 will thus not be NULL after the loop.
		 */
		TAILQ_REMOVE(&m->mtx_blocked, td, td_lockq);
		TAILQ_FOREACH(td1, &m->mtx_blocked, td_lockq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > pri)
				break;
		}

		MPASS(td1 != NULL);
		TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		CTR4(KTR_LOCK,
		    "propagate_priority: p %p moved before %p on [%p] %s",
		    td, td1, m, m->mtx_object.lo_name);
	}
}

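/*
 * Editor's note (added commentary, not part of revision 105644): a
 * worked example of the walk above, remembering that lower numeric
 * values mean higher priority.  Suppose thread A (priority 40) blocks
 * on a mutex held by B (priority 80), and B is itself blocked on a
 * mutex held by C (priority 100).  propagate_priority(A) lends B
 * priority 40; because B is blocked rather than running or on a run
 * queue, the loop follows B->td_blocked, re-sorts B in that mutex's
 * mtx_blocked queue if needed, and then lends C priority 40 as well,
 * so the whole chain runs at the waiter's priority.
 */
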
#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");

struct mutex_prof {
	const char	*name;
	const char	*file;
	int		line;
	/*
	 * XXX should use specialized struct members instead of an array
	 * and these silly #defines.
	 */
#define MPROF_MAX	0
#define MPROF_TOT	1
#define MPROF_CNT	2
	uintmax_t	counter[3];
	struct mutex_prof *next;
};

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#define NUM_MPROF_BUFFERS	1000
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#define MPROF_HASH_SIZE		1009
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];

static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");

/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);

static u_int64_t
nanoseconds(void)
{
	struct timespec tv;

	nanotime(&tv);
	return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error, i;

	if (first_free_mprof_buf == 0)
		return (SYSCTL_OUT(req, "No locking recorded",
		    sizeof("No locking recorded")));

	sb = sbuf_new(NULL, NULL, 1024, SBUF_AUTOEXTEND);
	sbuf_printf(sb, "%6s %12s %11s %5s %s\n",
	    "max", "total", "count", "avg", "name");
	/*
	 * XXX this spinlock seems to be by far the largest perpetrator
	 * of spinlock latency (1.6 msec on an Athlon1600 was recorded
	 * even before I pessimized it further by moving the average
	 * computation here).
	 */
	mtx_lock_spin(&mprof_mtx);
	for (i = 0; i < first_free_mprof_buf; ++i)
		sbuf_printf(sb, "%6ju %12ju %11ju %5ju %s:%d (%s)\n",
		    mprof_buf[i].counter[MPROF_MAX] / 1000,
		    mprof_buf[i].counter[MPROF_TOT] / 1000,
		    mprof_buf[i].counter[MPROF_CNT],
		    mprof_buf[i].counter[MPROF_CNT] == 0 ? (uintmax_t)0 :
			mprof_buf[i].counter[MPROF_TOT] /
			(mprof_buf[i].counter[MPROF_CNT] * 1000),
		    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
	mtx_unlock_spin(&mprof_mtx);
	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");
#endif

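/*
 * Editor's note (added commentary, not part of revision 105644): with
 * MUTEX_PROFILING compiled in, the records above are exposed through
 * sysctl(8), e.g.:
 *
 *	sysctl debug.mutex.prof.enable=1
 *	... run the workload ...
 *	sysctl debug.mutex.prof.stats
 *
 * dump_mutex_prof_stats() divides the nanosecond counters by 1000, so
 * the "max", "total" and "avg" columns are reported in microseconds.
 */
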
/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
	/* don't reset the timer when/if recursing */
	if (m->mtx_acqtime == 0) {
		m->mtx_filename = file;
		m->mtx_lineno = line;
		m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
		++mutex_prof_acquisitions;
	}
#endif
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
	if (m->mtx_acqtime != 0) {
		static const char *unknown = "(unknown)";
		struct mutex_prof *mpp;
		u_int64_t acqtime, now;
		const char *p, *q;
		volatile u_int hash;

		now = nanoseconds();
		acqtime = m->mtx_acqtime;
		m->mtx_acqtime = 0;
		if (now <= acqtime)
			goto out;
		for (p = m->mtx_filename; strncmp(p, "../", 3) == 0; p += 3)
			/* nothing */ ;
		if (p == NULL || *p == '\0')
			p = unknown;
		for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
			hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
		mtx_lock_spin(&mprof_mtx);
		for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
			if (mpp->line == m->mtx_lineno &&
			    strcmp(mpp->file, p) == 0)
				break;
		if (mpp == NULL) {
			/* Just exit if we cannot get a trace buffer */
			if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
				++mutex_prof_rejected;
				goto unlock;
			}
			mpp = &mprof_buf[first_free_mprof_buf++];
			mpp->name = mtx_name(m);
			mpp->file = p;
			mpp->line = m->mtx_lineno;
			mpp->next = mprof_hash[hash];
			if (mprof_hash[hash] != NULL)
				++mutex_prof_collisions;
			mprof_hash[hash] = mpp;
			++mutex_prof_records;
		}
		/*
		 * Record if the mutex has been held longer now than ever
		 * before.
		 */
		if (now - acqtime > mpp->counter[MPROF_MAX])
			mpp->counter[MPROF_MAX] = now - acqtime;
		mpp->counter[MPROF_TOT] += now - acqtime;
		mpp->counter[MPROF_CNT]++;
unlock:
		mtx_unlock_spin(&mprof_mtx);
	}
out:
#endif
	_rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
#if defined(SMP) || LOCK_DEBUG > 0 || 1
	_get_spin_lock(m, curthread, opts, file, line);
#else
	critical_enter();
#endif
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
#if defined(SMP) || LOCK_DEBUG > 0 || 1
	_rel_spin_lock(m);
#else
	critical_exit();
#endif
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  We do NOT handle recursion here; we assume that
 * if we're called, it's because we know we don't already own this lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);

	rval = _obtain_lock(m, curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval) {
		/*
		 * We do not handle recursion in _mtx_trylock; see the
		 * note at the top of the routine.
		 */
		KASSERT(!mtx_recursed(m),
		    ("mtx_trylock() called on a recursed mutex"));
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	}

	return (rval);
}

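/*
 * Editor's note (added commentary, not part of revision 105644): a
 * minimal sketch of the intended mtx_trylock() calling pattern; the
 * mutex name "foo_mtx" is hypothetical.  Recursive use is forbidden
 * here, so the caller must not already own the lock:
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		... lock acquired, do the work ...
 *		mtx_unlock(&foo_mtx);
 *	} else {
 *		... would have blocked, take a fallback path ...
 *	}
 */
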
/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td = curthread;
#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
	struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif

	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, td)) {
		uintptr_t v;
		struct thread *td1;

		mtx_lock_spin(&sched_lock);
		/*
		 * Check if the lock has been released while spinning for
		 * the sched_lock.
		 */
		if ((v = m->mtx_lock) == MTX_UNOWNED) {
			mtx_unlock_spin(&sched_lock);
#ifdef __i386__
			ia32_pause();
#endif
			continue;
		}

		/*
		 * The mutex was marked contested on release. This means that
		 * there are threads blocked on it.
		 */
		if (v == MTX_CONTESTED) {
			td1 = TAILQ_FIRST(&m->mtx_blocked);
			MPASS(td1 != NULL);
			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;

			if (td1->td_priority < td->td_priority)
				td->td_priority = td1->td_priority;
			mtx_unlock_spin(&sched_lock);
			return;
		}

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
			(void *)(v | MTX_CONTESTED))) {
			mtx_unlock_spin(&sched_lock);
#ifdef __i386__
			ia32_pause();
#endif
			continue;
		}

#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, spin instead of blocking.
		 */
		owner = (struct thread *)(v & MTX_FLAGMASK);
		if (m != &Giant && thread_running(owner)) {
			mtx_unlock_spin(&sched_lock);
			while (mtx_owner(m) == owner && thread_running(owner)) {
#ifdef __i386__
				ia32_pause();
#endif
			}
			continue;
		}
#endif	/* SMP && ADAPTIVE_MUTEXES */

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef notyet
		/*
		 * If we're borrowing an interrupted thread's VM context, we
		 * must clean up before going to sleep.
		 */
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
					    "_mtx_lock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif

		/*
		 * Put us on the list of threads blocked on this mutex.
		 */
		if (TAILQ_EMPTY(&m->mtx_blocked)) {
			td1 = mtx_owner(m);
			LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
			TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_lockq);
		} else {
			TAILQ_FOREACH(td1, &m->mtx_blocked, td_lockq)
				if (td1->td_priority > td->td_priority)
					break;
			if (td1)
				TAILQ_INSERT_BEFORE(td1, td, td_lockq);
			else
				TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_lockq);
		}
#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    td, file, line, m->mtx_object.lo_name,
			    WITNESS_FILE(&m->mtx_object),
			    WITNESS_LINE(&m->mtx_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Save who we're blocked on.
		 */
		td->td_blocked = m;
		td->td_lockname = m->mtx_object.lo_name;
		TD_SET_LOCK(td);
		propagate_priority(td);

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
			    m->mtx_object.lo_name);

		td->td_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch();

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
			    td, m, m->mtx_object.lo_name);

		mtx_unlock_spin(&sched_lock);
	}

#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->mtx_object.lo_name, td, file, line);
	}
#endif
	return;
}

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	for (;;) {
		if (_obtain_lock(m, curthread))
			break;

		/* Give interrupts a chance while we spin. */
		critical_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
#ifdef __i386__
				ia32_pause();
#endif
				continue;
			}
			if (i < 60000000)
				DELAY(1);
#ifdef DDB
			else if (!db_active)
#else
			else
#endif
				panic("spin lock %s held by %p for > 5 seconds",
				    m->mtx_object.lo_name, (void *)m->mtx_lock);
#ifdef __i386__
			ia32_pause();
#endif
		}
		critical_enter();
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}

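/*
 * Editor's note (added commentary, not part of revision 105644): a
 * minimal spin mutex sketch, modeled on the MTX_SYSINIT() usage of
 * mprof_mtx earlier in this file; the name "intr_mtx" is hypothetical.
 * Spin mutexes never context switch, so they suit short critical
 * sections that may be entered from interrupt context:
 *
 *	static struct mtx intr_mtx;
 *	MTX_SYSINIT(intr, &intr_mtx, "intr state", MTX_SPIN);
 *
 *	mtx_lock_spin(&intr_mtx);
 *	... touch the shared state, briefly ...
 *	mtx_unlock_spin(&intr_mtx);
 */
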
/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td, *td1;
	struct mtx *m1;
	int pri;

	td = curthread;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	mtx_lock_spin(&sched_lock);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

	td1 = TAILQ_FIRST(&m->mtx_blocked);
#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
	if (td1 == NULL) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
		mtx_unlock_spin(&sched_lock);
		return;
	}
#endif
	MPASS(td->td_proc->p_magic == P_MAGIC);
	MPASS(td1->td_proc->p_magic == P_MAGIC);

	TAILQ_REMOVE(&m->mtx_blocked, td1, td_lockq);

	if (TAILQ_EMPTY(&m->mtx_blocked)) {
		LIST_REMOVE(m, mtx_contested);
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else
		atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);

	pri = PRI_MAX;
	LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority;
		if (cp < pri)
			pri = cp;
	}

	if (pri > td->td_base_pri)
		pri = td->td_base_pri;
	td->td_priority = pri;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
		    m, td1);

	td1->td_blocked = NULL;
	TD_CLR_LOCK(td1);
	if (!TD_CAN_RUN(td1)) {
		mtx_unlock_spin(&sched_lock);
		return;
	}
	setrunqueue(td1);

	if (td->td_critnest == 1 && td1->td_priority < pri) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
					    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		td->td_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}

	mtx_unlock_spin(&sched_lock);

	return;
}

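/*
 * Editor's note (added commentary, not part of revision 105644): the
 * release path above wakes the head of mtx_blocked, which the lock
 * path keeps sorted by priority, so the highest-priority waiter runs
 * first.  If other waiters remain, the lock word is left as
 * MTX_CONTESTED, forcing the next acquirer through _mtx_lock_sleep()
 * so the hand-off bookkeeping stays consistent.
 */
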
/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}

/*
 * Mutex initialization routine; initialize lock `m' of type contained in
 * `opts' with options contained in `opts' and name `name.'  The optional
 * lock type `type' is used as a general lock category name for use with
 * witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_object *lock;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_SLEEPABLE | MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	lock = &m->mtx_object;
	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
	    ("mutex %s %p already initialized", name, m));
	bzero(m, sizeof(*m));
	if (opts & MTX_SPIN)
		lock->lo_class = &lock_class_mtx_spin;
	else
		lock->lo_class = &lock_class_mtx_sleep;
	lock->lo_name = name;
	lock->lo_type = type != NULL ? type : name;
	if (opts & MTX_QUIET)
		lock->lo_flags = LO_QUIET;
	if (opts & MTX_RECURSE)
		lock->lo_flags |= LO_RECURSABLE;
	if (opts & MTX_SLEEPABLE)
		lock->lo_flags |= LO_SLEEPABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		lock->lo_flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		lock->lo_flags |= LO_DUPOK;

	m->mtx_lock = MTX_UNOWNED;
	TAILQ_INIT(&m->mtx_blocked);

	LOCK_LOG_INIT(lock, opts);

	WITNESS_INIT(lock);
}

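/*
 * Editor's note (added commentary, not part of revision 105644): the
 * common life cycle for a driver's sleep mutex; the "foo" names are
 * hypothetical.
 *
 *	struct foo_softc {
 *		struct mtx	sc_mtx;
 *		...
 *	};
 *
 *	mtx_init(&sc->sc_mtx, "foo softc", NULL, MTX_DEF);
 *	mtx_lock(&sc->sc_mtx);
 *	... manipulate the softc ...
 *	mtx_unlock(&sc->sc_mtx);
 *	mtx_destroy(&sc->sc_mtx);
 */
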
/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

	LOCK_LOG_DESTROY(&m->mtx_object, 0);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	WITNESS_DESTROY(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup thread0 so that mutexes work. */
	LIST_INIT(&thread0.td_contested);

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_lock(&Giant);
}

/*
 * Encapsulated Giant mutex routines.  These routines provide encapsulation
 * control for the Giant mutex, allowing sysctls to be used to turn on and
 * off Giant around certain subsystems.  The default values for the sysctls
 * are set to what developers believe is stable and working in regards to
 * the Giant pushdown.  Developers should not turn off Giant via these
 * sysctls unless they know what they are doing.
 *
 * Callers of mtx_lock_giant() are expected to pass the return value to an
 * accompanying mtx_unlock_giant() later on.  If multiple subsystems are
 * affected by a Giant wrap, all related sysctl variables must be zero for
 * the subsystem call to operate without Giant (as determined by the caller).
 */

SYSCTL_NODE(_kern, OID_AUTO, giant, CTLFLAG_RD, NULL, "Giant mutex manipulation");

static int kern_giant_all = 0;
SYSCTL_INT(_kern_giant, OID_AUTO, all, CTLFLAG_RW, &kern_giant_all, 0, "");

int kern_giant_proc = 1;	/* Giant around PROC locks */
int kern_giant_file = 1;	/* Giant around struct file & filedesc */
int kern_giant_ucred = 1;	/* Giant around ucred */
SYSCTL_INT(_kern_giant, OID_AUTO, proc, CTLFLAG_RW, &kern_giant_proc, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, file, CTLFLAG_RW, &kern_giant_file, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, ucred, CTLFLAG_RW, &kern_giant_ucred, 0, "");

int
mtx_lock_giant(int sysctlvar)
{
	if (sysctlvar || kern_giant_all) {
		mtx_lock(&Giant);
		return (1);
	}
	return (0);
}

void
mtx_unlock_giant(int s)
{
	if (s)
		mtx_unlock(&Giant);
}

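/*
 * Editor's note (added commentary, not part of revision 105644): a
 * sketch of the mtx_lock_giant()/mtx_unlock_giant() convention
 * described above, using the real kern_giant_file variable; the
 * function "foo_file_op" is hypothetical.
 *
 *	int
 *	foo_file_op(struct file *fp)
 *	{
 *		int s;
 *
 *		s = mtx_lock_giant(kern_giant_file);
 *		... operate on the file ...
 *		mtx_unlock_giant(s);
 *		return (0);
 *	}
 */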