kern_mutex.c revision 93702
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD: head/sys/kern/kern_mutex.c 93702 2002-04-02 22:19:16Z jhb $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

/*
 * Prototypes for non-exported routines.
 */
static void	propagate_priority(struct thread *);

static void
propagate_priority(struct thread *td)
{
	int pri = td->td_priority;
	struct mtx *m = td->td_blocked;

	mtx_assert(&sched_lock, MA_OWNED);
	for (;;) {
		struct thread *td1;

		td = mtx_owner(m);

		if (td == NULL) {
			/*
			 * This really isn't quite right.  Really
			 * ought to bump priority of thread that
			 * next acquires the mutex.
			 */
			MPASS(m->mtx_lock == MTX_CONTESTED);
			return;
		}

		MPASS(td->td_proc->p_magic == P_MAGIC);
		KASSERT(td->td_proc->p_stat != SSLEEP,
		    ("sleeping thread owns a mutex"));
		if (td->td_priority <= pri)	/* lower is higher priority */
			return;

		/*
		 * Bump this thread's priority.
		 */
		td->td_priority = pri;

		/*
		 * If lock holder is actually running, just bump priority.
		 */
		/* XXXKSE this test is not sufficient */
		if (td->td_kse && (td->td_kse->ke_oncpu != NOCPU)) {
			MPASS(td->td_proc->p_stat == SRUN
			    || td->td_proc->p_stat == SZOMB
			    || td->td_proc->p_stat == SSTOP);
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check whether td is curthread (this should
		 * never happen, since it would mean we are in a deadlock.)
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If on run queue move to new run queue, and quit.
		 * XXXKSE this gets a lot more complicated under threads
		 * but try anyhow.
		 */
		if (td->td_proc->p_stat == SRUN) {
			MPASS(td->td_blocked == NULL);
			remrunqueue(td);
			setrunqueue(td);
			return;
		}

		/*
		 * If we aren't blocked on a mutex, we should be.
		 */
		KASSERT(td->td_proc->p_stat == SMTX, (
		    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_proc->p_stat,
		    m->mtx_object.lo_name));

		/*
		 * Pick up the mutex that td is blocked on.
		 */
		m = td->td_blocked;
		MPASS(m != NULL);

		/*
		 * Check if the thread needs to be moved up on
		 * the blocked chain.
		 */
		if (td == TAILQ_FIRST(&m->mtx_blocked)) {
			continue;
		}

		td1 = TAILQ_PREV(td, threadqueue, td_blkq);
		if (td1->td_priority <= pri) {
			continue;
		}

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved up to.  Since we know that td1 has
		 * a lower priority than td, we know that at least one
		 * thread in the chain has a lower priority and that
		 * td1 will thus not be NULL after the loop.
		 */
		TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
		TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > pri)
				break;
		}

		MPASS(td1 != NULL);
		TAILQ_INSERT_BEFORE(td1, td, td_blkq);
		CTR4(KTR_LOCK,
		    "propagate_priority: p %p moved before %p on [%p] %s",
		    td, td1, m, m->mtx_object.lo_name);
	}
}
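/*
 * Worked example (illustrative, not part of the original file; the
 * priorities are invented): thread A at td_priority 40 blocks on a mutex
 * owned by thread B at td_priority 80 (smaller values are higher
 * priority).  propagate_priority(A) finds B via mtx_owner(), lowers
 * B->td_priority to 40 and, if B is on a run queue (SRUN), requeues it;
 * if B is itself blocked on another mutex (SMTX), the loop repeats with
 * that mutex's owner, re-sorting each mtx_blocked chain so the waiter
 * queues stay ordered by priority.
 */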
#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");

struct mutex_prof {
	const char *name;
	const char *file;
	int line;
#define MPROF_MAX 0
#define MPROF_TOT 1
#define MPROF_CNT 2
#define MPROF_AVG 3
	u_int64_t counter[4];
};

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#define NUM_MPROF_BUFFERS 4096
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#define MPROF_HASH_SIZE 32771
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];

static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");

/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;

static void
mprof_init(void *arg __unused)
{
	mtx_init(&mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);
}
SYSINIT(mprofinit, SI_SUB_LOCK, SI_ORDER_ANY, mprof_init, NULL);

static u_int64_t
nanoseconds(void)
{
	struct timespec tv;

	nanotime(&tv);
	return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error, i;

	if (first_free_mprof_buf == 0)
		return SYSCTL_OUT(req, "No locking recorded",
		    sizeof("No locking recorded"));

	sb = sbuf_new(NULL, NULL, 1024, SBUF_AUTOEXTEND);
	sbuf_printf(sb, "%12s %12s %12s %12s %s\n",
	    "max", "total", "count", "average", "name");
	mtx_lock_spin(&mprof_mtx);
	for (i = 0; i < first_free_mprof_buf; ++i)
		sbuf_printf(sb, "%12llu %12llu %12llu %12llu %s:%d (%s)\n",
		    mprof_buf[i].counter[MPROF_MAX] / 1000,
		    mprof_buf[i].counter[MPROF_TOT] / 1000,
		    mprof_buf[i].counter[MPROF_CNT],
		    mprof_buf[i].counter[MPROF_AVG] / 1000,
		    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
	mtx_unlock_spin(&mprof_mtx);
	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");
#endif
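/*
 * Usage sketch (illustrative, not part of the original file): on a kernel
 * compiled with MUTEX_PROFILING, hold times are collected and read back
 * through the sysctls declared above, e.g. from userland:
 *
 *	sysctl debug.mutex.prof.enable=1
 *	... run the workload of interest ...
 *	sysctl debug.mutex.prof.stats
 *
 * The stats handler reports max/total/count/average hold times (the
 * nanosecond counters are divided by 1000 for display), keyed by the
 * file:line of the unlocking call site.
 */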
/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
	/* don't reset the timer when/if recursing */
	if (m->acqtime == 0) {
		m->file = file;
		m->line = line;
		m->acqtime = mutex_prof_enable ? nanoseconds() : 0;
		++mutex_prof_acquisitions;
	}
#endif
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
	if (m->acqtime != 0) {
		static const char *unknown = "(unknown)";
		struct mutex_prof *mpp;
		u_int64_t acqtime, now;
		const char *p, *q;
		volatile u_int hash, n;

		now = nanoseconds();
		acqtime = m->acqtime;
		m->acqtime = 0;
		if (now <= acqtime)
			goto out;
		for (p = file; strncmp(p, "../", 3) == 0; p += 3)
			/* nothing */ ;
		if (p == NULL || *p == '\0')
			p = unknown;
		for (hash = line, q = p; *q != '\0'; ++q)
			hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
		mtx_lock_spin(&mprof_mtx);
		n = hash;
		while ((mpp = mprof_hash[n]) != NULL) {
			if (mpp->line == line && strcmp(mpp->file, p) == 0)
				break;
			n = (n + 1) % MPROF_HASH_SIZE;
		}
		if (mpp == NULL) {
			/* Just exit if we cannot get a trace buffer */
			if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
				++mutex_prof_rejected;
				goto unlock;
			}
			mpp = &mprof_buf[first_free_mprof_buf++];
			mpp->name = mtx_name(m);
			mpp->file = p;
			mpp->line = line;
			mutex_prof_collisions += n - hash;
			++mutex_prof_records;
			/* Insert at the probed slot, not the home slot. */
			mprof_hash[n] = mpp;
		}
		/*
		 * Record if the mutex has been held longer now than ever
		 * before.
		 */
		if ((now - acqtime) > mpp->counter[MPROF_MAX])
			mpp->counter[MPROF_MAX] = now - acqtime;
		mpp->counter[MPROF_TOT] += now - acqtime;
		mpp->counter[MPROF_CNT] += 1;
		mpp->counter[MPROF_AVG] =
		    mpp->counter[MPROF_TOT] / mpp->counter[MPROF_CNT];
unlock:
		mtx_unlock_spin(&mprof_mtx);
	}
out:
#endif
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	_rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	_get_spin_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	mtx_assert(m, MA_OWNED);
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	_rel_spin_lock(m);
}
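/*
 * Usage sketch (illustrative, not part of the original file): code
 * normally reaches the functions above through the mtx_lock() and
 * mtx_unlock() macros rather than calling them directly.  foo_mtx and
 * foo_count are hypothetical names:
 *
 *	static struct mtx foo_mtx;
 *	static int foo_count;
 *
 *	mtx_init(&foo_mtx, "foo counter", MTX_DEF);
 *	...
 *	mtx_lock(&foo_mtx);
 *	foo_count++;			(state protected by foo_mtx)
 *	mtx_unlock(&foo_mtx);
 */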
/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  We do NOT handle recursion here; we assume that
 * if we're called, it's because we know we don't already own this lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);

	rval = _obtain_lock(m, curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval) {
		/*
		 * We do not handle recursion in _mtx_trylock; see the
		 * note at the top of the routine.
		 */
		KASSERT(!mtx_recursed(m),
		    ("mtx_trylock() called on a recursed mutex"));
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	}

	return (rval);
}
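/*
 * Usage sketch (illustrative, not part of the original file): callers go
 * through the mtx_trylock() macro and must handle failure, typically to
 * avoid blocking or a lock order reversal.  foo_mtx is hypothetical:
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		... do the work ...
 *		mtx_unlock(&foo_mtx);
 *	} else {
 *		... lock is held; back off and retry or defer ...
 *	}
 */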
/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td = curthread;

	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, td)) {
		uintptr_t v;
		struct thread *td1;

		mtx_lock_spin(&sched_lock);
		/*
		 * Check if the lock has been released while spinning for
		 * the sched_lock.
		 */
		if ((v = m->mtx_lock) == MTX_UNOWNED) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * The mutex was marked contested on release.  This means that
		 * there are threads blocked on it.
		 */
		if (v == MTX_CONTESTED) {
			td1 = TAILQ_FIRST(&m->mtx_blocked);
			MPASS(td1 != NULL);
			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;

			if (td1->td_priority < td->td_priority)
				td->td_priority = td1->td_priority;
			mtx_unlock_spin(&sched_lock);
			return;
		}

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
		    (void *)(v | MTX_CONTESTED))) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef notyet
		/*
		 * If we're borrowing an interrupted thread's VM context, we
		 * must clean up before going to sleep.
		 */
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
					    "_mtx_lock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif

		/*
		 * Put us on the list of threads blocked on this mutex.
		 */
		if (TAILQ_EMPTY(&m->mtx_blocked)) {
			td1 = mtx_owner(m);
			LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
			TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		} else {
			TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq)
				if (td1->td_priority > td->td_priority)
					break;
			if (td1)
				TAILQ_INSERT_BEFORE(td1, td, td_blkq);
			else
				TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		}

		/*
		 * Save who we're blocked on.
		 */
		td->td_blocked = m;
		td->td_mtxname = m->mtx_object.lo_name;
		td->td_proc->p_stat = SMTX;
		propagate_priority(td);

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
			    m->mtx_object.lo_name);

		td->td_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch();

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
			    td, m, m->mtx_object.lo_name);

		mtx_unlock_spin(&sched_lock);
	}

	return;
}
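/*
 * Illustrative summary (not part of the original file): mtx_lock is a
 * single word that doubles as the owner pointer.  The checks above imply
 * roughly these states:
 *
 *	MTX_UNOWNED			free
 *	MTX_CONTESTED			free, but threads queued on mtx_blocked
 *	(owner)				owned, no waiters
 *	(owner) | MTX_CONTESTED		owned, with queued waiters
 *	(owner) | MTX_RECURSED		owned recursively (mtx_recurse > 0)
 *
 * mtx_owner() recovers the thread pointer as mtx_lock & MTX_FLAGMASK.
 */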
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	for (;;) {
		if (_obtain_lock(m, curthread))
			break;

		/* Give interrupts a chance while we spin. */
		critical_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000)
				continue;
			if (i++ < 60000000)
				DELAY(1);
#ifdef DDB
			else if (!db_active)
#else
			else
#endif
				panic("spin lock %s held by %p for > 5 seconds",
				    m->mtx_object.lo_name, (void *)m->mtx_lock);
		}
		critical_enter();
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td, *td1;
	struct mtx *m1;
	int pri;

	td = curthread;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	mtx_lock_spin(&sched_lock);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

	td1 = TAILQ_FIRST(&m->mtx_blocked);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	MPASS(td1->td_proc->p_magic == P_MAGIC);

	TAILQ_REMOVE(&m->mtx_blocked, td1, td_blkq);

	if (TAILQ_EMPTY(&m->mtx_blocked)) {
		LIST_REMOVE(m, mtx_contested);
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else
		atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);

	pri = PRI_MAX;
	LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority;
		if (cp < pri)
			pri = cp;
	}

	if (pri > td->td_base_pri)
		pri = td->td_base_pri;
	td->td_priority = pri;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
		    m, td1);

	td1->td_blocked = NULL;
	td1->td_proc->p_stat = SRUN;
	setrunqueue(td1);

	if (td->td_critnest == 1 && td1->td_priority < pri) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
					    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		setrunqueue(td);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		td->td_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}

	mtx_unlock_spin(&sched_lock);

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
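/*
 * Usage sketch (illustrative, not part of the original file): mtx_assert()
 * both documents and, on INVARIANTS kernels, enforces a locking protocol.
 * foo_doit() and foo_mtx are hypothetical:
 *
 *	static void
 *	foo_doit(void)
 *	{
 *		mtx_assert(&foo_mtx, MA_OWNED);
 *		... touch state that foo_mtx protects ...
 *	}
 */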
/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, margs->ma_opts);
}

/*
 * Mutex initialization routine; initialize lock `m' with the type and
 * options contained in `opts' and description `description.'
 */
void
mtx_init(struct mtx *m, const char *description, int opts)
{
	struct lock_object *lock;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_SLEEPABLE | MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	lock = &m->mtx_object;
	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
	    ("mutex %s %p already initialized", description, m));
	bzero(m, sizeof(*m));
	if (opts & MTX_SPIN)
		lock->lo_class = &lock_class_mtx_spin;
	else
		lock->lo_class = &lock_class_mtx_sleep;
	lock->lo_name = description;
	if (opts & MTX_QUIET)
		lock->lo_flags = LO_QUIET;
	if (opts & MTX_RECURSE)
		lock->lo_flags |= LO_RECURSABLE;
	if (opts & MTX_SLEEPABLE)
		lock->lo_flags |= LO_SLEEPABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		lock->lo_flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		lock->lo_flags |= LO_DUPOK;

	m->mtx_lock = MTX_UNOWNED;
	TAILQ_INIT(&m->mtx_blocked);

	LOCK_LOG_INIT(lock, opts);

	WITNESS_INIT(lock);
}
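/*
 * Usage sketch (illustrative, not part of the original file; the exact
 * MTX_SYSINIT() argument list lives in sys/mutex.h): a subsystem can have
 * its mutex initialized automatically early in boot via the macro, which
 * funnels into mtx_sysinit() above with the (mtx, desc, opts) tuple:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx, &foo_mtx, "foo lock", MTX_DEF);
 *
 * or it can call mtx_init() directly from its own initialization path:
 *
 *	mtx_init(&foo_mtx, "foo lock", MTX_DEF | MTX_RECURSE);
 */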
/*
 * Destroy lock `m'.  We don't allow MTX_QUIET to be passed in as a flag
 * here because if the corresponding mtx_init() was called with MTX_QUIET
 * set, then it will already be set in the mutex's flags.
 */
void
mtx_destroy(struct mtx *m)
{

	LOCK_LOG_DESTROY(&m->mtx_object, 0);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	WITNESS_DESTROY(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Set up thread0 so that mutexes work. */
	LIST_INIT(&thread0.td_contested);

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE);
	mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_RECURSE);
	mtx_init(&proc0.p_mtx, "process lock", MTX_DEF | MTX_DUPOK);
	mtx_lock(&Giant);
}

/*
 * Encapsulated Giant mutex routines.  These routines provide encapsulation
 * control for the Giant mutex, allowing sysctls to be used to turn Giant
 * on and off around certain subsystems.  The default values for the sysctls
 * are set to what developers believe is stable and working in regards to
 * the Giant pushdown.  Developers should not turn off Giant via these
 * sysctls unless they know what they are doing.
 *
 * Callers of mtx_lock_giant() are expected to pass the return value to an
 * accompanying mtx_unlock_giant() later on.  If multiple subsystems are
 * affected by a Giant wrap, all related sysctl variables must be zero for
 * the subsystem call to operate without Giant (as determined by the caller).
 */

SYSCTL_NODE(_kern, OID_AUTO, giant, CTLFLAG_RD, NULL, "Giant mutex manipulation");

static int kern_giant_all = 0;
SYSCTL_INT(_kern_giant, OID_AUTO, all, CTLFLAG_RW, &kern_giant_all, 0, "");

int kern_giant_proc = 1;	/* Giant around PROC locks */
int kern_giant_file = 1;	/* Giant around struct file & filedesc */
int kern_giant_ucred = 1;	/* Giant around ucred */
SYSCTL_INT(_kern_giant, OID_AUTO, proc, CTLFLAG_RW, &kern_giant_proc, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, file, CTLFLAG_RW, &kern_giant_file, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, ucred, CTLFLAG_RW, &kern_giant_ucred, 0, "");

int
mtx_lock_giant(int sysctlvar)
{
	if (sysctlvar || kern_giant_all) {
		mtx_lock(&Giant);
		return (1);
	}
	return (0);
}

void
mtx_unlock_giant(int s)
{
	if (s)
		mtx_unlock(&Giant);
}
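/*
 * Usage sketch (illustrative, not part of the original file): a subsystem
 * wrapped by one of the kern.giant.* knobs brackets its unlocked path with
 * the pair above, always passing mtx_lock_giant()'s return value back to
 * mtx_unlock_giant():
 *
 *	int s;
 *
 *	s = mtx_lock_giant(kern_giant_proc);
 *	... operate on the Giant-wrapped subsystem ...
 *	mtx_unlock_giant(s);
 */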