kern_lock.c revision 278693
/*-
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_adaptive_lockmgrs.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/kern_lock.c 278693 2015-02-13 18:45:44Z sbruno $");

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_ADAPTIVE | LK_NOSHARE));
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#else
#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
#endif
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif

#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (_i > 0) {							\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

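/*
 * A shared lock can be granted only when the lock is not held exclusively
 * (LK_SHARE is set, which includes the unlocked state) and, in addition,
 * one of the following holds:
 * - there are no exclusive waiters or spinners, so that a steady stream
 *   of readers cannot starve a writer;
 * - the requesting thread already holds shared lockmgr locks and
 *   LK_NODDLKTREAT was not passed;
 * - the requesting thread has TDP_DEADLKTREAT set.
 */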
#define	LK_CAN_SHARE(x, flags)						\
	(((x) & LK_SHARE) &&						\
	(((x) & (LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) == 0 ||	\
	(curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||	\
	(curthread->td_pflags & TDP_DEADLKTREAT)))
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

#define	LK_CAN_ADAPT(lk, f)						\
	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
	((f) & LK_SLEEPFAIL) == 0)

#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)

static void	assert_lockmgr(const struct lock_object *lock, int how);
#ifdef DDB
static void	db_show_lockmgr(const struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_lockmgr(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_lockmgr,
#endif
};

#ifdef ADAPTIVE_LOCKMGRS
static u_int alk_retries = 10;
static u_int alk_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
    "lockmgr debugging");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
#endif

static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
{
	uintptr_t x;

	x = lk->lk_lock;
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}

/*
 * Assumes the sleepqueue chain lock is held on entry and returns with it
 * unheld.  It also assumes the generic interlock is sane and previously
 * checked.  If LK_INTERLOCK is specified, the interlock is not reacquired
 * after the sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
		lk->lk_exslpfail++;
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Decide which sleep primitive matches the requested timeout and
	 * signal-catching behaviour.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}

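/*
 * Wakeup path for shared lock releases.  Note that lk_exslpfail is only
 * ever incremented (in sleeplk() above) and never decremented when a
 * sleeper leaves the queue, so it can overstate the number of
 * LK_SLEEPFAIL waiters still sleeping: for example, a thread sleeping
 * with LK_SLEEPFAIL | PCATCH which is interrupted leaves its increment
 * behind.  The wakeup paths therefore treat the count only as an upper
 * bound and reset it to zero on every pass.
 */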
static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x;
	u_int realexslp;
	int queue, wakeup_swapper;

	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

	wakeup_swapper = 0;
	for (;;) {
		x = lk->lk_lock;

		/*
		 * If there is more than one shared lock held, just drop one
		 * and return.
		 */
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
			    x - LK_ONE_SHARER))
				break;
			continue;
		}

		/*
		 * If there are no waiters at all, drop the lock quickly.
		 */
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
			    LK_SHARERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
				break;
			continue;
		}

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners-up.
		 * If interruptible sleeps left the exclusive queue empty,
		 * avoid starvation of the threads sleeping on the shared
		 * queue by giving them precedence and cleaning up the
		 * exclusive waiters bit anyway.
		 * Please note that the lk_exslpfail count may be lying about
		 * the real number of waiters with the LK_SLEEPFAIL flag on
		 * because they may be used in conjunction with interruptible
		 * sleeps, so lk_exslpfail should be considered an upper
		 * bound, including the edge cases.
		 */
		realexslp = sleepq_sleepcnt(&lk->lock_object,
		    SQ_EXCLUSIVE_QUEUE);
		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
			if (lk->lk_exslpfail < realexslp) {
				lk->lk_exslpfail = 0;
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				lk->lk_exslpfail = 0;
				LOCK_LOG2(lk,
				    "%s: %p has only LK_SLEEPFAIL sleepers",
				    __func__, lk);
				LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
				    __func__, lk);
				wakeup_swapper =
				    sleepq_broadcast(&lk->lock_object,
				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
				queue = SQ_SHARED_QUEUE;
			}

		} else {

			/*
			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
			 * and using interruptible sleeps/timeout may have
			 * left spurious lk_exslpfail counts on, so clean
			 * it up anyway.
			 */
			lk->lk_exslpfail = 0;
			queue = SQ_SHARED_QUEUE;
		}

		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    v)) {
			sleepq_release(&lk->lock_object);
			continue;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		    0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lock_profile_release_lock(&lk->lock_object);
	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	return (wakeup_swapper);
}

"shared" : 333 "exclusive"); 334 wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 335 0, queue); 336 sleepq_release(&lk->lock_object); 337 break; 338 } 339 340 lock_profile_release_lock(&lk->lock_object); 341 TD_LOCKS_DEC(curthread); 342 TD_SLOCKS_DEC(curthread); 343 return (wakeup_swapper); 344} 345 346static void 347assert_lockmgr(const struct lock_object *lock, int what) 348{ 349 350 panic("lockmgr locks do not support assertions"); 351} 352 353static void 354lock_lockmgr(struct lock_object *lock, uintptr_t how) 355{ 356 357 panic("lockmgr locks do not support sleep interlocking"); 358} 359 360static uintptr_t 361unlock_lockmgr(struct lock_object *lock) 362{ 363 364 panic("lockmgr locks do not support sleep interlocking"); 365} 366 367#ifdef KDTRACE_HOOKS 368static int 369owner_lockmgr(const struct lock_object *lock, struct thread **owner) 370{ 371 372 panic("lockmgr locks do not support owner inquiring"); 373} 374#endif 375 376void 377lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags) 378{ 379 int iflags; 380 381 MPASS((flags & ~LK_INIT_MASK) == 0); 382 ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock, 383 ("%s: lockmgr not aligned for %s: %p", __func__, wmesg, 384 &lk->lk_lock)); 385 386 iflags = LO_SLEEPABLE | LO_UPGRADABLE; 387 if (flags & LK_CANRECURSE) 388 iflags |= LO_RECURSABLE; 389 if ((flags & LK_NODUP) == 0) 390 iflags |= LO_DUPOK; 391 if (flags & LK_NOPROFILE) 392 iflags |= LO_NOPROFILE; 393 if ((flags & LK_NOWITNESS) == 0) 394 iflags |= LO_WITNESS; 395 if (flags & LK_QUIET) 396 iflags |= LO_QUIET; 397 if (flags & LK_IS_VNODE) 398 iflags |= LO_IS_VNODE; 399 iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE); 400 401 lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags); 402 lk->lk_lock = LK_UNLOCKED; 403 lk->lk_recurse = 0; 404 lk->lk_exslpfail = 0; 405 lk->lk_timo = timo; 406 lk->lk_pri = pri; 407 STACK_ZERO(lk); 408} 409 410/* 411 * XXX: Gross hacks to manipulate external lock flags after 412 * initialization. Used for certain vnode and buf locks. 
/*
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization.  Used for certain vnode and buf locks.
 */
void
lockallowshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LK_NOSHARE;
}

void
lockdisableshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LK_NOSHARE;
}

void
lockallowrecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LO_RECURSABLE;
}

void
lockdisablerecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
}

void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
	lock_destroy(&lk->lock_object);
}

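/*
 * Full-blown entry point for lock operations; the lockmgr() and
 * lockmgr_args() wrappers from <sys/lockmgr.h> resolve here.  When
 * LK_INTERLOCK is passed, ilk is dropped atomically with respect to the
 * lock operation and is not reacquired, which lets a caller turn an
 * interlock-protected check into a sleepable acquisition without races.
 * A sketch, with a hypothetical mutex "xlock" guarding the state that
 * makes the acquisition necessary:
 *
 *	mtx_lock(&xlock);
 *	... inspect state protected by xlock ...
 *	lockmgr(&lk, LK_EXCLUSIVE | LK_INTERLOCK, &xlock);
 *	... xlock has been released, lk is held exclusively ...
 */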
int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
	GIANT_DECLARE;
	struct lock_class *class;
	const char *iwmesg;
	uintptr_t tid, v, x;
	u_int op, realexslp;
	int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#ifdef ADAPTIVE_LOCKMGRS
	volatile struct thread *owner;
	u_int i, spintries = 0;
#endif

	error = 0;
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
	    lk->lock_object.lo_name, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	if (panicstr != NULL) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		return (0);
	}

	if (lk->lock_object.lo_flags & LK_NOSHARE) {
		switch (op) {
		case LK_SHARED:
			op = LK_EXCLUSIVE;
			break;
		case LK_UPGRADE:
		case LK_TRYUPGRADE:
		case LK_DOWNGRADE:
			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
			    file, line);
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			return (0);
		}
	}

	wakeup_swapper = 0;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, flags & LK_INTERLOCK ? ilk : NULL);
		for (;;) {
			x = lk->lk_lock;

			/*
			 * If no other thread has an exclusive lock and no
			 * exclusive waiter is present, bump the count of
			 * sharers.  Since we have to preserve the state of
			 * waiters, if we fail to acquire the shared lock
			 * loop back and retry.
			 */
			if (LK_CAN_SHARE(x, flags)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x + LK_ONE_SHARER))
					break;
				continue;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is already held by curthread in
			 * exclusive mode, avoid a deadlock.
			 */
			if (LK_HOLDER(x) == tid) {
				LOCK_LOG2(lk,
				    "%s: %p already held in exclusive mode",
				    __func__, lk);
				error = EDEADLK;
				break;
			}

			/*
			 * If the operation is not expected to sleep, just
			 * give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.  We need a double-state handle here
			 * because for a failed acquisition the lock can be
			 * either held in exclusive mode or shared mode
			 * (for the writer starvation avoidance technique).
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					x = lk->lk_lock;
					if ((x & LK_SHARE) == 0 ||
					    LK_CAN_SHARE(x, flags) != 0)
						break;
					cpu_spinwait();
				}
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock can be acquired in shared mode, try
			 * again.
			 */
			if (LK_CAN_SHARE(x, flags)) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and
			 * try again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
			 * loop back and retry.
			 */
			if ((x & LK_SHARED_WAITERS) == 0) {
				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x | LK_SHARED_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the shared
			 * lock and the shared waiters flag is set, we will
			 * sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_SHARED_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
			    line);
			TD_LOCKS_INC(curthread);
			TD_SLOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_UPGRADE:
	case LK_TRYUPGRADE:
		_lockmgr_assert(lk, KA_SLOCKED, file, line);
		v = lk->lk_lock;
		x = v & LK_ALL_WAITERS;
		v &= LK_EXCLUSIVE_SPINNERS;

		/*
		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve waiters flags during the operation.
		 */
		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
		    tid | x)) {
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_SLOCKS_DEC(curthread);
			break;
		}

		/*
		 * In LK_TRYUPGRADE mode, do not drop the lock,
		 * returning EBUSY instead.
		 */
		if (op == LK_TRYUPGRADE) {
			LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
			    __func__, lk);
			error = EBUSY;
			break;
		}

		/*
		 * We have been unable to succeed in upgrading, so just
		 * give up the shared lock.
		 */
		wakeup_swapper |= wakeupshlk(lk, file, line);

		/* FALLTHROUGH */
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
			    ilk : NULL);

		/*
		 * If curthread already holds the lock and this one is
		 * allowed to recurse, simply recurse on it.
		 */
		if (lockmgr_xlocked(lk)) {
			if ((flags & LK_CANRECURSE) == 0 &&
			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {

				/*
				 * If this is a try operation, avoid the
				 * panic and just give up and return.
				 */
				if (LK_TRYOP(flags)) {
					LOCK_LOG2(lk,
					    "%s: %p fails the try operation",
					    __func__, lk);
					error = EBUSY;
					break;
				}
				if (flags & LK_INTERLOCK)
					class->lc_unlock(ilk);
		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
				    __func__, iwmesg, file, line);
			}
			lk->lk_recurse++;
			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			break;
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
		    tid)) {
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the operation is not expected to sleep, just
			 * give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.
			 */
			x = lk->lk_lock;
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
				    !atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_SPINNERS))
					continue;
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					if ((lk->lk_lock &
					    LK_EXCLUSIVE_SPINNERS) == 0)
						break;
					cpu_spinwait();
				}
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and
			 * try again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * The lock can be in the state where there is a
			 * pending queue of waiters, but still no owner.
			 * This happens when the lock is contested and an
			 * owner is going to claim the lock.
			 * If curthread is the one successfully acquiring it,
			 * claim lock ownership and return, preserving waiters
			 * flags.
			 */
			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v &= ~LK_EXCLUSIVE_SPINNERS;
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    tid | v)) {
					sleepq_release(&lk->lock_object);
					LOCK_LOG2(lk,
					    "%s: %p claimed by a new writer",
					    __func__, lk);
					break;
				}
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_EXCLUSIVE_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_DOWNGRADE:
		_lockmgr_assert(lk, KA_XLOCKED, file, line);
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);

		/*
		 * Panic if the lock is recursed.
		 */
		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}
		TD_SLOCKS_INC(curthread);

		/*
		 * In order to preserve waiters flags, just spin.
		 */
		for (;;) {
			x = lk->lk_lock;
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			x &= LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))
				break;
			cpu_spinwait();
		}
		break;
	case LK_RELEASE:
		_lockmgr_assert(lk, KA_LOCKED, file, line);
		x = lk->lk_lock;

		if ((x & LK_SHARE) == 0) {

			/*
			 * As a first option, treat the lock as if it had no
			 * waiters.
			 * Fix up the tid var if the lock has been disowned.
			 */
			if (LK_HOLDER(x) == LK_KERNPROC)
				tid = LK_KERNPROC;
			else {
				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
				    file, line);
				TD_LOCKS_DEC(curthread);
			}
			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);

			/*
			 * The lock is held in exclusive mode.
			 * If the lock is recursed also, then unrecurse it.
			 */
			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
				    lk);
				lk->lk_recurse--;
				break;
			}
			if (tid != LK_KERNPROC)
				lock_profile_release_lock(&lk->lock_object);

			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
			    LK_UNLOCKED))
				break;

			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;
			v = LK_UNLOCKED;

			/*
			 * If the lock has exclusive waiters, give them
			 * preference in order to avoid deadlock with
			 * shared runners-up.
			 * If interruptible sleeps left the exclusive queue
			 * empty, avoid starvation of the threads sleeping
			 * on the shared queue by giving them precedence
			 * and cleaning up the exclusive waiters bit anyway.
			 * Please note that the lk_exslpfail count may be
			 * lying about the real number of waiters with the
			 * LK_SLEEPFAIL flag on because they may be used in
			 * conjunction with interruptible sleeps, so
			 * lk_exslpfail should be considered an upper
			 * bound, including the edge cases.
			 */
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			realexslp = sleepq_sleepcnt(&lk->lock_object,
			    SQ_EXCLUSIVE_QUEUE);
			if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
				if (lk->lk_exslpfail < realexslp) {
					lk->lk_exslpfail = 0;
					queue = SQ_EXCLUSIVE_QUEUE;
					v |= (x & LK_SHARED_WAITERS);
				} else {
					lk->lk_exslpfail = 0;
					LOCK_LOG2(lk,
				    "%s: %p has only LK_SLEEPFAIL sleepers",
					    __func__, lk);
					LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
					    __func__, lk);
					wakeup_swapper =
					    sleepq_broadcast(&lk->lock_object,
					    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
					queue = SQ_SHARED_QUEUE;
				}
			} else {

				/*
				 * Exclusive waiters sleeping with LK_SLEEPFAIL
				 * on and using interruptible sleeps/timeout
				 * may have left spurious lk_exslpfail counts
				 * on, so clean it up anyway.
				 */
				lk->lk_exslpfail = 0;
				queue = SQ_SHARED_QUEUE;
			}

			LOCK_LOG3(lk,
			    "%s: %p waking up threads on the %s queue",
			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
			    "exclusive");
			atomic_store_rel_ptr(&lk->lk_lock, v);
			wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
			    SLEEPQ_LK, 0, queue);
			sleepq_release(&lk->lock_object);
			break;
		} else
			wakeup_swapper = wakeupshlk(lk, file, line);
		break;
	case LK_DRAIN:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
			    ilk : NULL);

		/*
		 * Trying to drain a lock we already own will result in a
		 * deadlock.
		 */
		if (lockmgr_xlocked(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: draining %s with the lock held @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the operation is not expected to sleep, just
			 * give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v = (x & ~LK_EXCLUSIVE_SPINNERS);

				/*
				 * If interruptible sleeps left the exclusive
				 * queue empty, avoid starvation of the
				 * threads sleeping on the shared queue by
				 * giving them precedence and cleaning up the
				 * exclusive waiters bit anyway.
				 * Please note that the lk_exslpfail count
				 * may be lying about the real number of
				 * waiters with the LK_SLEEPFAIL flag on
				 * because they may be used in conjunction
				 * with interruptible sleeps, so lk_exslpfail
				 * should be considered an upper bound,
				 * including the edge cases.
				 */
				if (v & LK_EXCLUSIVE_WAITERS) {
					queue = SQ_EXCLUSIVE_QUEUE;
					v &= ~LK_EXCLUSIVE_WAITERS;
				} else {

					/*
					 * Exclusive waiters sleeping with
					 * LK_SLEEPFAIL on and using
					 * interruptible sleeps/timeout may
					 * have left spurious lk_exslpfail
					 * counts on, so clean it up anyway.
					 */
					MPASS(v & LK_SHARED_WAITERS);
					lk->lk_exslpfail = 0;
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
				}
				if (queue == SQ_EXCLUSIVE_QUEUE) {
					realexslp =
					    sleepq_sleepcnt(&lk->lock_object,
					    SQ_EXCLUSIVE_QUEUE);
					if (lk->lk_exslpfail >= realexslp) {
						lk->lk_exslpfail = 0;
						queue = SQ_SHARED_QUEUE;
						v &= ~LK_SHARED_WAITERS;
						if (realexslp != 0) {
							LOCK_LOG2(lk,
					"%s: %p has only LK_SLEEPFAIL sleepers",
							    __func__, lk);
							LOCK_LOG2(lk,
			"%s: %p waking up threads on the exclusive queue",
							    __func__, lk);
							wakeup_swapper =
							    sleepq_broadcast(
							    &lk->lock_object,
							    SLEEPQ_LK, 0,
							    SQ_EXCLUSIVE_QUEUE);
						}
					} else
						lk->lk_exslpfail = 0;
				}
				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG3(lk,
				"%s: %p waking up all threads on the %s queue",
				    __func__, lk, queue == SQ_SHARED_QUEUE ?
				    "shared" : "exclusive");
				wakeup_swapper |= sleepq_broadcast(
				    &lk->lock_object, SLEEPQ_LK, 0, queue);

				/*
				 * If shared waiters have been woken up, we
				 * need to wait for one of them to acquire
				 * the lock before setting the exclusive
				 * waiters flag, in order to avoid a deadlock.
				 */
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);
					    v = lk->lk_lock)
						cpu_spinwait();
				}
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			}
			GIANT_SAVE();
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			GIANT_RESTORE();
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}

		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	default:
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
	}

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (wakeup_swapper)
		kick_proc0();

	return (error);
}

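/*
 * Pass ownership of an exclusively held lock to LK_KERNPROC, so that no
 * specific thread owns it and any thread may later release it.  This is
 * the mechanism behind, for instance, BUF_KERNPROC(), which hands a
 * buffer lock over to the kernel once an async write has been issued.
 */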
void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
	uintptr_t tid, x;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED, file, line);

	/*
	 * Panic if the lock is recursed.
	 */
	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
		panic("%s: disown a recursed lockmgr @ %s:%d\n",
		    __func__, file, line);

	/*
	 * If the owner is already LK_KERNPROC just skip the whole operation.
	 */
	if (LK_HOLDER(lk->lk_lock) != tid)
		return;
	lock_profile_release_lock(&lk->lock_object);
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);
	STACK_SAVE(lk);

	/*
	 * In order to preserve waiters flags, just spin.
	 */
	for (;;) {
		x = lk->lk_lock;
		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
		x &= LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
		    LK_KERNPROC | x))
			return;
		cpu_spinwait();
	}
}

void
lockmgr_printinfo(const struct lock *lk)
{
	struct thread *td;
	uintptr_t x;

	if (lk->lk_lock == LK_UNLOCKED)
		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
	else if (lk->lk_lock & LK_SHARE)
		printf("lock type %s: SHARED (count %ju)\n",
		    lk->lock_object.lo_name,
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			printf("lock type %s: EXCL by KERNPROC\n",
			    lk->lock_object.lo_name);
		else
			printf("lock type %s: EXCL by thread %p "
			    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
			    td, td->td_proc->p_pid, td->td_proc->p_comm,
			    td->td_tid);
	}

	x = lk->lk_lock;
	if (x & LK_EXCLUSIVE_WAITERS)
		printf(" with exclusive waiters pending\n");
	if (x & LK_SHARED_WAITERS)
		printf(" with shared waiters pending\n");
	if (x & LK_EXCLUSIVE_SPINNERS)
		printf(" with exclusive spinners pending\n");

	STACK_PRINT(lk);
}

int
lockstatus(const struct lock *lk)
{
	uintptr_t v, x;
	int ret;

	ret = LK_SHARED;
	x = lk->lk_lock;
	v = LK_HOLDER(x);

	if ((x & LK_SHARE) == 0) {
		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
			ret = LK_EXCLUSIVE;
		else
			ret = LK_EXCLOTHER;
	} else if (x == LK_UNLOCKED)
		ret = 0;

	return (ret);
}

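/*
 * A sketch of interpreting the lockstatus() return value (the "lk"
 * variable is illustrative):
 *
 *	switch (lockstatus(&lk)) {
 *	case LK_EXCLUSIVE:
 *		... held exclusively by curthread, or disowned to
 *		    LK_KERNPROC ...
 *	case LK_EXCLOTHER:
 *		... held exclusively by another thread ...
 *	case LK_SHARED:
 *		... held in shared mode ...
 *	case 0:
 *		... unlocked ...
 *	}
 */
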
"share" : "", 1428 file, line); 1429 1430 if ((lk->lk_lock & LK_SHARE) == 0) { 1431 if (lockmgr_recursed(lk)) { 1432 if (what & KA_NOTRECURSED) 1433 panic("Lock %s recursed @ %s:%d\n", 1434 lk->lock_object.lo_name, file, 1435 line); 1436 } else if (what & KA_RECURSED) 1437 panic("Lock %s not recursed @ %s:%d\n", 1438 lk->lock_object.lo_name, file, line); 1439 } 1440 break; 1441 case KA_XLOCKED: 1442 case KA_XLOCKED | KA_NOTRECURSED: 1443 case KA_XLOCKED | KA_RECURSED: 1444 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)) 1445 panic("Lock %s not exclusively locked @ %s:%d\n", 1446 lk->lock_object.lo_name, file, line); 1447 if (lockmgr_recursed(lk)) { 1448 if (what & KA_NOTRECURSED) 1449 panic("Lock %s recursed @ %s:%d\n", 1450 lk->lock_object.lo_name, file, line); 1451 } else if (what & KA_RECURSED) 1452 panic("Lock %s not recursed @ %s:%d\n", 1453 lk->lock_object.lo_name, file, line); 1454 break; 1455 case KA_UNLOCKED: 1456 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk)) 1457 panic("Lock %s exclusively locked @ %s:%d\n", 1458 lk->lock_object.lo_name, file, line); 1459 break; 1460 default: 1461 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file, 1462 line); 1463 } 1464} 1465#endif 1466 1467#ifdef DDB 1468int 1469lockmgr_chain(struct thread *td, struct thread **ownerp) 1470{ 1471 struct lock *lk; 1472 1473 lk = td->td_wchan; 1474 1475 if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr) 1476 return (0); 1477 db_printf("blocked on lockmgr %s", lk->lock_object.lo_name); 1478 if (lk->lk_lock & LK_SHARE) 1479 db_printf("SHARED (count %ju)\n", 1480 (uintmax_t)LK_SHARERS(lk->lk_lock)); 1481 else 1482 db_printf("EXCL\n"); 1483 *ownerp = lockmgr_xholder(lk); 1484 1485 return (1); 1486} 1487 1488static void 1489db_show_lockmgr(const struct lock_object *lock) 1490{ 1491 struct thread *td; 1492 const struct lock *lk; 1493 1494 lk = (const struct lock *)lock; 1495 1496 db_printf(" state: "); 1497 if (lk->lk_lock == LK_UNLOCKED) 1498 db_printf("UNLOCKED\n"); 1499 else if (lk->lk_lock & LK_SHARE) 1500 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock)); 1501 else { 1502 td = lockmgr_xholder(lk); 1503 if (td == (struct thread *)LK_KERNPROC) 1504 db_printf("XLOCK: LK_KERNPROC\n"); 1505 else 1506 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td, 1507 td->td_tid, td->td_proc->p_pid, 1508 td->td_proc->p_comm); 1509 if (lockmgr_recursed(lk)) 1510 db_printf(" recursed: %d\n", lk->lk_recurse); 1511 } 1512 db_printf(" waiters: "); 1513 switch (lk->lk_lock & LK_ALL_WAITERS) { 1514 case LK_SHARED_WAITERS: 1515 db_printf("shared\n"); 1516 break; 1517 case LK_EXCLUSIVE_WAITERS: 1518 db_printf("exclusive\n"); 1519 break; 1520 case LK_ALL_WAITERS: 1521 db_printf("shared and exclusive\n"); 1522 break; 1523 default: 1524 db_printf("none\n"); 1525 } 1526 db_printf(" spinners: "); 1527 if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS) 1528 db_printf("exclusive\n"); 1529 else 1530 db_printf("none\n"); 1531} 1532#endif 1533