kern_umtx.c revision 139292
1/* 2 * Copyright (c) 2004, David Xu <davidxu@freebsd.org> 3 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice unmodified, this list of conditions, and the following 11 * disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_umtx.c 139292 2004-12-25 13:02:50Z davidxu $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/eventhandler.h>
#include <sys/thr.h>
#include <sys/umtx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

/* Key types: private keys are (umtx address, pid); shared keys are
 * (backing VM object, offset) so different mappings of the same page
 * hash to the same wait channel. */
#define UMTX_PRIVATE	0
#define UMTX_SHARED	1

#define UMTX_STATIC_SHARED

/*
 * A umtx_key identifies the wait channel for a user mutex.  The
 * "both" arm of the union overlays "shared" and "private" so that
 * hashing and comparison can be done uniformly on (ptr, word)
 * without knowing the key type.
 */
struct umtx_key {
	int	type;
	union {
		struct {
			vm_object_t	object;
			long		offset;
		} shared;
		struct {
			struct umtx	*umtx;
			long		pid;
		} private;
		struct {
			void		*ptr;
			long		word;
		} both;
	} info;
};

/* Per-waiter record; lives on the sleeping thread's stack. */
struct umtx_q {
	LIST_ENTRY(umtx_q)	uq_next;	/* Linked list for the hash. */
	struct umtx_key		uq_key;		/* Umtx key. */
	struct thread		*uq_thread;	/* The thread waits on. */
	LIST_ENTRY(umtx_q)	uq_rqnext;	/* Linked list for requeuing. */
	vm_offset_t		uq_addr;	/* Umtx's virtual address. */
};

LIST_HEAD(umtx_head, umtx_q);
/* One hash chain: a mutex, a list of waiters, and a busy/want flag
 * pair used to serialize multi-step operations on the chain. */
struct umtxq_chain {
	struct mtx		uc_lock;	/* Lock for this chain. */
	struct umtx_head	uc_queue;	/* List of sleep queues. */
#define	UCF_BUSY		0x01
#define	UCF_WANT		0x02
	int			uc_flags;
};

#define	GOLDEN_RATIO_PRIME	2654404609U
#define	UMTX_CHAINS		128
#define	UMTX_SHIFTS		(__WORD_BIT - 7)

static struct umtxq_chain umtxq_chains[UMTX_CHAINS];
static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");

static void umtxq_init_chains(void *);
static int umtxq_hash(struct umtx_key *key);
static struct mtx *umtxq_mtx(int chain);
static void umtxq_lock(struct umtx_key *key);
static void umtxq_unlock(struct umtx_key *key);
static void umtxq_busy(struct umtx_key *key);
static void umtxq_unbusy(struct umtx_key *key);
static void umtxq_insert(struct umtx_q *uq);
static void umtxq_remove(struct umtx_q *uq);
static int umtxq_sleep(struct thread *td, struct umtx_key *key,
	int prio, const char *wmesg, int timo);
static int umtxq_count(struct umtx_key *key);
static int umtxq_signal(struct umtx_key *key, int nr_wakeup);
#ifdef UMTX_DYNAMIC_SHARED
static void fork_handler(void *arg, struct proc *p1, struct proc *p2,
	int flags);
#endif
static int umtx_key_match(const struct umtx_key *k1, const struct umtx_key *k2);
static int umtx_key_get(struct thread *td, struct umtx *umtx,
	struct umtx_key *key);
static void umtx_key_release(struct umtx_key *key);

SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_init_chains, NULL);

/*
 * Boot-time initialization (via SYSINIT): set up every chain's mutex
 * and waiter list.  MTX_DUPOK is needed because all chain locks share
 * one name and lock order witness would otherwise complain when two
 * chains are held at once.
 */
static void
umtxq_init_chains(void *arg __unused)
{
	int i;

	for (i = 0; i < UMTX_CHAINS; ++i) {
		mtx_init(&umtxq_chains[i].uc_lock, "umtxq_lock", NULL,
			 MTX_DEF | MTX_DUPOK);
		LIST_INIT(&umtxq_chains[i].uc_queue);
		umtxq_chains[i].uc_flags = 0;
	}
#ifdef UMTX_DYNAMIC_SHARED
	EVENTHANDLER_REGISTER(process_fork, fork_handler, 0, 10000);
#endif
}

/*
 * Map a key to a chain index with a multiplicative hash over the
 * type-independent (ptr, word) view of the key.
 */
static inline int
umtxq_hash(struct umtx_key *key)
{
	unsigned n = (uintptr_t)key->info.both.ptr + key->info.both.word;
	return (((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS);
}

/*
 * Keys are equal when the type and both overlay words match; this
 * covers shared (object, offset) and private (umtx, pid) keys alike.
 */
static inline int
umtx_key_match(const struct umtx_key *k1, const struct umtx_key *k2)
{
	return (k1->type == k2->type &&
		k1->info.both.ptr == k2->info.both.ptr &&
		k1->info.both.word == k2->info.both.word);
}

/* Return the mutex protecting the given chain. */
static inline struct mtx *
umtxq_mtx(int chain)
{
	return (&umtxq_chains[chain].uc_lock);
}

/*
 * Acquire the chain's BUSY flag, sleeping until the current holder
 * clears it.  The chain mutex must be held on entry; msleep() drops
 * and reacquires it while waiting, hence the re-check loop.
 */
static inline void
umtxq_busy(struct umtx_key *key)
{
	int chain = umtxq_hash(key);

	mtx_assert(umtxq_mtx(chain), MA_OWNED);
	while (umtxq_chains[chain].uc_flags & UCF_BUSY) {
		umtxq_chains[chain].uc_flags |= UCF_WANT;
		msleep(&umtxq_chains[chain], umtxq_mtx(chain),
		       curthread->td_priority, "umtxq_busy", 0);
	}
	umtxq_chains[chain].uc_flags |= UCF_BUSY;
}

/*
 * Release the chain's BUSY flag and wake anyone who set UCF_WANT
 * while waiting for it.  The chain mutex must be held.
 */
static inline void
umtxq_unbusy(struct umtx_key *key)
{
	int chain = umtxq_hash(key);

	mtx_assert(umtxq_mtx(chain), MA_OWNED);
	KASSERT(umtxq_chains[chain].uc_flags & UCF_BUSY, ("not busy"));
	umtxq_chains[chain].uc_flags &= ~UCF_BUSY;
	if (umtxq_chains[chain].uc_flags & UCF_WANT) {
		umtxq_chains[chain].uc_flags &= ~UCF_WANT;
		wakeup(&umtxq_chains[chain]);
	}
}

/* Lock the chain mutex covering the given key. */
static inline void
umtxq_lock(struct umtx_key *key)
{
	int chain = umtxq_hash(key);
	mtx_lock(umtxq_mtx(chain));
}

/* Unlock the chain mutex covering the given key. */
static inline void
umtxq_unlock(struct umtx_key *key)
{
	int chain = umtxq_hash(key);
	mtx_unlock(umtxq_mtx(chain));
}

/*
 * Insert a thread onto the umtx queue.
 *
 * The chain mutex must be held; TDF_UMTXQ is set under sched_lock
 * so the flag change is atomic with respect to the scheduler.
 */
static inline void
umtxq_insert(struct umtx_q *uq)
{
	struct umtx_head *head;
	int chain = umtxq_hash(&uq->uq_key);

	mtx_assert(umtxq_mtx(chain), MA_OWNED);
	head = &umtxq_chains[chain].uc_queue;
	LIST_INSERT_HEAD(head, uq, uq_next);
	uq->uq_thread->td_umtxq = uq;
	mtx_lock_spin(&sched_lock);
	uq->uq_thread->td_flags |= TDF_UMTXQ;
	mtx_unlock_spin(&sched_lock);
}

/*
 * Remove thread from the umtx queue.  Safe to call even if the
 * thread was already dequeued by a waker (TDF_UMTXQ is the guard).
 */
static inline void
umtxq_remove(struct umtx_q *uq)
{
	mtx_assert(umtxq_mtx(umtxq_hash(&uq->uq_key)), MA_OWNED);
	if (uq->uq_thread->td_flags & TDF_UMTXQ) {
		LIST_REMOVE(uq, uq_next);
		uq->uq_thread->td_umtxq = NULL;
		/* turning off TDF_UMTXQ should be the last thing. */
		mtx_lock_spin(&sched_lock);
		uq->uq_thread->td_flags &= ~TDF_UMTXQ;
		mtx_unlock_spin(&sched_lock);
	}
}

/*
 * Count waiters matching the key, stopping at 2 — callers (do_unlock)
 * only need to distinguish "zero or one waiter" from "more than one".
 * The chain mutex must be held.
 */
static int
umtxq_count(struct umtx_key *key)
{
	struct umtx_q *uq;
	struct umtx_head *head;
	int chain, count = 0;

	chain = umtxq_hash(key);
	mtx_assert(umtxq_mtx(chain), MA_OWNED);
	head = &umtxq_chains[chain].uc_queue;
	LIST_FOREACH(uq, head, uq_next) {
		if (umtx_key_match(&uq->uq_key, key)) {
			if (++count > 1)
				break;
		}
	}
	return (count);
}

/*
 * Dequeue and wake up to n_wake threads waiting on the key; returns
 * the number woken.  Note that n_wake == 0 still wakes one thread:
 * the first match makes ++ret == 1 >= 0 and breaks the loop.
 * The chain mutex must be held.
 */
static int
umtxq_signal(struct umtx_key *key, int n_wake)
{
	struct umtx_q *uq, *next;
	struct umtx_head *head;
	struct thread *blocked = NULL;
	int chain, ret;

	ret = 0;
	chain = umtxq_hash(key);
	mtx_assert(umtxq_mtx(chain), MA_OWNED);
	head = &umtxq_chains[chain].uc_queue;
	for (uq = LIST_FIRST(head); uq; uq = next) {
		next = LIST_NEXT(uq, uq_next);
		if (umtx_key_match(&uq->uq_key, key)) {
			blocked = uq->uq_thread;
			umtxq_remove(uq);
			wakeup(blocked);
			if (++ret >= n_wake)
				break;
		}
	}
	return (ret);
}

/*
 * Sleep on the key's chain mutex; the mutex is dropped while asleep
 * and reacquired before return (standard msleep semantics).
 */
static inline int
umtxq_sleep(struct thread *td, struct umtx_key *key, int priority,
	const char *wmesg, int timo)
{
	int chain = umtxq_hash(key);

	return (msleep(td, umtxq_mtx(chain), priority, wmesg, timo));
}

/*
 * Build the wait-channel key for a user umtx address.  For shared
 * mappings the key is (VM object, offset) so that every process
 * mapping the page waits on the same channel; otherwise the key is
 * the (address, pid) pair, private to the process.  Shared keys take
 * a reference on the object, released by umtx_key_release().
 * Returns 0 or EFAULT if the address is not mapped writable.
 */
static int
umtx_key_get(struct thread *td, struct umtx *umtx, struct umtx_key *key)
{
#if defined(UMTX_DYNAMIC_SHARED) || defined(UMTX_STATIC_SHARED)
	vm_map_t map;
	vm_map_entry_t entry;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;

	map = &td->td_proc->p_vmspace->vm_map;
	if (vm_map_lookup(&map, (vm_offset_t)umtx, VM_PROT_WRITE,
	    &entry, &key->info.shared.object, &pindex, &prot,
	    &wired) != KERN_SUCCESS) {
		return EFAULT;
	}
#endif

#if defined(UMTX_DYNAMIC_SHARED)
	key->type = UMTX_SHARED;
	key->info.shared.offset = entry->offset + entry->start -
		(vm_offset_t)umtx;
	/*
	 * Add object reference, if we don't do this, a buggy application
	 * deallocates the object, the object will be reused by other
	 * applications, then unlock will wake wrong thread.
	 */
	vm_object_reference(key->info.shared.object);
	vm_map_lookup_done(map, entry);
#elif defined(UMTX_STATIC_SHARED)
	if (VM_INHERIT_SHARE == entry->inheritance) {
		key->type = UMTX_SHARED;
		key->info.shared.offset = entry->offset + entry->start -
			(vm_offset_t)umtx;
		vm_object_reference(key->info.shared.object);
	} else {
		key->type = UMTX_PRIVATE;
		key->info.private.umtx = umtx;
		key->info.private.pid = td->td_proc->p_pid;
	}
	vm_map_lookup_done(map, entry);
#else
	key->type = UMTX_PRIVATE;
	key->info.private.umtx = umtx;
	key->info.private.pid = td->td_proc->p_pid;
#endif
	return (0);
}

/* Drop the VM object reference taken by umtx_key_get() for shared keys. */
static inline void
umtx_key_release(struct umtx_key *key)
{
	if (key->type == UMTX_SHARED)
		vm_object_deallocate(key->info.shared.object);
}

/*
 * Resolve the key for 'umtx' and enqueue the thread on its chain.
 * On success the caller owns a key reference (release with
 * umtx_key_release) and the thread is on the queue with TDF_UMTXQ set.
 */
static inline int
umtxq_queue_me(struct thread *td, struct umtx *umtx, struct umtx_q *uq)
{
	int error;

	if ((error = umtx_key_get(td, umtx, &uq->uq_key)) != 0)
		return (error);

	uq->uq_addr = (vm_offset_t)umtx;
	uq->uq_thread = td;
	umtxq_lock(&uq->uq_key);
	/* hmm, for condition variable, we don't need busy flag. */
	umtxq_busy(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unbusy(&uq->uq_key);
	umtxq_unlock(&uq->uq_key);
	return (0);
}

#if defined(UMTX_DYNAMIC_SHARED)
/*
 * process_fork event handler (dynamic-shared mode only): after a
 * fork, re-key any of the parent's waiters whose backing object
 * changed (e.g. due to copy-on-write), moving them to the chain of
 * the new (object, offset) key so wakeups still find them.
 */
static void
fork_handler(void *arg, struct proc *p1, struct proc *p2, int flags)
{
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	struct umtx_key key;
	LIST_HEAD(, umtx_q) workq;
	struct umtx_q *uq;
	struct thread *td;
	int onq;

	LIST_INIT(&workq);

	/* Collect threads waiting on umtxq */
	PROC_LOCK(p1);
	FOREACH_THREAD_IN_PROC(p1, td) {
		if (td->td_flags & TDF_UMTXQ) {
			uq = td->td_umtxq;
			if (uq)
				LIST_INSERT_HEAD(&workq, uq, uq_rqnext);
		}
	}
	PROC_UNLOCK(p1);

	LIST_FOREACH(uq, &workq, uq_rqnext) {
		map = &p1->p_vmspace->vm_map;
		if (vm_map_lookup(&map, uq->uq_addr, VM_PROT_WRITE,
		    &entry, &object, &pindex, &prot, &wired) != KERN_SUCCESS) {
			continue;
		}
		key.type = UMTX_SHARED;
		key.info.shared.object = object;
		key.info.shared.offset = entry->offset + entry->start -
			uq->uq_addr;
		if (umtx_key_match(&key, &uq->uq_key)) {
			/* Key unchanged; nothing to migrate. */
			vm_map_lookup_done(map, entry);
			continue;
		}

		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		if (uq->uq_thread->td_flags & TDF_UMTXQ) {
			umtxq_remove(uq);
			onq = 1;
		} else
			onq = 0;
		umtxq_unbusy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);
		if (onq) {
			/* Swap the old key's object ref for the new one. */
			vm_object_deallocate(uq->uq_key.info.shared.object);
			uq->uq_key = key;
			umtxq_lock(&uq->uq_key);
			umtxq_busy(&uq->uq_key);
			umtxq_insert(uq);
			umtxq_unbusy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);
			vm_object_reference(uq->uq_key.info.shared.object);
		}
		vm_map_lookup_done(map, entry);
	}
}
#endif

/*
 * Acquire the userland mutex for 'id', sleeping up to 'timo' ticks
 * per attempt (0 = forever).  The fast path compare-and-swaps
 * UMTX_UNOWNED -> id in user memory; on contention the thread sets
 * the contested bit and sleeps until the owner's unlock syscall
 * wakes it.  Returns 0, EFAULT, ETIMEDOUT/EWOULDBLOCK from the
 * sleep, or a signal-related error.
 */
static int
_do_lock(struct thread *td, struct umtx *umtx, long id, int timo)
{
	struct umtx_q uq;
	intptr_t owner;
	intptr_t old;
	int error = 0;

	/*
	 * Care must be exercised when dealing with umtx structure.  It
	 * can fault on any access.
	 */

	for (;;) {
		/*
		 * Try the uncontested case.  This should be done in userland.
		 */
		owner = casuptr((intptr_t *)&umtx->u_owner,
		    UMTX_UNOWNED, id);

		/* The acquire succeeded. */
		if (owner == UMTX_UNOWNED)
			return (0);

		/* The address was invalid. */
		if (owner == -1)
			return (EFAULT);

		/* If no one owns it but it is contested try to acquire it. */
		if (owner == UMTX_CONTESTED) {
			owner = casuptr((intptr_t *)&umtx->u_owner,
			    UMTX_CONTESTED, id | UMTX_CONTESTED);

			if (owner == UMTX_CONTESTED)
				return (0);

			/* The address was invalid. */
			if (owner == -1)
				return (EFAULT);

			/* If this failed the lock has changed, restart. */
			continue;
		}

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error || (error = umtxq_queue_me(td, umtx, &uq)) != 0)
			return (error);

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock.  If this fails
		 * either some one else has acquired the lock or it has been
		 * released.
		 */
		old = casuptr((intptr_t *)&umtx->u_owner, owner,
		    owner | UMTX_CONTESTED);

		/* The address was invalid. */
		if (old == -1) {
			umtxq_lock(&uq.uq_key);
			umtxq_busy(&uq.uq_key);
			umtxq_remove(&uq);
			umtxq_unbusy(&uq.uq_key);
			umtxq_unlock(&uq.uq_key);
			umtx_key_release(&uq.uq_key);
			return (EFAULT);
		}

		/*
		 * We set the contested bit, sleep.  Otherwise the lock changed
		 * and we need to retry or we lost a race to the thread
		 * unlocking the umtx.
		 */
		umtxq_lock(&uq.uq_key);
		if (old == owner && (td->td_flags & TDF_UMTXQ)) {
			error = umtxq_sleep(td, &uq.uq_key,
			    td->td_priority | PCATCH,
			    "umtx", timo);
		}
		umtxq_busy(&uq.uq_key);
		umtxq_remove(&uq);
		umtxq_unbusy(&uq.uq_key);
		umtxq_unlock(&uq.uq_key);
		umtx_key_release(&uq.uq_key);
	}

	return (0);
}

/*
 * Lock with an optional absolute timeout: convert 'abstime' to a
 * relative tick count on each retry, re-attempting _do_lock() until
 * success, a hard error, or the deadline passes (EWOULDBLOCK).
 * ERESTART is mapped to EINTR so userland can back off the critical
 * region cleanly.
 */
static int
do_lock(struct thread *td, struct umtx *umtx, long id,
	struct timespec *abstime)
{
	struct timespec ts1, ts2;
	struct timeval tv;
	int timo, error;

	if (abstime == NULL) {
		error = _do_lock(td, umtx, id, 0);
	} else {
		for (;;) {
			ts1 = *abstime;
			getnanotime(&ts2);
			timespecsub(&ts1, &ts2);
			TIMESPEC_TO_TIMEVAL(&tv, &ts1);
			if (tv.tv_sec < 0) {
				error = EWOULDBLOCK;
				break;
			}
			timo = tvtohz(&tv);
			error = _do_lock(td, umtx, id, timo);
			if (error != EWOULDBLOCK)
				break;
		}
	}
	/*
	 * This lets userland back off critical region if needed.
	 */
	if (error == ERESTART)
		error = EINTR;
	return (error);
}

/*
 * Release a contested userland mutex owned by 'id'.  Verifies
 * ownership and the contested bit, then hands the lock to UNOWNED
 * (or keeps CONTESTED when more than one waiter remains) and wakes
 * one waiter.  Returns 0, EFAULT, EPERM (not owner), or EINVAL.
 */
static int
do_unlock(struct thread *td, struct umtx *umtx, long id)
{
	struct umtx_key key;
	intptr_t owner;
	intptr_t old;
	int error;
	int count;

	/*
	 * Make sure we own this mtx.
	 *
	 * XXX Need a {fu,su}ptr this is not correct on arch where
	 * sizeof(intptr_t) != sizeof(long).
	 */
	if ((owner = fuword(&umtx->u_owner)) == -1)
		return (EFAULT);

	if ((owner & ~UMTX_CONTESTED) != id)
		return (EPERM);

	/* We should only ever be in here for contested locks */
	if ((owner & UMTX_CONTESTED) == 0)
		return (EINVAL);

	if ((error = umtx_key_get(td, umtx, &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	/*
	 * When unlocking the umtx, it must be marked as unowned if
	 * there is zero or one thread only waiting for it.
	 * Otherwise, it must be marked as contested.
	 */
	old = casuptr((intptr_t *)&umtx->u_owner, owner,
	    count <= 1 ? UMTX_UNOWNED : UMTX_CONTESTED);
	umtxq_lock(&key);
	/* n_wake == 0 still wakes a single waiter; see umtxq_signal(). */
	umtxq_signal(&key, 0);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	if (old == -1)
		return (EFAULT);
	if (old != owner)
		return (EINVAL);
	return (0);
}

/*
 * Condition-variable style wait: atomically release 'umtx' (owned by
 * 'id') and sleep on the separate address 'uaddr' until signalled by
 * do_wake() or the absolute timeout expires.  The thread is queued
 * on uaddr's chain BEFORE the umtx is released so a wakeup between
 * the two cannot be lost.
 */
static int
do_unlock_and_wait(struct thread *td, struct umtx *umtx, long id, void *uaddr,
	struct timespec *abstime)
{
	struct umtx_q uq;
	intptr_t owner;
	intptr_t old;
	struct timespec ts1, ts2;
	struct timeval tv;
	int timo, error = 0;

	if (umtx == uaddr)
		return (EINVAL);

	/*
	 * Make sure we own this mtx.
	 *
	 * XXX Need a {fu,su}ptr this is not correct on arch where
	 * sizeof(intptr_t) != sizeof(long).
	 */
	if ((owner = fuword(&umtx->u_owner)) == -1)
		return (EFAULT);

	if ((owner & ~UMTX_CONTESTED) != id)
		return (EPERM);

	if ((error = umtxq_queue_me(td, uaddr, &uq)) != 0)
		return (error);

	/* Fast-path release; falls back to do_unlock() if contested. */
	old = casuptr((intptr_t *)&umtx->u_owner, id, UMTX_UNOWNED);
	if (old == -1) {
		umtxq_lock(&uq.uq_key);
		umtxq_remove(&uq);
		umtxq_unlock(&uq.uq_key);
		umtx_key_release(&uq.uq_key);
		return (EFAULT);
	}
	if (old != id) {
		error = do_unlock(td, umtx, id);
		if (error) {
			umtxq_lock(&uq.uq_key);
			umtxq_remove(&uq);
			umtxq_unlock(&uq.uq_key);
			umtx_key_release(&uq.uq_key);
			return (error);
		}
	}
	if (abstime == NULL) {
		umtxq_lock(&uq.uq_key);
		/* TDF_UMTXQ cleared means a waker already dequeued us. */
		if (td->td_flags & TDF_UMTXQ)
			error = umtxq_sleep(td, &uq.uq_key,
			    td->td_priority | PCATCH, "ucond", 0);
		if (!(td->td_flags & TDF_UMTXQ))
			error = 0;
		else
			umtxq_remove(&uq);
		umtxq_unlock(&uq.uq_key);
	} else {
		for (;;) {
			ts1 = *abstime;
			getnanotime(&ts2);
			timespecsub(&ts1, &ts2);
			TIMESPEC_TO_TIMEVAL(&tv, &ts1);
			umtxq_lock(&uq.uq_key);
			if (tv.tv_sec < 0) {
				error = EWOULDBLOCK;
				break;
			}
			timo = tvtohz(&tv);
			if (td->td_flags & TDF_UMTXQ)
				error = umtxq_sleep(td, &uq.uq_key,
				    td->td_priority | PCATCH,
				    "ucond", timo);
			if (!(td->td_flags & TDF_UMTXQ))
				break;
			umtxq_unlock(&uq.uq_key);
		}
		/* A wakeup beats any sleep error, including timeout. */
		if (!(td->td_flags & TDF_UMTXQ))
			error = 0;
		else
			umtxq_remove(&uq);
		umtxq_unlock(&uq.uq_key);
	}
	umtx_key_release(&uq.uq_key);
	if (error == ERESTART)
		error = EINTR;
	return (error);
}

/*
 * Wake up to n_wake threads sleeping on 'uaddr' (the condition-wait
 * channel used by do_unlock_and_wait).  The number actually woken is
 * returned to userland in td_retval[0].
 */
static int
do_wake(struct thread *td, void *uaddr, int n_wake)
{
	struct umtx_key key;
	int ret;

	if ((ret = umtx_key_get(td, uaddr, &key)) != 0)
		return (ret);
	umtxq_lock(&key);
	ret = umtxq_signal(&key, n_wake);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	td->td_retval[0] = ret;
	return (0);
}

/* _umtx_lock(2): lock uap->umtx on behalf of the calling thread's tid. */
int
_umtx_lock(struct thread *td, struct _umtx_lock_args *uap)
	/* struct umtx *umtx */
{
	return _do_lock(td, uap->umtx, td->td_tid, 0);
}

/* _umtx_unlock(2): unlock uap->umtx owned by the calling thread's tid. */
int
_umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
	/* struct umtx *umtx */
{
	return do_unlock(td, uap->umtx, td->td_tid);
}

/*
 * _umtx_op(2): multiplexed umtx operations.  uaddr2, when non-NULL,
 * points to an absolute struct timespec deadline which is copied in
 * and range-checked before use.
 */
int
_umtx_op(struct thread *td, struct _umtx_op_args *uap)
{
	struct timespec abstime;
	struct timespec *ts;
	int error;

	switch(uap->op) {
	case UMTX_OP_LOCK:
		/* Allow a null timespec (wait forever). */
		if (uap->uaddr2 == NULL)
			ts = NULL;
		else {
			error = copyin(uap->uaddr2, &abstime, sizeof(abstime));
			if (error != 0)
				return (error);
			if (abstime.tv_nsec >= 1000000000 ||
			    abstime.tv_nsec < 0)
				return (EINVAL);
			ts = &abstime;
		}
		return do_lock(td, uap->umtx, uap->id, ts);
	case UMTX_OP_UNLOCK:
		return do_unlock(td, uap->umtx, uap->id);
	case UMTX_OP_UNLOCK_AND_WAIT:
		/* Allow a null timespec (wait forever). */
		if (uap->uaddr2 == NULL)
			ts = NULL;
		else {
			error = copyin(uap->uaddr2, &abstime, sizeof(abstime));
			if (error != 0)
				return (error);
			if (abstime.tv_nsec >= 1000000000 ||
			    abstime.tv_nsec < 0)
				return (EINVAL);
			ts = &abstime;
		}
		return do_unlock_and_wait(td, uap->umtx, uap->id,
		    uap->uaddr, ts);
	case UMTX_OP_WAKE:
		return do_wake(td, uap->uaddr, uap->id);
	default:
		return (EINVAL);
	}
}