kern_umtx.c revision 140102
1/*- 2 * Copyright (c) 2004, David Xu <davidxu@freebsd.org> 3 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice unmodified, this list of conditions, and the following 11 * disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_umtx.c 140102 2005-01-12 05:55:52Z davidxu $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/eventhandler.h>
#include <sys/thr.h>
#include <sys/umtx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

/* Key types: see umtx_key_get() for how each is constructed. */
#define UMTX_PRIVATE	0
#define UMTX_SHARED	1

/* Compile-time policy: share keys by VM object for shared mappings. */
#define UMTX_STATIC_SHARED

/*
 * Identity of a userland mutex inside the kernel.  A shared umtx is
 * keyed by its backing VM object + offset; a private one by its user
 * address + owning pid.  The "both" view overlays the two pointer/long
 * pairs so hashing and comparison are type-independent.
 */
struct umtx_key {
	int	type;			/* UMTX_PRIVATE or UMTX_SHARED. */
	union {
		struct {
			vm_object_t	object;	/* Backing VM object. */
			long		offset;	/* Offset within object. */
		} shared;
		struct {
			struct umtx	*umtx;	/* User address of umtx. */
			long		pid;	/* Owning process id. */
		} private;
		struct {
			void		*ptr;	/* Overlay of either pointer. */
			long		word;	/* Overlay of either long. */
		} both;
	} info;
};

/* Per-thread sleep-queue entry; lives on the waiter's kernel stack. */
struct umtx_q {
	LIST_ENTRY(umtx_q)	uq_next;	/* Linked list for the hash. */
	struct umtx_key		uq_key;		/* Umtx key. */
	struct thread		*uq_thread;	/* The thread waits on. */
	LIST_ENTRY(umtx_q)	uq_rqnext;	/* Linked list for requeuing. */
	vm_offset_t		uq_addr;	/* Umtx's virtual address. */
};

LIST_HEAD(umtx_head, umtx_q);
/*
 * One hash bucket: its lock, the list of sleeping waiters, and a
 * busy/want handshake (see umtxq_busy()/umtxq_unbusy()) used to
 * serialize wakeup-sensitive sections without holding uc_lock across
 * user-memory accesses.
 */
struct umtxq_chain {
	struct mtx	uc_lock;	/* Lock for this chain. */
	struct umtx_head uc_queue;	/* List of sleep queues. */
#define UCF_BUSY	0x01		/* Chain is marked busy. */
#define UCF_WANT	0x02		/* A thread waits for !UCF_BUSY. */
	int		uc_flags;
};

#define GOLDEN_RATIO_PRIME	2654404609U
#define UMTX_CHAINS		128
#define UMTX_SHIFTS		(__WORD_BIT - 7)	/* Keep top 7 bits of hash. */

static struct umtxq_chain umtxq_chains[UMTX_CHAINS];
static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");

static void umtxq_init_chains(void *);
static int umtxq_hash(struct umtx_key *key);
static struct mtx *umtxq_mtx(int chain);
static void umtxq_lock(struct umtx_key *key);
static void umtxq_unlock(struct umtx_key *key);
static void umtxq_busy(struct umtx_key *key);
static void umtxq_unbusy(struct umtx_key *key);
static void umtxq_insert(struct umtx_q *uq);
static void umtxq_remove(struct umtx_q *uq);
static int umtxq_sleep(struct thread *td, struct umtx_key *key,
	int prio, const char *wmesg, int timo);
static int umtxq_count(struct umtx_key *key);
static int umtxq_signal(struct umtx_key *key, int nr_wakeup);
#ifdef UMTX_DYNAMIC_SHARED
static void fork_handler(void *arg, struct proc *p1, struct proc *p2,
	int flags);
#endif
static int umtx_key_match(const struct umtx_key *k1, const struct umtx_key *k2);
static int umtx_key_get(struct thread *td, struct umtx *umtx,
	struct umtx_key *key);
static void umtx_key_release(struct umtx_key *key);

SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_init_chains, NULL);

/*
 * Boot-time initialization of every hash chain: mutex, empty waiter
 * list, cleared flags.  MTX_DUPOK because two different keys may hash
 * to chains whose locks are both held during requeue-style operations.
 */
static void
umtxq_init_chains(void *arg __unused)
{
	int i;

	for (i = 0; i < UMTX_CHAINS; ++i) {
		mtx_init(&umtxq_chains[i].uc_lock, "umtxq_lock", NULL,
			 MTX_DEF | MTX_DUPOK);
		LIST_INIT(&umtxq_chains[i].uc_queue);
		umtxq_chains[i].uc_flags = 0;
	}
#ifdef UMTX_DYNAMIC_SHARED
	EVENTHANDLER_REGISTER(process_fork, fork_handler, 0, 10000);
#endif
}

/*
 * Map a key to a chain index using Fibonacci (golden-ratio) hashing of
 * the type-independent ptr+word overlay.
 */
static inline int
umtxq_hash(struct umtx_key *key)
{
	unsigned n = (uintptr_t)key->info.both.ptr + key->info.both.word;
	return (((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS);
}

/* Two keys match iff type and both overlay words are equal. */
static inline int
umtx_key_match(const struct umtx_key *k1, const struct umtx_key *k2)
{
	return (k1->type == k2->type &&
		k1->info.both.ptr == k2->info.both.ptr &&
		k1->info.both.word == k2->info.both.word);
}

/* Return the mutex protecting the given chain. */
static inline struct mtx *
umtxq_mtx(int chain)
{
	return (&umtxq_chains[chain].uc_lock);
}

/*
 * Mark the key's chain busy, sleeping (chain lock held on entry and
 * exit; msleep drops it while blocked) until any current holder clears
 * UCF_BUSY.  The busy flag serializes wakeup sections across the
 * points where the chain lock must be released.
 */
static inline void
umtxq_busy(struct umtx_key *key)
{
	int chain = umtxq_hash(key);

	mtx_assert(umtxq_mtx(chain), MA_OWNED);
	while (umtxq_chains[chain].uc_flags & UCF_BUSY) {
		umtxq_chains[chain].uc_flags |= UCF_WANT;
		msleep(&umtxq_chains[chain], umtxq_mtx(chain),
		       curthread->td_priority, "umtxq_busy", 0);
	}
	umtxq_chains[chain].uc_flags |= UCF_BUSY;
}

/* Clear the busy flag and wake anyone who set UCF_WANT while waiting. */
static inline void
umtxq_unbusy(struct umtx_key *key)
{
	int chain = umtxq_hash(key);

	mtx_assert(umtxq_mtx(chain), MA_OWNED);
	KASSERT(umtxq_chains[chain].uc_flags & UCF_BUSY, ("not busy"));
	umtxq_chains[chain].uc_flags &= ~UCF_BUSY;
	if (umtxq_chains[chain].uc_flags & UCF_WANT) {
		umtxq_chains[chain].uc_flags &= ~UCF_WANT;
		wakeup(&umtxq_chains[chain]);
	}
}

/* Lock the chain this key hashes to. */
static inline void
umtxq_lock(struct umtx_key *key)
{
	int chain = umtxq_hash(key);
	mtx_lock(umtxq_mtx(chain));
}

/* Unlock the chain this key hashes to. */
static inline void
umtxq_unlock(struct umtx_key *key)
{
	int chain = umtxq_hash(key);
	mtx_unlock(umtxq_mtx(chain));
}

/*
 * Insert a thread onto the umtx queue.  Caller holds the chain lock.
 * TDF_UMTXQ is raised under sched_lock so umtxq_remove()/wakeup logic
 * can test it reliably.
 */
static inline void
umtxq_insert(struct umtx_q *uq)
{
	struct umtx_head *head;
	int chain = umtxq_hash(&uq->uq_key);

	mtx_assert(umtxq_mtx(chain), MA_OWNED);
	head = &umtxq_chains[chain].uc_queue;
	LIST_INSERT_HEAD(head, uq, uq_next);
	uq->uq_thread->td_umtxq = uq;
	mtx_lock_spin(&sched_lock);
	uq->uq_thread->td_flags |= TDF_UMTXQ;
	mtx_unlock_spin(&sched_lock);
}

/*
 * Remove thread from the umtx queue.
 * Caller holds the chain lock; no-op if the thread is not queued.
 */
static inline void
umtxq_remove(struct umtx_q *uq)
{
	mtx_assert(umtxq_mtx(umtxq_hash(&uq->uq_key)), MA_OWNED);
	if (uq->uq_thread->td_flags & TDF_UMTXQ) {
		LIST_REMOVE(uq, uq_next);
		uq->uq_thread->td_umtxq = NULL;
		/* turning off TDF_UMTXQ should be the last thing. */
		mtx_lock_spin(&sched_lock);
		uq->uq_thread->td_flags &= ~TDF_UMTXQ;
		mtx_unlock_spin(&sched_lock);
	}
}

/*
 * Count waiters for a key on its chain.  Stops at 2 -- callers (see
 * do_unlock()) only need to distinguish "at most one" from "more".
 * Caller holds the chain lock.
 */
static int
umtxq_count(struct umtx_key *key)
{
	struct umtx_q *uq;
	struct umtx_head *head;
	int chain, count = 0;

	chain = umtxq_hash(key);
	mtx_assert(umtxq_mtx(chain), MA_OWNED);
	head = &umtxq_chains[chain].uc_queue;
	LIST_FOREACH(uq, head, uq_next) {
		if (umtx_key_match(&uq->uq_key, key)) {
			if (++count > 1)
				break;
		}
	}
	return (count);
}

/*
 * Dequeue and wake up to n_wake threads waiting on the key; returns
 * the number woken.  Note that n_wake == 0 still wakes one thread
 * because the count is incremented before the comparison.  Caller
 * holds the chain lock.
 */
static int
umtxq_signal(struct umtx_key *key, int n_wake)
{
	struct umtx_q *uq, *next;
	struct umtx_head *head;
	struct thread *blocked = NULL;
	int chain, ret;

	ret = 0;
	chain = umtxq_hash(key);
	mtx_assert(umtxq_mtx(chain), MA_OWNED);
	head = &umtxq_chains[chain].uc_queue;
	for (uq = LIST_FIRST(head); uq; uq = next) {
		next = LIST_NEXT(uq, uq_next);
		if (umtx_key_match(&uq->uq_key, key)) {
			blocked = uq->uq_thread;
			umtxq_remove(uq);
			wakeup(blocked);
			if (++ret >= n_wake)
				break;
		}
	}
	return (ret);
}

/*
 * Sleep on the key's chain mutex (msleep releases/reacquires it).
 * Normalizes msleep's EWOULDBLOCK to ETIMEDOUT for callers.
 */
static inline int
umtxq_sleep(struct thread *td, struct umtx_key *key, int priority,
	    const char *wmesg, int timo)
{
	int chain = umtxq_hash(key);
	int error = msleep(td, umtxq_mtx(chain), priority, wmesg, timo);
	if (error == EWOULDBLOCK)
		error = ETIMEDOUT;
	return (error);
}

/*
 * Build the kernel-internal key for a user umtx address.  Under the
 * shared policies the address is looked up in the process map; a
 * shared mapping yields a (VM object, offset) key with an object
 * reference held until umtx_key_release().  Returns 0 or EFAULT.
 */
static int
umtx_key_get(struct thread *td, struct umtx *umtx, struct umtx_key *key)
{
#if defined(UMTX_DYNAMIC_SHARED) || defined(UMTX_STATIC_SHARED)
	vm_map_t map;
	vm_map_entry_t entry;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t
	    wired;

	map = &td->td_proc->p_vmspace->vm_map;
	if (vm_map_lookup(&map, (vm_offset_t)umtx, VM_PROT_WRITE,
	    &entry, &key->info.shared.object, &pindex, &prot,
	    &wired) != KERN_SUCCESS) {
		return EFAULT;
	}
#endif

#if defined(UMTX_DYNAMIC_SHARED)
	key->type = UMTX_SHARED;
	key->info.shared.offset = entry->offset + entry->start -
		(vm_offset_t)umtx;
	/*
	 * Add object reference, if we don't do this, a buggy application
	 * deallocates the object, the object will be reused by other
	 * applications, then unlock will wake wrong thread.
	 */
	vm_object_reference(key->info.shared.object);
	vm_map_lookup_done(map, entry);
#elif defined(UMTX_STATIC_SHARED)
	/* Inherit-share mappings get an object key; all else is private. */
	if (VM_INHERIT_SHARE == entry->inheritance) {
		key->type = UMTX_SHARED;
		key->info.shared.offset = entry->offset + entry->start -
			(vm_offset_t)umtx;
		vm_object_reference(key->info.shared.object);
	} else {
		key->type = UMTX_PRIVATE;
		key->info.private.umtx = umtx;
		key->info.private.pid = td->td_proc->p_pid;
	}
	vm_map_lookup_done(map, entry);
#else
	key->type = UMTX_PRIVATE;
	key->info.private.umtx = umtx;
	key->info.private.pid = td->td_proc->p_pid;
#endif
	return (0);
}

/* Drop the VM object reference taken by umtx_key_get() for shared keys. */
static inline void
umtx_key_release(struct umtx_key *key)
{
	if (key->type == UMTX_SHARED)
		vm_object_deallocate(key->info.shared.object);
}

/*
 * Resolve the key for umtx and enqueue the current thread on its
 * chain.  On success the caller owns a key reference (release with
 * umtx_key_release()) and is on the queue; on error nothing is held.
 */
static inline int
umtxq_queue_me(struct thread *td, struct umtx *umtx, struct umtx_q *uq)
{
	int error;

	if ((error = umtx_key_get(td, umtx, &uq->uq_key)) != 0)
		return (error);

	uq->uq_addr = (vm_offset_t)umtx;
	uq->uq_thread = td;
	umtxq_lock(&uq->uq_key);
	/* hmm, for condition variable, we don't need busy flag.
	 */
	umtxq_busy(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unbusy(&uq->uq_key);
	umtxq_unlock(&uq->uq_key);
	return (0);
}

#if defined(UMTX_DYNAMIC_SHARED)
/*
 * process_fork event handler (only under UMTX_DYNAMIC_SHARED): after a
 * fork, a parent's copy-on-write mapping may now be backed by a new VM
 * object, so re-derive the shared key of every waiter in p1 and move
 * the waiter to the chain of the new key if it changed.
 */
static void
fork_handler(void *arg, struct proc *p1, struct proc *p2, int flags)
{
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	struct umtx_key key;
	LIST_HEAD(, umtx_q) workq;
	struct umtx_q *uq;
	struct thread *td;
	int onq;

	LIST_INIT(&workq);

	/* Collect threads waiting on umtxq */
	PROC_LOCK(p1);
	FOREACH_THREAD_IN_PROC(p1, td) {
		if (td->td_flags & TDF_UMTXQ) {
			uq = td->td_umtxq;
			if (uq)
				LIST_INSERT_HEAD(&workq, uq, uq_rqnext);
		}
	}
	PROC_UNLOCK(p1);

	LIST_FOREACH(uq, &workq, uq_rqnext) {
		map = &p1->p_vmspace->vm_map;
		if (vm_map_lookup(&map, uq->uq_addr, VM_PROT_WRITE,
		    &entry, &object, &pindex, &prot, &wired) != KERN_SUCCESS) {
			continue;
		}
		key.type = UMTX_SHARED;
		key.info.shared.object = object;
		key.info.shared.offset = entry->offset + entry->start -
			uq->uq_addr;
		/* Key unchanged -- waiter is already on the right chain. */
		if (umtx_key_match(&key, &uq->uq_key)) {
			vm_map_lookup_done(map, entry);
			continue;
		}

		/* Pull the waiter off its old chain if still queued. */
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		if (uq->uq_thread->td_flags & TDF_UMTXQ) {
			umtxq_remove(uq);
			onq = 1;
		} else
			onq = 0;
		umtxq_unbusy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);
		/* Re-key and re-insert on the new object's chain. */
		if (onq) {
			vm_object_deallocate(uq->uq_key.info.shared.object);
			uq->uq_key = key;
			umtxq_lock(&uq->uq_key);
			umtxq_busy(&uq->uq_key);
			umtxq_insert(uq);
			umtxq_unbusy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);
			vm_object_reference(uq->uq_key.info.shared.object);
		}
		vm_map_lookup_done(map, entry);
	}
}
#endif

/*
 * Acquire the userland mutex for thread id `id`, sleeping up to `timo`
 * ticks per wait (0 = forever).  Implements the contested slow path of
 * the umtx protocol via compare-and-set on the user word.
 */
static int
_do_lock(struct thread *td, struct umtx *umtx, long id, int timo)
{
	struct umtx_q uq;
	intptr_t owner;
	intptr_t old;
	int error = 0;

	/*
	 * Care must be exercised when dealing with umtx structure. It
	 * can fault on any access.
	 */

	for (;;) {
		/*
		 * Try the uncontested case. This should be done in userland.
		 */
		owner = casuptr((intptr_t *)&umtx->u_owner,
		    UMTX_UNOWNED, id);

		/* The acquire succeeded. */
		if (owner == UMTX_UNOWNED)
			return (0);

		/* The address was invalid. */
		if (owner == -1)
			return (EFAULT);

		/* If no one owns it but it is contested try to acquire it. */
		if (owner == UMTX_CONTESTED) {
			owner = casuptr((intptr_t *)&umtx->u_owner,
			    UMTX_CONTESTED, id | UMTX_CONTESTED);

			/* Got it -- contested bit stays set for unlock. */
			if (owner == UMTX_CONTESTED)
				return (0);

			/* The address was invalid. */
			if (owner == -1)
				return (EFAULT);

			/* If this failed the lock has changed, restart. */
			continue;
		}

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error || (error = umtxq_queue_me(td, umtx, &uq)) != 0)
			return (error);

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock. If this fails
		 * either some one else has acquired the lock or it has been
		 * released.
		 */
		old = casuptr((intptr_t *)&umtx->u_owner, owner,
		    owner | UMTX_CONTESTED);

		/* The address was invalid; undo the enqueue and bail. */
		if (old == -1) {
			umtxq_lock(&uq.uq_key);
			umtxq_busy(&uq.uq_key);
			umtxq_remove(&uq);
			umtxq_unbusy(&uq.uq_key);
			umtxq_unlock(&uq.uq_key);
			umtx_key_release(&uq.uq_key);
			return (EFAULT);
		}

		/*
		 * We set the contested bit, sleep. Otherwise the lock changed
		 * and we need to retry or we lost a race to the thread
		 * unlocking the umtx.
		 */
		umtxq_lock(&uq.uq_key);
		/* Only sleep if our CAS won and we are still queued. */
		if (old == owner && (td->td_flags & TDF_UMTXQ)) {
			error = umtxq_sleep(td, &uq.uq_key,
			    td->td_priority | PCATCH,
			    "umtx", timo);
		}
		umtxq_busy(&uq.uq_key);
		umtxq_remove(&uq);
		umtxq_unbusy(&uq.uq_key);
		umtxq_unlock(&uq.uq_key);
		umtx_key_release(&uq.uq_key);
	}

	return (0);
}

/*
 * Lock wrapper handling an optional absolute timeout: converts the
 * deadline to ticks per iteration and retries _do_lock() until it
 * succeeds, fails, or the deadline passes.  ERESTART is mapped to
 * EINTR so userland sees an interruptible error.
 */
static int
do_lock(struct thread *td, struct umtx *umtx, long id,
	struct timespec *abstime)
{
	struct timespec ts1, ts2;
	struct timeval tv;
	int timo, error;

	if (abstime == NULL) {
		error = _do_lock(td, umtx, id, 0);
	} else {
		for (;;) {
			ts1 = *abstime;
			getnanotime(&ts2);
			timespecsub(&ts1, &ts2);
			TIMESPEC_TO_TIMEVAL(&tv, &ts1);
			if (tv.tv_sec < 0) {
				error = ETIMEDOUT;
				break;
			}
			timo = tvtohz(&tv);
			error = _do_lock(td, umtx, id, timo);
			if (error != ETIMEDOUT)
				break;
		}
	}
	/*
	 * This lets userland back off critical region if needed.
	 */
	if (error == ERESTART)
		error = EINTR;
	return (error);
}

/*
 * Release a contested umtx owned by `id`: verify ownership and the
 * contested bit, then hand the word to UMTX_UNOWNED (<=1 waiter) or
 * UMTX_CONTESTED (more) and wake one waiter.
 */
static int
do_unlock(struct thread *td, struct umtx *umtx, long id)
{
	struct umtx_key key;
	intptr_t owner;
	intptr_t old;
	int error;
	int count;

	/*
	 * Make sure we own this mtx.
	 *
	 * XXX Need a {fu,su}ptr this is not correct on arch where
	 * sizeof(intptr_t) != sizeof(long).
	 */
	if ((owner = fuword(&umtx->u_owner)) == -1)
		return (EFAULT);

	if ((owner & ~UMTX_CONTESTED) != id)
		return (EPERM);

	/* We should only ever be in here for contested locks */
	if ((owner & UMTX_CONTESTED) == 0)
		return (EINVAL);

	if ((error = umtx_key_get(td, umtx, &key)) != 0)
		return (error);

	/* Busy the chain so the count stays meaningful across the CAS. */
	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	/*
	 * When unlocking the umtx, it must be marked as unowned if
	 * there is zero or one thread only waiting for it.
	 * Otherwise, it must be marked as contested.
	 */
	old = casuptr((intptr_t *)&umtx->u_owner, owner,
	    count <= 1 ? UMTX_UNOWNED : UMTX_CONTESTED);
	umtxq_lock(&key);
	/* n_wake == 0 still wakes a single waiter (see umtxq_signal()). */
	umtxq_signal(&key, 0);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	if (old == -1)
		return (EFAULT);
	/* Word changed under us: caller raced with another locker. */
	if (old != owner)
		return (EINVAL);
	return (0);
}

/*
 * UMTX_OP_WAIT: sleep until woken or until the deadline, but only
 * while *umtx still equals `id` (classic futex-style wait).  Returns 0
 * if woken or if the value no longer matched; ETIMEDOUT/EINTR
 * otherwise.  Spurious wakeups are absorbed by re-checking TDF_UMTXQ.
 */
static int
do_wait(struct thread *td, struct umtx *umtx, long id, struct timespec *abstime)
{
	struct umtx_q uq;
	struct timespec ts1, ts2;
	struct timeval tv;
	long tmp;
	int timo, error = 0;

	if ((error = umtxq_queue_me(td, umtx, &uq)) != 0)
		return (error);
	tmp = fuword(&umtx->u_owner);
	if (tmp != id) {
		/* Value already changed: do not sleep at all. */
		umtxq_lock(&uq.uq_key);
		umtxq_remove(&uq);
		umtxq_unlock(&uq.uq_key);
	} else if (abstime == NULL) {
		/* Untimed wait. */
		umtxq_lock(&uq.uq_key);
		if (td->td_flags & TDF_UMTXQ)
			error = umtxq_sleep(td, &uq.uq_key,
			    td->td_priority | PCATCH, "ucond", 0);
		/* A wakeup cleared TDF_UMTXQ: treat as success. */
		if (!(td->td_flags & TDF_UMTXQ))
			error = 0;
		else
			umtxq_remove(&uq);
		umtxq_unlock(&uq.uq_key);
	} else {
		/* Timed wait: recompute remaining ticks each pass. */
		for (;;) {
			ts1 = *abstime;
			getnanotime(&ts2);
			timespecsub(&ts1, &ts2);
			TIMESPEC_TO_TIMEVAL(&tv, &ts1);
			umtxq_lock(&uq.uq_key);
			if (tv.tv_sec < 0) {
				error = ETIMEDOUT;
				break;
			}
			timo = tvtohz(&tv);
			if (td->td_flags & TDF_UMTXQ)
				error = umtxq_sleep(td, &uq.uq_key,
				    td->td_priority | PCATCH,
				    "ucond", timo);
			if (error != ETIMEDOUT || !(td->td_flags & TDF_UMTXQ))
				break;
			umtxq_unlock(&uq.uq_key);
		}
		/* Loop exits with the chain lock held. */
		if (!(td->td_flags & TDF_UMTXQ))
			error = 0;
		else
			umtxq_remove(&uq);
		umtxq_unlock(&uq.uq_key);
	}
	umtx_key_release(&uq.uq_key);
	if (error == ERESTART)
		error = EINTR;
	return (error);
}

/*
 * UMTX_OP_WAKE: wake up to n_wake threads waiting on uaddr.
 */
static int
do_wake(struct thread *td, void *uaddr, int n_wake)
{
	struct umtx_key key;
	int ret;

	if ((ret = umtx_key_get(td, uaddr, &key)) != 0)
return (ret); 678 umtxq_lock(&key); 679 ret = umtxq_signal(&key, n_wake); 680 umtxq_unlock(&key); 681 umtx_key_release(&key); 682 return (0); 683} 684 685int 686_umtx_lock(struct thread *td, struct _umtx_lock_args *uap) 687 /* struct umtx *umtx */ 688{ 689 return _do_lock(td, uap->umtx, td->td_tid, 0); 690} 691 692int 693_umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap) 694 /* struct umtx *umtx */ 695{ 696 return do_unlock(td, uap->umtx, td->td_tid); 697} 698 699int 700_umtx_op(struct thread *td, struct _umtx_op_args *uap) 701{ 702 struct timespec abstime; 703 struct timespec *ts; 704 int error; 705 706 switch(uap->op) { 707 case UMTX_OP_LOCK: 708 /* Allow a null timespec (wait forever). */ 709 if (uap->uaddr2 == NULL) 710 ts = NULL; 711 else { 712 error = copyin(uap->uaddr2, &abstime, sizeof(abstime)); 713 if (error != 0) 714 break; 715 printf("uap->abstime: %d.%ld\n", abstime.tv_sec, abstime.tv_nsec); 716 if (abstime.tv_nsec >= 1000000000 || 717 abstime.tv_nsec < 0) { 718 error = EINVAL; 719 break; 720 } 721 ts = &abstime; 722 } 723 error = do_lock(td, uap->umtx, uap->id, ts); 724 break; 725 case UMTX_OP_UNLOCK: 726 error = do_unlock(td, uap->umtx, uap->id); 727 break; 728 case UMTX_OP_WAIT: 729 /* Allow a null timespec (wait forever). */ 730 if (uap->uaddr2 == NULL) 731 ts = NULL; 732 else { 733 error = copyin(uap->uaddr2, &abstime, sizeof(abstime)); 734 if (error != 0) 735 break; 736 if (abstime.tv_nsec >= 1000000000 || 737 abstime.tv_nsec < 0) { 738 error = EINVAL; 739 break; 740 } 741 ts = &abstime; 742 } 743 error = do_wait(td, uap->umtx, uap->id, ts); 744 break; 745 case UMTX_OP_WAKE: 746 error = do_wake(td, uap->umtx, uap->id); 747 break; 748 default: 749 error = EINVAL; 750 break; 751 } 752 td->td_retval[0] = -error; 753 return (0); 754} 755