1139804Simp/*- 2139013Sdavidxu * Copyright (c) 2004, David Xu <davidxu@freebsd.org> 3112904Sjeff * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org> 4112904Sjeff * All rights reserved. 5112904Sjeff * 6112904Sjeff * Redistribution and use in source and binary forms, with or without 7112904Sjeff * modification, are permitted provided that the following conditions 8112904Sjeff * are met: 9112904Sjeff * 1. Redistributions of source code must retain the above copyright 10112904Sjeff * notice unmodified, this list of conditions, and the following 11112904Sjeff * disclaimer. 12112904Sjeff * 2. Redistributions in binary form must reproduce the above copyright 13112904Sjeff * notice, this list of conditions and the following disclaimer in the 14112904Sjeff * documentation and/or other materials provided with the distribution. 15112904Sjeff * 16112904Sjeff * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17112904Sjeff * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18112904Sjeff * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19112904Sjeff * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20112904Sjeff * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21112904Sjeff * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22112904Sjeff * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23112904Sjeff * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24112904Sjeff * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25112904Sjeff * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
26112904Sjeff */ 27112904Sjeff 28116182Sobrien#include <sys/cdefs.h> 29116182Sobrien__FBSDID("$FreeBSD$"); 30116182Sobrien 31162536Sdavidxu#include "opt_compat.h" 32233045Sdavide#include "opt_umtx_profiling.h" 33233045Sdavide 34112904Sjeff#include <sys/param.h> 35112904Sjeff#include <sys/kernel.h> 36131431Smarcel#include <sys/limits.h> 37112904Sjeff#include <sys/lock.h> 38115765Sjeff#include <sys/malloc.h> 39112904Sjeff#include <sys/mutex.h> 40164033Srwatson#include <sys/priv.h> 41112904Sjeff#include <sys/proc.h> 42248105Sattilio#include <sys/sbuf.h> 43161678Sdavidxu#include <sys/sched.h> 44165369Sdavidxu#include <sys/smp.h> 45161678Sdavidxu#include <sys/sysctl.h> 46112904Sjeff#include <sys/sysent.h> 47112904Sjeff#include <sys/systm.h> 48112904Sjeff#include <sys/sysproto.h> 49216641Sdavidxu#include <sys/syscallsubr.h> 50139013Sdavidxu#include <sys/eventhandler.h> 51112904Sjeff#include <sys/umtx.h> 52112904Sjeff 53139013Sdavidxu#include <vm/vm.h> 54139013Sdavidxu#include <vm/vm_param.h> 55139013Sdavidxu#include <vm/pmap.h> 56139013Sdavidxu#include <vm/vm_map.h> 57139013Sdavidxu#include <vm/vm_object.h> 58139013Sdavidxu 59165369Sdavidxu#include <machine/cpu.h> 60165369Sdavidxu 61205014Snwhitehorn#ifdef COMPAT_FREEBSD32 62162536Sdavidxu#include <compat/freebsd32/freebsd32_proto.h> 63162536Sdavidxu#endif 64162536Sdavidxu 65179970Sdavidxu#define _UMUTEX_TRY 1 66179970Sdavidxu#define _UMUTEX_WAIT 2 67179970Sdavidxu 68248105Sattilio#ifdef UMTX_PROFILING 69248105Sattilio#define UPROF_PERC_BIGGER(w, f, sw, sf) \ 70248105Sattilio (((w) > (sw)) || ((w) == (sw) && (f) > (sf))) 71248105Sattilio#endif 72248105Sattilio 73161678Sdavidxu/* Priority inheritance mutex info. 
*/ 74161678Sdavidxustruct umtx_pi { 75161678Sdavidxu /* Owner thread */ 76161678Sdavidxu struct thread *pi_owner; 77161678Sdavidxu 78161678Sdavidxu /* Reference count */ 79161678Sdavidxu int pi_refcount; 80161678Sdavidxu 81161678Sdavidxu /* List entry to link umtx holding by thread */ 82161678Sdavidxu TAILQ_ENTRY(umtx_pi) pi_link; 83161678Sdavidxu 84161678Sdavidxu /* List entry in hash */ 85161678Sdavidxu TAILQ_ENTRY(umtx_pi) pi_hashlink; 86161678Sdavidxu 87161678Sdavidxu /* List for waiters */ 88161678Sdavidxu TAILQ_HEAD(,umtx_q) pi_blocked; 89161678Sdavidxu 90161678Sdavidxu /* Identify a userland lock object */ 91161678Sdavidxu struct umtx_key pi_key; 92161678Sdavidxu}; 93161678Sdavidxu 94161678Sdavidxu/* A userland synchronous object user. */ 95115765Sjeffstruct umtx_q { 96161678Sdavidxu /* Linked list for the hash. */ 97161678Sdavidxu TAILQ_ENTRY(umtx_q) uq_link; 98161678Sdavidxu 99161678Sdavidxu /* Umtx key. */ 100161678Sdavidxu struct umtx_key uq_key; 101161678Sdavidxu 102161678Sdavidxu /* Umtx flags. */ 103161678Sdavidxu int uq_flags; 104161678Sdavidxu#define UQF_UMTXQ 0x0001 105161678Sdavidxu 106161678Sdavidxu /* The thread waits on. */ 107161678Sdavidxu struct thread *uq_thread; 108161678Sdavidxu 109161678Sdavidxu /* 110161678Sdavidxu * Blocked on PI mutex. read can use chain lock 111170300Sjeff * or umtx_lock, write must have both chain lock and 112170300Sjeff * umtx_lock being hold. 
113161678Sdavidxu */ 114161678Sdavidxu struct umtx_pi *uq_pi_blocked; 115161678Sdavidxu 116161678Sdavidxu /* On blocked list */ 117161678Sdavidxu TAILQ_ENTRY(umtx_q) uq_lockq; 118161678Sdavidxu 119161678Sdavidxu /* Thread contending with us */ 120161678Sdavidxu TAILQ_HEAD(,umtx_pi) uq_pi_contested; 121161678Sdavidxu 122161742Sdavidxu /* Inherited priority from PP mutex */ 123161678Sdavidxu u_char uq_inherited_pri; 124201991Sdavidxu 125201991Sdavidxu /* Spare queue ready to be reused */ 126201991Sdavidxu struct umtxq_queue *uq_spare_queue; 127201991Sdavidxu 128201991Sdavidxu /* The queue we on */ 129201991Sdavidxu struct umtxq_queue *uq_cur_queue; 130115765Sjeff}; 131115765Sjeff 132161678SdavidxuTAILQ_HEAD(umtxq_head, umtx_q); 133161678Sdavidxu 134201991Sdavidxu/* Per-key wait-queue */ 135201991Sdavidxustruct umtxq_queue { 136201991Sdavidxu struct umtxq_head head; 137201991Sdavidxu struct umtx_key key; 138201991Sdavidxu LIST_ENTRY(umtxq_queue) link; 139201991Sdavidxu int length; 140201991Sdavidxu}; 141201991Sdavidxu 142201991SdavidxuLIST_HEAD(umtxq_list, umtxq_queue); 143201991Sdavidxu 144161678Sdavidxu/* Userland lock object's wait-queue chain */ 145138224Sdavidxustruct umtxq_chain { 146161678Sdavidxu /* Lock for this chain. */ 147161678Sdavidxu struct mtx uc_lock; 148161678Sdavidxu 149161678Sdavidxu /* List of sleep queues. 
*/ 150201991Sdavidxu struct umtxq_list uc_queue[2]; 151177848Sdavidxu#define UMTX_SHARED_QUEUE 0 152177848Sdavidxu#define UMTX_EXCLUSIVE_QUEUE 1 153161678Sdavidxu 154201991Sdavidxu LIST_HEAD(, umtxq_queue) uc_spare_queue; 155201991Sdavidxu 156161678Sdavidxu /* Busy flag */ 157161678Sdavidxu char uc_busy; 158161678Sdavidxu 159161678Sdavidxu /* Chain lock waiters */ 160158377Sdavidxu int uc_waiters; 161161678Sdavidxu 162161678Sdavidxu /* All PI in the list */ 163161678Sdavidxu TAILQ_HEAD(,umtx_pi) uc_pi_list; 164201991Sdavidxu 165233045Sdavide#ifdef UMTX_PROFILING 166248105Sattilio u_int length; 167248105Sattilio u_int max_length; 168233045Sdavide#endif 169138224Sdavidxu}; 170115765Sjeff 171161678Sdavidxu#define UMTXQ_LOCKED_ASSERT(uc) mtx_assert(&(uc)->uc_lock, MA_OWNED) 172189756Sdavidxu#define UMTXQ_BUSY_ASSERT(uc) KASSERT(&(uc)->uc_busy, ("umtx chain is not busy")) 173161678Sdavidxu 174161678Sdavidxu/* 175161678Sdavidxu * Don't propagate time-sharing priority, there is a security reason, 176161678Sdavidxu * a user can simply introduce PI-mutex, let thread A lock the mutex, 177161678Sdavidxu * and let another thread B block on the mutex, because B is 178161678Sdavidxu * sleeping, its priority will be boosted, this causes A's priority to 179161678Sdavidxu * be boosted via priority propagating too and will never be lowered even 180161678Sdavidxu * if it is using 100%CPU, this is unfair to other processes. 181161678Sdavidxu */ 182161678Sdavidxu 183163709Sjb#define UPRI(td) (((td)->td_user_pri >= PRI_MIN_TIMESHARE &&\ 184163709Sjb (td)->td_user_pri <= PRI_MAX_TIMESHARE) ?\ 185163709Sjb PRI_MAX_TIMESHARE : (td)->td_user_pri) 186161678Sdavidxu 187138224Sdavidxu#define GOLDEN_RATIO_PRIME 2654404609U 188216678Sdavidxu#define UMTX_CHAINS 512 189216678Sdavidxu#define UMTX_SHIFTS (__WORD_BIT - 9) 190115765Sjeff 191161678Sdavidxu#define GET_SHARE(flags) \ 192161678Sdavidxu (((flags) & USYNC_PROCESS_SHARED) == 0 ? 
THREAD_SHARE : PROCESS_SHARE) 193161678Sdavidxu 194177848Sdavidxu#define BUSY_SPINS 200 195177848Sdavidxu 196233690Sdavidxustruct abs_timeout { 197233690Sdavidxu int clockid; 198233690Sdavidxu struct timespec cur; 199233690Sdavidxu struct timespec end; 200233690Sdavidxu}; 201233690Sdavidxu 202161678Sdavidxustatic uma_zone_t umtx_pi_zone; 203179421Sdavidxustatic struct umtxq_chain umtxq_chains[2][UMTX_CHAINS]; 204138224Sdavidxustatic MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory"); 205161678Sdavidxustatic int umtx_pi_allocated; 206115310Sjeff 207227309Sedstatic SYSCTL_NODE(_debug, OID_AUTO, umtx, CTLFLAG_RW, 0, "umtx debug"); 208161678SdavidxuSYSCTL_INT(_debug_umtx, OID_AUTO, umtx_pi_allocated, CTLFLAG_RD, 209161678Sdavidxu &umtx_pi_allocated, 0, "Allocated umtx_pi"); 210161678Sdavidxu 211233045Sdavide#ifdef UMTX_PROFILING 212233045Sdavidestatic long max_length; 213233045SdavideSYSCTL_LONG(_debug_umtx, OID_AUTO, max_length, CTLFLAG_RD, &max_length, 0, "max_length"); 214233045Sdavidestatic SYSCTL_NODE(_debug_umtx, OID_AUTO, chains, CTLFLAG_RD, 0, "umtx chain stats"); 215233045Sdavide#endif 216233045Sdavide 217161678Sdavidxustatic void umtxq_sysinit(void *); 218161678Sdavidxustatic void umtxq_hash(struct umtx_key *key); 219161678Sdavidxustatic struct umtxq_chain *umtxq_getchain(struct umtx_key *key); 220139013Sdavidxustatic void umtxq_lock(struct umtx_key *key); 221139013Sdavidxustatic void umtxq_unlock(struct umtx_key *key); 222139257Sdavidxustatic void umtxq_busy(struct umtx_key *key); 223139257Sdavidxustatic void umtxq_unbusy(struct umtx_key *key); 224177848Sdavidxustatic void umtxq_insert_queue(struct umtx_q *uq, int q); 225177848Sdavidxustatic void umtxq_remove_queue(struct umtx_q *uq, int q); 226233690Sdavidxustatic int umtxq_sleep(struct umtx_q *uq, const char *wmesg, struct abs_timeout *); 227139257Sdavidxustatic int umtxq_count(struct umtx_key *key); 228163697Sdavidxustatic struct umtx_pi *umtx_pi_alloc(int); 229161678Sdavidxustatic void 
umtx_pi_free(struct umtx_pi *pi);
static int do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags);
static void umtx_thread_cleanup(struct thread *td);
static void umtx_exec_hook(void *arg __unused, struct proc *p __unused,
    struct image_params *imgp __unused);
SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_sysinit, NULL);

/* Shorthands operating on the shared (non-exclusive) wait queue. */
#define umtxq_signal(key, nwake) umtxq_signal_queue((key), (nwake), UMTX_SHARED_QUEUE)
#define umtxq_insert(uq) umtxq_insert_queue((uq), UMTX_SHARED_QUEUE)
#define umtxq_remove(uq) umtxq_remove_queue((uq), UMTX_SHARED_QUEUE)

static struct mtx umtx_lock;

#ifdef UMTX_PROFILING
/*
 * Attach one sysctl node per hash chain exposing the peak queue length
 * of both chain tables (debug.umtx.chains.<i>.max_length{0,1}).
 */
static void
umtx_init_profiling(void)
{
    struct sysctl_oid *chain_oid;
    char chain_name[10];
    int i;

    for (i = 0; i < UMTX_CHAINS; ++i) {
        snprintf(chain_name, sizeof(chain_name), "%d", i);
        chain_oid = SYSCTL_ADD_NODE(NULL,
            SYSCTL_STATIC_CHILDREN(_debug_umtx_chains), OID_AUTO,
            chain_name, CTLFLAG_RD, NULL, "umtx hash stats");
        SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
            "max_length0", CTLFLAG_RD, &umtxq_chains[0][i].max_length, 0, NULL);
        SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
            "max_length1", CTLFLAG_RD, &umtxq_chains[1][i].max_length, 0, NULL);
    }
}

/*
 * Sysctl handler: for each of the two chain tables, report the five
 * chains holding the largest share of the total recorded peak length,
 * as a percentage (whole.fraction) plus the chain index.
 */
static int
sysctl_debug_umtx_chains_peaks(SYSCTL_HANDLER_ARGS)
{
    char buf[512];
    struct sbuf sb;
    struct umtxq_chain *uc;
    u_int fract, i, j, tot, whole;
    u_int sf0, sf1, sf2, sf3, sf4;  /* fractional parts of top-5 */
    u_int si0, si1, si2, si3, si4;  /* chain indices of top-5 */
    u_int sw0, sw1, sw2, sw3, sw4;  /* whole parts of top-5 */

    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
    for (i = 0; i < 2; i++) {
        /* Sum of all per-chain peaks; 0 means nothing recorded. */
        tot = 0;
        for (j = 0; j < UMTX_CHAINS; ++j) {
            uc = &umtxq_chains[i][j];
            mtx_lock(&uc->uc_lock);
            tot += uc->max_length;
            mtx_unlock(&uc->uc_lock);
        }
        if (tot == 0)
            sbuf_printf(&sb, "%u) Empty ", i);
        else {
            sf0 = sf1 = sf2 = sf3 = sf4 = 0;
            si0 = si1 = si2 = si3 = si4 = 0;
            sw0 = sw1 = sw2 = sw3 = sw4 = 0;
            for (j = 0; j < UMTX_CHAINS; j++) {
                uc = &umtxq_chains[i][j];
                mtx_lock(&uc->uc_lock);
                /* scale by 100 so sw/tot yields integer percent */
                whole = uc->max_length * 100;
                mtx_unlock(&uc->uc_lock);
                fract = (whole % tot) * 100;
                /* bubble this chain into the ordered top-5 slots */
                if (UPROF_PERC_BIGGER(whole, fract, sw0, sf0)) {
                    sf0 = fract;
                    si0 = j;
                    sw0 = whole;
                } else if (UPROF_PERC_BIGGER(whole, fract, sw1,
                    sf1)) {
                    sf1 = fract;
                    si1 = j;
                    sw1 = whole;
                } else if (UPROF_PERC_BIGGER(whole, fract, sw2,
                    sf2)) {
                    sf2 = fract;
                    si2 = j;
                    sw2 = whole;
                } else if (UPROF_PERC_BIGGER(whole, fract, sw3,
                    sf3)) {
                    sf3 = fract;
                    si3 = j;
                    sw3 = whole;
                } else if (UPROF_PERC_BIGGER(whole, fract, sw4,
                    sf4)) {
                    sf4 = fract;
                    si4 = j;
                    sw4 = whole;
    sysctl_debug_umtx_chains_clear, "I", "Clear umtx chains statistics");
SYSCTL_PROC(_debug_umtx_chains, OID_AUTO, peaks,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0,
    sysctl_debug_umtx_chains_peaks, "A", "Highest peaks in chains max length");
#endif

/*
 * One-time initialization: create the umtx_pi UMA zone, initialize
 * every hash chain in both tables, set up the global umtx spin lock
 * and register the exec event hook.
 */
static void
umtxq_sysinit(void *arg __unused)
{
    int i, j;

    umtx_pi_zone = uma_zcreate("umtx pi", sizeof(struct umtx_pi),
        NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
    for (i = 0; i < 2; ++i) {
        for (j = 0; j < UMTX_CHAINS; ++j) {
            mtx_init(&umtxq_chains[i][j].uc_lock, "umtxql", NULL,
                MTX_DEF | MTX_DUPOK);
            LIST_INIT(&umtxq_chains[i][j].uc_queue[0]);
            LIST_INIT(&umtxq_chains[i][j].uc_queue[1]);
            LIST_INIT(&umtxq_chains[i][j].uc_spare_queue);
            TAILQ_INIT(&umtxq_chains[i][j].uc_pi_list);
            umtxq_chains[i][j].uc_busy = 0;
            umtxq_chains[i][j].uc_waiters = 0;
#ifdef UMTX_PROFILING
            umtxq_chains[i][j].length = 0;
            umtxq_chains[i][j].max_length = 0;
#endif
        }
    }
#ifdef UMTX_PROFILING
    umtx_init_profiling();
#endif
    mtx_init(&umtx_lock, "umtx lock", NULL, MTX_SPIN);
    EVENTHANDLER_REGISTER(process_exec, umtx_exec_hook, NULL,
        EVENTHANDLER_PRI_ANY);
}

/*
 * Allocate a umtx_q for a thread, together with one spare wait-queue
 * header (every queued waiter donates one spare; see umtxq_insert_queue).
 * Caller frees with umtxq_free().
 */
struct umtx_q *
umtxq_alloc(void)
{
    struct umtx_q *uq;

    uq = malloc(sizeof(struct umtx_q), M_UMTX, M_WAITOK | M_ZERO);
    uq->uq_spare_queue = malloc(sizeof(struct umtxq_queue), M_UMTX, M_WAITOK | M_ZERO);
    TAILQ_INIT(&uq->uq_spare_queue->head);
    TAILQ_INIT(&uq->uq_pi_contested);
    uq->uq_inherited_pri = PRI_MAX;
    return (uq);
}

/* Release a umtx_q and its spare queue header. */
void
umtxq_free(struct umtx_q *uq)
{
    MPASS(uq->uq_spare_queue != NULL);
    free(uq->uq_spare_queue, M_UMTX);
    free(uq, M_UMTX);
}

/* Hash the key's address pair into a chain index (multiplicative hash). */
static inline void
umtxq_hash(struct umtx_key *key)
{
    unsigned n = (uintptr_t)key->info.both.a + key->info.both.b;
    key->hash = ((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS;
}

/*
 * Map a key to its chain: table 1 for types up to TYPE_SEM,
 * table 0 for everything else.
 */
static inline struct umtxq_chain *
umtxq_getchain(struct umtx_key *key)
{
    if (key->type <= TYPE_SEM)
        return (&umtxq_chains[1][key->hash]);
    return (&umtxq_chains[0][key->hash]);
}

/*
 * Lock a chain.
 */
static inline void
umtxq_lock(struct umtx_key *key)
{
    struct umtxq_chain *uc;

    uc = umtxq_getchain(key);
    mtx_lock(&uc->uc_lock);
}

/*
 * Unlock a chain.
 */
static inline void
umtxq_unlock(struct umtx_key *key)
{
    struct umtxq_chain *uc;

    uc = umtxq_getchain(key);
    mtx_unlock(&uc->uc_lock);
}

/*
 * Set chain to busy state when following operation
 * may be blocked (kernel mutex can not be used).
 */
static inline void
umtxq_busy(struct umtx_key *key)
{
    struct umtxq_chain *uc;

    uc = umtxq_getchain(key);
    mtx_assert(&uc->uc_lock, MA_OWNED);
    if (uc->uc_busy) {
#ifdef SMP
        /* On SMP, spin briefly before sleeping; the holder may
         * release the busy flag on another CPU at any moment. */
        if (smp_cpus > 1) {
            int count = BUSY_SPINS;
            if (count > 0) {
                umtxq_unlock(key);
                while (uc->uc_busy && --count > 0)
                    cpu_spinwait();
                umtxq_lock(key);
            }
        }
#endif
        /* Still busy: sleep until the owner wakes us in umtxq_unbusy. */
        while (uc->uc_busy) {
            uc->uc_waiters++;
            msleep(uc, &uc->uc_lock, 0, "umtxqb", 0);
            uc->uc_waiters--;
        }
    }
    uc->uc_busy = 1;
}

/*
 * Unbusy a chain.
 */
static inline void
umtxq_unbusy(struct umtx_key *key)
{
    struct umtxq_chain *uc;

    uc = umtxq_getchain(key);
    mtx_assert(&uc->uc_lock, MA_OWNED);
    KASSERT(uc->uc_busy != 0, ("not busy"));
    uc->uc_busy = 0;
    if (uc->uc_waiters)
        wakeup_one(uc);
}

/* Find the per-key wait queue for (key, q) on its chain, or NULL. */
static struct umtxq_queue *
umtxq_queue_lookup(struct umtx_key *key, int q)
{
    struct umtxq_queue *uh;
    struct umtxq_chain *uc;

    uc = umtxq_getchain(key);
    UMTXQ_LOCKED_ASSERT(uc);
    LIST_FOREACH(uh, &uc->uc_queue[q], link) {
        if (umtx_key_match(&uh->key, key))
            return (uh);
    }

    return (NULL);
}

/*
 * Enqueue uq on the wait queue for its key.  If a queue header for the
 * key already exists, the thread's spare header is donated to the
 * chain's spare list; otherwise the spare becomes the new queue header.
 * Either way uq gives up its spare while it is queued.
 */
static inline void
umtxq_insert_queue(struct umtx_q *uq, int q)
{
    struct umtxq_queue *uh;
    struct umtxq_chain *uc;

    uc = umtxq_getchain(&uq->uq_key);
    UMTXQ_LOCKED_ASSERT(uc);
    KASSERT((uq->uq_flags & UQF_UMTXQ) == 0, ("umtx_q is already on queue"));
    uh = umtxq_queue_lookup(&uq->uq_key, q);
    if (uh != NULL) {
        LIST_INSERT_HEAD(&uc->uc_spare_queue, uq->uq_spare_queue, link);
    } else {
        uh = uq->uq_spare_queue;
        uh->key = uq->uq_key;
        LIST_INSERT_HEAD(&uc->uc_queue[q], uh, link);
#ifdef UMTX_PROFILING
        uc->length++;
        if (uc->length > uc->max_length) {
            uc->max_length = uc->length;
            if (uc->max_length > max_length)
                max_length = uc->max_length;
        }
#endif
    }
    uq->uq_spare_queue = NULL;

    TAILQ_INSERT_TAIL(&uh->head, uq, uq_link);
    uh->length++;
    uq->uq_flags |= UQF_UMTXQ;
    uq->uq_cur_queue = uh;
    return;
}

/*
 * Dequeue uq from its wait queue.  The thread takes back a queue header
 * as its spare: the now-empty header itself, or one from the chain's
 * spare list (non-empty here, since each remaining waiter donated one).
 */
static inline void
umtxq_remove_queue(struct umtx_q *uq, int q)
{
    struct umtxq_chain *uc;
    struct umtxq_queue *uh;

    uc = umtxq_getchain(&uq->uq_key);
    UMTXQ_LOCKED_ASSERT(uc);
    if (uq->uq_flags & UQF_UMTXQ) {
        uh = uq->uq_cur_queue;
        TAILQ_REMOVE(&uh->head, uq, uq_link);
        uh->length--;
        uq->uq_flags &= ~UQF_UMTXQ;
        if (TAILQ_EMPTY(&uh->head)) {
            KASSERT(uh->length == 0,
                ("inconsistent umtxq_queue length"));
#ifdef UMTX_PROFILING
            uc->length--;
#endif
            LIST_REMOVE(uh, link);
        } else {
            uh = LIST_FIRST(&uc->uc_spare_queue);
            KASSERT(uh != NULL, ("uc_spare_queue is empty"));
            LIST_REMOVE(uh, link);
        }
        uq->uq_spare_queue = uh;
        uq->uq_cur_queue = NULL;
    }
}

/*
 * Check if there are multiple waiters
 */
static int
umtxq_count(struct umtx_key *key)
{
    struct umtxq_chain *uc;
    struct umtxq_queue *uh;

    uc = umtxq_getchain(key);
    UMTXQ_LOCKED_ASSERT(uc);
    uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
    if (uh != NULL)
        return (uh->length);
    return (0);
}

/*
 * Check if there are multiple PI waiters and returns first
 * waiter.
 */
static int
umtxq_count_pi(struct umtx_key *key, struct umtx_q **first)
{
    struct umtxq_chain *uc;
    struct umtxq_queue *uh;

    *first = NULL;
    uc = umtxq_getchain(key);
    UMTXQ_LOCKED_ASSERT(uc);
    uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
    if (uh != NULL) {
        *first = TAILQ_FIRST(&uh->head);
        return (uh->length);
    }
    return (0);
}

/*
 * Return EINTR/ERESTART if the thread should stop for suspension or
 * debugger attach, 0 otherwise.
 */
static int
umtxq_check_susp(struct thread *td)
{
    struct proc *p;
    int error;

    /*
     * The check for TDF_NEEDSUSPCHK is racy, but it is enough to
     * eventually break the lockstep loop.
     */
    if ((td->td_flags & TDF_NEEDSUSPCHK) == 0)
        return (0);
    error = 0;
    p = td->td_proc;
    PROC_LOCK(p);
    if (P_SHOULDSTOP(p) ||
        ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND))) {
        if (p->p_flag & P_SINGLE_EXIT)
            error = EINTR;
        else
            error = ERESTART;
    }
    PROC_UNLOCK(p);
    return (error);
}

/*
 * Wake up threads waiting on an userland object.
 */

/* Wake up to n_wake threads on queue q for the key; returns count woken. */
static int
umtxq_signal_queue(struct umtx_key *key, int n_wake, int q)
{
    struct umtxq_chain *uc;
    struct umtxq_queue *uh;
    struct umtx_q *uq;
    int ret;

    ret = 0;
    uc = umtxq_getchain(key);
    UMTXQ_LOCKED_ASSERT(uc);
    uh = umtxq_queue_lookup(key, q);
    if (uh != NULL) {
        while ((uq = TAILQ_FIRST(&uh->head)) != NULL) {
            umtxq_remove_queue(uq, q);
            wakeup(uq);
            if (++ret >= n_wake)
                return (ret);
        }
    }
    return (ret);
}


/*
 * Wake up specified thread.
 */
static inline void
umtxq_signal_thread(struct umtx_q *uq)
{
    struct umtxq_chain *uc;

    uc = umtxq_getchain(&uq->uq_key);
    UMTXQ_LOCKED_ASSERT(uc);
    umtxq_remove(uq);
    wakeup(uq);
}

/* Convert a timespec into a tick count suitable for msleep(9). */
static inline int
tstohz(const struct timespec *tsp)
{
    struct timeval tv;

    TIMESPEC_TO_TIMEVAL(&tv, tsp);
    return tvtohz(&tv);
}

/*
 * Initialize an absolute-deadline timeout.  A relative timeout is
 * converted to an absolute deadline against the given clock.
 */
static void
abs_timeout_init(struct abs_timeout *timo, int clockid, int absolute,
    const struct timespec *timeout)
{

    timo->clockid = clockid;
    if (!absolute) {
        kern_clock_gettime(curthread, clockid, &timo->end);
        timo->cur = timo->end;
        timespecadd(&timo->end, timeout);
    } else {
        timo->end = *timeout;
        kern_clock_gettime(curthread, clockid, &timo->cur);
    }
}

/* Initialize from a userland _umtx_time (clock id + ABSTIME flag). */
static void
abs_timeout_init2(struct abs_timeout *timo, const struct _umtx_time *umtxtime)
{

    abs_timeout_init(timo, umtxtime->_clockid,
        (umtxtime->_flags & UMTX_ABSTIME) != 0,
        &umtxtime->_timeout);
}

/* Re-sample the timeout's clock into timo->cur. */
static inline void
abs_timeout_update(struct abs_timeout *timo)
{
    kern_clock_gettime(curthread, timo->clockid, &timo->cur);
}

/* Ticks remaining until the deadline, or -1 if it has already passed. */
static int
abs_timeout_gethz(struct abs_timeout *timo)
{
    struct timespec tts;

    if (timespeccmp(&timo->end, &timo->cur, <=))
        return (-1);
    tts = timo->end;
    timespecsub(&tts, &timo->cur);
    return (tstohz(&tts));
}

/*
 * Put thread into sleep state, before sleeping, check if
 * thread was removed from umtx queue.
 */
static inline int
umtxq_sleep(struct umtx_q *uq, const char *wmesg, struct abs_timeout *abstime)
{
    struct umtxq_chain *uc;
    int error, timo;

    uc = umtxq_getchain(&uq->uq_key);
    UMTXQ_LOCKED_ASSERT(uc);
    for (;;) {
        /* Already dequeued (woken) - no need to sleep. */
        if (!(uq->uq_flags & UQF_UMTXQ))
            return (0);
        if (abstime != NULL) {
            timo = abs_timeout_gethz(abstime);
            if (timo < 0)
                return (ETIMEDOUT);
        } else
            timo = 0;
        /* PDROP: msleep returns with the chain lock released. */
        error = msleep(uq, &uc->uc_lock, PCATCH | PDROP, wmesg, timo);
        if (error != EWOULDBLOCK) {
            umtxq_lock(&uq->uq_key);
            break;
        }
        /* Tick timeout expired; refresh the clock and re-check. */
        if (abstime != NULL)
            abs_timeout_update(abstime);
        umtxq_lock(&uq->uq_key);
    }
    return (error);
}

/*
 * Convert userspace address into unique logical address.
 */
int
umtx_key_get(void *addr, int type, int share, struct umtx_key *key)
{
    struct thread *td = curthread;
    vm_map_t map;
    vm_map_entry_t entry;
    vm_pindex_t pindex;
    vm_prot_t prot;
    boolean_t wired;

    key->type = type;
    if (share == THREAD_SHARE) {
        /* Private: identified by (vmspace, address). */
        key->shared = 0;
        key->info.private.vs = td->td_proc->p_vmspace;
        key->info.private.addr = (uintptr_t)addr;
    } else {
        MPASS(share == PROCESS_SHARE || share == AUTO_SHARE);
        map = &td->td_proc->p_vmspace->vm_map;
        if (vm_map_lookup(&map, (vm_offset_t)addr, VM_PROT_WRITE,
            &entry, &key->info.shared.object, &pindex, &prot,
            &wired) != KERN_SUCCESS) {
            return EFAULT;
        }

        if ((share == PROCESS_SHARE) ||
            (share == AUTO_SHARE &&
             VM_INHERIT_SHARE == entry->inheritance)) {
            /* Shared: identified by (VM object, offset);
             * takes a reference released by umtx_key_release. */
            key->shared = 1;
            key->info.shared.offset = entry->offset + entry->start -
                (vm_offset_t)addr;
            vm_object_reference(key->info.shared.object);
        } else {
            key->shared = 0;
            key->info.private.vs = td->td_proc->p_vmspace;
            key->info.private.addr = (uintptr_t)addr;
        }
        vm_map_lookup_done(map, entry);
    }

    umtxq_hash(key);
    return (0);
}

/*
 * Release key.
 */
/*
 * Release a umtx key obtained from umtx_key_get().  Drops the vm_object
 * reference taken for shared keys; private keys hold no resources.
 */
void
umtx_key_release(struct umtx_key *key)
{
	if (key->shared)
		vm_object_deallocate(key->info.shared.object);
}

/*
 * Lock a umtx object.
 *
 * Kernel-side slow path for the old-style struct umtx lock: loop trying
 * to acquire via CAS on u_owner, setting UMTX_CONTESTED and sleeping on
 * the umtx queue when owned by someone else.  With timeout == NULL an
 * interrupted lock is restarted (ERESTART); a timed lock is not.
 */
static int
do_lock_umtx(struct thread *td, struct umtx *umtx, u_long id,
	const struct timespec *timeout)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	u_long owner;
	u_long old;
	int error = 0;

	uq = td->td_umtxq;
	if (timeout != NULL)
		abs_timeout_init(&timo, CLOCK_REALTIME, 0, timeout);

	/*
	 * Care must be exercised when dealing with umtx structure. It
	 * can fault on any access.
	 */
	for (;;) {
		/*
		 * Try the uncontested case.  This should be done in userland.
		 */
		owner = casuword(&umtx->u_owner, UMTX_UNOWNED, id);

		/* The acquire succeeded. */
		if (owner == UMTX_UNOWNED)
			return (0);

		/* The address was invalid. */
		if (owner == -1)
			return (EFAULT);

		/* If no one owns it but it is contested try to acquire it. */
		if (owner == UMTX_CONTESTED) {
			owner = casuword(&umtx->u_owner,
			    UMTX_CONTESTED, id | UMTX_CONTESTED);

			if (owner == UMTX_CONTESTED)
				return (0);

			/* The address was invalid. */
			if (owner == -1)
				return (EFAULT);

			/* Honor single-threading / suspension requests. */
			error = umtxq_check_susp(td);
			if (error != 0)
				break;

			/* If this failed the lock has changed, restart. */
			continue;
		}

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			break;

		if ((error = umtx_key_get(umtx, TYPE_SIMPLE_LOCK,
			AUTO_SHARE, &uq->uq_key)) != 0)
			return (error);

		/* Queue ourselves before publishing the contested bit. */
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_insert(uq);
		umtxq_unbusy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock.  If this fails
		 * either some one else has acquired the lock or it has been
		 * released.
		 */
		old = casuword(&umtx->u_owner, owner, owner | UMTX_CONTESTED);

		/* The address was invalid. */
		if (old == -1) {
			umtxq_lock(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unlock(&uq->uq_key);
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}

		/*
		 * We set the contested bit, sleep.  Otherwise the lock changed
		 * and we need to retry or we lost a race to the thread
		 * unlocking the umtx.
		 */
		umtxq_lock(&uq->uq_key);
		if (old == owner)
			error = umtxq_sleep(uq, "umtx", timeout == NULL ?
			    NULL : &timo);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
		umtx_key_release(&uq->uq_key);

		if (error == 0)
			error = umtxq_check_susp(td);
	}

	if (timeout == NULL) {
		/* Mutex locking is restarted if it is interrupted. */
		if (error == EINTR)
			error = ERESTART;
	} else {
		/* Timed-locking is not restarted. */
		if (error == ERESTART)
			error = EINTR;
	}
	return (error);
}

/*
 * Unlock a umtx object.
 *
 * Verifies ownership, clears or downgrades u_owner depending on how
 * many waiters remain, and wakes one waiter.  Returns EFAULT on a bad
 * address, EPERM if the caller is not the owner, EINVAL if the word
 * changed underneath us.
 */
static int
do_unlock_umtx(struct thread *td, struct umtx *umtx, u_long id)
{
	struct umtx_key key;
	u_long owner;
	u_long old;
	int error;
	int count;

	/*
	 * Make sure we own this mtx.
	 */
	owner = fuword(__DEVOLATILE(u_long *, &umtx->u_owner));
	if (owner == -1)
		return (EFAULT);

	if ((owner & ~UMTX_CONTESTED) != id)
		return (EPERM);

	/* This should be done in userland */
	if ((owner & UMTX_CONTESTED) == 0) {
		old = casuword(&umtx->u_owner, owner, UMTX_UNOWNED);
		if (old == -1)
			return (EFAULT);
		if (old == owner)
			return (0);
		owner = old;
	}

	/* We should only ever be in here for contested locks */
	if ((error = umtx_key_get(umtx, TYPE_SIMPLE_LOCK, AUTO_SHARE,
	    &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	/*
	 * When unlocking the umtx, it must be marked as unowned if
	 * there is zero or one thread only waiting for it.
	 * Otherwise, it must be marked as contested.
	 */
	old = casuword(&umtx->u_owner, owner,
	    count <= 1 ? UMTX_UNOWNED : UMTX_CONTESTED);
	umtxq_lock(&key);
	umtxq_signal(&key,1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	if (old == -1)
		return (EFAULT);
	if (old != owner)
		return (EINVAL);
	return (0);
}

#ifdef COMPAT_FREEBSD32

/*
 * Lock a umtx object.
 */
/*
 * 32-bit compat variant of do_lock_umtx(): same CAS/sleep protocol,
 * operating on a 32-bit lock word via casuword32().  Note one behavior
 * difference from the native path: a pending error causes an immediate
 * return here rather than a break to the restart-translation code.
 */
static int
do_lock_umtx32(struct thread *td, uint32_t *m, uint32_t id,
	const struct timespec *timeout)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	uint32_t owner;
	uint32_t old;
	int error = 0;

	uq = td->td_umtxq;

	if (timeout != NULL)
		abs_timeout_init(&timo, CLOCK_REALTIME, 0, timeout);

	/*
	 * Care must be exercised when dealing with umtx structure. It
	 * can fault on any access.
	 */
	for (;;) {
		/*
		 * Try the uncontested case.  This should be done in userland.
		 */
		owner = casuword32(m, UMUTEX_UNOWNED, id);

		/* The acquire succeeded. */
		if (owner == UMUTEX_UNOWNED)
			return (0);

		/* The address was invalid. */
		if (owner == -1)
			return (EFAULT);

		/* If no one owns it but it is contested try to acquire it. */
		if (owner == UMUTEX_CONTESTED) {
			owner = casuword32(m,
			    UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);
			if (owner == UMUTEX_CONTESTED)
				return (0);

			/* The address was invalid. */
			if (owner == -1)
				return (EFAULT);

			/* Honor single-threading / suspension requests. */
			error = umtxq_check_susp(td);
			if (error != 0)
				break;

			/* If this failed the lock has changed, restart. */
			continue;
		}

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			return (error);

		if ((error = umtx_key_get(m, TYPE_SIMPLE_LOCK,
			AUTO_SHARE, &uq->uq_key)) != 0)
			return (error);

		/* Queue ourselves before publishing the contested bit. */
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_insert(uq);
		umtxq_unbusy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock.  If this fails
		 * either some one else has acquired the lock or it has been
		 * released.
		 */
		old = casuword32(m, owner, owner | UMUTEX_CONTESTED);

		/* The address was invalid. */
		if (old == -1) {
			umtxq_lock(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unlock(&uq->uq_key);
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}

		/*
		 * We set the contested bit, sleep.  Otherwise the lock changed
		 * and we need to retry or we lost a race to the thread
		 * unlocking the umtx.
		 */
		umtxq_lock(&uq->uq_key);
		if (old == owner)
			error = umtxq_sleep(uq, "umtx", timeout == NULL ?
			    NULL : &timo);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
		umtx_key_release(&uq->uq_key);

		if (error == 0)
			error = umtxq_check_susp(td);
	}

	if (timeout == NULL) {
		/* Mutex locking is restarted if it is interrupted. */
		if (error == EINTR)
			error = ERESTART;
	} else {
		/* Timed-locking is not restarted. */
		if (error == ERESTART)
			error = EINTR;
	}
	return (error);
}

/*
 * Unlock a umtx object.
 *
 * 32-bit compat variant of do_unlock_umtx(): verify ownership, clear or
 * downgrade the lock word based on remaining waiter count, wake one.
 */
static int
do_unlock_umtx32(struct thread *td, uint32_t *m, uint32_t id)
{
	struct umtx_key key;
	uint32_t owner;
	uint32_t old;
	int error;
	int count;

	/*
	 * Make sure we own this mtx.
	 */
	owner = fuword32(m);
	if (owner == -1)
		return (EFAULT);

	if ((owner & ~UMUTEX_CONTESTED) != id)
		return (EPERM);

	/* This should be done in userland */
	if ((owner & UMUTEX_CONTESTED) == 0) {
		old = casuword32(m, owner, UMUTEX_UNOWNED);
		if (old == -1)
			return (EFAULT);
		if (old == owner)
			return (0);
		owner = old;
	}

	/* We should only ever be in here for contested locks */
	if ((error = umtx_key_get(m, TYPE_SIMPLE_LOCK, AUTO_SHARE,
	    &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	/*
	 * When unlocking the umtx, it must be marked as unowned if
	 * there is zero or one thread only waiting for it.
	 * Otherwise, it must be marked as contested.
	 */
	old = casuword32(m, owner,
	    count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);
	umtxq_lock(&key);
	umtxq_signal(&key,1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	if (old == -1)
		return (EFAULT);
	if (old != owner)
		return (EINVAL);
	return (0);
}
#endif

/*
 * Fetch and compare value, sleep on the address if value is not changed.
 */
/*
 * Implements UMTX_OP_WAIT: queue the thread, re-read the word at addr
 * (32- or long-sized per compat32), and sleep only if it still equals
 * the expected 'id'.  A racing wakeup that removed us from the queue
 * overrides any sleep error, so a woken thread always reports success.
 */
static int
do_wait(struct thread *td, void *addr, u_long id,
	struct _umtx_time *timeout, int compat32, int is_private)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	u_long tmp;
	int error = 0;

	uq = td->td_umtxq;
	if ((error = umtx_key_get(addr, TYPE_SIMPLE_WAIT,
		is_private ? THREAD_SHARE : AUTO_SHARE, &uq->uq_key)) != 0)
		return (error);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	/* Insert first so a concurrent wake cannot be missed. */
	umtxq_lock(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unlock(&uq->uq_key);
	if (compat32 == 0)
		tmp = fuword(addr);
	else
		tmp = (unsigned int)fuword32(addr);
	umtxq_lock(&uq->uq_key);
	/* Value unchanged: the caller's condition still holds, sleep. */
	if (tmp == id)
		error = umtxq_sleep(uq, "uwait", timeout == NULL ?
		    NULL : &timo);
	if ((uq->uq_flags & UQF_UMTXQ) == 0)
		error = 0;	/* We were woken; success regardless. */
	else
		umtxq_remove(uq);
	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	/* Plain waits are never restarted. */
	if (error == ERESTART)
		error = EINTR;
	return (error);
}

/*
 * Wake up threads sleeping on the specified address.
 */
/*
 * Wake up to n_wake threads sleeping on uaddr (UMTX_OP_WAKE).  Always
 * returns 0 on success of the key lookup; the number actually woken is
 * not reported to the caller.
 */
int
kern_umtx_wake(struct thread *td, void *uaddr, int n_wake, int is_private)
{
	struct umtx_key key;
	int ret;

	if ((ret = umtx_key_get(uaddr, TYPE_SIMPLE_WAIT,
		is_private ? THREAD_SHARE : AUTO_SHARE, &key)) != 0)
		return (ret);
	umtxq_lock(&key);
	ret = umtxq_signal(&key, n_wake);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (0);
}

/*
 * Lock PTHREAD_PRIO_NONE protocol POSIX mutex.
 *
 * mode selects behavior: _UMUTEX_TRY returns EBUSY instead of sleeping,
 * _UMUTEX_WAIT only blocks until the mutex looks acquirable without
 * actually taking it; otherwise this is a full CAS-acquire/sleep loop.
 */
static int
do_lock_normal(struct thread *td, struct umutex *m, uint32_t flags,
	struct _umtx_time *timeout, int mode)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	uint32_t owner, old, id;
	int error = 0;

	id = td->td_tid;
	uq = td->td_umtxq;

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	/*
	 * Care must be exercised when dealing with umtx structure. It
	 * can fault on any access.
	 */
	for (;;) {
		owner = fuword32(__DEVOLATILE(void *, &m->m_owner));
		if (mode == _UMUTEX_WAIT) {
			/* Only waiting for availability, not acquiring. */
			if (owner == UMUTEX_UNOWNED || owner == UMUTEX_CONTESTED)
				return (0);
		} else {
			/*
			 * Try the uncontested case.  This should be done in userland.
			 */
			owner = casuword32(&m->m_owner, UMUTEX_UNOWNED, id);

			/* The acquire succeeded. */
			if (owner == UMUTEX_UNOWNED)
				return (0);

			/* The address was invalid. */
			if (owner == -1)
				return (EFAULT);

			/* If no one owns it but it is contested try to acquire it. */
			if (owner == UMUTEX_CONTESTED) {
				owner = casuword32(&m->m_owner,
				    UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);

				if (owner == UMUTEX_CONTESTED)
					return (0);

				/* The address was invalid. */
				if (owner == -1)
					return (EFAULT);

				/* Honor single-threading requests. */
				error = umtxq_check_susp(td);
				if (error != 0)
					return (error);

				/* If this failed the lock has changed, restart. */
				continue;
			}
		}

		/* Error-checking mutexes detect self-deadlock. */
		if ((flags & UMUTEX_ERROR_CHECK) != 0 &&
		    (owner & ~UMUTEX_CONTESTED) == id)
			return (EDEADLK);

		if (mode == _UMUTEX_TRY)
			return (EBUSY);

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			return (error);

		if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX,
		    GET_SHARE(flags), &uq->uq_key)) != 0)
			return (error);

		/* Queue ourselves before publishing the contested bit. */
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_insert(uq);
		umtxq_unlock(&uq->uq_key);

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock.  If this fails
		 * either some one else has acquired the lock or it has been
		 * released.
		 */
		old = casuword32(&m->m_owner, owner, owner | UMUTEX_CONTESTED);

		/* The address was invalid. */
		if (old == -1) {
			umtxq_lock(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unbusy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}

		/*
		 * We set the contested bit, sleep.  Otherwise the lock changed
		 * and we need to retry or we lost a race to the thread
		 * unlocking the umtx.
		 */
		umtxq_lock(&uq->uq_key);
		umtxq_unbusy(&uq->uq_key);
		if (old == owner)
			error = umtxq_sleep(uq, "umtxn", timeout == NULL ?
			    NULL : &timo);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
		umtx_key_release(&uq->uq_key);

		if (error == 0)
			error = umtxq_check_susp(td);
	}

	/* NOTREACHED: the loop only exits via return. */
	return (0);
}

/*
 * Unlock PTHREAD_PRIO_NONE protocol POSIX mutex.
 *
 * Verifies ownership, clears or downgrades m_owner based on how many
 * waiters remain, and wakes one waiter.
 */
static int
do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags)
{
	struct umtx_key key;
	uint32_t owner, old, id;
	int error;
	int count;

	id = td->td_tid;
	/*
	 * Make sure we own this mtx.
	 */
	owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
	if (owner == -1)
		return (EFAULT);

	if ((owner & ~UMUTEX_CONTESTED) != id)
		return (EPERM);

	/* Uncontested: clear the word directly, no waiters to wake. */
	if ((owner & UMUTEX_CONTESTED) == 0) {
		old = casuword32(&m->m_owner, owner, UMUTEX_UNOWNED);
		if (old == -1)
			return (EFAULT);
		if (old == owner)
			return (0);
		owner = old;
	}

	/* We should only ever be in here for contested locks */
	if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
	    &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	/*
	 * When unlocking the umtx, it must be marked as unowned if
	 * there is zero or one thread only waiting for it.
	 * Otherwise, it must be marked as contested.
	 */
	old = casuword32(&m->m_owner, owner,
	    count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);
	umtxq_lock(&key);
	umtxq_signal(&key,1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	if (old == -1)
		return (EFAULT);
	if (old != owner)
		return (EINVAL);
	return (0);
}

/*
 * Check if the mutex is available and wake up a waiter,
 * only for simple mutex.
 */
static int
do_wake_umutex(struct thread *td, struct umutex *m)
{
	struct umtx_key key;
	uint32_t owner;
	uint32_t flags;
	int error;
	int count;

	owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
	if (owner == -1)
		return (EFAULT);

	/* Still owned by someone: nothing to wake. */
	if ((owner & ~UMUTEX_CONTESTED) != 0)
		return (0);

	flags = fuword32(&m->m_flags);

	/* We should only ever be in here for contested locks */
	if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
	    &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	/* Last (or no) waiter: drop the contested bit as well. */
	if (count <= 1)
		owner = casuword32(&m->m_owner, UMUTEX_CONTESTED, UMUTEX_UNOWNED);

	umtxq_lock(&key);
	if (count != 0 && (owner & ~UMUTEX_CONTESTED) == 0)
		umtxq_signal(&key, 1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (0);
}

/*
 * Check if the mutex has waiters and tries to fix contention bit.
 *
 * Used to repair a lock word whose UMUTEX_CONTESTED bit was lost while
 * sleepers remain queued; wakes a waiter when the mutex turns out to be
 * free.  Returns EFAULT (after waking everyone) if the word faults.
 */
static int
do_wake2_umutex(struct thread *td, struct umutex *m, uint32_t flags)
{
	struct umtx_key key;
	uint32_t owner, old;
	int type;
	int error;
	int count;

	switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
	case 0:
		type = TYPE_NORMAL_UMUTEX;
		break;
	case UMUTEX_PRIO_INHERIT:
		type = TYPE_PI_UMUTEX;
		break;
	case UMUTEX_PRIO_PROTECT:
		type = TYPE_PP_UMUTEX;
		break;
	default:
		return (EINVAL);
	}
	if ((error = umtx_key_get(m, type, GET_SHARE(flags),
	    &key)) != 0)
		return (error);

	owner = 0;
	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);
	/*
	 * Only repair contention bit if there is a waiter, this means the mutex
	 * is still being referenced by userland code, otherwise don't update
	 * any memory.
	 */
	if (count > 1) {
		/* Several waiters: unconditionally restore the bit. */
		owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
		while ((owner & UMUTEX_CONTESTED) ==0) {
			old = casuword32(&m->m_owner, owner,
			    owner|UMUTEX_CONTESTED);
			if (old == owner)
				break;
			owner = old;
			if (old == -1)
				break;
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
		}
	} else if (count == 1) {
		/* Single waiter: only mark while the mutex is held. */
		owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
		while ((owner & ~UMUTEX_CONTESTED) != 0 &&
		       (owner & UMUTEX_CONTESTED) == 0) {
			old = casuword32(&m->m_owner, owner,
			    owner|UMUTEX_CONTESTED);
			if (old == owner)
				break;
			owner = old;
			if (old == -1)
				break;
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
		}
	}
	umtxq_lock(&key);
	if (owner == -1) {
		/* Faulting word: wake everyone so nobody sleeps forever. */
		error = EFAULT;
		umtxq_signal(&key, INT_MAX);
	}
	else if (count != 0 && (owner & ~UMUTEX_CONTESTED) == 0)
		umtxq_signal(&key, 1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}

/*
 * Allocate a PI (priority inheritance) record; flags are passed through
 * to uma_zalloc() (e.g. M_WAITOK/M_NOWAIT).
 * NOTE(review): with M_NOWAIT uma_zalloc() may return NULL, which would
 * be dereferenced here — presumably callers only pass M_WAITOK; verify.
 */
static inline struct umtx_pi *
umtx_pi_alloc(int flags)
{
	struct umtx_pi *pi;

	pi = uma_zalloc(umtx_pi_zone, M_ZERO | flags);
	TAILQ_INIT(&pi->pi_blocked);
	atomic_add_int(&umtx_pi_allocated, 1);
	return (pi);
}

/* Free a PI record and drop the allocation counter. */
static inline void
umtx_pi_free(struct umtx_pi *pi)
{
	uma_zfree(umtx_pi_zone, pi);
	atomic_add_int(&umtx_pi_allocated, -1);
}

/*
 * Adjust the thread's position on a pi_state after its priority has been
 * changed.
 *
 * Keeps pi_blocked sorted by user priority.  Returns 1 if pi is valid
 * (possibly after re-sorting), 0 if pi is NULL.  Requires umtx_lock.
 */
static int
umtx_pi_adjust_thread(struct umtx_pi *pi, struct thread *td)
{
	struct umtx_q *uq, *uq1, *uq2;
	struct thread *td1;

	mtx_assert(&umtx_lock, MA_OWNED);
	if (pi == NULL)
		return (0);

	uq = td->td_umtxq;

	/*
	 * Check if the thread needs to be moved on the blocked chain.
	 * It needs to be moved if either its priority is lower than
	 * the previous thread or higher than the next thread.
	 */
	uq1 = TAILQ_PREV(uq, umtxq_head, uq_lockq);
	uq2 = TAILQ_NEXT(uq, uq_lockq);
	if ((uq1 != NULL && UPRI(td) < UPRI(uq1->uq_thread)) ||
	    (uq2 != NULL && UPRI(td) > UPRI(uq2->uq_thread))) {
		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved to.
		 */
		TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
		TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
			td1 = uq1->uq_thread;
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (UPRI(td1) > UPRI(td))
				break;
		}

		if (uq1 == NULL)
			TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);
		else
			TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
	}
	return (1);
}

/*
 * Propagate priority when a thread is blocked on POSIX
 * PI mutex.
 *
 * Walks the chain of lock owners, lending td's priority to each owner
 * whose lent priority is worse, until the chain ends, reaches the
 * current thread, or an owner already has a priority at least as good.
 * Requires umtx_lock.
 */
static void
umtx_propagate_priority(struct thread *td)
{
	struct umtx_q *uq;
	struct umtx_pi *pi;
	int pri;

	mtx_assert(&umtx_lock, MA_OWNED);
	pri = UPRI(td);
	uq = td->td_umtxq;
	pi = uq->uq_pi_blocked;
	if (pi == NULL)
		return;

	for (;;) {
		td = pi->pi_owner;
		if (td == NULL || td == curthread)
			return;

		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);

		thread_lock(td);
		if (td->td_lend_user_pri > pri)
			sched_lend_user_prio(td, pri);
		else {
			/* Owner already runs at this priority or better. */
			thread_unlock(td);
			break;
		}
		thread_unlock(td);

		/*
		 * Pick up the lock that td is blocked on.
		 */
		uq = td->td_umtxq;
		pi = uq->uq_pi_blocked;
		if (pi == NULL)
			break;
		/* Resort td on the list if needed. */
		umtx_pi_adjust_thread(pi, td);
	}
}

/*
 * Unpropagate priority for a PI mutex when a thread blocked on
 * it is interrupted by signal or resumed by others.
 *
 * Recomputes each owner's lent priority from the best waiter across all
 * PI mutexes it still holds, then follows the owner's own blocking
 * chain upward.  Requires umtx_lock.
 */
static void
umtx_repropagate_priority(struct umtx_pi *pi)
{
	struct umtx_q *uq, *uq_owner;
	struct umtx_pi *pi2;
	int pri;

	mtx_assert(&umtx_lock, MA_OWNED);

	while (pi != NULL && pi->pi_owner != NULL) {
		pri = PRI_MAX;
		uq_owner = pi->pi_owner->td_umtxq;

		/* Best priority among waiters of all contested PI mutexes. */
		TAILQ_FOREACH(pi2, &uq_owner->uq_pi_contested, pi_link) {
			uq = TAILQ_FIRST(&pi2->pi_blocked);
			if (uq != NULL) {
				if (pri > UPRI(uq->uq_thread))
					pri = UPRI(uq->uq_thread);
			}
		}

		if (pri > uq_owner->uq_inherited_pri)
			pri = uq_owner->uq_inherited_pri;
		thread_lock(pi->pi_owner);
		sched_lend_user_prio(pi->pi_owner, pri);
		thread_unlock(pi->pi_owner);
		if ((pi = uq_owner->uq_pi_blocked) != NULL)
			umtx_pi_adjust_thread(pi, uq_owner->uq_thread);
	}
}

/*
 * Insert a PI mutex into owned list.
1719161678Sdavidxu */ 1720161678Sdavidxustatic void 1721161678Sdavidxuumtx_pi_setowner(struct umtx_pi *pi, struct thread *owner) 1722161678Sdavidxu{ 1723161678Sdavidxu struct umtx_q *uq_owner; 1724161678Sdavidxu 1725161678Sdavidxu uq_owner = owner->td_umtxq; 1726170300Sjeff mtx_assert(&umtx_lock, MA_OWNED); 1727161678Sdavidxu if (pi->pi_owner != NULL) 1728161678Sdavidxu panic("pi_ower != NULL"); 1729161678Sdavidxu pi->pi_owner = owner; 1730161678Sdavidxu TAILQ_INSERT_TAIL(&uq_owner->uq_pi_contested, pi, pi_link); 1731161678Sdavidxu} 1732161678Sdavidxu 1733161678Sdavidxu/* 1734161678Sdavidxu * Claim ownership of a PI mutex. 1735161678Sdavidxu */ 1736161678Sdavidxustatic int 1737161678Sdavidxuumtx_pi_claim(struct umtx_pi *pi, struct thread *owner) 1738161678Sdavidxu{ 1739161678Sdavidxu struct umtx_q *uq, *uq_owner; 1740161678Sdavidxu 1741161678Sdavidxu uq_owner = owner->td_umtxq; 1742170300Sjeff mtx_lock_spin(&umtx_lock); 1743161678Sdavidxu if (pi->pi_owner == owner) { 1744170300Sjeff mtx_unlock_spin(&umtx_lock); 1745161678Sdavidxu return (0); 1746161678Sdavidxu } 1747161678Sdavidxu 1748161678Sdavidxu if (pi->pi_owner != NULL) { 1749161678Sdavidxu /* 1750161678Sdavidxu * userland may have already messed the mutex, sigh. 
1751161678Sdavidxu */ 1752170300Sjeff mtx_unlock_spin(&umtx_lock); 1753161678Sdavidxu return (EPERM); 1754161678Sdavidxu } 1755161678Sdavidxu umtx_pi_setowner(pi, owner); 1756161678Sdavidxu uq = TAILQ_FIRST(&pi->pi_blocked); 1757161678Sdavidxu if (uq != NULL) { 1758161678Sdavidxu int pri; 1759161678Sdavidxu 1760161678Sdavidxu pri = UPRI(uq->uq_thread); 1761170300Sjeff thread_lock(owner); 1762161678Sdavidxu if (pri < UPRI(owner)) 1763161678Sdavidxu sched_lend_user_prio(owner, pri); 1764170300Sjeff thread_unlock(owner); 1765161678Sdavidxu } 1766170300Sjeff mtx_unlock_spin(&umtx_lock); 1767161678Sdavidxu return (0); 1768161678Sdavidxu} 1769161678Sdavidxu 1770161678Sdavidxu/* 1771174701Sdavidxu * Adjust a thread's order position in its blocked PI mutex, 1772174701Sdavidxu * this may result new priority propagating process. 1773174701Sdavidxu */ 1774174701Sdavidxuvoid 1775174701Sdavidxuumtx_pi_adjust(struct thread *td, u_char oldpri) 1776174701Sdavidxu{ 1777174707Sdavidxu struct umtx_q *uq; 1778174707Sdavidxu struct umtx_pi *pi; 1779174707Sdavidxu 1780174707Sdavidxu uq = td->td_umtxq; 1781174701Sdavidxu mtx_lock_spin(&umtx_lock); 1782174707Sdavidxu /* 1783174707Sdavidxu * Pick up the lock that td is blocked on. 1784174707Sdavidxu */ 1785174707Sdavidxu pi = uq->uq_pi_blocked; 1786216791Sdavidxu if (pi != NULL) { 1787216791Sdavidxu umtx_pi_adjust_thread(pi, td); 1788216791Sdavidxu umtx_repropagate_priority(pi); 1789216791Sdavidxu } 1790174701Sdavidxu mtx_unlock_spin(&umtx_lock); 1791174701Sdavidxu} 1792174701Sdavidxu 1793174701Sdavidxu/* 1794161678Sdavidxu * Sleep on a PI mutex. 
 */
static int
umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi,
    uint32_t owner, const char *wmesg, struct abs_timeout *timo)
{
    struct umtxq_chain *uc;
    struct thread *td, *td1;
    struct umtx_q *uq1;
    int pri;
    int error = 0;

    td = uq->uq_thread;
    KASSERT(td == curthread, ("inconsistent uq_thread"));
    uc = umtxq_getchain(&uq->uq_key);
    UMTXQ_LOCKED_ASSERT(uc);
    UMTXQ_BUSY_ASSERT(uc);
    umtxq_insert(uq);
    mtx_lock_spin(&umtx_lock);
    if (pi->pi_owner == NULL) {
        mtx_unlock_spin(&umtx_lock);
        /* XXX Only look up thread in current process. */
        td1 = tdfind(owner, curproc->p_pid);
        mtx_lock_spin(&umtx_lock);
        if (td1 != NULL) {
            /* Re-check: the owner may have been set while unlocked. */
            if (pi->pi_owner == NULL)
                umtx_pi_setowner(pi, td1);
            /* tdfind() returned with the process locked. */
            PROC_UNLOCK(td1->td_proc);
        }
    }

    /* Insert into pi_blocked, keeping best priority first. */
    TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
        pri = UPRI(uq1->uq_thread);
        if (pri > UPRI(td))
            break;
    }

    if (uq1 != NULL)
        TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
    else
        TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);

    uq->uq_pi_blocked = pi;
    thread_lock(td);
    td->td_flags |= TDF_UPIBLOCKED;
    thread_unlock(td);
    umtx_propagate_priority(td);
    mtx_unlock_spin(&umtx_lock);
    umtxq_unbusy(&uq->uq_key);

    error = umtxq_sleep(uq, wmesg, timo);
    umtxq_remove(uq);

    /* Undo the blocked state and give back any lent priority. */
    mtx_lock_spin(&umtx_lock);
    uq->uq_pi_blocked = NULL;
    thread_lock(td);
    td->td_flags &= ~TDF_UPIBLOCKED;
    thread_unlock(td);
    TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
    umtx_repropagate_priority(pi);
    mtx_unlock_spin(&umtx_lock);
    umtxq_unlock(&uq->uq_key);

    return (error);
}

/*
 * Add reference count for a PI mutex.
 */
static void
umtx_pi_ref(struct umtx_pi *pi)
{
    struct umtxq_chain *uc;

    uc = umtxq_getchain(&pi->pi_key);
    UMTXQ_LOCKED_ASSERT(uc);
    pi->pi_refcount++;
}

/*
 * Decrease reference count for a PI mutex; if the counter
 * drops to zero, the structure is unhashed and freed.
 */
static void
umtx_pi_unref(struct umtx_pi *pi)
{
    struct umtxq_chain *uc;

    uc = umtxq_getchain(&pi->pi_key);
    UMTXQ_LOCKED_ASSERT(uc);
    KASSERT(pi->pi_refcount > 0, ("invalid reference count"));
    if (--pi->pi_refcount == 0) {
        mtx_lock_spin(&umtx_lock);
        if (pi->pi_owner != NULL) {
            /* Detach from the owner's contested list. */
            TAILQ_REMOVE(&pi->pi_owner->td_umtxq->uq_pi_contested,
                pi, pi_link);
            pi->pi_owner = NULL;
        }
        KASSERT(TAILQ_EMPTY(&pi->pi_blocked),
            ("blocked queue not empty"));
        mtx_unlock_spin(&umtx_lock);
        TAILQ_REMOVE(&uc->uc_pi_list, pi, pi_hashlink);
        umtx_pi_free(pi);
    }
}

/*
 * Find a PI mutex in hash table.  Returns NULL when no entry
 * matches the key.  Caller must hold the chain lock.
 */
static struct umtx_pi *
umtx_pi_lookup(struct umtx_key *key)
{
    struct umtxq_chain *uc;
    struct umtx_pi *pi;

    uc = umtxq_getchain(key);
    UMTXQ_LOCKED_ASSERT(uc);

    TAILQ_FOREACH(pi, &uc->uc_pi_list, pi_hashlink) {
        if (umtx_key_match(&pi->pi_key, key)) {
            return (pi);
        }
    }
    return (NULL);
}

/*
 * Insert a PI mutex into hash table.
 */
static inline void
umtx_pi_insert(struct umtx_pi *pi)
{
    struct umtxq_chain *uc;

    uc = umtxq_getchain(&pi->pi_key);
    UMTXQ_LOCKED_ASSERT(uc);
    TAILQ_INSERT_TAIL(&uc->uc_pi_list, pi, pi_hashlink);
}

/*
 * Lock a PI mutex.  Retries the userland CAS protocol until the
 * mutex is acquired, a fault/deadlock/timeout occurs, or (with
 * try != 0) the mutex turns out to be busy.
 */
static int
do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int try)
{
    struct abs_timeout timo;
    struct umtx_q *uq;
    struct umtx_pi *pi, *new_pi;
    uint32_t id, owner, old;
    int error;

    id = td->td_tid;
    uq = td->td_umtxq;

    if ((error = umtx_key_get(m, TYPE_PI_UMUTEX, GET_SHARE(flags),
        &uq->uq_key)) != 0)
        return (error);

    if (timeout != NULL)
        abs_timeout_init2(&timo, timeout);

    /* Find or create the kernel-side PI state for this key. */
    umtxq_lock(&uq->uq_key);
    pi = umtx_pi_lookup(&uq->uq_key);
    if (pi == NULL) {
        new_pi = umtx_pi_alloc(M_NOWAIT);
        if (new_pi == NULL) {
            /*
             * Drop the chain lock for a sleeping allocation,
             * then re-check for a racing insert.
             */
            umtxq_unlock(&uq->uq_key);
            new_pi = umtx_pi_alloc(M_WAITOK);
            umtxq_lock(&uq->uq_key);
            pi = umtx_pi_lookup(&uq->uq_key);
            if (pi != NULL) {
                umtx_pi_free(new_pi);
                new_pi = NULL;
            }
        }
        if (new_pi != NULL) {
            new_pi->pi_key = uq->uq_key;
            umtx_pi_insert(new_pi);
            pi = new_pi;
        }
    }
    umtx_pi_ref(pi);
    umtxq_unlock(&uq->uq_key);

    /*
     * Care must be exercised when dealing with umtx structure.  It
     * can fault on any access.
     */
    for (;;) {
        /*
         * Try the uncontested case.  This should be done in userland.
         */
        owner = casuword32(&m->m_owner, UMUTEX_UNOWNED, id);

        /* The acquire succeeded. */
        if (owner == UMUTEX_UNOWNED) {
            error = 0;
            break;
        }

        /* The address was invalid. */
        if (owner == -1) {
            error = EFAULT;
            break;
        }

        /* If no one owns it but it is contested try to acquire it. */
        if (owner == UMUTEX_CONTESTED) {
            owner = casuword32(&m->m_owner,
                UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);

            if (owner == UMUTEX_CONTESTED) {
                umtxq_lock(&uq->uq_key);
                umtxq_busy(&uq->uq_key);
                error = umtx_pi_claim(pi, td);
                umtxq_unbusy(&uq->uq_key);
                umtxq_unlock(&uq->uq_key);
                break;
            }

            /* The address was invalid. */
            if (owner == -1) {
                error = EFAULT;
                break;
            }

            /* Honor single-threading/suspension requests. */
            error = umtxq_check_susp(td);
            if (error != 0)
                break;

            /* If this failed the lock has changed, restart. */
            continue;
        }

        if ((flags & UMUTEX_ERROR_CHECK) != 0 &&
            (owner & ~UMUTEX_CONTESTED) == id) {
            error = EDEADLK;
            break;
        }

        if (try != 0) {
            error = EBUSY;
            break;
        }

        /*
         * If we caught a signal, we have retried and now
         * exit immediately.
         */
        if (error != 0)
            break;

        umtxq_lock(&uq->uq_key);
        umtxq_busy(&uq->uq_key);
        umtxq_unlock(&uq->uq_key);

        /*
         * Set the contested bit so that a release in user space
         * knows to use the system call for unlock.  If this fails
         * either some one else has acquired the lock or it has been
         * released.
         */
        old = casuword32(&m->m_owner, owner, owner | UMUTEX_CONTESTED);

        /* The address was invalid. */
        if (old == -1) {
            umtxq_lock(&uq->uq_key);
            umtxq_unbusy(&uq->uq_key);
            umtxq_unlock(&uq->uq_key);
            error = EFAULT;
            break;
        }

        umtxq_lock(&uq->uq_key);
        /*
         * We set the contested bit, sleep.  Otherwise the lock changed
         * and we need to retry or we lost a race to the thread
         * unlocking the umtx.
         */
        if (old == owner)
            error = umtxq_sleep_pi(uq, pi, owner & ~UMUTEX_CONTESTED,
                "umtxpi", timeout == NULL ? NULL : &timo);
        else {
            umtxq_unbusy(&uq->uq_key);
            umtxq_unlock(&uq->uq_key);
        }

        error = umtxq_check_susp(td);
        if (error != 0)
            break;
    }

    umtxq_lock(&uq->uq_key);
    umtx_pi_unref(pi);
    umtxq_unlock(&uq->uq_key);

    umtx_key_release(&uq->uq_key);
    return (error);
}

/*
 * Unlock a PI mutex.  Hands ownership to the best-priority waiter
 * still on the sleep queue and recomputes the unlocking thread's
 * lent priority.
 */
static int
do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags)
{
    struct umtx_key key;
    struct umtx_q *uq_first, *uq_first2, *uq_me;
    struct umtx_pi *pi, *pi2;
    uint32_t owner, old, id;
    int error;
    int count;
    int pri;

    id = td->td_tid;
    /*
     * Make sure we own this mtx.
     */
    owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
    if (owner == -1)
        return (EFAULT);

    if ((owner & ~UMUTEX_CONTESTED) != id)
        return (EPERM);

    /* This should be done in userland */
    if ((owner & UMUTEX_CONTESTED) == 0) {
        old = casuword32(&m->m_owner, owner, UMUTEX_UNOWNED);
        if (old == -1)
            return (EFAULT);
        if (old == owner)
            return (0);
        owner = old;
    }

    /* We should only ever be in here for contested locks */
    if ((error = umtx_key_get(m, TYPE_PI_UMUTEX, GET_SHARE(flags),
        &key)) != 0)
        return (error);

    umtxq_lock(&key);
    umtxq_busy(&key);
    count = umtxq_count_pi(&key, &uq_first);
    if (uq_first != NULL) {
        mtx_lock_spin(&umtx_lock);
        pi = uq_first->uq_pi_blocked;
        KASSERT(pi != NULL, ("pi == NULL?"));
        if (pi->pi_owner != curthread) {
            mtx_unlock_spin(&umtx_lock);
            umtxq_unbusy(&key);
            umtxq_unlock(&key);
            umtx_key_release(&key);
            /* userland messed the mutex */
            return (EPERM);
        }
        uq_me = curthread->td_umtxq;
        pi->pi_owner = NULL;
        TAILQ_REMOVE(&uq_me->uq_pi_contested, pi, pi_link);
        /* get highest priority thread which is still sleeping. */
        uq_first = TAILQ_FIRST(&pi->pi_blocked);
        while (uq_first != NULL &&
            (uq_first->uq_flags & UQF_UMTXQ) == 0) {
            uq_first = TAILQ_NEXT(uq_first, uq_lockq);
        }
        /* Recompute our own lent priority from remaining mutexes. */
        pri = PRI_MAX;
        TAILQ_FOREACH(pi2, &uq_me->uq_pi_contested, pi_link) {
            uq_first2 = TAILQ_FIRST(&pi2->pi_blocked);
            if (uq_first2 != NULL) {
                if (pri > UPRI(uq_first2->uq_thread))
                    pri = UPRI(uq_first2->uq_thread);
            }
        }
        thread_lock(curthread);
        sched_lend_user_prio(curthread, pri);
        thread_unlock(curthread);
        mtx_unlock_spin(&umtx_lock);
        if (uq_first)
            umtxq_signal_thread(uq_first);
    }
    umtxq_unlock(&key);

    /*
     * When unlocking the umtx, it must be marked as unowned if
     * there is zero or one thread only waiting for it.
     * Otherwise, it must be marked as contested.
     */
    old = casuword32(&m->m_owner, owner,
        count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);

    umtxq_lock(&key);
    umtxq_unbusy(&key);
    umtxq_unlock(&key);
    umtx_key_release(&key);
    if (old == -1)
        return (EFAULT);
    if (old != owner)
        return (EINVAL);
    return (0);
}

/*
 * Lock a PP mutex.
 */
static int
do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int try)
{
    struct abs_timeout timo;
    struct umtx_q *uq, *uq2;
    struct umtx_pi *pi;
    uint32_t ceiling;
    uint32_t owner, id;
    int error, pri, old_inherited_pri, su;

    id = td->td_tid;
    uq = td->td_umtxq;
    if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags),
        &uq->uq_key)) != 0)
        return (error);

    if (timeout != NULL)
        abs_timeout_init2(&timo, timeout);

    /* su: caller is privileged to raise to realtime priorities. */
    su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);
    for (;;) {
        old_inherited_pri = uq->uq_inherited_pri;
        umtxq_lock(&uq->uq_key);
        umtxq_busy(&uq->uq_key);
        umtxq_unlock(&uq->uq_key);

        /* Translate the userland ceiling into a kernel priority. */
        ceiling = RTP_PRIO_MAX - fuword32(&m->m_ceilings[0]);
        if (ceiling > RTP_PRIO_MAX) {
            error = EINVAL;
            goto out;
        }

        mtx_lock_spin(&umtx_lock);
        if (UPRI(td) < PRI_MIN_REALTIME + ceiling) {
            /* Our priority is above the ceiling: protocol error. */
            mtx_unlock_spin(&umtx_lock);
            error = EINVAL;
            goto out;
        }
        if (su && PRI_MIN_REALTIME + ceiling < uq->uq_inherited_pri) {
            uq->uq_inherited_pri = PRI_MIN_REALTIME + ceiling;
            thread_lock(td);
            if (uq->uq_inherited_pri < UPRI(td))
                sched_lend_user_prio(td, uq->uq_inherited_pri);
            thread_unlock(td);
        }
        mtx_unlock_spin(&umtx_lock);

        owner = casuword32(&m->m_owner,
            UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);

        if (owner == UMUTEX_CONTESTED) {
            error = 0;
            break;
        }

        /* The address was invalid. */
        if (owner == -1) {
            error = EFAULT;
            break;
        }

        if ((flags & UMUTEX_ERROR_CHECK) != 0 &&
            (owner & ~UMUTEX_CONTESTED) == id) {
            error = EDEADLK;
            break;
        }

        if (try != 0) {
            error = EBUSY;
            break;
        }

        /*
         * If we caught a signal, we have retried and now
         * exit immediately.
         */
        if (error != 0)
            break;

        umtxq_lock(&uq->uq_key);
        umtxq_insert(uq);
        umtxq_unbusy(&uq->uq_key);
        error = umtxq_sleep(uq, "umtxpp", timeout == NULL ?
            NULL : &timo);
        umtxq_remove(uq);
        umtxq_unlock(&uq->uq_key);

        /* Restore inherited priority before the next attempt. */
        mtx_lock_spin(&umtx_lock);
        uq->uq_inherited_pri = old_inherited_pri;
        pri = PRI_MAX;
        TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
            uq2 = TAILQ_FIRST(&pi->pi_blocked);
            if (uq2 != NULL) {
                if (pri > UPRI(uq2->uq_thread))
                    pri = UPRI(uq2->uq_thread);
            }
        }
        if (pri > uq->uq_inherited_pri)
            pri = uq->uq_inherited_pri;
        thread_lock(td);
        sched_lend_user_prio(td, pri);
        thread_unlock(td);
        mtx_unlock_spin(&umtx_lock);
    }

    if (error != 0) {
        /* Failed: drop any boost taken for this attempt. */
        mtx_lock_spin(&umtx_lock);
        uq->uq_inherited_pri = old_inherited_pri;
        pri = PRI_MAX;
        TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
            uq2 = TAILQ_FIRST(&pi->pi_blocked);
            if (uq2 != NULL) {
                if (pri > UPRI(uq2->uq_thread))
                    pri = UPRI(uq2->uq_thread);
            }
        }
        if (pri > uq->uq_inherited_pri)
            pri = uq->uq_inherited_pri;
        thread_lock(td);
        sched_lend_user_prio(td, pri);
        thread_unlock(td);
        mtx_unlock_spin(&umtx_lock);
    }

out:
    umtxq_lock(&uq->uq_key);
    umtxq_unbusy(&uq->uq_key);
    umtxq_unlock(&uq->uq_key);
    umtx_key_release(&uq->uq_key);
    return (error);
}

/*
 * Unlock a PP mutex.
 */
static int
do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags)
{
    struct umtx_key key;
    struct umtx_q *uq, *uq2;
    struct umtx_pi *pi;
    uint32_t owner, id;
    uint32_t rceiling;
    int error, pri, new_inherited_pri, su;

    id = td->td_tid;
    uq = td->td_umtxq;
    su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);

    /*
     * Make sure we own this mtx.
     */
    owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
    if (owner == -1)
        return (EFAULT);

    if ((owner & ~UMUTEX_CONTESTED) != id)
        return (EPERM);

    error = copyin(&m->m_ceilings[1], &rceiling, sizeof(uint32_t));
    if (error != 0)
        return (error);

    if (rceiling == -1)
        new_inherited_pri = PRI_MAX;
    else {
        rceiling = RTP_PRIO_MAX - rceiling;
        if (rceiling > RTP_PRIO_MAX)
            return (EINVAL);
        new_inherited_pri = PRI_MIN_REALTIME + rceiling;
    }

    if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags),
        &key)) != 0)
        return (error);
    umtxq_lock(&key);
    umtxq_busy(&key);
    umtxq_unlock(&key);
    /*
     * For priority protected mutex, always set unlocked state
     * to UMUTEX_CONTESTED, so that userland always enters kernel
     * to lock the mutex, it is necessary because thread priority
     * has to be adjusted for such mutex.
     */
    error = suword32(__DEVOLATILE(uint32_t *, &m->m_owner),
        UMUTEX_CONTESTED);

    umtxq_lock(&key);
    if (error == 0)
        umtxq_signal(&key, 1);
    umtxq_unbusy(&key);
    umtxq_unlock(&key);

    if (error == -1)
        error = EFAULT;
    else {
        mtx_lock_spin(&umtx_lock);
        if (su != 0)
            uq->uq_inherited_pri = new_inherited_pri;
        /* Recompute our lent priority from remaining PI mutexes. */
        pri = PRI_MAX;
        TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
            uq2 = TAILQ_FIRST(&pi->pi_blocked);
            if (uq2 != NULL) {
                if (pri > UPRI(uq2->uq_thread))
                    pri = UPRI(uq2->uq_thread);
            }
        }
        if (pri > uq->uq_inherited_pri)
            pri = uq->uq_inherited_pri;
        thread_lock(td);
        sched_lend_user_prio(td, pri);
        thread_unlock(td);
        mtx_unlock_spin(&umtx_lock);
    }
    umtx_key_release(&key);
    return (error);
}

/*
 * Set the priority ceiling of a PP mutex, returning the previous
 * ceiling through old_ceiling when requested.  The mutex is briefly
 * acquired (or we sleep until it can be) so the update is atomic
 * with respect to other lockers.
 */
static int
do_set_ceiling(struct thread *td, struct umutex *m, uint32_t ceiling,
    uint32_t *old_ceiling)
{
    struct umtx_q *uq;
    uint32_t save_ceiling;
    uint32_t owner, id;
    uint32_t flags;
    int error;

    flags = fuword32(&m->m_flags);
    if ((flags & UMUTEX_PRIO_PROTECT) == 0)
        return (EINVAL);
    if (ceiling > RTP_PRIO_MAX)
        return (EINVAL);
    id = td->td_tid;
    uq = td->td_umtxq;
    if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags),
        &uq->uq_key)) != 0)
        return (error);
    for (;;) {
        umtxq_lock(&uq->uq_key);
        umtxq_busy(&uq->uq_key);
        umtxq_unlock(&uq->uq_key);

        save_ceiling = fuword32(&m->m_ceilings[0]);

        owner = casuword32(&m->m_owner,
            UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);

        if (owner == UMUTEX_CONTESTED) {
            /* Acquired it unowned: update and release. */
            suword32(&m->m_ceilings[0], ceiling);
            suword32(__DEVOLATILE(uint32_t *, &m->m_owner),
                UMUTEX_CONTESTED);
            error = 0;
            break;
        }

        /* The address was invalid. */
        if (owner == -1) {
            error = EFAULT;
            break;
        }

        if ((owner & ~UMUTEX_CONTESTED) == id) {
            /* We already own it: update in place. */
            suword32(&m->m_ceilings[0], ceiling);
            error = 0;
            break;
        }

        /*
         * If we caught a signal, we have retried and now
         * exit immediately.
         */
        if (error != 0)
            break;

        /*
         * We set the contested bit, sleep.  Otherwise the lock changed
         * and we need to retry or we lost a race to the thread
         * unlocking the umtx.
         */
        umtxq_lock(&uq->uq_key);
        umtxq_insert(uq);
        umtxq_unbusy(&uq->uq_key);
        error = umtxq_sleep(uq, "umtxpp", NULL);
        umtxq_remove(uq);
        umtxq_unlock(&uq->uq_key);
    }
    umtxq_lock(&uq->uq_key);
    if (error == 0)
        umtxq_signal(&uq->uq_key, INT_MAX);
    umtxq_unbusy(&uq->uq_key);
    umtxq_unlock(&uq->uq_key);
    umtx_key_release(&uq->uq_key);
    if (error == 0 && old_ceiling != NULL)
        suword32(old_ceiling, save_ceiling);
    return (error);
}

/*
 * Lock a userland POSIX mutex.  Dispatches on the protocol bits in
 * m_flags to the normal, priority-inheritance, or priority-protect
 * implementation, then normalizes EINTR/ERESTART for the caller.
 */
static int
do_lock_umutex(struct thread *td, struct umutex *m,
    struct _umtx_time *timeout, int mode)
{
    uint32_t flags;
    int error;

    flags = fuword32(&m->m_flags);
    if (flags == -1)
        return (EFAULT);

    switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
    case 0:
        error = do_lock_normal(td, m, flags, timeout, mode);
        break;
    case UMUTEX_PRIO_INHERIT:
        error = do_lock_pi(td, m, flags, timeout, mode);
        break;
    case UMUTEX_PRIO_PROTECT:
        error = do_lock_pp(td, m, flags, timeout, mode);
        break;
    default:
        return (EINVAL);
    }
    if (timeout == NULL) {
        if (error == EINTR && mode != _UMUTEX_WAIT)
            error = ERESTART;
2527162030Sdavidxu } else { 2528162030Sdavidxu /* Timed-locking is not restarted. */ 2529162030Sdavidxu if (error == ERESTART) 2530162030Sdavidxu error = EINTR; 2531161742Sdavidxu } 2532162030Sdavidxu return (error); 2533161678Sdavidxu} 2534161678Sdavidxu 2535161678Sdavidxu/* 2536161678Sdavidxu * Unlock a userland POSIX mutex. 2537161678Sdavidxu */ 2538161678Sdavidxustatic int 2539161678Sdavidxudo_unlock_umutex(struct thread *td, struct umutex *m) 2540161678Sdavidxu{ 2541161678Sdavidxu uint32_t flags; 2542161678Sdavidxu 2543161678Sdavidxu flags = fuword32(&m->m_flags); 2544161678Sdavidxu if (flags == -1) 2545161678Sdavidxu return (EFAULT); 2546161678Sdavidxu 2547161855Sdavidxu switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) { 2548161855Sdavidxu case 0: 2549161855Sdavidxu return (do_unlock_normal(td, m, flags)); 2550161855Sdavidxu case UMUTEX_PRIO_INHERIT: 2551161855Sdavidxu return (do_unlock_pi(td, m, flags)); 2552161855Sdavidxu case UMUTEX_PRIO_PROTECT: 2553161855Sdavidxu return (do_unlock_pp(td, m, flags)); 2554161855Sdavidxu } 2555161678Sdavidxu 2556161855Sdavidxu return (EINVAL); 2557161678Sdavidxu} 2558161678Sdavidxu 2559164839Sdavidxustatic int 2560164839Sdavidxudo_cv_wait(struct thread *td, struct ucond *cv, struct umutex *m, 2561164876Sdavidxu struct timespec *timeout, u_long wflags) 2562164839Sdavidxu{ 2563233690Sdavidxu struct abs_timeout timo; 2564164839Sdavidxu struct umtx_q *uq; 2565164839Sdavidxu uint32_t flags; 2566216641Sdavidxu uint32_t clockid; 2567164839Sdavidxu int error; 2568164839Sdavidxu 2569164839Sdavidxu uq = td->td_umtxq; 2570164839Sdavidxu flags = fuword32(&cv->c_flags); 2571164839Sdavidxu error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &uq->uq_key); 2572164839Sdavidxu if (error != 0) 2573164839Sdavidxu return (error); 2574216641Sdavidxu 2575216641Sdavidxu if ((wflags & CVWAIT_CLOCKID) != 0) { 2576216641Sdavidxu clockid = fuword32(&cv->c_clockid); 2577216641Sdavidxu if (clockid < CLOCK_REALTIME || 2578216641Sdavidxu 
clockid >= CLOCK_THREAD_CPUTIME_ID) { 2579216641Sdavidxu /* hmm, only HW clock id will work. */ 2580216641Sdavidxu return (EINVAL); 2581216641Sdavidxu } 2582216641Sdavidxu } else { 2583216641Sdavidxu clockid = CLOCK_REALTIME; 2584216641Sdavidxu } 2585216641Sdavidxu 2586164839Sdavidxu umtxq_lock(&uq->uq_key); 2587164839Sdavidxu umtxq_busy(&uq->uq_key); 2588164839Sdavidxu umtxq_insert(uq); 2589164839Sdavidxu umtxq_unlock(&uq->uq_key); 2590164839Sdavidxu 2591164839Sdavidxu /* 2592216641Sdavidxu * Set c_has_waiters to 1 before releasing user mutex, also 2593216641Sdavidxu * don't modify cache line when unnecessary. 2594164839Sdavidxu */ 2595216641Sdavidxu if (fuword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters)) == 0) 2596216641Sdavidxu suword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters), 1); 2597164839Sdavidxu 2598164839Sdavidxu umtxq_lock(&uq->uq_key); 2599164839Sdavidxu umtxq_unbusy(&uq->uq_key); 2600164839Sdavidxu umtxq_unlock(&uq->uq_key); 2601164839Sdavidxu 2602164839Sdavidxu error = do_unlock_umutex(td, m); 2603233690Sdavidxu 2604233700Sdavidxu if (timeout != NULL) 2605233690Sdavidxu abs_timeout_init(&timo, clockid, ((wflags & CVWAIT_ABSTIME) != 0), 2606233690Sdavidxu timeout); 2607164839Sdavidxu 2608164839Sdavidxu umtxq_lock(&uq->uq_key); 2609164839Sdavidxu if (error == 0) { 2610233690Sdavidxu error = umtxq_sleep(uq, "ucond", timeout == NULL ? 2611233690Sdavidxu NULL : &timo); 2612164839Sdavidxu } 2613164839Sdavidxu 2614211794Sdavidxu if ((uq->uq_flags & UQF_UMTXQ) == 0) 2615211794Sdavidxu error = 0; 2616211794Sdavidxu else { 2617216641Sdavidxu /* 2618216641Sdavidxu * This must be timeout,interrupted by signal or 2619216641Sdavidxu * surprious wakeup, clear c_has_waiter flag when 2620216641Sdavidxu * necessary. 
2621216641Sdavidxu */ 2622216641Sdavidxu umtxq_busy(&uq->uq_key); 2623216641Sdavidxu if ((uq->uq_flags & UQF_UMTXQ) != 0) { 2624216641Sdavidxu int oldlen = uq->uq_cur_queue->length; 2625216641Sdavidxu umtxq_remove(uq); 2626216641Sdavidxu if (oldlen == 1) { 2627216641Sdavidxu umtxq_unlock(&uq->uq_key); 2628216641Sdavidxu suword32( 2629216641Sdavidxu __DEVOLATILE(uint32_t *, 2630216641Sdavidxu &cv->c_has_waiters), 0); 2631216641Sdavidxu umtxq_lock(&uq->uq_key); 2632216641Sdavidxu } 2633216641Sdavidxu } 2634216641Sdavidxu umtxq_unbusy(&uq->uq_key); 2635164839Sdavidxu if (error == ERESTART) 2636164839Sdavidxu error = EINTR; 2637164839Sdavidxu } 2638211794Sdavidxu 2639164839Sdavidxu umtxq_unlock(&uq->uq_key); 2640164839Sdavidxu umtx_key_release(&uq->uq_key); 2641164839Sdavidxu return (error); 2642164839Sdavidxu} 2643164839Sdavidxu 2644164839Sdavidxu/* 2645164839Sdavidxu * Signal a userland condition variable. 2646164839Sdavidxu */ 2647164839Sdavidxustatic int 2648164839Sdavidxudo_cv_signal(struct thread *td, struct ucond *cv) 2649164839Sdavidxu{ 2650164839Sdavidxu struct umtx_key key; 2651164839Sdavidxu int error, cnt, nwake; 2652164839Sdavidxu uint32_t flags; 2653164839Sdavidxu 2654164839Sdavidxu flags = fuword32(&cv->c_flags); 2655164839Sdavidxu if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0) 2656164839Sdavidxu return (error); 2657164839Sdavidxu umtxq_lock(&key); 2658164839Sdavidxu umtxq_busy(&key); 2659164839Sdavidxu cnt = umtxq_count(&key); 2660164839Sdavidxu nwake = umtxq_signal(&key, 1); 2661164839Sdavidxu if (cnt <= nwake) { 2662164839Sdavidxu umtxq_unlock(&key); 2663164839Sdavidxu error = suword32( 2664164839Sdavidxu __DEVOLATILE(uint32_t *, &cv->c_has_waiters), 0); 2665164839Sdavidxu umtxq_lock(&key); 2666164839Sdavidxu } 2667164839Sdavidxu umtxq_unbusy(&key); 2668164839Sdavidxu umtxq_unlock(&key); 2669164839Sdavidxu umtx_key_release(&key); 2670164839Sdavidxu return (error); 2671164839Sdavidxu} 2672164839Sdavidxu 2673164839Sdavidxustatic 
int
do_cv_broadcast(struct thread *td, struct ucond *cv)
{
	struct umtx_key key;
	int error;
	uint32_t flags;

	flags = fuword32(&cv->c_flags);
	if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
		return (error);

	/* Wake every waiter queued on this condition variable. */
	umtxq_lock(&key);
	umtxq_busy(&key);
	umtxq_signal(&key, INT_MAX);
	umtxq_unlock(&key);

	/* All waiters are gone, so the userland hint can be cleared. */
	error = suword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters), 0);

	umtxq_lock(&key);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);

	umtx_key_release(&key);
	return (error);
}

/*
 * Acquire the read side of a userland rwlock, optionally with a
 * timeout.  Unless reader preference is requested (fflag or rw_flags),
 * pending writers (URWLOCK_WRITE_WAITERS) also block new readers.
 */
static int
do_rw_rdlock(struct thread *td, struct urwlock *rwlock, long fflag, struct _umtx_time *timeout)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	uint32_t flags, wrflags;
	int32_t state, oldstate;
	int32_t blocked_readers;
	int error;

	uq = td->td_umtxq;
	flags = fuword32(&rwlock->rw_flags);
	error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	wrflags = URWLOCK_WRITE_OWNER;
	if (!(fflag & URWLOCK_PREFER_READER) && !(flags & URWLOCK_PREFER_READER))
		wrflags |= URWLOCK_WRITE_WAITERS;

	for (;;) {
		state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
		/* try to lock it */
		while (!(state & wrflags)) {
			if (__predict_false(URWLOCK_READER_COUNT(state) == URWLOCK_MAX_READERS)) {
				umtx_key_release(&uq->uq_key);
				return (EAGAIN);
			}
			/* CAS in one more reader; -1 means a fault. */
			oldstate = casuword32(&rwlock->rw_state, state, state + 1);
			if (oldstate == -1) {
				umtx_key_release(&uq->uq_key);
				return (EFAULT);
			}
			if (oldstate == state) {
				umtx_key_release(&uq->uq_key);
				return (0);
			}
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
			state = oldstate;
		}

		if (error)
			break;

		/* grab monitor lock */
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		/*
		 * re-read the state, in case it changed between the try-lock above
		 * and the check below
		 */
		state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));

		/* set read contention bit */
		while ((state & wrflags) && !(state & URWLOCK_READ_WAITERS)) {
			oldstate = casuword32(&rwlock->rw_state, state, state | URWLOCK_READ_WAITERS);
			if (oldstate == -1) {
				error = EFAULT;
				break;
			}
			if (oldstate == state)
				goto sleep;
			state = oldstate;
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
		}
		if (error != 0) {
			umtxq_lock(&uq->uq_key);
			umtxq_unbusy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);
			break;
		}

		/* state is changed while setting flags, restart */
		if (!(state & wrflags)) {
			umtxq_lock(&uq->uq_key);
			umtxq_unbusy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
			continue;
		}

sleep:
		/* contention bit is set, before sleeping, increase read waiter count */
		blocked_readers = fuword32(&rwlock->rw_blocked_readers);
		suword32(&rwlock->rw_blocked_readers, blocked_readers+1);

		while (state & wrflags) {
			umtxq_lock(&uq->uq_key);
			umtxq_insert(uq);
			umtxq_unbusy(&uq->uq_key);

			error = umtxq_sleep(uq, "urdlck", timeout == NULL ?
			    NULL : &timo);

			umtxq_busy(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unlock(&uq->uq_key);
			if (error)
				break;
			state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
		}

		/* decrease read waiter count, and may clear read contention bit */
		blocked_readers = fuword32(&rwlock->rw_blocked_readers);
		suword32(&rwlock->rw_blocked_readers, blocked_readers-1);
		if (blocked_readers == 1) {
			/* We were the last blocked reader; drop the bit. */
			state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
			for (;;) {
				oldstate = casuword32(&rwlock->rw_state, state,
				    state & ~URWLOCK_READ_WAITERS);
				if (oldstate == -1) {
					error = EFAULT;
					break;
				}
				if (oldstate == state)
					break;
				state = oldstate;
				error = umtxq_check_susp(td);
				if (error != 0)
					break;
			}
		}

		umtxq_lock(&uq->uq_key);
		umtxq_unbusy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);
		if (error != 0)
			break;
	}
	umtx_key_release(&uq->uq_key);
	if (error == ERESTART)
		error = EINTR;
	return (error);
}

/*
 * Acquire the write side of a userland rwlock, optionally with a
 * timeout.  The lock is free for a writer only when there is no owner
 * and the reader count is zero.
 */
static int
do_rw_wrlock(struct thread *td, struct urwlock *rwlock, struct _umtx_time *timeout)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	uint32_t flags;
	int32_t state, oldstate;
	int32_t blocked_writers;
	int32_t blocked_readers;
	int error;

	uq = td->td_umtxq;
	flags = fuword32(&rwlock->rw_flags);
	error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	blocked_readers = 0;
	for (;;) {
		state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
		while (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
			oldstate = casuword32(&rwlock->rw_state, state, state | URWLOCK_WRITE_OWNER);
			if (oldstate == -1) {
				umtx_key_release(&uq->uq_key);
				return (EFAULT);
			}
			if (oldstate == state) {
				umtx_key_release(&uq->uq_key);
				return (0);
			}
			state = oldstate;
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
		}

		if (error) {
			/*
			 * On the way out, hand the lock to any blocked
			 * readers if no writer holds or wants it.
			 */
			if (!(state & (URWLOCK_WRITE_OWNER|URWLOCK_WRITE_WAITERS)) &&
			    blocked_readers != 0) {
				umtxq_lock(&uq->uq_key);
				umtxq_busy(&uq->uq_key);
				umtxq_signal_queue(&uq->uq_key, INT_MAX, UMTX_SHARED_QUEUE);
				umtxq_unbusy(&uq->uq_key);
				umtxq_unlock(&uq->uq_key);
			}

			break;
		}

		/* grab monitor lock */
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		/*
		 * re-read the state, in case it changed between the try-lock above
		 * and the check below
		 */
		state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));

		/* set write contention bit */
		while (((state & URWLOCK_WRITE_OWNER) || URWLOCK_READER_COUNT(state) != 0) &&
		       (state & URWLOCK_WRITE_WAITERS) == 0) {
			oldstate = casuword32(&rwlock->rw_state, state, state | URWLOCK_WRITE_WAITERS);
			if (oldstate == -1) {
				error = EFAULT;
				break;
			}
			if (oldstate == state)
				goto sleep;
			state = oldstate;
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
		}
		if (error != 0) {
			umtxq_lock(&uq->uq_key);
			umtxq_unbusy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);
			break;
		}

		/* lock became free while setting the bit, restart */
		if (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
			umtxq_lock(&uq->uq_key);
			umtxq_unbusy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
			continue;
		}
sleep:
		blocked_writers = fuword32(&rwlock->rw_blocked_writers);
		suword32(&rwlock->rw_blocked_writers, blocked_writers+1);

		while ((state & URWLOCK_WRITE_OWNER) || URWLOCK_READER_COUNT(state) != 0) {
			umtxq_lock(&uq->uq_key);
			umtxq_insert_queue(uq, UMTX_EXCLUSIVE_QUEUE);
			umtxq_unbusy(&uq->uq_key);

			error = umtxq_sleep(uq, "uwrlck", timeout == NULL ?
			    NULL : &timo);

			umtxq_busy(&uq->uq_key);
			umtxq_remove_queue(uq, UMTX_EXCLUSIVE_QUEUE);
			umtxq_unlock(&uq->uq_key);
			if (error)
				break;
			state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
		}

		blocked_writers = fuword32(&rwlock->rw_blocked_writers);
		suword32(&rwlock->rw_blocked_writers, blocked_writers-1);
		if (blocked_writers == 1) {
			/* Last blocked writer: clear the waiters bit. */
			state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
			for (;;) {
				oldstate = casuword32(&rwlock->rw_state, state,
				    state & ~URWLOCK_WRITE_WAITERS);
				if (oldstate == -1) {
					error = EFAULT;
					break;
				}
				if (oldstate == state)
					break;
				state = oldstate;
				error = umtxq_check_susp(td);
				/*
				 * We are leaving the URWLOCK_WRITE_WAITERS
				 * behind, but this should not harm the
				 * correctness.
				 */
				if (error != 0)
					break;
			}
			blocked_readers = fuword32(&rwlock->rw_blocked_readers);
		} else
			blocked_readers = 0;

		umtxq_lock(&uq->uq_key);
		umtxq_unbusy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);
	}

	umtx_key_release(&uq->uq_key);
	if (error == ERESTART)
		error = EINTR;
	return (error);
}

/*
 * Release a userland rwlock held by the caller, either as a writer
 * (clear URWLOCK_WRITE_OWNER) or as a reader (decrement the count).
 * Returns EPERM if the lock is not held in a releasable state, then
 * wakes the appropriate waiter queue honouring reader preference.
 */
static int
do_rw_unlock(struct thread *td, struct urwlock *rwlock)
{
	struct umtx_q *uq;
	uint32_t flags;
	int32_t state, oldstate;
	int error, q, count;

	uq = td->td_umtxq;
	flags = fuword32(&rwlock->rw_flags);
	error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
	if (state & URWLOCK_WRITE_OWNER) {
		for (;;) {
			oldstate = casuword32(&rwlock->rw_state, state,
				state & ~URWLOCK_WRITE_OWNER);
			if (oldstate == -1) {
				error = EFAULT;
				goto out;
			}
			if (oldstate != state) {
				state = oldstate;
				if (!(oldstate & URWLOCK_WRITE_OWNER)) {
					error = EPERM;
					goto out;
				}
				error = umtxq_check_susp(td);
				if (error != 0)
					goto out;
			} else
				break;
		}
	} else if (URWLOCK_READER_COUNT(state) != 0) {
		for (;;) {
			oldstate = casuword32(&rwlock->rw_state, state,
				state - 1);
			if (oldstate == -1) {
				error = EFAULT;
				goto out;
			}
			if (oldstate != state) {
				state = oldstate;
				if (URWLOCK_READER_COUNT(oldstate) == 0) {
					error = EPERM;
					goto out;
				}
				error = umtxq_check_susp(td);
				if (error != 0)
					goto out;
			} else
				break;
		}
	} else {
		error = EPERM;
		goto out;
	}

	count = 0;

	/* Pick which queue to wake: writers first unless readers preferred. */
	if (!(flags & URWLOCK_PREFER_READER)) {
		if (state & URWLOCK_WRITE_WAITERS) {
			count = 1;
			q = UMTX_EXCLUSIVE_QUEUE;
		} else if (state & URWLOCK_READ_WAITERS) {
			count = INT_MAX;
			q = UMTX_SHARED_QUEUE;
		}
	} else {
		if (state & URWLOCK_READ_WAITERS) {
			count = INT_MAX;
			q = UMTX_SHARED_QUEUE;
		} else if (state & URWLOCK_WRITE_WAITERS) {
			count = 1;
			q = UMTX_EXCLUSIVE_QUEUE;
		}
	}

	if (count) {
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_signal_queue(&uq->uq_key, count, q);
		umtxq_unbusy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);
	}
out:
	umtx_key_release(&uq->uq_key);
	return (error);
}

/*
 * Wait on a userland semaphore until its count becomes non-zero,
 * optionally bounded by *timeout.
 */
static int
do_sem_wait(struct thread *td, struct _usem *sem, struct _umtx_time *timeout)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	uint32_t flags, count;
	int error;

	uq = td->td_umtxq;
	flags = fuword32(&sem->_flags);
	error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	umtxq_lock(&uq->uq_key);
	umtxq_busy(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unlock(&uq->uq_key);
	/* Publish the waiter hint, then re-check the count. */
	casuword32(__DEVOLATILE(uint32_t *, &sem->_has_waiters), 0, 1);
	count = fuword32(__DEVOLATILE(uint32_t *, &sem->_count));
	if (count != 0) {
		umtxq_lock(&uq->uq_key);
		umtxq_unbusy(&uq->uq_key);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
		umtx_key_release(&uq->uq_key);
		return (0);
	}
	umtxq_lock(&uq->uq_key);
	umtxq_unbusy(&uq->uq_key);

	error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo);

	if ((uq->uq_flags & UQF_UMTXQ) == 0)
		error = 0;
	else {
		umtxq_remove(uq);
		/* A relative timeout cannot be restarted. */
		if (error == ERESTART && timeout != NULL &&
		    (timeout->_flags & UMTX_ABSTIME) == 0)
			error = EINTR;
	}
	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	return (error);
}

/*
 * Wake up one thread sleeping on a userland semaphore.
 */
static int
do_sem_wake(struct thread *td, struct _usem *sem)
{
	struct umtx_key key;
	int error, cnt;
	uint32_t flags;

	flags = fuword32(&sem->_flags);
	if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0)
		return (error);
	umtxq_lock(&key);
	umtxq_busy(&key);
	cnt = umtxq_count(&key);
	if (cnt > 0) {
		umtxq_signal(&key, 1);
		/*
		 * Check if count is greater than 0, this means the memory is
		 * still being referenced by user code, so we can safely
		 * update _has_waiters flag.
		 */
		if (cnt == 1) {
			umtxq_unlock(&key);
			error = suword32(
			    __DEVOLATILE(uint32_t *, &sem->_has_waiters), 0);
			umtxq_lock(&key);
		}
	}
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}

/* Legacy _umtx_lock(2) entry point. */
int
sys__umtx_lock(struct thread *td, struct _umtx_lock_args *uap)
    /* struct umtx *umtx */
{
	return do_lock_umtx(td, uap->umtx, td->td_tid, 0);
}

/* Legacy _umtx_unlock(2) entry point. */
int
sys__umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
    /* struct umtx *umtx */
{
	return do_unlock_umtx(td, uap->umtx, td->td_tid);
}

/*
 * Copy a timespec in from userland and range-check it.
 * Returns EINVAL for negative seconds or out-of-range nanoseconds.
 */
inline int
umtx_copyin_timeout(const void *addr, struct timespec *tsp)
{
	int error;

	error = copyin(addr, tsp, sizeof(struct timespec));
	if (error == 0) {
		if (tsp->tv_sec < 0 ||
		    tsp->tv_nsec >= 1000000000 ||
		    tsp->tv_nsec < 0)
			error = EINVAL;
	}
	return (error);
}

/*
 * Copy a _umtx_time in from userland.  A plain timespec (old ABI,
 * detected by size) is accepted and defaulted to CLOCK_REALTIME with
 * no flags.
 */
static inline int
umtx_copyin_umtx_time(const void *addr, size_t size, struct _umtx_time *tp)
{
	int error;

	if (size <= sizeof(struct timespec)) {
		tp->_clockid = CLOCK_REALTIME;
		tp->_flags = 0;
		error = copyin(addr, &tp->_timeout, sizeof(struct timespec));
	} else
		error = copyin(addr, tp,
		    sizeof(struct _umtx_time));
	if (error != 0)
		return (error);
	if (tp->_timeout.tv_sec < 0 ||
	    tp->_timeout.tv_nsec >= 1000000000 || tp->_timeout.tv_nsec < 0)
		return (EINVAL);
	return (0);
}

/* _umtx_op(UMTX_OP_LOCK): lock a legacy umtx, optional timespec in uaddr2. */
static int
__umtx_op_lock_umtx(struct thread *td, struct _umtx_op_args *uap)
{
	struct timespec *ts, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		ts = NULL;
	else {
		error = umtx_copyin_timeout(uap->uaddr2, &timeout);
		if (error != 0)
			return (error);
		ts = &timeout;
	}
	return (do_lock_umtx(td, uap->obj, uap->val, ts));
}

/* _umtx_op(UMTX_OP_UNLOCK): unlock a legacy umtx. */
static int
__umtx_op_unlock_umtx(struct thread *td, struct _umtx_op_args *uap)
{
	return (do_unlock_umtx(td, uap->obj, uap->val));
}

/*
 * _umtx_op(UMTX_OP_WAIT): wait while *obj == val.
 * uaddr1 carries the size of the timeout structure at uaddr2.
 */
static int
__umtx_op_wait(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout, *tm_p;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return do_wait(td, uap->obj, uap->val, tm_p, 0, 0);
}

/* As above, but the word is compared as a 32-bit uint. */
static int
__umtx_op_wait_uint(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout, *tm_p;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return do_wait(td, uap->obj, uap->val, tm_p, 1, 0);
}

/* As above, for process-private words (no shared-object lookup). */
static int
__umtx_op_wait_uint_private(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return do_wait(td, uap->obj, uap->val, tm_p, 1, 1);
}

/* _umtx_op(UMTX_OP_WAKE): wake up to 'val' waiters on obj. */
static int
__umtx_op_wake(struct thread *td, struct _umtx_op_args *uap)
{
	return (kern_umtx_wake(td, uap->obj, uap->val, 0));
}

#define BATCH_SIZE	128
/*
 * _umtx_op(UMTX_OP_NWAKE_PRIVATE): obj points at an array of 'val'
 * userland addresses; wake all waiters on each, copied in batches.
 */
static int
__umtx_op_nwake_private(struct thread *td, struct _umtx_op_args *uap)
{
	int count = uap->val;
	void *uaddrs[BATCH_SIZE];
	char **upp = (char **)uap->obj;
	int tocopy;
	int error = 0;
	int i, pos = 0;

	while (count > 0) {
		tocopy = count;
		if (tocopy > BATCH_SIZE)
			tocopy = BATCH_SIZE;
		error = copyin(upp+pos, uaddrs, tocopy * sizeof(char *));
		if (error != 0)
			break;
		for (i = 0; i < tocopy; ++i)
			kern_umtx_wake(td, uaddrs[i], INT_MAX, 1);
		count -= tocopy;
		pos += tocopy;
	}
	return (error);
}

/* Private-word variant of UMTX_OP_WAKE. */
static int
__umtx_op_wake_private(struct thread *td, struct _umtx_op_args *uap)
{
	return (kern_umtx_wake(td, uap->obj, uap->val, 1));
}

/* _umtx_op(UMTX_OP_MUTEX_LOCK): lock a umutex, optional timeout. */
static int
__umtx_op_lock_umutex(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return do_lock_umutex(td, uap->obj, tm_p, 0);
}

/* _umtx_op(UMTX_OP_MUTEX_TRYLOCK): non-blocking umutex lock attempt. */
static int
__umtx_op_trylock_umutex(struct thread *td, struct _umtx_op_args *uap)
{
	return do_lock_umutex(td, uap->obj, NULL, _UMUTEX_TRY);
}

/* _umtx_op(UMTX_OP_MUTEX_WAIT): wait for the umutex without owning it. */
static int
__umtx_op_wait_umutex(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT);
}

/* _umtx_op(UMTX_OP_MUTEX_WAKE): wake a umutex waiter. */
static int
__umtx_op_wake_umutex(struct thread *td, struct _umtx_op_args *uap)
{
	return do_wake_umutex(td, uap->obj);
}

/* _umtx_op(UMTX_OP_MUTEX_UNLOCK): unlock a umutex. */
static int
__umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap)
{
	return do_unlock_umutex(td, uap->obj);
}

static int
__umtx_op_set_ceiling(struct
thread *td, struct _umtx_op_args *uap) 3396162536Sdavidxu{ 3397162536Sdavidxu return do_set_ceiling(td, uap->obj, uap->val, uap->uaddr1); 3398162536Sdavidxu} 3399162536Sdavidxu 3400164839Sdavidxustatic int 3401164839Sdavidxu__umtx_op_cv_wait(struct thread *td, struct _umtx_op_args *uap) 3402164839Sdavidxu{ 3403164839Sdavidxu struct timespec *ts, timeout; 3404164839Sdavidxu int error; 3405164839Sdavidxu 3406164839Sdavidxu /* Allow a null timespec (wait forever). */ 3407164839Sdavidxu if (uap->uaddr2 == NULL) 3408164839Sdavidxu ts = NULL; 3409164839Sdavidxu else { 3410228219Spho error = umtx_copyin_timeout(uap->uaddr2, &timeout); 3411164839Sdavidxu if (error != 0) 3412164839Sdavidxu return (error); 3413164839Sdavidxu ts = &timeout; 3414164839Sdavidxu } 3415164876Sdavidxu return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val)); 3416164839Sdavidxu} 3417164839Sdavidxu 3418164839Sdavidxustatic int 3419164839Sdavidxu__umtx_op_cv_signal(struct thread *td, struct _umtx_op_args *uap) 3420164839Sdavidxu{ 3421164839Sdavidxu return do_cv_signal(td, uap->obj); 3422164839Sdavidxu} 3423164839Sdavidxu 3424164839Sdavidxustatic int 3425164839Sdavidxu__umtx_op_cv_broadcast(struct thread *td, struct _umtx_op_args *uap) 3426164839Sdavidxu{ 3427164839Sdavidxu return do_cv_broadcast(td, uap->obj); 3428164839Sdavidxu} 3429164839Sdavidxu 3430177848Sdavidxustatic int 3431177848Sdavidxu__umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap) 3432177848Sdavidxu{ 3433232209Sdavidxu struct _umtx_time timeout; 3434177848Sdavidxu int error; 3435177848Sdavidxu 3436177848Sdavidxu /* Allow a null timespec (wait forever). 
*/ 3437177848Sdavidxu if (uap->uaddr2 == NULL) { 3438177848Sdavidxu error = do_rw_rdlock(td, uap->obj, uap->val, 0); 3439177848Sdavidxu } else { 3440232209Sdavidxu error = umtx_copyin_umtx_time(uap->uaddr2, 3441232209Sdavidxu (size_t)uap->uaddr1, &timeout); 3442177848Sdavidxu if (error != 0) 3443177848Sdavidxu return (error); 3444233690Sdavidxu error = do_rw_rdlock(td, uap->obj, uap->val, &timeout); 3445177848Sdavidxu } 3446177848Sdavidxu return (error); 3447177848Sdavidxu} 3448177848Sdavidxu 3449177848Sdavidxustatic int 3450177848Sdavidxu__umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap) 3451177848Sdavidxu{ 3452232209Sdavidxu struct _umtx_time timeout; 3453177848Sdavidxu int error; 3454177848Sdavidxu 3455177848Sdavidxu /* Allow a null timespec (wait forever). */ 3456177848Sdavidxu if (uap->uaddr2 == NULL) { 3457177848Sdavidxu error = do_rw_wrlock(td, uap->obj, 0); 3458177848Sdavidxu } else { 3459232209Sdavidxu error = umtx_copyin_umtx_time(uap->uaddr2, 3460232209Sdavidxu (size_t)uap->uaddr1, &timeout); 3461177848Sdavidxu if (error != 0) 3462177848Sdavidxu return (error); 3463177848Sdavidxu 3464233690Sdavidxu error = do_rw_wrlock(td, uap->obj, &timeout); 3465177848Sdavidxu } 3466177848Sdavidxu return (error); 3467177848Sdavidxu} 3468177848Sdavidxu 3469177848Sdavidxustatic int 3470177848Sdavidxu__umtx_op_rw_unlock(struct thread *td, struct _umtx_op_args *uap) 3471177848Sdavidxu{ 3472177880Sdavidxu return do_rw_unlock(td, uap->obj); 3473177848Sdavidxu} 3474177848Sdavidxu 3475201472Sdavidxustatic int 3476201472Sdavidxu__umtx_op_sem_wait(struct thread *td, struct _umtx_op_args *uap) 3477201472Sdavidxu{ 3478232144Sdavidxu struct _umtx_time *tm_p, timeout; 3479201472Sdavidxu int error; 3480201472Sdavidxu 3481201472Sdavidxu /* Allow a null timespec (wait forever). 
*/ 3482201472Sdavidxu if (uap->uaddr2 == NULL) 3483232144Sdavidxu tm_p = NULL; 3484201472Sdavidxu else { 3485232144Sdavidxu error = umtx_copyin_umtx_time( 3486232144Sdavidxu uap->uaddr2, (size_t)uap->uaddr1, &timeout); 3487201472Sdavidxu if (error != 0) 3488201472Sdavidxu return (error); 3489232144Sdavidxu tm_p = &timeout; 3490201472Sdavidxu } 3491232144Sdavidxu return (do_sem_wait(td, uap->obj, tm_p)); 3492201472Sdavidxu} 3493201472Sdavidxu 3494201472Sdavidxustatic int 3495201472Sdavidxu__umtx_op_sem_wake(struct thread *td, struct _umtx_op_args *uap) 3496201472Sdavidxu{ 3497201472Sdavidxu return do_sem_wake(td, uap->obj); 3498201472Sdavidxu} 3499201472Sdavidxu 3500233912Sdavidxustatic int 3501233912Sdavidxu__umtx_op_wake2_umutex(struct thread *td, struct _umtx_op_args *uap) 3502233912Sdavidxu{ 3503233912Sdavidxu return do_wake2_umutex(td, uap->obj, uap->val); 3504233912Sdavidxu} 3505233912Sdavidxu 3506162536Sdavidxutypedef int (*_umtx_op_func)(struct thread *td, struct _umtx_op_args *uap); 3507162536Sdavidxu 3508162536Sdavidxustatic _umtx_op_func op_table[] = { 3509162536Sdavidxu __umtx_op_lock_umtx, /* UMTX_OP_LOCK */ 3510162536Sdavidxu __umtx_op_unlock_umtx, /* UMTX_OP_UNLOCK */ 3511162536Sdavidxu __umtx_op_wait, /* UMTX_OP_WAIT */ 3512162536Sdavidxu __umtx_op_wake, /* UMTX_OP_WAKE */ 3513162536Sdavidxu __umtx_op_trylock_umutex, /* UMTX_OP_MUTEX_TRYLOCK */ 3514162536Sdavidxu __umtx_op_lock_umutex, /* UMTX_OP_MUTEX_LOCK */ 3515162536Sdavidxu __umtx_op_unlock_umutex, /* UMTX_OP_MUTEX_UNLOCK */ 3516164839Sdavidxu __umtx_op_set_ceiling, /* UMTX_OP_SET_CEILING */ 3517164839Sdavidxu __umtx_op_cv_wait, /* UMTX_OP_CV_WAIT*/ 3518164839Sdavidxu __umtx_op_cv_signal, /* UMTX_OP_CV_SIGNAL */ 3519173800Sdavidxu __umtx_op_cv_broadcast, /* UMTX_OP_CV_BROADCAST */ 3520177848Sdavidxu __umtx_op_wait_uint, /* UMTX_OP_WAIT_UINT */ 3521177848Sdavidxu __umtx_op_rw_rdlock, /* UMTX_OP_RW_RDLOCK */ 3522177848Sdavidxu __umtx_op_rw_wrlock, /* UMTX_OP_RW_WRLOCK */ 3523178646Sdavidxu 
__umtx_op_rw_unlock, /* UMTX_OP_RW_UNLOCK */ 3524178646Sdavidxu __umtx_op_wait_uint_private, /* UMTX_OP_WAIT_UINT_PRIVATE */ 3525179970Sdavidxu __umtx_op_wake_private, /* UMTX_OP_WAKE_PRIVATE */ 3526179970Sdavidxu __umtx_op_wait_umutex, /* UMTX_OP_UMUTEX_WAIT */ 3527201472Sdavidxu __umtx_op_wake_umutex, /* UMTX_OP_UMUTEX_WAKE */ 3528201472Sdavidxu __umtx_op_sem_wait, /* UMTX_OP_SEM_WAIT */ 3529216641Sdavidxu __umtx_op_sem_wake, /* UMTX_OP_SEM_WAKE */ 3530233912Sdavidxu __umtx_op_nwake_private, /* UMTX_OP_NWAKE_PRIVATE */ 3531233912Sdavidxu __umtx_op_wake2_umutex /* UMTX_OP_UMUTEX_WAKE2 */ 3532162536Sdavidxu}; 3533162536Sdavidxu 3534162536Sdavidxuint 3535225617Skmacysys__umtx_op(struct thread *td, struct _umtx_op_args *uap) 3536162536Sdavidxu{ 3537163678Sdavidxu if ((unsigned)uap->op < UMTX_OP_MAX) 3538162536Sdavidxu return (*op_table[uap->op])(td, uap); 3539162536Sdavidxu return (EINVAL); 3540162536Sdavidxu} 3541162536Sdavidxu 3542205014Snwhitehorn#ifdef COMPAT_FREEBSD32 3543163046Sdavidxuint 3544163046Sdavidxufreebsd32_umtx_lock(struct thread *td, struct freebsd32_umtx_lock_args *uap) 3545163046Sdavidxu /* struct umtx *umtx */ 3546163046Sdavidxu{ 3547163046Sdavidxu return (do_lock_umtx32(td, (uint32_t *)uap->umtx, td->td_tid, NULL)); 3548163046Sdavidxu} 3549163046Sdavidxu 3550163046Sdavidxuint 3551163046Sdavidxufreebsd32_umtx_unlock(struct thread *td, struct freebsd32_umtx_unlock_args *uap) 3552163046Sdavidxu /* struct umtx *umtx */ 3553163046Sdavidxu{ 3554163046Sdavidxu return (do_unlock_umtx32(td, (uint32_t *)uap->umtx, td->td_tid)); 3555163046Sdavidxu} 3556163046Sdavidxu 3557162536Sdavidxustruct timespec32 { 3558242202Sdavide int32_t tv_sec; 3559242202Sdavide int32_t tv_nsec; 3560162536Sdavidxu}; 3561162536Sdavidxu 3562232144Sdavidxustruct umtx_time32 { 3563232144Sdavidxu struct timespec32 timeout; 3564232144Sdavidxu uint32_t flags; 3565232144Sdavidxu uint32_t clockid; 3566232144Sdavidxu}; 3567232144Sdavidxu 3568162536Sdavidxustatic inline int 
3569228218Sphoumtx_copyin_timeout32(void *addr, struct timespec *tsp) 3570162536Sdavidxu{ 3571162536Sdavidxu struct timespec32 ts32; 3572162536Sdavidxu int error; 3573162536Sdavidxu 3574162536Sdavidxu error = copyin(addr, &ts32, sizeof(struct timespec32)); 3575162536Sdavidxu if (error == 0) { 3576228218Spho if (ts32.tv_sec < 0 || 3577228218Spho ts32.tv_nsec >= 1000000000 || 3578228218Spho ts32.tv_nsec < 0) 3579228218Spho error = EINVAL; 3580228218Spho else { 3581228218Spho tsp->tv_sec = ts32.tv_sec; 3582228218Spho tsp->tv_nsec = ts32.tv_nsec; 3583228218Spho } 3584162536Sdavidxu } 3585140421Sdavidxu return (error); 3586139013Sdavidxu} 3587161678Sdavidxu 3588232144Sdavidxustatic inline int 3589232144Sdavidxuumtx_copyin_umtx_time32(const void *addr, size_t size, struct _umtx_time *tp) 3590232144Sdavidxu{ 3591232144Sdavidxu struct umtx_time32 t32; 3592232144Sdavidxu int error; 3593232144Sdavidxu 3594232144Sdavidxu t32.clockid = CLOCK_REALTIME; 3595232144Sdavidxu t32.flags = 0; 3596232144Sdavidxu if (size <= sizeof(struct timespec32)) 3597232144Sdavidxu error = copyin(addr, &t32.timeout, sizeof(struct timespec32)); 3598232144Sdavidxu else 3599232144Sdavidxu error = copyin(addr, &t32, sizeof(struct umtx_time32)); 3600232144Sdavidxu if (error != 0) 3601232144Sdavidxu return (error); 3602232144Sdavidxu if (t32.timeout.tv_sec < 0 || 3603232144Sdavidxu t32.timeout.tv_nsec >= 1000000000 || t32.timeout.tv_nsec < 0) 3604232144Sdavidxu return (EINVAL); 3605232144Sdavidxu tp->_timeout.tv_sec = t32.timeout.tv_sec; 3606232144Sdavidxu tp->_timeout.tv_nsec = t32.timeout.tv_nsec; 3607232144Sdavidxu tp->_flags = t32.flags; 3608232144Sdavidxu tp->_clockid = t32.clockid; 3609232144Sdavidxu return (0); 3610232144Sdavidxu} 3611232144Sdavidxu 3612162536Sdavidxustatic int 3613162536Sdavidxu__umtx_op_lock_umtx_compat32(struct thread *td, struct _umtx_op_args *uap) 3614162536Sdavidxu{ 3615162536Sdavidxu struct timespec *ts, timeout; 3616162536Sdavidxu int error; 3617162536Sdavidxu 
3618162536Sdavidxu /* Allow a null timespec (wait forever). */ 3619162536Sdavidxu if (uap->uaddr2 == NULL) 3620162536Sdavidxu ts = NULL; 3621162536Sdavidxu else { 3622228218Spho error = umtx_copyin_timeout32(uap->uaddr2, &timeout); 3623162536Sdavidxu if (error != 0) 3624162536Sdavidxu return (error); 3625162536Sdavidxu ts = &timeout; 3626162536Sdavidxu } 3627162536Sdavidxu return (do_lock_umtx32(td, uap->obj, uap->val, ts)); 3628162536Sdavidxu} 3629162536Sdavidxu 3630162536Sdavidxustatic int 3631162536Sdavidxu__umtx_op_unlock_umtx_compat32(struct thread *td, struct _umtx_op_args *uap) 3632162536Sdavidxu{ 3633162536Sdavidxu return (do_unlock_umtx32(td, uap->obj, (uint32_t)uap->val)); 3634162536Sdavidxu} 3635162536Sdavidxu 3636162536Sdavidxustatic int 3637162536Sdavidxu__umtx_op_wait_compat32(struct thread *td, struct _umtx_op_args *uap) 3638162536Sdavidxu{ 3639232144Sdavidxu struct _umtx_time *tm_p, timeout; 3640162536Sdavidxu int error; 3641162536Sdavidxu 3642162536Sdavidxu if (uap->uaddr2 == NULL) 3643232144Sdavidxu tm_p = NULL; 3644162536Sdavidxu else { 3645232144Sdavidxu error = umtx_copyin_umtx_time32(uap->uaddr2, 3646232144Sdavidxu (size_t)uap->uaddr1, &timeout); 3647162536Sdavidxu if (error != 0) 3648162536Sdavidxu return (error); 3649232144Sdavidxu tm_p = &timeout; 3650162536Sdavidxu } 3651232144Sdavidxu return do_wait(td, uap->obj, uap->val, tm_p, 1, 0); 3652162536Sdavidxu} 3653162536Sdavidxu 3654162536Sdavidxustatic int 3655162536Sdavidxu__umtx_op_lock_umutex_compat32(struct thread *td, struct _umtx_op_args *uap) 3656162536Sdavidxu{ 3657232144Sdavidxu struct _umtx_time *tm_p, timeout; 3658162536Sdavidxu int error; 3659162536Sdavidxu 3660162536Sdavidxu /* Allow a null timespec (wait forever). 
*/ 3661162536Sdavidxu if (uap->uaddr2 == NULL) 3662232144Sdavidxu tm_p = NULL; 3663162536Sdavidxu else { 3664232144Sdavidxu error = umtx_copyin_umtx_time(uap->uaddr2, 3665232144Sdavidxu (size_t)uap->uaddr1, &timeout); 3666162536Sdavidxu if (error != 0) 3667162536Sdavidxu return (error); 3668232144Sdavidxu tm_p = &timeout; 3669162536Sdavidxu } 3670232144Sdavidxu return do_lock_umutex(td, uap->obj, tm_p, 0); 3671162536Sdavidxu} 3672162536Sdavidxu 3673164839Sdavidxustatic int 3674179970Sdavidxu__umtx_op_wait_umutex_compat32(struct thread *td, struct _umtx_op_args *uap) 3675179970Sdavidxu{ 3676232144Sdavidxu struct _umtx_time *tm_p, timeout; 3677179970Sdavidxu int error; 3678179970Sdavidxu 3679179970Sdavidxu /* Allow a null timespec (wait forever). */ 3680179970Sdavidxu if (uap->uaddr2 == NULL) 3681232144Sdavidxu tm_p = NULL; 3682179970Sdavidxu else { 3683232144Sdavidxu error = umtx_copyin_umtx_time32(uap->uaddr2, 3684232144Sdavidxu (size_t)uap->uaddr1, &timeout); 3685179970Sdavidxu if (error != 0) 3686179970Sdavidxu return (error); 3687232144Sdavidxu tm_p = &timeout; 3688179970Sdavidxu } 3689232144Sdavidxu return do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT); 3690179970Sdavidxu} 3691179970Sdavidxu 3692179970Sdavidxustatic int 3693164839Sdavidxu__umtx_op_cv_wait_compat32(struct thread *td, struct _umtx_op_args *uap) 3694164839Sdavidxu{ 3695164839Sdavidxu struct timespec *ts, timeout; 3696164839Sdavidxu int error; 3697164839Sdavidxu 3698164839Sdavidxu /* Allow a null timespec (wait forever). 
*/ 3699164839Sdavidxu if (uap->uaddr2 == NULL) 3700164839Sdavidxu ts = NULL; 3701164839Sdavidxu else { 3702228218Spho error = umtx_copyin_timeout32(uap->uaddr2, &timeout); 3703164839Sdavidxu if (error != 0) 3704164839Sdavidxu return (error); 3705164839Sdavidxu ts = &timeout; 3706164839Sdavidxu } 3707164876Sdavidxu return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val)); 3708164839Sdavidxu} 3709164839Sdavidxu 3710177848Sdavidxustatic int 3711177848Sdavidxu__umtx_op_rw_rdlock_compat32(struct thread *td, struct _umtx_op_args *uap) 3712177848Sdavidxu{ 3713232209Sdavidxu struct _umtx_time timeout; 3714177848Sdavidxu int error; 3715177848Sdavidxu 3716177848Sdavidxu /* Allow a null timespec (wait forever). */ 3717177848Sdavidxu if (uap->uaddr2 == NULL) { 3718177848Sdavidxu error = do_rw_rdlock(td, uap->obj, uap->val, 0); 3719177848Sdavidxu } else { 3720232209Sdavidxu error = umtx_copyin_umtx_time32(uap->uaddr2, 3721232209Sdavidxu (size_t)uap->uaddr1, &timeout); 3722177848Sdavidxu if (error != 0) 3723177848Sdavidxu return (error); 3724233693Sdavidxu error = do_rw_rdlock(td, uap->obj, uap->val, &timeout); 3725177848Sdavidxu } 3726177848Sdavidxu return (error); 3727177848Sdavidxu} 3728177848Sdavidxu 3729177848Sdavidxustatic int 3730177848Sdavidxu__umtx_op_rw_wrlock_compat32(struct thread *td, struct _umtx_op_args *uap) 3731177848Sdavidxu{ 3732232209Sdavidxu struct _umtx_time timeout; 3733177848Sdavidxu int error; 3734177848Sdavidxu 3735177848Sdavidxu /* Allow a null timespec (wait forever). 
*/ 3736177848Sdavidxu if (uap->uaddr2 == NULL) { 3737177852Sdavidxu error = do_rw_wrlock(td, uap->obj, 0); 3738177848Sdavidxu } else { 3739232209Sdavidxu error = umtx_copyin_umtx_time32(uap->uaddr2, 3740232209Sdavidxu (size_t)uap->uaddr1, &timeout); 3741177848Sdavidxu if (error != 0) 3742177848Sdavidxu return (error); 3743233693Sdavidxu error = do_rw_wrlock(td, uap->obj, &timeout); 3744177848Sdavidxu } 3745177848Sdavidxu return (error); 3746177848Sdavidxu} 3747177848Sdavidxu 3748178646Sdavidxustatic int 3749178646Sdavidxu__umtx_op_wait_uint_private_compat32(struct thread *td, struct _umtx_op_args *uap) 3750178646Sdavidxu{ 3751232144Sdavidxu struct _umtx_time *tm_p, timeout; 3752178646Sdavidxu int error; 3753178646Sdavidxu 3754178646Sdavidxu if (uap->uaddr2 == NULL) 3755232144Sdavidxu tm_p = NULL; 3756178646Sdavidxu else { 3757232144Sdavidxu error = umtx_copyin_umtx_time32( 3758232144Sdavidxu uap->uaddr2, (size_t)uap->uaddr1,&timeout); 3759178646Sdavidxu if (error != 0) 3760178646Sdavidxu return (error); 3761232144Sdavidxu tm_p = &timeout; 3762178646Sdavidxu } 3763232144Sdavidxu return do_wait(td, uap->obj, uap->val, tm_p, 1, 1); 3764178646Sdavidxu} 3765178646Sdavidxu 3766201472Sdavidxustatic int 3767201472Sdavidxu__umtx_op_sem_wait_compat32(struct thread *td, struct _umtx_op_args *uap) 3768201472Sdavidxu{ 3769232144Sdavidxu struct _umtx_time *tm_p, timeout; 3770201472Sdavidxu int error; 3771201472Sdavidxu 3772201472Sdavidxu /* Allow a null timespec (wait forever). 
*/ 3773201472Sdavidxu if (uap->uaddr2 == NULL) 3774232144Sdavidxu tm_p = NULL; 3775201472Sdavidxu else { 3776232144Sdavidxu error = umtx_copyin_umtx_time32(uap->uaddr2, 3777232144Sdavidxu (size_t)uap->uaddr1, &timeout); 3778201472Sdavidxu if (error != 0) 3779201472Sdavidxu return (error); 3780232144Sdavidxu tm_p = &timeout; 3781201472Sdavidxu } 3782232144Sdavidxu return (do_sem_wait(td, uap->obj, tm_p)); 3783201472Sdavidxu} 3784201472Sdavidxu 3785216641Sdavidxustatic int 3786216641Sdavidxu__umtx_op_nwake_private32(struct thread *td, struct _umtx_op_args *uap) 3787216641Sdavidxu{ 3788216641Sdavidxu int count = uap->val; 3789216641Sdavidxu uint32_t uaddrs[BATCH_SIZE]; 3790216641Sdavidxu uint32_t **upp = (uint32_t **)uap->obj; 3791216641Sdavidxu int tocopy; 3792216641Sdavidxu int error = 0; 3793216641Sdavidxu int i, pos = 0; 3794216641Sdavidxu 3795216641Sdavidxu while (count > 0) { 3796216641Sdavidxu tocopy = count; 3797216641Sdavidxu if (tocopy > BATCH_SIZE) 3798216641Sdavidxu tocopy = BATCH_SIZE; 3799216641Sdavidxu error = copyin(upp+pos, uaddrs, tocopy * sizeof(uint32_t)); 3800216641Sdavidxu if (error != 0) 3801216641Sdavidxu break; 3802216641Sdavidxu for (i = 0; i < tocopy; ++i) 3803216641Sdavidxu kern_umtx_wake(td, (void *)(intptr_t)uaddrs[i], 3804216641Sdavidxu INT_MAX, 1); 3805216641Sdavidxu count -= tocopy; 3806216641Sdavidxu pos += tocopy; 3807216641Sdavidxu } 3808216641Sdavidxu return (error); 3809216641Sdavidxu} 3810216641Sdavidxu 3811162536Sdavidxustatic _umtx_op_func op_table_compat32[] = { 3812162536Sdavidxu __umtx_op_lock_umtx_compat32, /* UMTX_OP_LOCK */ 3813162536Sdavidxu __umtx_op_unlock_umtx_compat32, /* UMTX_OP_UNLOCK */ 3814162536Sdavidxu __umtx_op_wait_compat32, /* UMTX_OP_WAIT */ 3815162536Sdavidxu __umtx_op_wake, /* UMTX_OP_WAKE */ 3816162550Sdavidxu __umtx_op_trylock_umutex, /* UMTX_OP_MUTEX_LOCK */ 3817162536Sdavidxu __umtx_op_lock_umutex_compat32, /* UMTX_OP_MUTEX_TRYLOCK */ 3818162536Sdavidxu __umtx_op_unlock_umutex, /* UMTX_OP_MUTEX_UNLOCK 
*/ 3819164839Sdavidxu __umtx_op_set_ceiling, /* UMTX_OP_SET_CEILING */ 3820164839Sdavidxu __umtx_op_cv_wait_compat32, /* UMTX_OP_CV_WAIT*/ 3821164839Sdavidxu __umtx_op_cv_signal, /* UMTX_OP_CV_SIGNAL */ 3822173800Sdavidxu __umtx_op_cv_broadcast, /* UMTX_OP_CV_BROADCAST */ 3823177848Sdavidxu __umtx_op_wait_compat32, /* UMTX_OP_WAIT_UINT */ 3824177848Sdavidxu __umtx_op_rw_rdlock_compat32, /* UMTX_OP_RW_RDLOCK */ 3825177848Sdavidxu __umtx_op_rw_wrlock_compat32, /* UMTX_OP_RW_WRLOCK */ 3826178646Sdavidxu __umtx_op_rw_unlock, /* UMTX_OP_RW_UNLOCK */ 3827178646Sdavidxu __umtx_op_wait_uint_private_compat32, /* UMTX_OP_WAIT_UINT_PRIVATE */ 3828179970Sdavidxu __umtx_op_wake_private, /* UMTX_OP_WAKE_PRIVATE */ 3829179970Sdavidxu __umtx_op_wait_umutex_compat32, /* UMTX_OP_UMUTEX_WAIT */ 3830201472Sdavidxu __umtx_op_wake_umutex, /* UMTX_OP_UMUTEX_WAKE */ 3831201472Sdavidxu __umtx_op_sem_wait_compat32, /* UMTX_OP_SEM_WAIT */ 3832216641Sdavidxu __umtx_op_sem_wake, /* UMTX_OP_SEM_WAKE */ 3833233912Sdavidxu __umtx_op_nwake_private32, /* UMTX_OP_NWAKE_PRIVATE */ 3834233912Sdavidxu __umtx_op_wake2_umutex /* UMTX_OP_UMUTEX_WAKE2 */ 3835162536Sdavidxu}; 3836162536Sdavidxu 3837162536Sdavidxuint 3838162536Sdavidxufreebsd32_umtx_op(struct thread *td, struct freebsd32_umtx_op_args *uap) 3839162536Sdavidxu{ 3840163678Sdavidxu if ((unsigned)uap->op < UMTX_OP_MAX) 3841162536Sdavidxu return (*op_table_compat32[uap->op])(td, 3842162536Sdavidxu (struct _umtx_op_args *)uap); 3843162536Sdavidxu return (EINVAL); 3844162536Sdavidxu} 3845162536Sdavidxu#endif 3846162536Sdavidxu 3847161678Sdavidxuvoid 3848161678Sdavidxuumtx_thread_init(struct thread *td) 3849161678Sdavidxu{ 3850161678Sdavidxu td->td_umtxq = umtxq_alloc(); 3851161678Sdavidxu td->td_umtxq->uq_thread = td; 3852161678Sdavidxu} 3853161678Sdavidxu 3854161678Sdavidxuvoid 3855161678Sdavidxuumtx_thread_fini(struct thread *td) 3856161678Sdavidxu{ 3857161678Sdavidxu umtxq_free(td->td_umtxq); 3858161678Sdavidxu} 3859161678Sdavidxu 
/*
 * It will be called when a new thread is created, e.g. fork().
 * Resets the (recycled) umtx queue to a pristine state; the KASSERTs
 * verify the previous owner left no priority-inheritance state behind.
 */
void
umtx_thread_alloc(struct thread *td)
{
	struct umtx_q *uq;

	uq = td->td_umtxq;
	uq->uq_inherited_pri = PRI_MAX;

	KASSERT(uq->uq_flags == 0, ("uq_flags != 0"));
	KASSERT(uq->uq_thread == td, ("uq_thread != td"));
	KASSERT(uq->uq_pi_blocked == NULL, ("uq_pi_blocked != NULL"));
	KASSERT(TAILQ_EMPTY(&uq->uq_pi_contested), ("uq_pi_contested is not empty"));
}

/*
 * exec() hook.
 *
 * Drops any umtx state the exec'ing thread still holds; the image being
 * replaced can no longer rely on it.
 */
static void
umtx_exec_hook(void *arg __unused, struct proc *p __unused,
	struct image_params *imgp __unused)
{
	umtx_thread_cleanup(curthread);
}

/*
 * thread_exit() hook.
 */
void
umtx_thread_exit(struct thread *td)
{
	umtx_thread_cleanup(td);
}

/*
 * clean up umtx data.
 *
 * Disowns every priority-inheritance mutex still attributed to the
 * thread (under the umtx spin lock) and restores the thread's lent
 * user priority to PRI_MAX (i.e. no inherited boost), taking the
 * thread lock as sched_lend_user_prio() requires.
 */
static void
umtx_thread_cleanup(struct thread *td)
{
	struct umtx_q *uq;
	struct umtx_pi *pi;

	/* Nothing to do if the thread never had a umtx queue. */
	if ((uq = td->td_umtxq) == NULL)
		return;

	mtx_lock_spin(&umtx_lock);
	uq->uq_inherited_pri = PRI_MAX;
	/* Detach all PI mutexes still contested against this thread. */
	while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
		pi->pi_owner = NULL;
		TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
	}
	mtx_unlock_spin(&umtx_lock);
	thread_lock(td);
	sched_lend_user_prio(td, PRI_MAX);
	thread_unlock(td);
}