kern_umtx.c revision 215336
1/*-
2 * Copyright (c) 2004, David Xu <davidxu@freebsd.org>
3 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice unmodified, this list of conditions, and the following
11 *    disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD: head/sys/kern/kern_umtx.c 215336 2010-11-15 07:33:54Z davidxu $");
30
31#include "opt_compat.h"
32#include <sys/param.h>
33#include <sys/kernel.h>
34#include <sys/limits.h>
35#include <sys/lock.h>
36#include <sys/malloc.h>
37#include <sys/mutex.h>
38#include <sys/priv.h>
39#include <sys/proc.h>
40#include <sys/sched.h>
41#include <sys/smp.h>
42#include <sys/sysctl.h>
43#include <sys/sysent.h>
44#include <sys/systm.h>
45#include <sys/sysproto.h>
46#include <sys/eventhandler.h>
47#include <sys/umtx.h>
48
49#include <vm/vm.h>
50#include <vm/vm_param.h>
51#include <vm/pmap.h>
52#include <vm/vm_map.h>
53#include <vm/vm_object.h>
54
55#include <machine/cpu.h>
56
57#ifdef COMPAT_FREEBSD32
58#include <compat/freebsd32/freebsd32_proto.h>
59#endif
60
61enum {
62	TYPE_SIMPLE_WAIT,
63	TYPE_CV,
64	TYPE_SEM,
65	TYPE_SIMPLE_LOCK,
66	TYPE_NORMAL_UMUTEX,
67	TYPE_PI_UMUTEX,
68	TYPE_PP_UMUTEX,
69	TYPE_RWLOCK
70};
71
72#define _UMUTEX_TRY		1
73#define _UMUTEX_WAIT		2
74
75/* Key to represent a unique userland synchronous object */
76struct umtx_key {
77	int	hash;
78	int	type;
79	int	shared;
80	union {
81		struct {
82			vm_object_t	object;
83			uintptr_t	offset;
84		} shared;
85		struct {
86			struct vmspace	*vs;
87			uintptr_t	addr;
88		} private;
89		struct {
90			void		*a;
91			uintptr_t	b;
92		} both;
93	} info;
94};
95
96/* Priority inheritance mutex info. */
97struct umtx_pi {
98	/* Owner thread */
99	struct thread		*pi_owner;
100
101	/* Reference count */
102	int			pi_refcount;
103
104 	/* List entry to link umtx holding by thread */
105	TAILQ_ENTRY(umtx_pi)	pi_link;
106
107	/* List entry in hash */
108	TAILQ_ENTRY(umtx_pi)	pi_hashlink;
109
110	/* List for waiters */
111	TAILQ_HEAD(,umtx_q)	pi_blocked;
112
113	/* Identify a userland lock object */
114	struct umtx_key		pi_key;
115};
116
117/* A userland synchronous object user. */
118struct umtx_q {
119	/* Linked list for the hash. */
120	TAILQ_ENTRY(umtx_q)	uq_link;
121
122	/* Umtx key. */
123	struct umtx_key		uq_key;
124
125	/* Umtx flags. */
126	int			uq_flags;
127#define UQF_UMTXQ	0x0001
128
129	/* The thread waits on. */
130	struct thread		*uq_thread;
131
132	/*
133	 * Blocked on PI mutex. read can use chain lock
134	 * or umtx_lock, write must have both chain lock and
135	 * umtx_lock being hold.
136	 */
137	struct umtx_pi		*uq_pi_blocked;
138
139	/* On blocked list */
140	TAILQ_ENTRY(umtx_q)	uq_lockq;
141
142	/* Thread contending with us */
143	TAILQ_HEAD(,umtx_pi)	uq_pi_contested;
144
145	/* Inherited priority from PP mutex */
146	u_char			uq_inherited_pri;
147
148	/* Spare queue ready to be reused */
149	struct umtxq_queue	*uq_spare_queue;
150
151	/* The queue we on */
152	struct umtxq_queue	*uq_cur_queue;
153};
154
155TAILQ_HEAD(umtxq_head, umtx_q);
156
157/* Per-key wait-queue */
158struct umtxq_queue {
159	struct umtxq_head	head;
160	struct umtx_key		key;
161	LIST_ENTRY(umtxq_queue)	link;
162	int			length;
163};
164
165LIST_HEAD(umtxq_list, umtxq_queue);
166
167/* Userland lock object's wait-queue chain */
168struct umtxq_chain {
169	/* Lock for this chain. */
170	struct mtx		uc_lock;
171
172	/* List of sleep queues. */
173	struct umtxq_list	uc_queue[2];
174#define UMTX_SHARED_QUEUE	0
175#define UMTX_EXCLUSIVE_QUEUE	1
176
177	LIST_HEAD(, umtxq_queue) uc_spare_queue;
178
179	/* Busy flag */
180	char			uc_busy;
181
182	/* Chain lock waiters */
183	int			uc_waiters;
184
185	/* All PI in the list */
186	TAILQ_HEAD(,umtx_pi)	uc_pi_list;
187
188};
189
190#define	UMTXQ_LOCKED_ASSERT(uc)		mtx_assert(&(uc)->uc_lock, MA_OWNED)
191#define	UMTXQ_BUSY_ASSERT(uc)	KASSERT(&(uc)->uc_busy, ("umtx chain is not busy"))
192
193/*
194 * Don't propagate time-sharing priority, there is a security reason,
195 * a user can simply introduce PI-mutex, let thread A lock the mutex,
196 * and let another thread B block on the mutex, because B is
197 * sleeping, its priority will be boosted, this causes A's priority to
198 * be boosted via priority propagating too and will never be lowered even
199 * if it is using 100%CPU, this is unfair to other processes.
200 */
201
202#define UPRI(td)	(((td)->td_user_pri >= PRI_MIN_TIMESHARE &&\
203			  (td)->td_user_pri <= PRI_MAX_TIMESHARE) ?\
204			 PRI_MAX_TIMESHARE : (td)->td_user_pri)
205
206#define	GOLDEN_RATIO_PRIME	2654404609U
207#define	UMTX_CHAINS		128
208#define	UMTX_SHIFTS		(__WORD_BIT - 7)
209
210#define THREAD_SHARE		0
211#define PROCESS_SHARE		1
212#define AUTO_SHARE		2
213
214#define	GET_SHARE(flags)	\
215    (((flags) & USYNC_PROCESS_SHARED) == 0 ? THREAD_SHARE : PROCESS_SHARE)
216
217#define BUSY_SPINS		200
218
219static uma_zone_t		umtx_pi_zone;
220static struct umtxq_chain	umtxq_chains[2][UMTX_CHAINS];
221static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");
222static int			umtx_pi_allocated;
223
224SYSCTL_NODE(_debug, OID_AUTO, umtx, CTLFLAG_RW, 0, "umtx debug");
225SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_pi_allocated, CTLFLAG_RD,
226    &umtx_pi_allocated, 0, "Allocated umtx_pi");
227
228static void umtxq_sysinit(void *);
229static void umtxq_hash(struct umtx_key *key);
230static struct umtxq_chain *umtxq_getchain(struct umtx_key *key);
231static void umtxq_lock(struct umtx_key *key);
232static void umtxq_unlock(struct umtx_key *key);
233static void umtxq_busy(struct umtx_key *key);
234static void umtxq_unbusy(struct umtx_key *key);
235static void umtxq_insert_queue(struct umtx_q *uq, int q);
236static void umtxq_remove_queue(struct umtx_q *uq, int q);
237static int umtxq_sleep(struct umtx_q *uq, const char *wmesg, int timo);
238static int umtxq_count(struct umtx_key *key);
239static int umtx_key_match(const struct umtx_key *k1, const struct umtx_key *k2);
240static int umtx_key_get(void *addr, int type, int share,
241	struct umtx_key *key);
242static void umtx_key_release(struct umtx_key *key);
243static struct umtx_pi *umtx_pi_alloc(int);
244static void umtx_pi_free(struct umtx_pi *pi);
245static void umtx_pi_adjust_locked(struct thread *td, u_char oldpri);
246static int do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags);
247static void umtx_thread_cleanup(struct thread *td);
248static void umtx_exec_hook(void *arg __unused, struct proc *p __unused,
249	struct image_params *imgp __unused);
250SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_sysinit, NULL);
251
252#define umtxq_signal(key, nwake)	umtxq_signal_queue((key), (nwake), UMTX_SHARED_QUEUE)
253#define umtxq_insert(uq)	umtxq_insert_queue((uq), UMTX_SHARED_QUEUE)
254#define umtxq_remove(uq)	umtxq_remove_queue((uq), UMTX_SHARED_QUEUE)
255
256static struct mtx umtx_lock;
257
258static void
259umtxq_sysinit(void *arg __unused)
260{
261	int i, j;
262
263	umtx_pi_zone = uma_zcreate("umtx pi", sizeof(struct umtx_pi),
264		NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
265	for (i = 0; i < 2; ++i) {
266		for (j = 0; j < UMTX_CHAINS; ++j) {
267			mtx_init(&umtxq_chains[i][j].uc_lock, "umtxql", NULL,
268				 MTX_DEF | MTX_DUPOK);
269			LIST_INIT(&umtxq_chains[i][j].uc_queue[0]);
270			LIST_INIT(&umtxq_chains[i][j].uc_queue[1]);
271			LIST_INIT(&umtxq_chains[i][j].uc_spare_queue);
272			TAILQ_INIT(&umtxq_chains[i][j].uc_pi_list);
273			umtxq_chains[i][j].uc_busy = 0;
274			umtxq_chains[i][j].uc_waiters = 0;
275		}
276	}
277	mtx_init(&umtx_lock, "umtx lock", NULL, MTX_SPIN);
278	EVENTHANDLER_REGISTER(process_exec, umtx_exec_hook, NULL,
279	    EVENTHANDLER_PRI_ANY);
280}
281
282struct umtx_q *
283umtxq_alloc(void)
284{
285	struct umtx_q *uq;
286
287	uq = malloc(sizeof(struct umtx_q), M_UMTX, M_WAITOK | M_ZERO);
288	uq->uq_spare_queue = malloc(sizeof(struct umtxq_queue), M_UMTX, M_WAITOK | M_ZERO);
289	TAILQ_INIT(&uq->uq_spare_queue->head);
290	TAILQ_INIT(&uq->uq_pi_contested);
291	uq->uq_inherited_pri = PRI_MAX;
292	return (uq);
293}
294
295void
296umtxq_free(struct umtx_q *uq)
297{
298	MPASS(uq->uq_spare_queue != NULL);
299	free(uq->uq_spare_queue, M_UMTX);
300	free(uq, M_UMTX);
301}
302
303static inline void
304umtxq_hash(struct umtx_key *key)
305{
306	unsigned n = (uintptr_t)key->info.both.a + key->info.both.b;
307	key->hash = ((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS;
308}
309
310static inline int
311umtx_key_match(const struct umtx_key *k1, const struct umtx_key *k2)
312{
313	return (k1->type == k2->type &&
314		k1->info.both.a == k2->info.both.a &&
315	        k1->info.both.b == k2->info.both.b);
316}
317
318static inline struct umtxq_chain *
319umtxq_getchain(struct umtx_key *key)
320{
321	if (key->type <= TYPE_SEM)
322		return (&umtxq_chains[1][key->hash]);
323	return (&umtxq_chains[0][key->hash]);
324}
325
326/*
327 * Lock a chain.
328 */
329static inline void
330umtxq_lock(struct umtx_key *key)
331{
332	struct umtxq_chain *uc;
333
334	uc = umtxq_getchain(key);
335	mtx_lock(&uc->uc_lock);
336}
337
338/*
339 * Unlock a chain.
340 */
341static inline void
342umtxq_unlock(struct umtx_key *key)
343{
344	struct umtxq_chain *uc;
345
346	uc = umtxq_getchain(key);
347	mtx_unlock(&uc->uc_lock);
348}
349
350/*
351 * Set chain to busy state when following operation
352 * may be blocked (kernel mutex can not be used).
353 */
354static inline void
355umtxq_busy(struct umtx_key *key)
356{
357	struct umtxq_chain *uc;
358
359	uc = umtxq_getchain(key);
360	mtx_assert(&uc->uc_lock, MA_OWNED);
361	if (uc->uc_busy) {
362#ifdef SMP
363		if (smp_cpus > 1) {
364			int count = BUSY_SPINS;
365			if (count > 0) {
366				umtxq_unlock(key);
367				while (uc->uc_busy && --count > 0)
368					cpu_spinwait();
369				umtxq_lock(key);
370			}
371		}
372#endif
373		while (uc->uc_busy) {
374			uc->uc_waiters++;
375			msleep(uc, &uc->uc_lock, 0, "umtxqb", 0);
376			uc->uc_waiters--;
377		}
378	}
379	uc->uc_busy = 1;
380}
381
382/*
383 * Unbusy a chain.
384 */
385static inline void
386umtxq_unbusy(struct umtx_key *key)
387{
388	struct umtxq_chain *uc;
389
390	uc = umtxq_getchain(key);
391	mtx_assert(&uc->uc_lock, MA_OWNED);
392	KASSERT(uc->uc_busy != 0, ("not busy"));
393	uc->uc_busy = 0;
394	if (uc->uc_waiters)
395		wakeup_one(uc);
396}
397
398static struct umtxq_queue *
399umtxq_queue_lookup(struct umtx_key *key, int q)
400{
401	struct umtxq_queue *uh;
402	struct umtxq_chain *uc;
403
404	uc = umtxq_getchain(key);
405	UMTXQ_LOCKED_ASSERT(uc);
406	LIST_FOREACH(uh, &uc->uc_queue[q], link) {
407		if (umtx_key_match(&uh->key, key))
408			return (uh);
409	}
410
411	return (NULL);
412}
413
414static inline void
415umtxq_insert_queue(struct umtx_q *uq, int q)
416{
417	struct umtxq_queue *uh;
418	struct umtxq_chain *uc;
419
420	uc = umtxq_getchain(&uq->uq_key);
421	UMTXQ_LOCKED_ASSERT(uc);
422	KASSERT((uq->uq_flags & UQF_UMTXQ) == 0, ("umtx_q is already on queue"));
423	uh = umtxq_queue_lookup(&uq->uq_key, q);
424	if (uh != NULL) {
425		LIST_INSERT_HEAD(&uc->uc_spare_queue, uq->uq_spare_queue, link);
426	} else {
427		uh = uq->uq_spare_queue;
428		uh->key = uq->uq_key;
429		LIST_INSERT_HEAD(&uc->uc_queue[q], uh, link);
430	}
431	uq->uq_spare_queue = NULL;
432
433	TAILQ_INSERT_TAIL(&uh->head, uq, uq_link);
434	uh->length++;
435	uq->uq_flags |= UQF_UMTXQ;
436	uq->uq_cur_queue = uh;
437	return;
438}
439
440static inline void
441umtxq_remove_queue(struct umtx_q *uq, int q)
442{
443	struct umtxq_chain *uc;
444	struct umtxq_queue *uh;
445
446	uc = umtxq_getchain(&uq->uq_key);
447	UMTXQ_LOCKED_ASSERT(uc);
448	if (uq->uq_flags & UQF_UMTXQ) {
449		uh = uq->uq_cur_queue;
450		TAILQ_REMOVE(&uh->head, uq, uq_link);
451		uh->length--;
452		uq->uq_flags &= ~UQF_UMTXQ;
453		if (TAILQ_EMPTY(&uh->head)) {
454			KASSERT(uh->length == 0,
455			    ("inconsistent umtxq_queue length"));
456			LIST_REMOVE(uh, link);
457		} else {
458			uh = LIST_FIRST(&uc->uc_spare_queue);
459			KASSERT(uh != NULL, ("uc_spare_queue is empty"));
460			LIST_REMOVE(uh, link);
461		}
462		uq->uq_spare_queue = uh;
463		uq->uq_cur_queue = NULL;
464	}
465}
466
467/*
468 * Check if there are multiple waiters
469 */
470static int
471umtxq_count(struct umtx_key *key)
472{
473	struct umtxq_chain *uc;
474	struct umtxq_queue *uh;
475
476	uc = umtxq_getchain(key);
477	UMTXQ_LOCKED_ASSERT(uc);
478	uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
479	if (uh != NULL)
480		return (uh->length);
481	return (0);
482}
483
484/*
485 * Check if there are multiple PI waiters and returns first
486 * waiter.
487 */
488static int
489umtxq_count_pi(struct umtx_key *key, struct umtx_q **first)
490{
491	struct umtxq_chain *uc;
492	struct umtxq_queue *uh;
493
494	*first = NULL;
495	uc = umtxq_getchain(key);
496	UMTXQ_LOCKED_ASSERT(uc);
497	uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
498	if (uh != NULL) {
499		*first = TAILQ_FIRST(&uh->head);
500		return (uh->length);
501	}
502	return (0);
503}
504
505/*
506 * Wake up threads waiting on an userland object.
507 */
508
509static int
510umtxq_signal_queue(struct umtx_key *key, int n_wake, int q)
511{
512	struct umtxq_chain *uc;
513	struct umtxq_queue *uh;
514	struct umtx_q *uq;
515	int ret;
516
517	ret = 0;
518	uc = umtxq_getchain(key);
519	UMTXQ_LOCKED_ASSERT(uc);
520	uh = umtxq_queue_lookup(key, q);
521	if (uh != NULL) {
522		while ((uq = TAILQ_FIRST(&uh->head)) != NULL) {
523			umtxq_remove_queue(uq, q);
524			wakeup(uq);
525			if (++ret >= n_wake)
526				return (ret);
527		}
528	}
529	return (ret);
530}
531
532
533/*
534 * Wake up specified thread.
535 */
536static inline void
537umtxq_signal_thread(struct umtx_q *uq)
538{
539	struct umtxq_chain *uc;
540
541	uc = umtxq_getchain(&uq->uq_key);
542	UMTXQ_LOCKED_ASSERT(uc);
543	umtxq_remove(uq);
544	wakeup(uq);
545}
546
547/*
548 * Put thread into sleep state, before sleeping, check if
549 * thread was removed from umtx queue.
550 */
551static inline int
552umtxq_sleep(struct umtx_q *uq, const char *wmesg, int timo)
553{
554	struct umtxq_chain *uc;
555	int error;
556
557	uc = umtxq_getchain(&uq->uq_key);
558	UMTXQ_LOCKED_ASSERT(uc);
559	if (!(uq->uq_flags & UQF_UMTXQ))
560		return (0);
561	error = msleep(uq, &uc->uc_lock, PCATCH, wmesg, timo);
562	if (error == EWOULDBLOCK)
563		error = ETIMEDOUT;
564	return (error);
565}
566
567/*
568 * Convert userspace address into unique logical address.
569 */
570static int
571umtx_key_get(void *addr, int type, int share, struct umtx_key *key)
572{
573	struct thread *td = curthread;
574	vm_map_t map;
575	vm_map_entry_t entry;
576	vm_pindex_t pindex;
577	vm_prot_t prot;
578	boolean_t wired;
579
580	key->type = type;
581	if (share == THREAD_SHARE) {
582		key->shared = 0;
583		key->info.private.vs = td->td_proc->p_vmspace;
584		key->info.private.addr = (uintptr_t)addr;
585	} else {
586		MPASS(share == PROCESS_SHARE || share == AUTO_SHARE);
587		map = &td->td_proc->p_vmspace->vm_map;
588		if (vm_map_lookup(&map, (vm_offset_t)addr, VM_PROT_WRITE,
589		    &entry, &key->info.shared.object, &pindex, &prot,
590		    &wired) != KERN_SUCCESS) {
591			return EFAULT;
592		}
593
594		if ((share == PROCESS_SHARE) ||
595		    (share == AUTO_SHARE &&
596		     VM_INHERIT_SHARE == entry->inheritance)) {
597			key->shared = 1;
598			key->info.shared.offset = entry->offset + entry->start -
599				(vm_offset_t)addr;
600			vm_object_reference(key->info.shared.object);
601		} else {
602			key->shared = 0;
603			key->info.private.vs = td->td_proc->p_vmspace;
604			key->info.private.addr = (uintptr_t)addr;
605		}
606		vm_map_lookup_done(map, entry);
607	}
608
609	umtxq_hash(key);
610	return (0);
611}
612
613/*
614 * Release key.
615 */
616static inline void
617umtx_key_release(struct umtx_key *key)
618{
619	if (key->shared)
620		vm_object_deallocate(key->info.shared.object);
621}
622
623/*
624 * Lock a umtx object.
625 */
626static int
627_do_lock_umtx(struct thread *td, struct umtx *umtx, u_long id, int timo)
628{
629	struct umtx_q *uq;
630	u_long owner;
631	u_long old;
632	int error = 0;
633
634	uq = td->td_umtxq;
635
636	/*
637	 * Care must be exercised when dealing with umtx structure. It
638	 * can fault on any access.
639	 */
640	for (;;) {
641		/*
642		 * Try the uncontested case.  This should be done in userland.
643		 */
644		owner = casuword(&umtx->u_owner, UMTX_UNOWNED, id);
645
646		/* The acquire succeeded. */
647		if (owner == UMTX_UNOWNED)
648			return (0);
649
650		/* The address was invalid. */
651		if (owner == -1)
652			return (EFAULT);
653
654		/* If no one owns it but it is contested try to acquire it. */
655		if (owner == UMTX_CONTESTED) {
656			owner = casuword(&umtx->u_owner,
657			    UMTX_CONTESTED, id | UMTX_CONTESTED);
658
659			if (owner == UMTX_CONTESTED)
660				return (0);
661
662			/* The address was invalid. */
663			if (owner == -1)
664				return (EFAULT);
665
666			/* If this failed the lock has changed, restart. */
667			continue;
668		}
669
670		/*
671		 * If we caught a signal, we have retried and now
672		 * exit immediately.
673		 */
674		if (error != 0)
675			return (error);
676
677		if ((error = umtx_key_get(umtx, TYPE_SIMPLE_LOCK,
678			AUTO_SHARE, &uq->uq_key)) != 0)
679			return (error);
680
681		umtxq_lock(&uq->uq_key);
682		umtxq_busy(&uq->uq_key);
683		umtxq_insert(uq);
684		umtxq_unbusy(&uq->uq_key);
685		umtxq_unlock(&uq->uq_key);
686
687		/*
688		 * Set the contested bit so that a release in user space
689		 * knows to use the system call for unlock.  If this fails
690		 * either some one else has acquired the lock or it has been
691		 * released.
692		 */
693		old = casuword(&umtx->u_owner, owner, owner | UMTX_CONTESTED);
694
695		/* The address was invalid. */
696		if (old == -1) {
697			umtxq_lock(&uq->uq_key);
698			umtxq_remove(uq);
699			umtxq_unlock(&uq->uq_key);
700			umtx_key_release(&uq->uq_key);
701			return (EFAULT);
702		}
703
704		/*
705		 * We set the contested bit, sleep. Otherwise the lock changed
706		 * and we need to retry or we lost a race to the thread
707		 * unlocking the umtx.
708		 */
709		umtxq_lock(&uq->uq_key);
710		if (old == owner)
711			error = umtxq_sleep(uq, "umtx", timo);
712		umtxq_remove(uq);
713		umtxq_unlock(&uq->uq_key);
714		umtx_key_release(&uq->uq_key);
715	}
716
717	return (0);
718}
719
720/*
721 * Lock a umtx object.
722 */
723static int
724do_lock_umtx(struct thread *td, struct umtx *umtx, u_long id,
725	struct timespec *timeout)
726{
727	struct timespec ts, ts2, ts3;
728	struct timeval tv;
729	int error;
730
731	if (timeout == NULL) {
732		error = _do_lock_umtx(td, umtx, id, 0);
733		/* Mutex locking is restarted if it is interrupted. */
734		if (error == EINTR)
735			error = ERESTART;
736	} else {
737		getnanouptime(&ts);
738		timespecadd(&ts, timeout);
739		TIMESPEC_TO_TIMEVAL(&tv, timeout);
740		for (;;) {
741			error = _do_lock_umtx(td, umtx, id, tvtohz(&tv));
742			if (error != ETIMEDOUT)
743				break;
744			getnanouptime(&ts2);
745			if (timespeccmp(&ts2, &ts, >=)) {
746				error = ETIMEDOUT;
747				break;
748			}
749			ts3 = ts;
750			timespecsub(&ts3, &ts2);
751			TIMESPEC_TO_TIMEVAL(&tv, &ts3);
752		}
753		/* Timed-locking is not restarted. */
754		if (error == ERESTART)
755			error = EINTR;
756	}
757	return (error);
758}
759
760/*
761 * Unlock a umtx object.
762 */
763static int
764do_unlock_umtx(struct thread *td, struct umtx *umtx, u_long id)
765{
766	struct umtx_key key;
767	u_long owner;
768	u_long old;
769	int error;
770	int count;
771
772	/*
773	 * Make sure we own this mtx.
774	 */
775	owner = fuword(__DEVOLATILE(u_long *, &umtx->u_owner));
776	if (owner == -1)
777		return (EFAULT);
778
779	if ((owner & ~UMTX_CONTESTED) != id)
780		return (EPERM);
781
782	/* This should be done in userland */
783	if ((owner & UMTX_CONTESTED) == 0) {
784		old = casuword(&umtx->u_owner, owner, UMTX_UNOWNED);
785		if (old == -1)
786			return (EFAULT);
787		if (old == owner)
788			return (0);
789		owner = old;
790	}
791
792	/* We should only ever be in here for contested locks */
793	if ((error = umtx_key_get(umtx, TYPE_SIMPLE_LOCK, AUTO_SHARE,
794		&key)) != 0)
795		return (error);
796
797	umtxq_lock(&key);
798	umtxq_busy(&key);
799	count = umtxq_count(&key);
800	umtxq_unlock(&key);
801
802	/*
803	 * When unlocking the umtx, it must be marked as unowned if
804	 * there is zero or one thread only waiting for it.
805	 * Otherwise, it must be marked as contested.
806	 */
807	old = casuword(&umtx->u_owner, owner,
808		count <= 1 ? UMTX_UNOWNED : UMTX_CONTESTED);
809	umtxq_lock(&key);
810	umtxq_signal(&key,1);
811	umtxq_unbusy(&key);
812	umtxq_unlock(&key);
813	umtx_key_release(&key);
814	if (old == -1)
815		return (EFAULT);
816	if (old != owner)
817		return (EINVAL);
818	return (0);
819}
820
821#ifdef COMPAT_FREEBSD32
822
823/*
824 * Lock a umtx object.
825 */
826static int
827_do_lock_umtx32(struct thread *td, uint32_t *m, uint32_t id, int timo)
828{
829	struct umtx_q *uq;
830	uint32_t owner;
831	uint32_t old;
832	int error = 0;
833
834	uq = td->td_umtxq;
835
836	/*
837	 * Care must be exercised when dealing with umtx structure. It
838	 * can fault on any access.
839	 */
840	for (;;) {
841		/*
842		 * Try the uncontested case.  This should be done in userland.
843		 */
844		owner = casuword32(m, UMUTEX_UNOWNED, id);
845
846		/* The acquire succeeded. */
847		if (owner == UMUTEX_UNOWNED)
848			return (0);
849
850		/* The address was invalid. */
851		if (owner == -1)
852			return (EFAULT);
853
854		/* If no one owns it but it is contested try to acquire it. */
855		if (owner == UMUTEX_CONTESTED) {
856			owner = casuword32(m,
857			    UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);
858			if (owner == UMUTEX_CONTESTED)
859				return (0);
860
861			/* The address was invalid. */
862			if (owner == -1)
863				return (EFAULT);
864
865			/* If this failed the lock has changed, restart. */
866			continue;
867		}
868
869		/*
870		 * If we caught a signal, we have retried and now
871		 * exit immediately.
872		 */
873		if (error != 0)
874			return (error);
875
876		if ((error = umtx_key_get(m, TYPE_SIMPLE_LOCK,
877			AUTO_SHARE, &uq->uq_key)) != 0)
878			return (error);
879
880		umtxq_lock(&uq->uq_key);
881		umtxq_busy(&uq->uq_key);
882		umtxq_insert(uq);
883		umtxq_unbusy(&uq->uq_key);
884		umtxq_unlock(&uq->uq_key);
885
886		/*
887		 * Set the contested bit so that a release in user space
888		 * knows to use the system call for unlock.  If this fails
889		 * either some one else has acquired the lock or it has been
890		 * released.
891		 */
892		old = casuword32(m, owner, owner | UMUTEX_CONTESTED);
893
894		/* The address was invalid. */
895		if (old == -1) {
896			umtxq_lock(&uq->uq_key);
897			umtxq_remove(uq);
898			umtxq_unlock(&uq->uq_key);
899			umtx_key_release(&uq->uq_key);
900			return (EFAULT);
901		}
902
903		/*
904		 * We set the contested bit, sleep. Otherwise the lock changed
905		 * and we need to retry or we lost a race to the thread
906		 * unlocking the umtx.
907		 */
908		umtxq_lock(&uq->uq_key);
909		if (old == owner)
910			error = umtxq_sleep(uq, "umtx", timo);
911		umtxq_remove(uq);
912		umtxq_unlock(&uq->uq_key);
913		umtx_key_release(&uq->uq_key);
914	}
915
916	return (0);
917}
918
919/*
920 * Lock a umtx object.
921 */
922static int
923do_lock_umtx32(struct thread *td, void *m, uint32_t id,
924	struct timespec *timeout)
925{
926	struct timespec ts, ts2, ts3;
927	struct timeval tv;
928	int error;
929
930	if (timeout == NULL) {
931		error = _do_lock_umtx32(td, m, id, 0);
932		/* Mutex locking is restarted if it is interrupted. */
933		if (error == EINTR)
934			error = ERESTART;
935	} else {
936		getnanouptime(&ts);
937		timespecadd(&ts, timeout);
938		TIMESPEC_TO_TIMEVAL(&tv, timeout);
939		for (;;) {
940			error = _do_lock_umtx32(td, m, id, tvtohz(&tv));
941			if (error != ETIMEDOUT)
942				break;
943			getnanouptime(&ts2);
944			if (timespeccmp(&ts2, &ts, >=)) {
945				error = ETIMEDOUT;
946				break;
947			}
948			ts3 = ts;
949			timespecsub(&ts3, &ts2);
950			TIMESPEC_TO_TIMEVAL(&tv, &ts3);
951		}
952		/* Timed-locking is not restarted. */
953		if (error == ERESTART)
954			error = EINTR;
955	}
956	return (error);
957}
958
959/*
960 * Unlock a umtx object.
961 */
962static int
963do_unlock_umtx32(struct thread *td, uint32_t *m, uint32_t id)
964{
965	struct umtx_key key;
966	uint32_t owner;
967	uint32_t old;
968	int error;
969	int count;
970
971	/*
972	 * Make sure we own this mtx.
973	 */
974	owner = fuword32(m);
975	if (owner == -1)
976		return (EFAULT);
977
978	if ((owner & ~UMUTEX_CONTESTED) != id)
979		return (EPERM);
980
981	/* This should be done in userland */
982	if ((owner & UMUTEX_CONTESTED) == 0) {
983		old = casuword32(m, owner, UMUTEX_UNOWNED);
984		if (old == -1)
985			return (EFAULT);
986		if (old == owner)
987			return (0);
988		owner = old;
989	}
990
991	/* We should only ever be in here for contested locks */
992	if ((error = umtx_key_get(m, TYPE_SIMPLE_LOCK, AUTO_SHARE,
993		&key)) != 0)
994		return (error);
995
996	umtxq_lock(&key);
997	umtxq_busy(&key);
998	count = umtxq_count(&key);
999	umtxq_unlock(&key);
1000
1001	/*
1002	 * When unlocking the umtx, it must be marked as unowned if
1003	 * there is zero or one thread only waiting for it.
1004	 * Otherwise, it must be marked as contested.
1005	 */
1006	old = casuword32(m, owner,
1007		count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);
1008	umtxq_lock(&key);
1009	umtxq_signal(&key,1);
1010	umtxq_unbusy(&key);
1011	umtxq_unlock(&key);
1012	umtx_key_release(&key);
1013	if (old == -1)
1014		return (EFAULT);
1015	if (old != owner)
1016		return (EINVAL);
1017	return (0);
1018}
1019#endif
1020
1021/*
1022 * Fetch and compare value, sleep on the address if value is not changed.
1023 */
1024static int
1025do_wait(struct thread *td, void *addr, u_long id,
1026	struct timespec *timeout, int compat32, int is_private)
1027{
1028	struct umtx_q *uq;
1029	struct timespec ts, ts2, ts3;
1030	struct timeval tv;
1031	u_long tmp;
1032	int error = 0;
1033
1034	uq = td->td_umtxq;
1035	if ((error = umtx_key_get(addr, TYPE_SIMPLE_WAIT,
1036		is_private ? THREAD_SHARE : AUTO_SHARE, &uq->uq_key)) != 0)
1037		return (error);
1038
1039	umtxq_lock(&uq->uq_key);
1040	umtxq_insert(uq);
1041	umtxq_unlock(&uq->uq_key);
1042	if (compat32 == 0)
1043		tmp = fuword(addr);
1044        else
1045		tmp = (unsigned int)fuword32(addr);
1046	if (tmp != id) {
1047		umtxq_lock(&uq->uq_key);
1048		umtxq_remove(uq);
1049		umtxq_unlock(&uq->uq_key);
1050	} else if (timeout == NULL) {
1051		umtxq_lock(&uq->uq_key);
1052		error = umtxq_sleep(uq, "uwait", 0);
1053		umtxq_remove(uq);
1054		umtxq_unlock(&uq->uq_key);
1055	} else {
1056		getnanouptime(&ts);
1057		timespecadd(&ts, timeout);
1058		TIMESPEC_TO_TIMEVAL(&tv, timeout);
1059		umtxq_lock(&uq->uq_key);
1060		for (;;) {
1061			error = umtxq_sleep(uq, "uwait", tvtohz(&tv));
1062			if (!(uq->uq_flags & UQF_UMTXQ)) {
1063				error = 0;
1064				break;
1065			}
1066			if (error != ETIMEDOUT)
1067				break;
1068			umtxq_unlock(&uq->uq_key);
1069			getnanouptime(&ts2);
1070			if (timespeccmp(&ts2, &ts, >=)) {
1071				error = ETIMEDOUT;
1072				umtxq_lock(&uq->uq_key);
1073				break;
1074			}
1075			ts3 = ts;
1076			timespecsub(&ts3, &ts2);
1077			TIMESPEC_TO_TIMEVAL(&tv, &ts3);
1078			umtxq_lock(&uq->uq_key);
1079		}
1080		umtxq_remove(uq);
1081		umtxq_unlock(&uq->uq_key);
1082	}
1083	umtx_key_release(&uq->uq_key);
1084	if (error == ERESTART)
1085		error = EINTR;
1086	return (error);
1087}
1088
1089/*
1090 * Wake up threads sleeping on the specified address.
1091 */
1092int
1093kern_umtx_wake(struct thread *td, void *uaddr, int n_wake, int is_private)
1094{
1095	struct umtx_key key;
1096	int ret;
1097
1098	if ((ret = umtx_key_get(uaddr, TYPE_SIMPLE_WAIT,
1099		is_private ? THREAD_SHARE : AUTO_SHARE, &key)) != 0)
1100		return (ret);
1101	umtxq_lock(&key);
1102	ret = umtxq_signal(&key, n_wake);
1103	umtxq_unlock(&key);
1104	umtx_key_release(&key);
1105	return (0);
1106}
1107
1108/*
1109 * Lock PTHREAD_PRIO_NONE protocol POSIX mutex.
1110 */
1111static int
1112_do_lock_normal(struct thread *td, struct umutex *m, uint32_t flags, int timo,
1113	int mode)
1114{
1115	struct umtx_q *uq;
1116	uint32_t owner, old, id;
1117	int error = 0;
1118
1119	id = td->td_tid;
1120	uq = td->td_umtxq;
1121
1122	/*
1123	 * Care must be exercised when dealing with umtx structure. It
1124	 * can fault on any access.
1125	 */
1126	for (;;) {
1127		owner = fuword32(__DEVOLATILE(void *, &m->m_owner));
1128		if (mode == _UMUTEX_WAIT) {
1129			if (owner == UMUTEX_UNOWNED || owner == UMUTEX_CONTESTED)
1130				return (0);
1131		} else {
1132			/*
1133			 * Try the uncontested case.  This should be done in userland.
1134			 */
1135			owner = casuword32(&m->m_owner, UMUTEX_UNOWNED, id);
1136
1137			/* The acquire succeeded. */
1138			if (owner == UMUTEX_UNOWNED)
1139				return (0);
1140
1141			/* The address was invalid. */
1142			if (owner == -1)
1143				return (EFAULT);
1144
1145			/* If no one owns it but it is contested try to acquire it. */
1146			if (owner == UMUTEX_CONTESTED) {
1147				owner = casuword32(&m->m_owner,
1148				    UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);
1149
1150				if (owner == UMUTEX_CONTESTED)
1151					return (0);
1152
1153				/* The address was invalid. */
1154				if (owner == -1)
1155					return (EFAULT);
1156
1157				/* If this failed the lock has changed, restart. */
1158				continue;
1159			}
1160		}
1161
1162		if ((flags & UMUTEX_ERROR_CHECK) != 0 &&
1163		    (owner & ~UMUTEX_CONTESTED) == id)
1164			return (EDEADLK);
1165
1166		if (mode == _UMUTEX_TRY)
1167			return (EBUSY);
1168
1169		/*
1170		 * If we caught a signal, we have retried and now
1171		 * exit immediately.
1172		 */
1173		if (error != 0)
1174			return (error);
1175
1176		if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX,
1177		    GET_SHARE(flags), &uq->uq_key)) != 0)
1178			return (error);
1179
1180		umtxq_lock(&uq->uq_key);
1181		umtxq_busy(&uq->uq_key);
1182		umtxq_insert(uq);
1183		umtxq_unlock(&uq->uq_key);
1184
1185		/*
1186		 * Set the contested bit so that a release in user space
1187		 * knows to use the system call for unlock.  If this fails
1188		 * either some one else has acquired the lock or it has been
1189		 * released.
1190		 */
1191		old = casuword32(&m->m_owner, owner, owner | UMUTEX_CONTESTED);
1192
1193		/* The address was invalid. */
1194		if (old == -1) {
1195			umtxq_lock(&uq->uq_key);
1196			umtxq_remove(uq);
1197			umtxq_unbusy(&uq->uq_key);
1198			umtxq_unlock(&uq->uq_key);
1199			umtx_key_release(&uq->uq_key);
1200			return (EFAULT);
1201		}
1202
1203		/*
1204		 * We set the contested bit, sleep. Otherwise the lock changed
1205		 * and we need to retry or we lost a race to the thread
1206		 * unlocking the umtx.
1207		 */
1208		umtxq_lock(&uq->uq_key);
1209		umtxq_unbusy(&uq->uq_key);
1210		if (old == owner)
1211			error = umtxq_sleep(uq, "umtxn", timo);
1212		umtxq_remove(uq);
1213		umtxq_unlock(&uq->uq_key);
1214		umtx_key_release(&uq->uq_key);
1215	}
1216
1217	return (0);
1218}
1219
1220/*
1221 * Lock PTHREAD_PRIO_NONE protocol POSIX mutex.
1222 */
1223/*
1224 * Unlock PTHREAD_PRIO_NONE protocol POSIX mutex.
1225 */
1226static int
1227do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags)
1228{
1229	struct umtx_key key;
1230	uint32_t owner, old, id;
1231	int error;
1232	int count;
1233
1234	id = td->td_tid;
1235	/*
1236	 * Make sure we own this mtx.
1237	 */
1238	owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
1239	if (owner == -1)
1240		return (EFAULT);
1241
1242	if ((owner & ~UMUTEX_CONTESTED) != id)
1243		return (EPERM);
1244
1245	if ((owner & UMUTEX_CONTESTED) == 0) {
1246		old = casuword32(&m->m_owner, owner, UMUTEX_UNOWNED);
1247		if (old == -1)
1248			return (EFAULT);
1249		if (old == owner)
1250			return (0);
1251		owner = old;
1252	}
1253
1254	/* We should only ever be in here for contested locks */
1255	if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
1256	    &key)) != 0)
1257		return (error);
1258
1259	umtxq_lock(&key);
1260	umtxq_busy(&key);
1261	count = umtxq_count(&key);
1262	umtxq_unlock(&key);
1263
1264	/*
1265	 * When unlocking the umtx, it must be marked as unowned if
1266	 * there is zero or one thread only waiting for it.
1267	 * Otherwise, it must be marked as contested.
1268	 */
1269	old = casuword32(&m->m_owner, owner,
1270		count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);
1271	umtxq_lock(&key);
1272	umtxq_signal(&key,1);
1273	umtxq_unbusy(&key);
1274	umtxq_unlock(&key);
1275	umtx_key_release(&key);
1276	if (old == -1)
1277		return (EFAULT);
1278	if (old != owner)
1279		return (EINVAL);
1280	return (0);
1281}
1282
1283/*
1284 * Check if the mutex is available and wake up a waiter,
1285 * only for simple mutex.
1286 */
1287static int
1288do_wake_umutex(struct thread *td, struct umutex *m)
1289{
1290	struct umtx_key key;
1291	uint32_t owner;
1292	uint32_t flags;
1293	int error;
1294	int count;
1295
1296	owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
1297	if (owner == -1)
1298		return (EFAULT);
1299
1300	if ((owner & ~UMUTEX_CONTESTED) != 0)
1301		return (0);
1302
1303	flags = fuword32(&m->m_flags);
1304
1305	/* We should only ever be in here for contested locks */
1306	if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
1307	    &key)) != 0)
1308		return (error);
1309
1310	umtxq_lock(&key);
1311	umtxq_busy(&key);
1312	count = umtxq_count(&key);
1313	umtxq_unlock(&key);
1314
1315	if (count <= 1)
1316		owner = casuword32(&m->m_owner, UMUTEX_CONTESTED, UMUTEX_UNOWNED);
1317
1318	umtxq_lock(&key);
1319	if (count != 0 && (owner & ~UMUTEX_CONTESTED) == 0)
1320		umtxq_signal(&key, 1);
1321	umtxq_unbusy(&key);
1322	umtxq_unlock(&key);
1323	umtx_key_release(&key);
1324	return (0);
1325}
1326
1327static inline struct umtx_pi *
1328umtx_pi_alloc(int flags)
1329{
1330	struct umtx_pi *pi;
1331
1332	pi = uma_zalloc(umtx_pi_zone, M_ZERO | flags);
1333	TAILQ_INIT(&pi->pi_blocked);
1334	atomic_add_int(&umtx_pi_allocated, 1);
1335	return (pi);
1336}
1337
1338static inline void
1339umtx_pi_free(struct umtx_pi *pi)
1340{
1341	uma_zfree(umtx_pi_zone, pi);
1342	atomic_add_int(&umtx_pi_allocated, -1);
1343}
1344
1345/*
1346 * Adjust the thread's position on a pi_state after its priority has been
1347 * changed.
1348 */
1349static int
1350umtx_pi_adjust_thread(struct umtx_pi *pi, struct thread *td)
1351{
1352	struct umtx_q *uq, *uq1, *uq2;
1353	struct thread *td1;
1354
1355	mtx_assert(&umtx_lock, MA_OWNED);
1356	if (pi == NULL)
1357		return (0);
1358
1359	uq = td->td_umtxq;
1360
1361	/*
1362	 * Check if the thread needs to be moved on the blocked chain.
1363	 * It needs to be moved if either its priority is lower than
1364	 * the previous thread or higher than the next thread.
1365	 */
1366	uq1 = TAILQ_PREV(uq, umtxq_head, uq_lockq);
1367	uq2 = TAILQ_NEXT(uq, uq_lockq);
1368	if ((uq1 != NULL && UPRI(td) < UPRI(uq1->uq_thread)) ||
1369	    (uq2 != NULL && UPRI(td) > UPRI(uq2->uq_thread))) {
1370		/*
1371		 * Remove thread from blocked chain and determine where
1372		 * it should be moved to.
1373		 */
1374		TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
1375		TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
1376			td1 = uq1->uq_thread;
1377			MPASS(td1->td_proc->p_magic == P_MAGIC);
1378			if (UPRI(td1) > UPRI(td))
1379				break;
1380		}
1381
1382		if (uq1 == NULL)
1383			TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);
1384		else
1385			TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
1386	}
1387	return (1);
1388}
1389
1390/*
1391 * Propagate priority when a thread is blocked on POSIX
1392 * PI mutex.
1393 */
1394static void
1395umtx_propagate_priority(struct thread *td)
1396{
1397	struct umtx_q *uq;
1398	struct umtx_pi *pi;
1399	int pri;
1400
1401	mtx_assert(&umtx_lock, MA_OWNED);
1402	pri = UPRI(td);
1403	uq = td->td_umtxq;
1404	pi = uq->uq_pi_blocked;
1405	if (pi == NULL)
1406		return;
1407
1408	for (;;) {
1409		td = pi->pi_owner;
1410		if (td == NULL)
1411			return;
1412
1413		MPASS(td->td_proc != NULL);
1414		MPASS(td->td_proc->p_magic == P_MAGIC);
1415
1416		if (UPRI(td) <= pri)
1417			return;
1418
1419		thread_lock(td);
1420		sched_lend_user_prio(td, pri);
1421		thread_unlock(td);
1422
1423		/*
1424		 * Pick up the lock that td is blocked on.
1425		 */
1426		uq = td->td_umtxq;
1427		pi = uq->uq_pi_blocked;
1428		/* Resort td on the list if needed. */
1429		if (!umtx_pi_adjust_thread(pi, td))
1430			break;
1431	}
1432}
1433
1434/*
1435 * Unpropagate priority for a PI mutex when a thread blocked on
1436 * it is interrupted by signal or resumed by others.
1437 */
1438static void
1439umtx_unpropagate_priority(struct umtx_pi *pi)
1440{
1441	struct umtx_q *uq, *uq_owner;
1442	struct umtx_pi *pi2;
1443	int pri, oldpri;
1444
1445	mtx_assert(&umtx_lock, MA_OWNED);
1446
1447	while (pi != NULL && pi->pi_owner != NULL) {
1448		pri = PRI_MAX;
1449		uq_owner = pi->pi_owner->td_umtxq;
1450
1451		TAILQ_FOREACH(pi2, &uq_owner->uq_pi_contested, pi_link) {
1452			uq = TAILQ_FIRST(&pi2->pi_blocked);
1453			if (uq != NULL) {
1454				if (pri > UPRI(uq->uq_thread))
1455					pri = UPRI(uq->uq_thread);
1456			}
1457		}
1458
1459		if (pri > uq_owner->uq_inherited_pri)
1460			pri = uq_owner->uq_inherited_pri;
1461		thread_lock(pi->pi_owner);
1462		oldpri = pi->pi_owner->td_user_pri;
1463		sched_unlend_user_prio(pi->pi_owner, pri);
1464		thread_unlock(pi->pi_owner);
1465		if (uq_owner->uq_pi_blocked != NULL)
1466			umtx_pi_adjust_locked(pi->pi_owner, oldpri);
1467		pi = uq_owner->uq_pi_blocked;
1468	}
1469}
1470
1471/*
1472 * Insert a PI mutex into owned list.
1473 */
1474static void
1475umtx_pi_setowner(struct umtx_pi *pi, struct thread *owner)
1476{
1477	struct umtx_q *uq_owner;
1478
1479	uq_owner = owner->td_umtxq;
1480	mtx_assert(&umtx_lock, MA_OWNED);
1481	if (pi->pi_owner != NULL)
1482		panic("pi_ower != NULL");
1483	pi->pi_owner = owner;
1484	TAILQ_INSERT_TAIL(&uq_owner->uq_pi_contested, pi, pi_link);
1485}
1486
1487/*
1488 * Claim ownership of a PI mutex.
1489 */
1490static int
1491umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)
1492{
1493	struct umtx_q *uq, *uq_owner;
1494
1495	uq_owner = owner->td_umtxq;
1496	mtx_lock_spin(&umtx_lock);
1497	if (pi->pi_owner == owner) {
1498		mtx_unlock_spin(&umtx_lock);
1499		return (0);
1500	}
1501
1502	if (pi->pi_owner != NULL) {
1503		/*
1504		 * userland may have already messed the mutex, sigh.
1505		 */
1506		mtx_unlock_spin(&umtx_lock);
1507		return (EPERM);
1508	}
1509	umtx_pi_setowner(pi, owner);
1510	uq = TAILQ_FIRST(&pi->pi_blocked);
1511	if (uq != NULL) {
1512		int pri;
1513
1514		pri = UPRI(uq->uq_thread);
1515		thread_lock(owner);
1516		if (pri < UPRI(owner))
1517			sched_lend_user_prio(owner, pri);
1518		thread_unlock(owner);
1519	}
1520	mtx_unlock_spin(&umtx_lock);
1521	return (0);
1522}
1523
1524static void
1525umtx_pi_adjust_locked(struct thread *td, u_char oldpri)
1526{
1527	struct umtx_q *uq;
1528	struct umtx_pi *pi;
1529
1530	uq = td->td_umtxq;
1531	/*
1532	 * Pick up the lock that td is blocked on.
1533	 */
1534	pi = uq->uq_pi_blocked;
1535	MPASS(pi != NULL);
1536
1537	/* Resort the turnstile on the list. */
1538	if (!umtx_pi_adjust_thread(pi, td))
1539		return;
1540
1541	/*
1542	 * If our priority was lowered and we are at the head of the
1543	 * turnstile, then propagate our new priority up the chain.
1544	 */
1545	if (uq == TAILQ_FIRST(&pi->pi_blocked) && UPRI(td) < oldpri)
1546		umtx_propagate_priority(td);
1547}
1548
1549/*
1550 * Adjust a thread's order position in its blocked PI mutex,
1551 * this may result new priority propagating process.
1552 */
1553void
1554umtx_pi_adjust(struct thread *td, u_char oldpri)
1555{
1556	struct umtx_q *uq;
1557	struct umtx_pi *pi;
1558
1559	uq = td->td_umtxq;
1560	mtx_lock_spin(&umtx_lock);
1561	/*
1562	 * Pick up the lock that td is blocked on.
1563	 */
1564	pi = uq->uq_pi_blocked;
1565	if (pi != NULL)
1566		umtx_pi_adjust_locked(td, oldpri);
1567	mtx_unlock_spin(&umtx_lock);
1568}
1569
1570/*
1571 * Sleep on a PI mutex.
1572 */
1573static int
1574umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi,
1575	uint32_t owner, const char *wmesg, int timo)
1576{
1577	struct umtxq_chain *uc;
1578	struct thread *td, *td1;
1579	struct umtx_q *uq1;
1580	int pri;
1581	int error = 0;
1582
1583	td = uq->uq_thread;
1584	KASSERT(td == curthread, ("inconsistent uq_thread"));
1585	uc = umtxq_getchain(&uq->uq_key);
1586	UMTXQ_LOCKED_ASSERT(uc);
1587	UMTXQ_BUSY_ASSERT(uc);
1588	umtxq_insert(uq);
1589	mtx_lock_spin(&umtx_lock);
1590	if (pi->pi_owner == NULL) {
1591		mtx_unlock_spin(&umtx_lock);
1592		/* XXX Only look up thread in current process. */
1593		td1 = tdfind(owner, curproc->p_pid);
1594		mtx_lock_spin(&umtx_lock);
1595		if (td1 != NULL) {
1596			if (pi->pi_owner == NULL)
1597				umtx_pi_setowner(pi, td1);
1598			PROC_UNLOCK(td1->td_proc);
1599		}
1600	}
1601
1602	TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
1603		pri = UPRI(uq1->uq_thread);
1604		if (pri > UPRI(td))
1605			break;
1606	}
1607
1608	if (uq1 != NULL)
1609		TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
1610	else
1611		TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);
1612
1613	uq->uq_pi_blocked = pi;
1614	thread_lock(td);
1615	td->td_flags |= TDF_UPIBLOCKED;
1616	thread_unlock(td);
1617	umtx_propagate_priority(td);
1618	mtx_unlock_spin(&umtx_lock);
1619	umtxq_unbusy(&uq->uq_key);
1620
1621	if (uq->uq_flags & UQF_UMTXQ) {
1622		error = msleep(uq, &uc->uc_lock, PCATCH, wmesg, timo);
1623		if (error == EWOULDBLOCK)
1624			error = ETIMEDOUT;
1625		if (uq->uq_flags & UQF_UMTXQ) {
1626			umtxq_remove(uq);
1627		}
1628	}
1629	mtx_lock_spin(&umtx_lock);
1630	uq->uq_pi_blocked = NULL;
1631	thread_lock(td);
1632	td->td_flags &= ~TDF_UPIBLOCKED;
1633	thread_unlock(td);
1634	TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
1635	umtx_unpropagate_priority(pi);
1636	mtx_unlock_spin(&umtx_lock);
1637	umtxq_unlock(&uq->uq_key);
1638
1639	return (error);
1640}
1641
1642/*
1643 * Add reference count for a PI mutex.
1644 */
1645static void
1646umtx_pi_ref(struct umtx_pi *pi)
1647{
1648	struct umtxq_chain *uc;
1649
1650	uc = umtxq_getchain(&pi->pi_key);
1651	UMTXQ_LOCKED_ASSERT(uc);
1652	pi->pi_refcount++;
1653}
1654
1655/*
1656 * Decrease reference count for a PI mutex, if the counter
1657 * is decreased to zero, its memory space is freed.
1658 */
1659static void
1660umtx_pi_unref(struct umtx_pi *pi)
1661{
1662	struct umtxq_chain *uc;
1663
1664	uc = umtxq_getchain(&pi->pi_key);
1665	UMTXQ_LOCKED_ASSERT(uc);
1666	KASSERT(pi->pi_refcount > 0, ("invalid reference count"));
1667	if (--pi->pi_refcount == 0) {
1668		mtx_lock_spin(&umtx_lock);
1669		if (pi->pi_owner != NULL) {
1670			TAILQ_REMOVE(&pi->pi_owner->td_umtxq->uq_pi_contested,
1671				pi, pi_link);
1672			pi->pi_owner = NULL;
1673		}
1674		KASSERT(TAILQ_EMPTY(&pi->pi_blocked),
1675			("blocked queue not empty"));
1676		mtx_unlock_spin(&umtx_lock);
1677		TAILQ_REMOVE(&uc->uc_pi_list, pi, pi_hashlink);
1678		umtx_pi_free(pi);
1679	}
1680}
1681
1682/*
1683 * Find a PI mutex in hash table.
1684 */
1685static struct umtx_pi *
1686umtx_pi_lookup(struct umtx_key *key)
1687{
1688	struct umtxq_chain *uc;
1689	struct umtx_pi *pi;
1690
1691	uc = umtxq_getchain(key);
1692	UMTXQ_LOCKED_ASSERT(uc);
1693
1694	TAILQ_FOREACH(pi, &uc->uc_pi_list, pi_hashlink) {
1695		if (umtx_key_match(&pi->pi_key, key)) {
1696			return (pi);
1697		}
1698	}
1699	return (NULL);
1700}
1701
1702/*
1703 * Insert a PI mutex into hash table.
1704 */
1705static inline void
1706umtx_pi_insert(struct umtx_pi *pi)
1707{
1708	struct umtxq_chain *uc;
1709
1710	uc = umtxq_getchain(&pi->pi_key);
1711	UMTXQ_LOCKED_ASSERT(uc);
1712	TAILQ_INSERT_TAIL(&uc->uc_pi_list, pi, pi_hashlink);
1713}
1714
1715/*
1716 * Lock a PI mutex.
1717 */
1718static int
1719_do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags, int timo,
1720	int try)
1721{
1722	struct umtx_q *uq;
1723	struct umtx_pi *pi, *new_pi;
1724	uint32_t id, owner, old;
1725	int error;
1726
1727	id = td->td_tid;
1728	uq = td->td_umtxq;
1729
1730	if ((error = umtx_key_get(m, TYPE_PI_UMUTEX, GET_SHARE(flags),
1731	    &uq->uq_key)) != 0)
1732		return (error);
1733	umtxq_lock(&uq->uq_key);
1734	pi = umtx_pi_lookup(&uq->uq_key);
1735	if (pi == NULL) {
1736		new_pi = umtx_pi_alloc(M_NOWAIT);
1737		if (new_pi == NULL) {
1738			umtxq_unlock(&uq->uq_key);
1739			new_pi = umtx_pi_alloc(M_WAITOK);
1740			umtxq_lock(&uq->uq_key);
1741			pi = umtx_pi_lookup(&uq->uq_key);
1742			if (pi != NULL) {
1743				umtx_pi_free(new_pi);
1744				new_pi = NULL;
1745			}
1746		}
1747		if (new_pi != NULL) {
1748			new_pi->pi_key = uq->uq_key;
1749			umtx_pi_insert(new_pi);
1750			pi = new_pi;
1751		}
1752	}
1753	umtx_pi_ref(pi);
1754	umtxq_unlock(&uq->uq_key);
1755
1756	/*
1757	 * Care must be exercised when dealing with umtx structure.  It
1758	 * can fault on any access.
1759	 */
1760	for (;;) {
1761		/*
1762		 * Try the uncontested case.  This should be done in userland.
1763		 */
1764		owner = casuword32(&m->m_owner, UMUTEX_UNOWNED, id);
1765
1766		/* The acquire succeeded. */
1767		if (owner == UMUTEX_UNOWNED) {
1768			error = 0;
1769			break;
1770		}
1771
1772		/* The address was invalid. */
1773		if (owner == -1) {
1774			error = EFAULT;
1775			break;
1776		}
1777
1778		/* If no one owns it but it is contested try to acquire it. */
1779		if (owner == UMUTEX_CONTESTED) {
1780			owner = casuword32(&m->m_owner,
1781			    UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);
1782
1783			if (owner == UMUTEX_CONTESTED) {
1784				umtxq_lock(&uq->uq_key);
1785				umtxq_busy(&uq->uq_key);
1786				error = umtx_pi_claim(pi, td);
1787				umtxq_unbusy(&uq->uq_key);
1788				umtxq_unlock(&uq->uq_key);
1789				break;
1790			}
1791
1792			/* The address was invalid. */
1793			if (owner == -1) {
1794				error = EFAULT;
1795				break;
1796			}
1797
1798			/* If this failed the lock has changed, restart. */
1799			continue;
1800		}
1801
1802		if ((flags & UMUTEX_ERROR_CHECK) != 0 &&
1803		    (owner & ~UMUTEX_CONTESTED) == id) {
1804			error = EDEADLK;
1805			break;
1806		}
1807
1808		if (try != 0) {
1809			error = EBUSY;
1810			break;
1811		}
1812
1813		/*
1814		 * If we caught a signal, we have retried and now
1815		 * exit immediately.
1816		 */
1817		if (error != 0)
1818			break;
1819
1820		umtxq_lock(&uq->uq_key);
1821		umtxq_busy(&uq->uq_key);
1822		umtxq_unlock(&uq->uq_key);
1823
1824		/*
1825		 * Set the contested bit so that a release in user space
1826		 * knows to use the system call for unlock.  If this fails
1827		 * either some one else has acquired the lock or it has been
1828		 * released.
1829		 */
1830		old = casuword32(&m->m_owner, owner, owner | UMUTEX_CONTESTED);
1831
1832		/* The address was invalid. */
1833		if (old == -1) {
1834			umtxq_lock(&uq->uq_key);
1835			umtxq_unbusy(&uq->uq_key);
1836			umtxq_unlock(&uq->uq_key);
1837			error = EFAULT;
1838			break;
1839		}
1840
1841		umtxq_lock(&uq->uq_key);
1842		/*
1843		 * We set the contested bit, sleep. Otherwise the lock changed
1844		 * and we need to retry or we lost a race to the thread
1845		 * unlocking the umtx.
1846		 */
1847		if (old == owner)
1848			error = umtxq_sleep_pi(uq, pi, owner & ~UMUTEX_CONTESTED,
1849				 "umtxpi", timo);
1850		else {
1851			umtxq_unbusy(&uq->uq_key);
1852			umtxq_unlock(&uq->uq_key);
1853		}
1854	}
1855
1856	umtxq_lock(&uq->uq_key);
1857	umtx_pi_unref(pi);
1858	umtxq_unlock(&uq->uq_key);
1859
1860	umtx_key_release(&uq->uq_key);
1861	return (error);
1862}
1863
1864/*
1865 * Unlock a PI mutex.
1866 */
1867static int
1868do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags)
1869{
1870	struct umtx_key key;
1871	struct umtx_q *uq_first, *uq_first2, *uq_me;
1872	struct umtx_pi *pi, *pi2;
1873	uint32_t owner, old, id;
1874	int error;
1875	int count;
1876	int pri;
1877
1878	id = td->td_tid;
1879	/*
1880	 * Make sure we own this mtx.
1881	 */
1882	owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
1883	if (owner == -1)
1884		return (EFAULT);
1885
1886	if ((owner & ~UMUTEX_CONTESTED) != id)
1887		return (EPERM);
1888
1889	/* This should be done in userland */
1890	if ((owner & UMUTEX_CONTESTED) == 0) {
1891		old = casuword32(&m->m_owner, owner, UMUTEX_UNOWNED);
1892		if (old == -1)
1893			return (EFAULT);
1894		if (old == owner)
1895			return (0);
1896		owner = old;
1897	}
1898
1899	/* We should only ever be in here for contested locks */
1900	if ((error = umtx_key_get(m, TYPE_PI_UMUTEX, GET_SHARE(flags),
1901	    &key)) != 0)
1902		return (error);
1903
1904	umtxq_lock(&key);
1905	umtxq_busy(&key);
1906	count = umtxq_count_pi(&key, &uq_first);
1907	if (uq_first != NULL) {
1908		mtx_lock_spin(&umtx_lock);
1909		pi = uq_first->uq_pi_blocked;
1910		KASSERT(pi != NULL, ("pi == NULL?"));
1911		if (pi->pi_owner != curthread) {
1912			mtx_unlock_spin(&umtx_lock);
1913			umtxq_unbusy(&key);
1914			umtxq_unlock(&key);
1915			umtx_key_release(&key);
1916			/* userland messed the mutex */
1917			return (EPERM);
1918		}
1919		uq_me = curthread->td_umtxq;
1920		pi->pi_owner = NULL;
1921		TAILQ_REMOVE(&uq_me->uq_pi_contested, pi, pi_link);
1922		/* get highest priority thread which is still sleeping. */
1923		uq_first = TAILQ_FIRST(&pi->pi_blocked);
1924		while (uq_first != NULL &&
1925		       (uq_first->uq_flags & UQF_UMTXQ) == 0) {
1926			uq_first = TAILQ_NEXT(uq_first, uq_lockq);
1927		}
1928		pri = PRI_MAX;
1929		TAILQ_FOREACH(pi2, &uq_me->uq_pi_contested, pi_link) {
1930			uq_first2 = TAILQ_FIRST(&pi2->pi_blocked);
1931			if (uq_first2 != NULL) {
1932				if (pri > UPRI(uq_first2->uq_thread))
1933					pri = UPRI(uq_first2->uq_thread);
1934			}
1935		}
1936		thread_lock(curthread);
1937		sched_unlend_user_prio(curthread, pri);
1938		thread_unlock(curthread);
1939		mtx_unlock_spin(&umtx_lock);
1940		if (uq_first)
1941			umtxq_signal_thread(uq_first);
1942	}
1943	umtxq_unlock(&key);
1944
1945	/*
1946	 * When unlocking the umtx, it must be marked as unowned if
1947	 * there is zero or one thread only waiting for it.
1948	 * Otherwise, it must be marked as contested.
1949	 */
1950	old = casuword32(&m->m_owner, owner,
1951		count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);
1952
1953	umtxq_lock(&key);
1954	umtxq_unbusy(&key);
1955	umtxq_unlock(&key);
1956	umtx_key_release(&key);
1957	if (old == -1)
1958		return (EFAULT);
1959	if (old != owner)
1960		return (EINVAL);
1961	return (0);
1962}
1963
1964/*
1965 * Lock a PP mutex.
1966 */
1967static int
1968_do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags, int timo,
1969	int try)
1970{
1971	struct umtx_q *uq, *uq2;
1972	struct umtx_pi *pi;
1973	uint32_t ceiling;
1974	uint32_t owner, id;
1975	int error, pri, old_inherited_pri, su;
1976
1977	id = td->td_tid;
1978	uq = td->td_umtxq;
1979	if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags),
1980	    &uq->uq_key)) != 0)
1981		return (error);
1982	su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);
1983	for (;;) {
1984		old_inherited_pri = uq->uq_inherited_pri;
1985		umtxq_lock(&uq->uq_key);
1986		umtxq_busy(&uq->uq_key);
1987		umtxq_unlock(&uq->uq_key);
1988
1989		ceiling = RTP_PRIO_MAX - fuword32(&m->m_ceilings[0]);
1990		if (ceiling > RTP_PRIO_MAX) {
1991			error = EINVAL;
1992			goto out;
1993		}
1994
1995		mtx_lock_spin(&umtx_lock);
1996		if (UPRI(td) < PRI_MIN_REALTIME + ceiling) {
1997			mtx_unlock_spin(&umtx_lock);
1998			error = EINVAL;
1999			goto out;
2000		}
2001		if (su && PRI_MIN_REALTIME + ceiling < uq->uq_inherited_pri) {
2002			uq->uq_inherited_pri = PRI_MIN_REALTIME + ceiling;
2003			thread_lock(td);
2004			if (uq->uq_inherited_pri < UPRI(td))
2005				sched_lend_user_prio(td, uq->uq_inherited_pri);
2006			thread_unlock(td);
2007		}
2008		mtx_unlock_spin(&umtx_lock);
2009
2010		owner = casuword32(&m->m_owner,
2011		    UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);
2012
2013		if (owner == UMUTEX_CONTESTED) {
2014			error = 0;
2015			break;
2016		}
2017
2018		/* The address was invalid. */
2019		if (owner == -1) {
2020			error = EFAULT;
2021			break;
2022		}
2023
2024		if ((flags & UMUTEX_ERROR_CHECK) != 0 &&
2025		    (owner & ~UMUTEX_CONTESTED) == id) {
2026			error = EDEADLK;
2027			break;
2028		}
2029
2030		if (try != 0) {
2031			error = EBUSY;
2032			break;
2033		}
2034
2035		/*
2036		 * If we caught a signal, we have retried and now
2037		 * exit immediately.
2038		 */
2039		if (error != 0)
2040			break;
2041
2042		umtxq_lock(&uq->uq_key);
2043		umtxq_insert(uq);
2044		umtxq_unbusy(&uq->uq_key);
2045		error = umtxq_sleep(uq, "umtxpp", timo);
2046		umtxq_remove(uq);
2047		umtxq_unlock(&uq->uq_key);
2048
2049		mtx_lock_spin(&umtx_lock);
2050		uq->uq_inherited_pri = old_inherited_pri;
2051		pri = PRI_MAX;
2052		TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
2053			uq2 = TAILQ_FIRST(&pi->pi_blocked);
2054			if (uq2 != NULL) {
2055				if (pri > UPRI(uq2->uq_thread))
2056					pri = UPRI(uq2->uq_thread);
2057			}
2058		}
2059		if (pri > uq->uq_inherited_pri)
2060			pri = uq->uq_inherited_pri;
2061		thread_lock(td);
2062		sched_unlend_user_prio(td, pri);
2063		thread_unlock(td);
2064		mtx_unlock_spin(&umtx_lock);
2065	}
2066
2067	if (error != 0) {
2068		mtx_lock_spin(&umtx_lock);
2069		uq->uq_inherited_pri = old_inherited_pri;
2070		pri = PRI_MAX;
2071		TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
2072			uq2 = TAILQ_FIRST(&pi->pi_blocked);
2073			if (uq2 != NULL) {
2074				if (pri > UPRI(uq2->uq_thread))
2075					pri = UPRI(uq2->uq_thread);
2076			}
2077		}
2078		if (pri > uq->uq_inherited_pri)
2079			pri = uq->uq_inherited_pri;
2080		thread_lock(td);
2081		sched_unlend_user_prio(td, pri);
2082		thread_unlock(td);
2083		mtx_unlock_spin(&umtx_lock);
2084	}
2085
2086out:
2087	umtxq_lock(&uq->uq_key);
2088	umtxq_unbusy(&uq->uq_key);
2089	umtxq_unlock(&uq->uq_key);
2090	umtx_key_release(&uq->uq_key);
2091	return (error);
2092}
2093
2094/*
2095 * Unlock a PP mutex.
2096 */
2097static int
2098do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags)
2099{
2100	struct umtx_key key;
2101	struct umtx_q *uq, *uq2;
2102	struct umtx_pi *pi;
2103	uint32_t owner, id;
2104	uint32_t rceiling;
2105	int error, pri, new_inherited_pri, su;
2106
2107	id = td->td_tid;
2108	uq = td->td_umtxq;
2109	su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);
2110
2111	/*
2112	 * Make sure we own this mtx.
2113	 */
2114	owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
2115	if (owner == -1)
2116		return (EFAULT);
2117
2118	if ((owner & ~UMUTEX_CONTESTED) != id)
2119		return (EPERM);
2120
2121	error = copyin(&m->m_ceilings[1], &rceiling, sizeof(uint32_t));
2122	if (error != 0)
2123		return (error);
2124
2125	if (rceiling == -1)
2126		new_inherited_pri = PRI_MAX;
2127	else {
2128		rceiling = RTP_PRIO_MAX - rceiling;
2129		if (rceiling > RTP_PRIO_MAX)
2130			return (EINVAL);
2131		new_inherited_pri = PRI_MIN_REALTIME + rceiling;
2132	}
2133
2134	if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags),
2135	    &key)) != 0)
2136		return (error);
2137	umtxq_lock(&key);
2138	umtxq_busy(&key);
2139	umtxq_unlock(&key);
2140	/*
2141	 * For priority protected mutex, always set unlocked state
2142	 * to UMUTEX_CONTESTED, so that userland always enters kernel
2143	 * to lock the mutex, it is necessary because thread priority
2144	 * has to be adjusted for such mutex.
2145	 */
2146	error = suword32(__DEVOLATILE(uint32_t *, &m->m_owner),
2147		UMUTEX_CONTESTED);
2148
2149	umtxq_lock(&key);
2150	if (error == 0)
2151		umtxq_signal(&key, 1);
2152	umtxq_unbusy(&key);
2153	umtxq_unlock(&key);
2154
2155	if (error == -1)
2156		error = EFAULT;
2157	else {
2158		mtx_lock_spin(&umtx_lock);
2159		if (su != 0)
2160			uq->uq_inherited_pri = new_inherited_pri;
2161		pri = PRI_MAX;
2162		TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
2163			uq2 = TAILQ_FIRST(&pi->pi_blocked);
2164			if (uq2 != NULL) {
2165				if (pri > UPRI(uq2->uq_thread))
2166					pri = UPRI(uq2->uq_thread);
2167			}
2168		}
2169		if (pri > uq->uq_inherited_pri)
2170			pri = uq->uq_inherited_pri;
2171		thread_lock(td);
2172		sched_unlend_user_prio(td, pri);
2173		thread_unlock(td);
2174		mtx_unlock_spin(&umtx_lock);
2175	}
2176	umtx_key_release(&key);
2177	return (error);
2178}
2179
2180static int
2181do_set_ceiling(struct thread *td, struct umutex *m, uint32_t ceiling,
2182	uint32_t *old_ceiling)
2183{
2184	struct umtx_q *uq;
2185	uint32_t save_ceiling;
2186	uint32_t owner, id;
2187	uint32_t flags;
2188	int error;
2189
2190	flags = fuword32(&m->m_flags);
2191	if ((flags & UMUTEX_PRIO_PROTECT) == 0)
2192		return (EINVAL);
2193	if (ceiling > RTP_PRIO_MAX)
2194		return (EINVAL);
2195	id = td->td_tid;
2196	uq = td->td_umtxq;
2197	if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags),
2198	   &uq->uq_key)) != 0)
2199		return (error);
2200	for (;;) {
2201		umtxq_lock(&uq->uq_key);
2202		umtxq_busy(&uq->uq_key);
2203		umtxq_unlock(&uq->uq_key);
2204
2205		save_ceiling = fuword32(&m->m_ceilings[0]);
2206
2207		owner = casuword32(&m->m_owner,
2208		    UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);
2209
2210		if (owner == UMUTEX_CONTESTED) {
2211			suword32(&m->m_ceilings[0], ceiling);
2212			suword32(__DEVOLATILE(uint32_t *, &m->m_owner),
2213				UMUTEX_CONTESTED);
2214			error = 0;
2215			break;
2216		}
2217
2218		/* The address was invalid. */
2219		if (owner == -1) {
2220			error = EFAULT;
2221			break;
2222		}
2223
2224		if ((owner & ~UMUTEX_CONTESTED) == id) {
2225			suword32(&m->m_ceilings[0], ceiling);
2226			error = 0;
2227			break;
2228		}
2229
2230		/*
2231		 * If we caught a signal, we have retried and now
2232		 * exit immediately.
2233		 */
2234		if (error != 0)
2235			break;
2236
2237		/*
2238		 * We set the contested bit, sleep. Otherwise the lock changed
2239		 * and we need to retry or we lost a race to the thread
2240		 * unlocking the umtx.
2241		 */
2242		umtxq_lock(&uq->uq_key);
2243		umtxq_insert(uq);
2244		umtxq_unbusy(&uq->uq_key);
2245		error = umtxq_sleep(uq, "umtxpp", 0);
2246		umtxq_remove(uq);
2247		umtxq_unlock(&uq->uq_key);
2248	}
2249	umtxq_lock(&uq->uq_key);
2250	if (error == 0)
2251		umtxq_signal(&uq->uq_key, INT_MAX);
2252	umtxq_unbusy(&uq->uq_key);
2253	umtxq_unlock(&uq->uq_key);
2254	umtx_key_release(&uq->uq_key);
2255	if (error == 0 && old_ceiling != NULL)
2256		suword32(old_ceiling, save_ceiling);
2257	return (error);
2258}
2259
2260static int
2261_do_lock_umutex(struct thread *td, struct umutex *m, int flags, int timo,
2262	int mode)
2263{
2264	switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
2265	case 0:
2266		return (_do_lock_normal(td, m, flags, timo, mode));
2267	case UMUTEX_PRIO_INHERIT:
2268		return (_do_lock_pi(td, m, flags, timo, mode));
2269	case UMUTEX_PRIO_PROTECT:
2270		return (_do_lock_pp(td, m, flags, timo, mode));
2271	}
2272	return (EINVAL);
2273}
2274
2275/*
2276 * Lock a userland POSIX mutex.
2277 */
2278static int
2279do_lock_umutex(struct thread *td, struct umutex *m,
2280	struct timespec *timeout, int mode)
2281{
2282	struct timespec ts, ts2, ts3;
2283	struct timeval tv;
2284	uint32_t flags;
2285	int error;
2286
2287	flags = fuword32(&m->m_flags);
2288	if (flags == -1)
2289		return (EFAULT);
2290
2291	if (timeout == NULL) {
2292		error = _do_lock_umutex(td, m, flags, 0, mode);
2293		/* Mutex locking is restarted if it is interrupted. */
2294		if (error == EINTR && mode != _UMUTEX_WAIT)
2295			error = ERESTART;
2296	} else {
2297		getnanouptime(&ts);
2298		timespecadd(&ts, timeout);
2299		TIMESPEC_TO_TIMEVAL(&tv, timeout);
2300		for (;;) {
2301			error = _do_lock_umutex(td, m, flags, tvtohz(&tv), mode);
2302			if (error != ETIMEDOUT)
2303				break;
2304			getnanouptime(&ts2);
2305			if (timespeccmp(&ts2, &ts, >=)) {
2306				error = ETIMEDOUT;
2307				break;
2308			}
2309			ts3 = ts;
2310			timespecsub(&ts3, &ts2);
2311			TIMESPEC_TO_TIMEVAL(&tv, &ts3);
2312		}
2313		/* Timed-locking is not restarted. */
2314		if (error == ERESTART)
2315			error = EINTR;
2316	}
2317	return (error);
2318}
2319
2320/*
2321 * Unlock a userland POSIX mutex.
2322 */
2323static int
2324do_unlock_umutex(struct thread *td, struct umutex *m)
2325{
2326	uint32_t flags;
2327
2328	flags = fuword32(&m->m_flags);
2329	if (flags == -1)
2330		return (EFAULT);
2331
2332	switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
2333	case 0:
2334		return (do_unlock_normal(td, m, flags));
2335	case UMUTEX_PRIO_INHERIT:
2336		return (do_unlock_pi(td, m, flags));
2337	case UMUTEX_PRIO_PROTECT:
2338		return (do_unlock_pp(td, m, flags));
2339	}
2340
2341	return (EINVAL);
2342}
2343
2344static int
2345do_cv_wait(struct thread *td, struct ucond *cv, struct umutex *m,
2346	struct timespec *timeout, u_long wflags)
2347{
2348	struct umtx_q *uq;
2349	struct timeval tv;
2350	struct timespec cts, ets, tts;
2351	uint32_t flags;
2352	int error;
2353
2354	uq = td->td_umtxq;
2355	flags = fuword32(&cv->c_flags);
2356	error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &uq->uq_key);
2357	if (error != 0)
2358		return (error);
2359	umtxq_lock(&uq->uq_key);
2360	umtxq_busy(&uq->uq_key);
2361	umtxq_insert(uq);
2362	umtxq_unlock(&uq->uq_key);
2363
2364	/*
2365	 * The magic thing is we should set c_has_waiters to 1 before
2366	 * releasing user mutex.
2367	 */
2368	suword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters), 1);
2369
2370	umtxq_lock(&uq->uq_key);
2371	umtxq_unbusy(&uq->uq_key);
2372	umtxq_unlock(&uq->uq_key);
2373
2374	error = do_unlock_umutex(td, m);
2375
2376	umtxq_lock(&uq->uq_key);
2377	if (error == 0) {
2378		if ((wflags & UMTX_CHECK_UNPARKING) &&
2379		    (td->td_pflags & TDP_WAKEUP)) {
2380			td->td_pflags &= ~TDP_WAKEUP;
2381			error = EINTR;
2382		} else if (timeout == NULL) {
2383			error = umtxq_sleep(uq, "ucond", 0);
2384		} else {
2385			getnanouptime(&ets);
2386			timespecadd(&ets, timeout);
2387			TIMESPEC_TO_TIMEVAL(&tv, timeout);
2388			for (;;) {
2389				error = umtxq_sleep(uq, "ucond", tvtohz(&tv));
2390				if (error != ETIMEDOUT)
2391					break;
2392				getnanouptime(&cts);
2393				if (timespeccmp(&cts, &ets, >=)) {
2394					error = ETIMEDOUT;
2395					break;
2396				}
2397				tts = ets;
2398				timespecsub(&tts, &cts);
2399				TIMESPEC_TO_TIMEVAL(&tv, &tts);
2400			}
2401		}
2402	}
2403
2404	if ((uq->uq_flags & UQF_UMTXQ) == 0)
2405		error = 0;
2406	else {
2407		umtxq_remove(uq);
2408		if (error == ERESTART)
2409			error = EINTR;
2410	}
2411
2412	umtxq_unlock(&uq->uq_key);
2413	umtx_key_release(&uq->uq_key);
2414	return (error);
2415}
2416
2417/*
2418 * Signal a userland condition variable.
2419 */
2420static int
2421do_cv_signal(struct thread *td, struct ucond *cv)
2422{
2423	struct umtx_key key;
2424	int error, cnt, nwake;
2425	uint32_t flags;
2426
2427	flags = fuword32(&cv->c_flags);
2428	if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
2429		return (error);
2430	umtxq_lock(&key);
2431	umtxq_busy(&key);
2432	cnt = umtxq_count(&key);
2433	nwake = umtxq_signal(&key, 1);
2434	if (cnt <= nwake) {
2435		umtxq_unlock(&key);
2436		error = suword32(
2437		    __DEVOLATILE(uint32_t *, &cv->c_has_waiters), 0);
2438		umtxq_lock(&key);
2439	}
2440	umtxq_unbusy(&key);
2441	umtxq_unlock(&key);
2442	umtx_key_release(&key);
2443	return (error);
2444}
2445
2446static int
2447do_cv_broadcast(struct thread *td, struct ucond *cv)
2448{
2449	struct umtx_key key;
2450	int error;
2451	uint32_t flags;
2452
2453	flags = fuword32(&cv->c_flags);
2454	if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
2455		return (error);
2456
2457	umtxq_lock(&key);
2458	umtxq_busy(&key);
2459	umtxq_signal(&key, INT_MAX);
2460	umtxq_unlock(&key);
2461
2462	error = suword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters), 0);
2463
2464	umtxq_lock(&key);
2465	umtxq_unbusy(&key);
2466	umtxq_unlock(&key);
2467
2468	umtx_key_release(&key);
2469	return (error);
2470}
2471
2472static int
2473do_rw_rdlock(struct thread *td, struct urwlock *rwlock, long fflag, int timo)
2474{
2475	struct umtx_q *uq;
2476	uint32_t flags, wrflags;
2477	int32_t state, oldstate;
2478	int32_t blocked_readers;
2479	int error;
2480
2481	uq = td->td_umtxq;
2482	flags = fuword32(&rwlock->rw_flags);
2483	error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
2484	if (error != 0)
2485		return (error);
2486
2487	wrflags = URWLOCK_WRITE_OWNER;
2488	if (!(fflag & URWLOCK_PREFER_READER) && !(flags & URWLOCK_PREFER_READER))
2489		wrflags |= URWLOCK_WRITE_WAITERS;
2490
2491	for (;;) {
2492		state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2493		/* try to lock it */
2494		while (!(state & wrflags)) {
2495			if (__predict_false(URWLOCK_READER_COUNT(state) == URWLOCK_MAX_READERS)) {
2496				umtx_key_release(&uq->uq_key);
2497				return (EAGAIN);
2498			}
2499			oldstate = casuword32(&rwlock->rw_state, state, state + 1);
2500			if (oldstate == state) {
2501				umtx_key_release(&uq->uq_key);
2502				return (0);
2503			}
2504			state = oldstate;
2505		}
2506
2507		if (error)
2508			break;
2509
2510		/* grab monitor lock */
2511		umtxq_lock(&uq->uq_key);
2512		umtxq_busy(&uq->uq_key);
2513		umtxq_unlock(&uq->uq_key);
2514
2515		/*
2516		 * re-read the state, in case it changed between the try-lock above
2517		 * and the check below
2518		 */
2519		state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2520
2521		/* set read contention bit */
2522		while ((state & wrflags) && !(state & URWLOCK_READ_WAITERS)) {
2523			oldstate = casuword32(&rwlock->rw_state, state, state | URWLOCK_READ_WAITERS);
2524			if (oldstate == state)
2525				goto sleep;
2526			state = oldstate;
2527		}
2528
2529		/* state is changed while setting flags, restart */
2530		if (!(state & wrflags)) {
2531			umtxq_lock(&uq->uq_key);
2532			umtxq_unbusy(&uq->uq_key);
2533			umtxq_unlock(&uq->uq_key);
2534			continue;
2535		}
2536
2537sleep:
2538		/* contention bit is set, before sleeping, increase read waiter count */
2539		blocked_readers = fuword32(&rwlock->rw_blocked_readers);
2540		suword32(&rwlock->rw_blocked_readers, blocked_readers+1);
2541
2542		while (state & wrflags) {
2543			umtxq_lock(&uq->uq_key);
2544			umtxq_insert(uq);
2545			umtxq_unbusy(&uq->uq_key);
2546
2547			error = umtxq_sleep(uq, "urdlck", timo);
2548
2549			umtxq_busy(&uq->uq_key);
2550			umtxq_remove(uq);
2551			umtxq_unlock(&uq->uq_key);
2552			if (error)
2553				break;
2554			state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2555		}
2556
2557		/* decrease read waiter count, and may clear read contention bit */
2558		blocked_readers = fuword32(&rwlock->rw_blocked_readers);
2559		suword32(&rwlock->rw_blocked_readers, blocked_readers-1);
2560		if (blocked_readers == 1) {
2561			state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2562			for (;;) {
2563				oldstate = casuword32(&rwlock->rw_state, state,
2564					 state & ~URWLOCK_READ_WAITERS);
2565				if (oldstate == state)
2566					break;
2567				state = oldstate;
2568			}
2569		}
2570
2571		umtxq_lock(&uq->uq_key);
2572		umtxq_unbusy(&uq->uq_key);
2573		umtxq_unlock(&uq->uq_key);
2574	}
2575	umtx_key_release(&uq->uq_key);
2576	return (error);
2577}
2578
2579static int
2580do_rw_rdlock2(struct thread *td, void *obj, long val, struct timespec *timeout)
2581{
2582	struct timespec ts, ts2, ts3;
2583	struct timeval tv;
2584	int error;
2585
2586	getnanouptime(&ts);
2587	timespecadd(&ts, timeout);
2588	TIMESPEC_TO_TIMEVAL(&tv, timeout);
2589	for (;;) {
2590		error = do_rw_rdlock(td, obj, val, tvtohz(&tv));
2591		if (error != ETIMEDOUT)
2592			break;
2593		getnanouptime(&ts2);
2594		if (timespeccmp(&ts2, &ts, >=)) {
2595			error = ETIMEDOUT;
2596			break;
2597		}
2598		ts3 = ts;
2599		timespecsub(&ts3, &ts2);
2600		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
2601	}
2602	if (error == ERESTART)
2603		error = EINTR;
2604	return (error);
2605}
2606
2607static int
2608do_rw_wrlock(struct thread *td, struct urwlock *rwlock, int timo)
2609{
2610	struct umtx_q *uq;
2611	uint32_t flags;
2612	int32_t state, oldstate;
2613	int32_t blocked_writers;
2614	int32_t blocked_readers;
2615	int error;
2616
2617	uq = td->td_umtxq;
2618	flags = fuword32(&rwlock->rw_flags);
2619	error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
2620	if (error != 0)
2621		return (error);
2622
2623	blocked_readers = 0;
2624	for (;;) {
2625		state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2626		while (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
2627			oldstate = casuword32(&rwlock->rw_state, state, state | URWLOCK_WRITE_OWNER);
2628			if (oldstate == state) {
2629				umtx_key_release(&uq->uq_key);
2630				return (0);
2631			}
2632			state = oldstate;
2633		}
2634
2635		if (error) {
2636			if (!(state & (URWLOCK_WRITE_OWNER|URWLOCK_WRITE_WAITERS)) &&
2637			    blocked_readers != 0) {
2638				umtxq_lock(&uq->uq_key);
2639				umtxq_busy(&uq->uq_key);
2640				umtxq_signal_queue(&uq->uq_key, INT_MAX, UMTX_SHARED_QUEUE);
2641				umtxq_unbusy(&uq->uq_key);
2642				umtxq_unlock(&uq->uq_key);
2643			}
2644
2645			break;
2646		}
2647
2648		/* grab monitor lock */
2649		umtxq_lock(&uq->uq_key);
2650		umtxq_busy(&uq->uq_key);
2651		umtxq_unlock(&uq->uq_key);
2652
2653		/*
2654		 * re-read the state, in case it changed between the try-lock above
2655		 * and the check below
2656		 */
2657		state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2658
2659		while (((state & URWLOCK_WRITE_OWNER) || URWLOCK_READER_COUNT(state) != 0) &&
2660		       (state & URWLOCK_WRITE_WAITERS) == 0) {
2661			oldstate = casuword32(&rwlock->rw_state, state, state | URWLOCK_WRITE_WAITERS);
2662			if (oldstate == state)
2663				goto sleep;
2664			state = oldstate;
2665		}
2666
2667		if (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
2668			umtxq_lock(&uq->uq_key);
2669			umtxq_unbusy(&uq->uq_key);
2670			umtxq_unlock(&uq->uq_key);
2671			continue;
2672		}
2673sleep:
2674		blocked_writers = fuword32(&rwlock->rw_blocked_writers);
2675		suword32(&rwlock->rw_blocked_writers, blocked_writers+1);
2676
2677		while ((state & URWLOCK_WRITE_OWNER) || URWLOCK_READER_COUNT(state) != 0) {
2678			umtxq_lock(&uq->uq_key);
2679			umtxq_insert_queue(uq, UMTX_EXCLUSIVE_QUEUE);
2680			umtxq_unbusy(&uq->uq_key);
2681
2682			error = umtxq_sleep(uq, "uwrlck", timo);
2683
2684			umtxq_busy(&uq->uq_key);
2685			umtxq_remove_queue(uq, UMTX_EXCLUSIVE_QUEUE);
2686			umtxq_unlock(&uq->uq_key);
2687			if (error)
2688				break;
2689			state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2690		}
2691
2692		blocked_writers = fuword32(&rwlock->rw_blocked_writers);
2693		suword32(&rwlock->rw_blocked_writers, blocked_writers-1);
2694		if (blocked_writers == 1) {
2695			state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2696			for (;;) {
2697				oldstate = casuword32(&rwlock->rw_state, state,
2698					 state & ~URWLOCK_WRITE_WAITERS);
2699				if (oldstate == state)
2700					break;
2701				state = oldstate;
2702			}
2703			blocked_readers = fuword32(&rwlock->rw_blocked_readers);
2704		} else
2705			blocked_readers = 0;
2706
2707		umtxq_lock(&uq->uq_key);
2708		umtxq_unbusy(&uq->uq_key);
2709		umtxq_unlock(&uq->uq_key);
2710	}
2711
2712	umtx_key_release(&uq->uq_key);
2713	return (error);
2714}
2715
2716static int
2717do_rw_wrlock2(struct thread *td, void *obj, struct timespec *timeout)
2718{
2719	struct timespec ts, ts2, ts3;
2720	struct timeval tv;
2721	int error;
2722
2723	getnanouptime(&ts);
2724	timespecadd(&ts, timeout);
2725	TIMESPEC_TO_TIMEVAL(&tv, timeout);
2726	for (;;) {
2727		error = do_rw_wrlock(td, obj, tvtohz(&tv));
2728		if (error != ETIMEDOUT)
2729			break;
2730		getnanouptime(&ts2);
2731		if (timespeccmp(&ts2, &ts, >=)) {
2732			error = ETIMEDOUT;
2733			break;
2734		}
2735		ts3 = ts;
2736		timespecsub(&ts3, &ts2);
2737		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
2738	}
2739	if (error == ERESTART)
2740		error = EINTR;
2741	return (error);
2742}
2743
2744static int
2745do_rw_unlock(struct thread *td, struct urwlock *rwlock)
2746{
2747	struct umtx_q *uq;
2748	uint32_t flags;
2749	int32_t state, oldstate;
2750	int error, q, count;
2751
2752	uq = td->td_umtxq;
2753	flags = fuword32(&rwlock->rw_flags);
2754	error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
2755	if (error != 0)
2756		return (error);
2757
2758	state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2759	if (state & URWLOCK_WRITE_OWNER) {
2760		for (;;) {
2761			oldstate = casuword32(&rwlock->rw_state, state,
2762				state & ~URWLOCK_WRITE_OWNER);
2763			if (oldstate != state) {
2764				state = oldstate;
2765				if (!(oldstate & URWLOCK_WRITE_OWNER)) {
2766					error = EPERM;
2767					goto out;
2768				}
2769			} else
2770				break;
2771		}
2772	} else if (URWLOCK_READER_COUNT(state) != 0) {
2773		for (;;) {
2774			oldstate = casuword32(&rwlock->rw_state, state,
2775				state - 1);
2776			if (oldstate != state) {
2777				state = oldstate;
2778				if (URWLOCK_READER_COUNT(oldstate) == 0) {
2779					error = EPERM;
2780					goto out;
2781				}
2782			}
2783			else
2784				break;
2785		}
2786	} else {
2787		error = EPERM;
2788		goto out;
2789	}
2790
2791	count = 0;
2792
2793	if (!(flags & URWLOCK_PREFER_READER)) {
2794		if (state & URWLOCK_WRITE_WAITERS) {
2795			count = 1;
2796			q = UMTX_EXCLUSIVE_QUEUE;
2797		} else if (state & URWLOCK_READ_WAITERS) {
2798			count = INT_MAX;
2799			q = UMTX_SHARED_QUEUE;
2800		}
2801	} else {
2802		if (state & URWLOCK_READ_WAITERS) {
2803			count = INT_MAX;
2804			q = UMTX_SHARED_QUEUE;
2805		} else if (state & URWLOCK_WRITE_WAITERS) {
2806			count = 1;
2807			q = UMTX_EXCLUSIVE_QUEUE;
2808		}
2809	}
2810
2811	if (count) {
2812		umtxq_lock(&uq->uq_key);
2813		umtxq_busy(&uq->uq_key);
2814		umtxq_signal_queue(&uq->uq_key, count, q);
2815		umtxq_unbusy(&uq->uq_key);
2816		umtxq_unlock(&uq->uq_key);
2817	}
2818out:
2819	umtx_key_release(&uq->uq_key);
2820	return (error);
2821}
2822
2823static int
2824do_sem_wait(struct thread *td, struct _usem *sem, struct timespec *timeout)
2825{
2826	struct umtx_q *uq;
2827	struct timeval tv;
2828	struct timespec cts, ets, tts;
2829	uint32_t flags, count;
2830	int error;
2831
2832	uq = td->td_umtxq;
2833	flags = fuword32(&sem->_flags);
2834	error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key);
2835	if (error != 0)
2836		return (error);
2837	umtxq_lock(&uq->uq_key);
2838	umtxq_busy(&uq->uq_key);
2839	umtxq_insert(uq);
2840	umtxq_unlock(&uq->uq_key);
2841
2842	suword32(__DEVOLATILE(uint32_t *, &sem->_has_waiters), 1);
2843
2844	count = fuword32(__DEVOLATILE(uint32_t *, &sem->_count));
2845	if (count != 0) {
2846		umtxq_lock(&uq->uq_key);
2847		umtxq_unbusy(&uq->uq_key);
2848		umtxq_remove(uq);
2849		umtxq_unlock(&uq->uq_key);
2850		umtx_key_release(&uq->uq_key);
2851		return (0);
2852	}
2853
2854	umtxq_lock(&uq->uq_key);
2855	umtxq_unbusy(&uq->uq_key);
2856	umtxq_unlock(&uq->uq_key);
2857
2858	umtxq_lock(&uq->uq_key);
2859	if (timeout == NULL) {
2860		error = umtxq_sleep(uq, "usem", 0);
2861	} else {
2862		getnanouptime(&ets);
2863		timespecadd(&ets, timeout);
2864		TIMESPEC_TO_TIMEVAL(&tv, timeout);
2865		for (;;) {
2866			error = umtxq_sleep(uq, "usem", tvtohz(&tv));
2867			if (error != ETIMEDOUT)
2868				break;
2869			getnanouptime(&cts);
2870			if (timespeccmp(&cts, &ets, >=)) {
2871				error = ETIMEDOUT;
2872				break;
2873			}
2874			tts = ets;
2875			timespecsub(&tts, &cts);
2876			TIMESPEC_TO_TIMEVAL(&tv, &tts);
2877		}
2878	}
2879
2880	if ((uq->uq_flags & UQF_UMTXQ) == 0)
2881		error = 0;
2882	else {
2883		umtxq_remove(uq);
2884		if (error == ERESTART)
2885			error = EINTR;
2886	}
2887	umtxq_unlock(&uq->uq_key);
2888	umtx_key_release(&uq->uq_key);
2889	return (error);
2890}
2891
2892/*
2893 * Signal a userland condition variable.
2894 */
2895static int
2896do_sem_wake(struct thread *td, struct _usem *sem)
2897{
2898	struct umtx_key key;
2899	int error, cnt, nwake;
2900	uint32_t flags;
2901
2902	flags = fuword32(&sem->_flags);
2903	if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0)
2904		return (error);
2905	umtxq_lock(&key);
2906	umtxq_busy(&key);
2907	cnt = umtxq_count(&key);
2908	nwake = umtxq_signal(&key, 1);
2909	if (cnt <= nwake) {
2910		umtxq_unlock(&key);
2911		error = suword32(
2912		    __DEVOLATILE(uint32_t *, &sem->_has_waiters), 0);
2913		umtxq_lock(&key);
2914	}
2915	umtxq_unbusy(&key);
2916	umtxq_unlock(&key);
2917	umtx_key_release(&key);
2918	return (error);
2919}
2920
2921int
2922_umtx_lock(struct thread *td, struct _umtx_lock_args *uap)
2923    /* struct umtx *umtx */
2924{
2925	return _do_lock_umtx(td, uap->umtx, td->td_tid, 0);
2926}
2927
2928int
2929_umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
2930    /* struct umtx *umtx */
2931{
2932	return do_unlock_umtx(td, uap->umtx, td->td_tid);
2933}
2934
2935static int
2936__umtx_op_lock_umtx(struct thread *td, struct _umtx_op_args *uap)
2937{
2938	struct timespec *ts, timeout;
2939	int error;
2940
2941	/* Allow a null timespec (wait forever). */
2942	if (uap->uaddr2 == NULL)
2943		ts = NULL;
2944	else {
2945		error = copyin(uap->uaddr2, &timeout, sizeof(timeout));
2946		if (error != 0)
2947			return (error);
2948		if (timeout.tv_nsec >= 1000000000 ||
2949		    timeout.tv_nsec < 0) {
2950			return (EINVAL);
2951		}
2952		ts = &timeout;
2953	}
2954	return (do_lock_umtx(td, uap->obj, uap->val, ts));
2955}
2956
2957static int
2958__umtx_op_unlock_umtx(struct thread *td, struct _umtx_op_args *uap)
2959{
2960	return (do_unlock_umtx(td, uap->obj, uap->val));
2961}
2962
2963static int
2964__umtx_op_wait(struct thread *td, struct _umtx_op_args *uap)
2965{
2966	struct timespec *ts, timeout;
2967	int error;
2968
2969	if (uap->uaddr2 == NULL)
2970		ts = NULL;
2971	else {
2972		error = copyin(uap->uaddr2, &timeout, sizeof(timeout));
2973		if (error != 0)
2974			return (error);
2975		if (timeout.tv_nsec >= 1000000000 ||
2976		    timeout.tv_nsec < 0)
2977			return (EINVAL);
2978		ts = &timeout;
2979	}
2980	return do_wait(td, uap->obj, uap->val, ts, 0, 0);
2981}
2982
2983static int
2984__umtx_op_wait_uint(struct thread *td, struct _umtx_op_args *uap)
2985{
2986	struct timespec *ts, timeout;
2987	int error;
2988
2989	if (uap->uaddr2 == NULL)
2990		ts = NULL;
2991	else {
2992		error = copyin(uap->uaddr2, &timeout, sizeof(timeout));
2993		if (error != 0)
2994			return (error);
2995		if (timeout.tv_nsec >= 1000000000 ||
2996		    timeout.tv_nsec < 0)
2997			return (EINVAL);
2998		ts = &timeout;
2999	}
3000	return do_wait(td, uap->obj, uap->val, ts, 1, 0);
3001}
3002
3003static int
3004__umtx_op_wait_uint_private(struct thread *td, struct _umtx_op_args *uap)
3005{
3006	struct timespec *ts, timeout;
3007	int error;
3008
3009	if (uap->uaddr2 == NULL)
3010		ts = NULL;
3011	else {
3012		error = copyin(uap->uaddr2, &timeout, sizeof(timeout));
3013		if (error != 0)
3014			return (error);
3015		if (timeout.tv_nsec >= 1000000000 ||
3016		    timeout.tv_nsec < 0)
3017			return (EINVAL);
3018		ts = &timeout;
3019	}
3020	return do_wait(td, uap->obj, uap->val, ts, 1, 1);
3021}
3022
3023static int
3024__umtx_op_wake(struct thread *td, struct _umtx_op_args *uap)
3025{
3026	return (kern_umtx_wake(td, uap->obj, uap->val, 0));
3027}
3028
3029static int
3030__umtx_op_wake_private(struct thread *td, struct _umtx_op_args *uap)
3031{
3032	return (kern_umtx_wake(td, uap->obj, uap->val, 1));
3033}
3034
3035static int
3036__umtx_op_lock_umutex(struct thread *td, struct _umtx_op_args *uap)
3037{
3038	struct timespec *ts, timeout;
3039	int error;
3040
3041	/* Allow a null timespec (wait forever). */
3042	if (uap->uaddr2 == NULL)
3043		ts = NULL;
3044	else {
3045		error = copyin(uap->uaddr2, &timeout,
3046		    sizeof(timeout));
3047		if (error != 0)
3048			return (error);
3049		if (timeout.tv_nsec >= 1000000000 ||
3050		    timeout.tv_nsec < 0) {
3051			return (EINVAL);
3052		}
3053		ts = &timeout;
3054	}
3055	return do_lock_umutex(td, uap->obj, ts, 0);
3056}
3057
3058static int
3059__umtx_op_trylock_umutex(struct thread *td, struct _umtx_op_args *uap)
3060{
3061	return do_lock_umutex(td, uap->obj, NULL, _UMUTEX_TRY);
3062}
3063
3064static int
3065__umtx_op_wait_umutex(struct thread *td, struct _umtx_op_args *uap)
3066{
3067	struct timespec *ts, timeout;
3068	int error;
3069
3070	/* Allow a null timespec (wait forever). */
3071	if (uap->uaddr2 == NULL)
3072		ts = NULL;
3073	else {
3074		error = copyin(uap->uaddr2, &timeout,
3075		    sizeof(timeout));
3076		if (error != 0)
3077			return (error);
3078		if (timeout.tv_nsec >= 1000000000 ||
3079		    timeout.tv_nsec < 0) {
3080			return (EINVAL);
3081		}
3082		ts = &timeout;
3083	}
3084	return do_lock_umutex(td, uap->obj, ts, _UMUTEX_WAIT);
3085}
3086
3087static int
3088__umtx_op_wake_umutex(struct thread *td, struct _umtx_op_args *uap)
3089{
3090	return do_wake_umutex(td, uap->obj);
3091}
3092
3093static int
3094__umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap)
3095{
3096	return do_unlock_umutex(td, uap->obj);
3097}
3098
3099static int
3100__umtx_op_set_ceiling(struct thread *td, struct _umtx_op_args *uap)
3101{
3102	return do_set_ceiling(td, uap->obj, uap->val, uap->uaddr1);
3103}
3104
3105static int
3106__umtx_op_cv_wait(struct thread *td, struct _umtx_op_args *uap)
3107{
3108	struct timespec *ts, timeout;
3109	int error;
3110
3111	/* Allow a null timespec (wait forever). */
3112	if (uap->uaddr2 == NULL)
3113		ts = NULL;
3114	else {
3115		error = copyin(uap->uaddr2, &timeout,
3116		    sizeof(timeout));
3117		if (error != 0)
3118			return (error);
3119		if (timeout.tv_nsec >= 1000000000 ||
3120		    timeout.tv_nsec < 0) {
3121			return (EINVAL);
3122		}
3123		ts = &timeout;
3124	}
3125	return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
3126}
3127
3128static int
3129__umtx_op_cv_signal(struct thread *td, struct _umtx_op_args *uap)
3130{
3131	return do_cv_signal(td, uap->obj);
3132}
3133
3134static int
3135__umtx_op_cv_broadcast(struct thread *td, struct _umtx_op_args *uap)
3136{
3137	return do_cv_broadcast(td, uap->obj);
3138}
3139
3140static int
3141__umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap)
3142{
3143	struct timespec timeout;
3144	int error;
3145
3146	/* Allow a null timespec (wait forever). */
3147	if (uap->uaddr2 == NULL) {
3148		error = do_rw_rdlock(td, uap->obj, uap->val, 0);
3149	} else {
3150		error = copyin(uap->uaddr2, &timeout,
3151		    sizeof(timeout));
3152		if (error != 0)
3153			return (error);
3154		if (timeout.tv_nsec >= 1000000000 ||
3155		    timeout.tv_nsec < 0) {
3156			return (EINVAL);
3157		}
3158		error = do_rw_rdlock2(td, uap->obj, uap->val, &timeout);
3159	}
3160	return (error);
3161}
3162
3163static int
3164__umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap)
3165{
3166	struct timespec timeout;
3167	int error;
3168
3169	/* Allow a null timespec (wait forever). */
3170	if (uap->uaddr2 == NULL) {
3171		error = do_rw_wrlock(td, uap->obj, 0);
3172	} else {
3173		error = copyin(uap->uaddr2, &timeout,
3174		    sizeof(timeout));
3175		if (error != 0)
3176			return (error);
3177		if (timeout.tv_nsec >= 1000000000 ||
3178		    timeout.tv_nsec < 0) {
3179			return (EINVAL);
3180		}
3181
3182		error = do_rw_wrlock2(td, uap->obj, &timeout);
3183	}
3184	return (error);
3185}
3186
3187static int
3188__umtx_op_rw_unlock(struct thread *td, struct _umtx_op_args *uap)
3189{
3190	return do_rw_unlock(td, uap->obj);
3191}
3192
3193static int
3194__umtx_op_sem_wait(struct thread *td, struct _umtx_op_args *uap)
3195{
3196	struct timespec *ts, timeout;
3197	int error;
3198
3199	/* Allow a null timespec (wait forever). */
3200	if (uap->uaddr2 == NULL)
3201		ts = NULL;
3202	else {
3203		error = copyin(uap->uaddr2, &timeout,
3204		    sizeof(timeout));
3205		if (error != 0)
3206			return (error);
3207		if (timeout.tv_nsec >= 1000000000 ||
3208		    timeout.tv_nsec < 0) {
3209			return (EINVAL);
3210		}
3211		ts = &timeout;
3212	}
3213	return (do_sem_wait(td, uap->obj, ts));
3214}
3215
3216static int
3217__umtx_op_sem_wake(struct thread *td, struct _umtx_op_args *uap)
3218{
3219	return do_sem_wake(td, uap->obj);
3220}
3221
3222typedef int (*_umtx_op_func)(struct thread *td, struct _umtx_op_args *uap);
3223
3224static _umtx_op_func op_table[] = {
3225	__umtx_op_lock_umtx,		/* UMTX_OP_LOCK */
3226	__umtx_op_unlock_umtx,		/* UMTX_OP_UNLOCK */
3227	__umtx_op_wait,			/* UMTX_OP_WAIT */
3228	__umtx_op_wake,			/* UMTX_OP_WAKE */
3229	__umtx_op_trylock_umutex,	/* UMTX_OP_MUTEX_TRYLOCK */
3230	__umtx_op_lock_umutex,		/* UMTX_OP_MUTEX_LOCK */
3231	__umtx_op_unlock_umutex,	/* UMTX_OP_MUTEX_UNLOCK */
3232	__umtx_op_set_ceiling,		/* UMTX_OP_SET_CEILING */
3233	__umtx_op_cv_wait,		/* UMTX_OP_CV_WAIT*/
3234	__umtx_op_cv_signal,		/* UMTX_OP_CV_SIGNAL */
3235	__umtx_op_cv_broadcast,		/* UMTX_OP_CV_BROADCAST */
3236	__umtx_op_wait_uint,		/* UMTX_OP_WAIT_UINT */
3237	__umtx_op_rw_rdlock,		/* UMTX_OP_RW_RDLOCK */
3238	__umtx_op_rw_wrlock,		/* UMTX_OP_RW_WRLOCK */
3239	__umtx_op_rw_unlock,		/* UMTX_OP_RW_UNLOCK */
3240	__umtx_op_wait_uint_private,	/* UMTX_OP_WAIT_UINT_PRIVATE */
3241	__umtx_op_wake_private,		/* UMTX_OP_WAKE_PRIVATE */
3242	__umtx_op_wait_umutex,		/* UMTX_OP_UMUTEX_WAIT */
3243	__umtx_op_wake_umutex,		/* UMTX_OP_UMUTEX_WAKE */
3244	__umtx_op_sem_wait,		/* UMTX_OP_SEM_WAIT */
3245	__umtx_op_sem_wake		/* UMTX_OP_SEM_WAKE */
3246};
3247
3248int
3249_umtx_op(struct thread *td, struct _umtx_op_args *uap)
3250{
3251	if ((unsigned)uap->op < UMTX_OP_MAX)
3252		return (*op_table[uap->op])(td, uap);
3253	return (EINVAL);
3254}
3255
3256#ifdef COMPAT_FREEBSD32
3257int
3258freebsd32_umtx_lock(struct thread *td, struct freebsd32_umtx_lock_args *uap)
3259    /* struct umtx *umtx */
3260{
3261	return (do_lock_umtx32(td, (uint32_t *)uap->umtx, td->td_tid, NULL));
3262}
3263
3264int
3265freebsd32_umtx_unlock(struct thread *td, struct freebsd32_umtx_unlock_args *uap)
3266    /* struct umtx *umtx */
3267{
3268	return (do_unlock_umtx32(td, (uint32_t *)uap->umtx, td->td_tid));
3269}
3270
3271struct timespec32 {
3272	uint32_t tv_sec;
3273	uint32_t tv_nsec;
3274};
3275
3276static inline int
3277copyin_timeout32(void *addr, struct timespec *tsp)
3278{
3279	struct timespec32 ts32;
3280	int error;
3281
3282	error = copyin(addr, &ts32, sizeof(struct timespec32));
3283	if (error == 0) {
3284		tsp->tv_sec = ts32.tv_sec;
3285		tsp->tv_nsec = ts32.tv_nsec;
3286	}
3287	return (error);
3288}
3289
3290static int
3291__umtx_op_lock_umtx_compat32(struct thread *td, struct _umtx_op_args *uap)
3292{
3293	struct timespec *ts, timeout;
3294	int error;
3295
3296	/* Allow a null timespec (wait forever). */
3297	if (uap->uaddr2 == NULL)
3298		ts = NULL;
3299	else {
3300		error = copyin_timeout32(uap->uaddr2, &timeout);
3301		if (error != 0)
3302			return (error);
3303		if (timeout.tv_nsec >= 1000000000 ||
3304		    timeout.tv_nsec < 0) {
3305			return (EINVAL);
3306		}
3307		ts = &timeout;
3308	}
3309	return (do_lock_umtx32(td, uap->obj, uap->val, ts));
3310}
3311
3312static int
3313__umtx_op_unlock_umtx_compat32(struct thread *td, struct _umtx_op_args *uap)
3314{
3315	return (do_unlock_umtx32(td, uap->obj, (uint32_t)uap->val));
3316}
3317
3318static int
3319__umtx_op_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
3320{
3321	struct timespec *ts, timeout;
3322	int error;
3323
3324	if (uap->uaddr2 == NULL)
3325		ts = NULL;
3326	else {
3327		error = copyin_timeout32(uap->uaddr2, &timeout);
3328		if (error != 0)
3329			return (error);
3330		if (timeout.tv_nsec >= 1000000000 ||
3331		    timeout.tv_nsec < 0)
3332			return (EINVAL);
3333		ts = &timeout;
3334	}
3335	return do_wait(td, uap->obj, uap->val, ts, 1, 0);
3336}
3337
3338static int
3339__umtx_op_lock_umutex_compat32(struct thread *td, struct _umtx_op_args *uap)
3340{
3341	struct timespec *ts, timeout;
3342	int error;
3343
3344	/* Allow a null timespec (wait forever). */
3345	if (uap->uaddr2 == NULL)
3346		ts = NULL;
3347	else {
3348		error = copyin_timeout32(uap->uaddr2, &timeout);
3349		if (error != 0)
3350			return (error);
3351		if (timeout.tv_nsec >= 1000000000 ||
3352		    timeout.tv_nsec < 0)
3353			return (EINVAL);
3354		ts = &timeout;
3355	}
3356	return do_lock_umutex(td, uap->obj, ts, 0);
3357}
3358
3359static int
3360__umtx_op_wait_umutex_compat32(struct thread *td, struct _umtx_op_args *uap)
3361{
3362	struct timespec *ts, timeout;
3363	int error;
3364
3365	/* Allow a null timespec (wait forever). */
3366	if (uap->uaddr2 == NULL)
3367		ts = NULL;
3368	else {
3369		error = copyin_timeout32(uap->uaddr2, &timeout);
3370		if (error != 0)
3371			return (error);
3372		if (timeout.tv_nsec >= 1000000000 ||
3373		    timeout.tv_nsec < 0)
3374			return (EINVAL);
3375		ts = &timeout;
3376	}
3377	return do_lock_umutex(td, uap->obj, ts, _UMUTEX_WAIT);
3378}
3379
3380static int
3381__umtx_op_cv_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
3382{
3383	struct timespec *ts, timeout;
3384	int error;
3385
3386	/* Allow a null timespec (wait forever). */
3387	if (uap->uaddr2 == NULL)
3388		ts = NULL;
3389	else {
3390		error = copyin_timeout32(uap->uaddr2, &timeout);
3391		if (error != 0)
3392			return (error);
3393		if (timeout.tv_nsec >= 1000000000 ||
3394		    timeout.tv_nsec < 0)
3395			return (EINVAL);
3396		ts = &timeout;
3397	}
3398	return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
3399}
3400
3401static int
3402__umtx_op_rw_rdlock_compat32(struct thread *td, struct _umtx_op_args *uap)
3403{
3404	struct timespec timeout;
3405	int error;
3406
3407	/* Allow a null timespec (wait forever). */
3408	if (uap->uaddr2 == NULL) {
3409		error = do_rw_rdlock(td, uap->obj, uap->val, 0);
3410	} else {
3411		error = copyin(uap->uaddr2, &timeout,
3412		    sizeof(timeout));
3413		if (error != 0)
3414			return (error);
3415		if (timeout.tv_nsec >= 1000000000 ||
3416		    timeout.tv_nsec < 0) {
3417			return (EINVAL);
3418		}
3419		error = do_rw_rdlock2(td, uap->obj, uap->val, &timeout);
3420	}
3421	return (error);
3422}
3423
3424static int
3425__umtx_op_rw_wrlock_compat32(struct thread *td, struct _umtx_op_args *uap)
3426{
3427	struct timespec timeout;
3428	int error;
3429
3430	/* Allow a null timespec (wait forever). */
3431	if (uap->uaddr2 == NULL) {
3432		error = do_rw_wrlock(td, uap->obj, 0);
3433	} else {
3434		error = copyin_timeout32(uap->uaddr2, &timeout);
3435		if (error != 0)
3436			return (error);
3437		if (timeout.tv_nsec >= 1000000000 ||
3438		    timeout.tv_nsec < 0) {
3439			return (EINVAL);
3440		}
3441
3442		error = do_rw_wrlock2(td, uap->obj, &timeout);
3443	}
3444	return (error);
3445}
3446
3447static int
3448__umtx_op_wait_uint_private_compat32(struct thread *td, struct _umtx_op_args *uap)
3449{
3450	struct timespec *ts, timeout;
3451	int error;
3452
3453	if (uap->uaddr2 == NULL)
3454		ts = NULL;
3455	else {
3456		error = copyin_timeout32(uap->uaddr2, &timeout);
3457		if (error != 0)
3458			return (error);
3459		if (timeout.tv_nsec >= 1000000000 ||
3460		    timeout.tv_nsec < 0)
3461			return (EINVAL);
3462		ts = &timeout;
3463	}
3464	return do_wait(td, uap->obj, uap->val, ts, 1, 1);
3465}
3466
3467static int
3468__umtx_op_sem_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
3469{
3470	struct timespec *ts, timeout;
3471	int error;
3472
3473	/* Allow a null timespec (wait forever). */
3474	if (uap->uaddr2 == NULL)
3475		ts = NULL;
3476	else {
3477		error = copyin_timeout32(uap->uaddr2, &timeout);
3478		if (error != 0)
3479			return (error);
3480		if (timeout.tv_nsec >= 1000000000 ||
3481		    timeout.tv_nsec < 0)
3482			return (EINVAL);
3483		ts = &timeout;
3484	}
3485	return (do_sem_wait(td, uap->obj, ts));
3486}
3487
3488static _umtx_op_func op_table_compat32[] = {
3489	__umtx_op_lock_umtx_compat32,	/* UMTX_OP_LOCK */
3490	__umtx_op_unlock_umtx_compat32,	/* UMTX_OP_UNLOCK */
3491	__umtx_op_wait_compat32,	/* UMTX_OP_WAIT */
3492	__umtx_op_wake,			/* UMTX_OP_WAKE */
3493	__umtx_op_trylock_umutex,	/* UMTX_OP_MUTEX_LOCK */
3494	__umtx_op_lock_umutex_compat32,	/* UMTX_OP_MUTEX_TRYLOCK */
3495	__umtx_op_unlock_umutex,	/* UMTX_OP_MUTEX_UNLOCK	*/
3496	__umtx_op_set_ceiling,		/* UMTX_OP_SET_CEILING */
3497	__umtx_op_cv_wait_compat32,	/* UMTX_OP_CV_WAIT*/
3498	__umtx_op_cv_signal,		/* UMTX_OP_CV_SIGNAL */
3499	__umtx_op_cv_broadcast,		/* UMTX_OP_CV_BROADCAST */
3500	__umtx_op_wait_compat32,	/* UMTX_OP_WAIT_UINT */
3501	__umtx_op_rw_rdlock_compat32,	/* UMTX_OP_RW_RDLOCK */
3502	__umtx_op_rw_wrlock_compat32,	/* UMTX_OP_RW_WRLOCK */
3503	__umtx_op_rw_unlock,		/* UMTX_OP_RW_UNLOCK */
3504	__umtx_op_wait_uint_private_compat32,	/* UMTX_OP_WAIT_UINT_PRIVATE */
3505	__umtx_op_wake_private,		/* UMTX_OP_WAKE_PRIVATE */
3506	__umtx_op_wait_umutex_compat32, /* UMTX_OP_UMUTEX_WAIT */
3507	__umtx_op_wake_umutex,		/* UMTX_OP_UMUTEX_WAKE */
3508	__umtx_op_sem_wait_compat32,	/* UMTX_OP_SEM_WAIT */
3509	__umtx_op_sem_wake		/* UMTX_OP_SEM_WAKE */
3510};
3511
3512int
3513freebsd32_umtx_op(struct thread *td, struct freebsd32_umtx_op_args *uap)
3514{
3515	if ((unsigned)uap->op < UMTX_OP_MAX)
3516		return (*op_table_compat32[uap->op])(td,
3517			(struct _umtx_op_args *)uap);
3518	return (EINVAL);
3519}
3520#endif
3521
3522void
3523umtx_thread_init(struct thread *td)
3524{
3525	td->td_umtxq = umtxq_alloc();
3526	td->td_umtxq->uq_thread = td;
3527}
3528
3529void
3530umtx_thread_fini(struct thread *td)
3531{
3532	umtxq_free(td->td_umtxq);
3533}
3534
3535/*
3536 * It will be called when new thread is created, e.g fork().
3537 */
3538void
3539umtx_thread_alloc(struct thread *td)
3540{
3541	struct umtx_q *uq;
3542
3543	uq = td->td_umtxq;
3544	uq->uq_inherited_pri = PRI_MAX;
3545
3546	KASSERT(uq->uq_flags == 0, ("uq_flags != 0"));
3547	KASSERT(uq->uq_thread == td, ("uq_thread != td"));
3548	KASSERT(uq->uq_pi_blocked == NULL, ("uq_pi_blocked != NULL"));
3549	KASSERT(TAILQ_EMPTY(&uq->uq_pi_contested), ("uq_pi_contested is not empty"));
3550}
3551
3552/*
3553 * exec() hook.
3554 */
3555static void
3556umtx_exec_hook(void *arg __unused, struct proc *p __unused,
3557	struct image_params *imgp __unused)
3558{
3559	umtx_thread_cleanup(curthread);
3560}
3561
3562/*
3563 * thread_exit() hook.
3564 */
3565void
3566umtx_thread_exit(struct thread *td)
3567{
3568	umtx_thread_cleanup(td);
3569}
3570
3571/*
3572 * clean up umtx data.
3573 */
3574static void
3575umtx_thread_cleanup(struct thread *td)
3576{
3577	struct umtx_q *uq;
3578	struct umtx_pi *pi;
3579
3580	if ((uq = td->td_umtxq) == NULL)
3581		return;
3582
3583	mtx_lock_spin(&umtx_lock);
3584	uq->uq_inherited_pri = PRI_MAX;
3585	while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
3586		pi->pi_owner = NULL;
3587		TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
3588	}
3589	thread_lock(td);
3590	td->td_flags &= ~TDF_UBORROWING;
3591	thread_unlock(td);
3592	mtx_unlock_spin(&umtx_lock);
3593}
3594