kern_umtx.c revision 116182
/*
 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_umtx.c 116182 2003-06-11 00:56:59Z obrien $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/sx.h>
#include <sys/thr.h>
#include <sys/umtx.h>

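/*
 * A umtx_q identifies a (pid, user-space umtx address) pair and carries
 * the list of threads currently sleeping on that umtx.  Entries live on
 * the hash buckets defined below and exist only while at least one
 * thread is queued.
 */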
struct umtx_q {
	LIST_ENTRY(umtx_q)	uq_next;	/* Linked list for the hash. */
	TAILQ_HEAD(, thread) uq_tdq;	/* List of threads blocked here. */
	struct umtx	*uq_umtx;	/* Pointer key component. */
	pid_t		uq_pid;		/* Pid key component. */
};

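/*
 * Queue entries are kept in a small hash table.  The hash mixes the pid
 * with the upper bits of the umtx address, so umtxes within the same
 * 64KB region of a given process share a bucket.
 */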
#define	UMTX_QUEUES	128
#define	UMTX_HASH(pid, umtx)						\
    (((uintptr_t)pid + ((uintptr_t)umtx & ~65535)) % UMTX_QUEUES)

LIST_HEAD(umtx_head, umtx_q);
static struct umtx_head queues[UMTX_QUEUES];
static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");

static struct mtx umtx_lock;
MTX_SYSINIT(umtx, &umtx_lock, "umtx", MTX_DEF);

#define	UMTX_LOCK()	mtx_lock(&umtx_lock);
#define	UMTX_UNLOCK()	mtx_unlock(&umtx_lock);


static struct umtx_q *umtx_lookup(struct thread *, struct umtx *umtx);
static struct umtx_q *umtx_insert(struct thread *, struct umtx *umtx);

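/*
 * Look up the queue for the given umtx in the calling thread's process,
 * or return NULL if no thread is queued on it.  The umtx lock must be
 * held.
 */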
static struct umtx_q *
umtx_lookup(struct thread *td, struct umtx *umtx)
{
	struct umtx_head *head;
	struct umtx_q *uq;
	pid_t pid;

	pid = td->td_proc->p_pid;

	head = &queues[UMTX_HASH(td->td_proc->p_pid, umtx)];

	LIST_FOREACH(uq, head, uq_next) {
		if (uq->uq_pid == pid && uq->uq_umtx == umtx)
			return (uq);
	}

	return (NULL);
}

/*
 * Insert a thread onto the umtx queue.
 */
static struct umtx_q *
umtx_insert(struct thread *td, struct umtx *umtx)
{
	struct umtx_head *head;
	struct umtx_q *uq;
	pid_t pid;

	pid = td->td_proc->p_pid;

	if ((uq = umtx_lookup(td, umtx)) == NULL) {
		struct umtx_q *ins;

		UMTX_UNLOCK();
		ins = malloc(sizeof(*uq), M_UMTX, M_ZERO | M_WAITOK);
		UMTX_LOCK();

		/*
		 * Someone else could have succeeded while we were blocked
		 * waiting on memory.
		 */
		if ((uq = umtx_lookup(td, umtx)) == NULL) {
			head = &queues[UMTX_HASH(pid, umtx)];
			uq = ins;
			uq->uq_pid = pid;
			uq->uq_umtx = umtx;
			LIST_INSERT_HEAD(head, uq, uq_next);
			TAILQ_INIT(&uq->uq_tdq);
		} else
			free(ins, M_UMTX);
	}

	/*
	 * Insert us onto the end of the TAILQ.
	 */
	TAILQ_INSERT_TAIL(&uq->uq_tdq, td, td_umtx);

	return (uq);
}

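/*
 * Remove a thread from the queue it is blocked on, freeing the queue
 * entry once the last waiter is gone.  The umtx lock must be held.
 */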
static void
umtx_remove(struct umtx_q *uq, struct thread *td)
{
	TAILQ_REMOVE(&uq->uq_tdq, td, td_umtx);

	if (TAILQ_EMPTY(&uq->uq_tdq)) {
		LIST_REMOVE(uq, uq_next);
		free(uq, M_UMTX);
	}
}

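/*
 * Lock a user-space umtx on behalf of the calling thread.  The
 * uncontested case is normally handled in userland; here we retry the
 * compare-and-set, mark the umtx contested, and sleep until the owner
 * wakes us from _umtx_unlock().
 */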
int
_umtx_lock(struct thread *td, struct _umtx_lock_args *uap)
    /* struct umtx *umtx */
{
	struct umtx_q *uq;
	struct umtx *umtx;
	intptr_t owner;
	intptr_t old;
	int error;

	uq = NULL;

	/*
	 * Care must be exercised when dealing with this structure.  It
	 * can fault on any access.
	 */
	umtx = uap->umtx;

	for (;;) {
		/*
		 * Try the uncontested case.  This should be done in userland.
		 */
		owner = casuptr((intptr_t *)&umtx->u_owner,
		    UMTX_UNOWNED, (intptr_t)td);

		/* The address was invalid. */
		if (owner == -1)
			return (EFAULT);

		/* The acquire succeeded. */
		if (owner == UMTX_UNOWNED)
			return (0);

		/* If no one owns it but it is contested try to acquire it. */
		if (owner == UMTX_CONTESTED) {
			owner = casuptr((intptr_t *)&umtx->u_owner,
			    UMTX_CONTESTED, ((intptr_t)td | UMTX_CONTESTED));

			/* The address was invalid. */
			if (owner == -1)
				return (EFAULT);

			if (owner == UMTX_CONTESTED)
				return (0);

			/* If this failed the lock has changed, restart. */
			continue;
		}


		UMTX_LOCK();
		uq = umtx_insert(td, umtx);
		UMTX_UNLOCK();

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock.  If this fails
		 * either someone else has acquired the lock or it has been
		 * released.
		 */
		old = casuptr((intptr_t *)&umtx->u_owner, owner,
		    owner | UMTX_CONTESTED);

		/* The address was invalid. */
		if (old == -1) {
			UMTX_LOCK();
			umtx_remove(uq, td);
			UMTX_UNLOCK();
			return (EFAULT);
		}

		/*
		 * If we set the contested bit, sleep.  Otherwise the lock
		 * changed and we need to retry.
		 */
		UMTX_LOCK();
		if (old == owner)
			error = msleep(td, &umtx_lock,
			    td->td_priority | PCATCH, "umtx", 0);
		else
			error = 0;

		umtx_remove(uq, td);
		UMTX_UNLOCK();

		/*
		 * If we caught a signal we might have to retry or exit
		 * immediately.
		 */
		if (error)
			return (error);
	}

	return (0);
}

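/*
 * Unlock a user-space umtx held by the calling thread.  A contested
 * umtx is handed back in the UMTX_CONTESTED state and the first thread
 * queued on it is woken so it can retry the lock.
 */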
int
_umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
    /* struct umtx *umtx */
{
	struct thread *blocked;
	struct umtx *umtx;
	struct umtx_q *uq;
	intptr_t owner;
	intptr_t old;

	umtx = uap->umtx;

	/*
	 * Make sure we own this mtx.
	 *
	 * XXX Need a {fu,su}ptr; this is not correct on archs where
	 * sizeof(intptr_t) != sizeof(long).
	 */
	if ((owner = fuword(&umtx->u_owner)) == -1)
		return (EFAULT);

	if ((struct thread *)(owner & ~UMTX_CONTESTED) != td)
		return (EPERM);
	/*
	 * If we own it but it isn't contested then we can just release and
	 * return.
	 */
	if ((owner & UMTX_CONTESTED) == 0) {
		owner = casuptr((intptr_t *)&umtx->u_owner,
		    (intptr_t)td, UMTX_UNOWNED);

		if (owner == -1)
			return (EFAULT);
		/*
		 * If this failed someone modified the memory without going
		 * through this api.
		 */
		if (owner != (intptr_t)td)
			return (EINVAL);

		return (0);
	}

	old = casuptr((intptr_t *)&umtx->u_owner, owner, UMTX_CONTESTED);

	if (old == -1)
		return (EFAULT);

	/*
	 * This will only happen if someone modifies the lock without going
	 * through this api.
	 */
	if (old != owner)
		return (EINVAL);

	/*
	 * We have to wake up one of the blocked threads.
	 */
	UMTX_LOCK();
	uq = umtx_lookup(td, umtx);
	if (uq != NULL) {
		blocked = TAILQ_FIRST(&uq->uq_tdq);
		wakeup(blocked);
	}

	UMTX_UNLOCK();

	return (0);
}