/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);

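/*
 * CHECK_AND_INIT_RWLOCK lazily allocates locks that were statically
 * initialized with PTHREAD_RWLOCK_INITIALIZER and rejects locks that
 * have already been destroyed.  THR_RWLOCK_INITIALIZER and
 * THR_RWLOCK_DESTROYED are small sentinel values, so any pointer less
 * than or equal to THR_RWLOCK_DESTROYED cannot be a real lock.
 */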
#define CHECK_AND_INIT_RWLOCK							\
	if (__predict_false((prwlock = (*rwlock)) <= THR_RWLOCK_DESTROYED)) {	\
		if (prwlock == THR_RWLOCK_INITIALIZER) {			\
			int ret;						\
			ret = init_static(_get_curthread(), rwlock);		\
			if (ret)						\
				return (ret);					\
		} else if (prwlock == THR_RWLOCK_DESTROYED) {			\
			return (EINVAL);					\
		}								\
		prwlock = *rwlock;						\
	}

/*
 * Allocate and zero storage for a rwlock; used both by explicit
 * initialization and by lazy initialization of static locks.
 */
static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr __unused)
{
	pthread_rwlock_t prwlock;

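	/*
	 * calloc() returns zeroed memory, and a zero-filled urwlock is
	 * in the unlocked state, so no further initialization is needed.
	 */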
	prwlock = (pthread_rwlock_t)calloc(1, sizeof(struct pthread_rwlock));
	if (prwlock == NULL)
		return (ENOMEM);
	*rwlock = prwlock;
	return (0);
}

int
_pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;
	int ret;

	prwlock = *rwlock;
	if (prwlock == THR_RWLOCK_INITIALIZER)
		ret = 0;
	else if (prwlock == THR_RWLOCK_DESTROYED)
		ret = EINVAL;
	else {
		*rwlock = THR_RWLOCK_DESTROYED;

		free(prwlock);
		ret = 0;
	}
	return (ret);
}

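/*
 * Initialize a statically initialized rwlock under the global
 * _rwlock_static_lock.  The state is re-checked after the lock is
 * acquired so that two racing threads cannot both allocate storage
 * for the same rwlock.
 */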
static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	if (*rwlock == THR_RWLOCK_INITIALIZER)
		ret = rwlock_init(rwlock, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

	return (ret);
}

int
_pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	*rwlock = NULL;
	return (rwlock_init(rwlock, attr));
}

static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	CHECK_AND_INIT_RWLOCK

	if (curthread->rdlock_count) {
		/*
		 * To avoid having to track every rdlock held by a
		 * thread, or every thread that holds an rdlock, we
		 * keep a simple per-thread count of rdlocks held.
		 * If a thread already holds any rdlocks, it may be
		 * attempting a recursive rdlock; if blocked writers
		 * were given precedence, that recursive attempt
		 * would deadlock.  Preferring readers whenever the
		 * thread already holds an rdlock avoids the deadlock
		 * (see the example below).
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}
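
	/*
	 * Example of the deadlock avoided above (hypothetical caller
	 * code):
	 *
	 *	pthread_rwlock_rdlock(&lock);	thread A gets a read lock
	 *	... thread B blocks in pthread_rwlock_wrlock(&lock) ...
	 *	pthread_rwlock_rdlock(&lock);	recursive attempt by A
	 *
	 * Without URWLOCK_PREFER_READER, A's second rdlock would queue
	 * behind writer B, while B waits for A to release its first
	 * rdlock: neither could make progress.
	 */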

	/*
	 * POSIX says the validity of the abstime parameter need not
	 * be checked if the lock can be immediately acquired, so try
	 * the cheap userland path first.
	 */
	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0) {
		curthread->rdlock_count++;
		return (ret);
	}

	if (__predict_false(abstime &&
		(abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

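	/*
	 * Wait in the kernel.  The sleep can be interrupted by a
	 * signal, but pthread_rwlock_rdlock() is not permitted to
	 * fail with EINTR, so retry until the lock is acquired or a
	 * real error (e.g. ETIMEDOUT) is returned.
	 */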
	for (;;) {
		/* Enter the kernel and wait for the lock. */
		ret = __thr_rwlock_rdlock(&prwlock->lock, flags, abstime);
		if (ret != EINTR)
			break;

		/* If interrupted, try the userland path again. */
		if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
			ret = 0;
			break;
		}
	}
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

int
_pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
	return (rwlock_rdlock_common(rwlock, abstime));
}
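
/*
 * Usage sketch (hypothetical caller code): the timeout passed to the
 * timed variants is an absolute CLOCK_REALTIME value, not a relative
 * interval.
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_REALTIME, &ts);
 *	ts.tv_sec += 1;			(give up after one second)
 *	if (pthread_rwlock_timedrdlock(&lock, &ts) == ETIMEDOUT)
 *		(handle the timeout)
 */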

int
_pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	CHECK_AND_INIT_RWLOCK

	if (curthread->rdlock_count) {
		/*
		 * Prefer readers if this thread already holds any
		 * rdlocks; see the comment in rwlock_rdlock_common()
		 * for the deadlock this avoids.
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

int
_pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	CHECK_AND_INIT_RWLOCK

	ret = _thr_rwlock_trywrlock(&prwlock->lock);
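	/*
	 * On success, record the owning thread so that
	 * _pthread_rwlock_unlock() can return EPERM when a thread
	 * that does not own the write lock tries to release it.
	 */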
	if (ret == 0)
		prwlock->owner = curthread;
	return (ret);
}

static int
rwlock_wrlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	CHECK_AND_INIT_RWLOCK

	/*
	 * POSIX says the validity of the abstime parameter need not
	 * be checked if the lock can be immediately acquired, so try
	 * the cheap userland path first.
	 */
	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0) {
		prwlock->owner = curthread;
		return (ret);
	}

	if (__predict_false(abstime &&
		(abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		/* Enter the kernel and wait for the lock. */
		ret = __thr_rwlock_wrlock(&prwlock->lock, abstime);
		if (ret == 0) {
			prwlock->owner = curthread;
			break;
		}

		if (ret != EINTR)
			break;

		/* If interrupted, try the userland path again. */
		if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
			ret = 0;
			prwlock->owner = curthread;
			break;
		}
	}
	return (ret);
}

int
_pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	return (rwlock_wrlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
	return (rwlock_wrlock_common(rwlock, abstime));
}

int
_pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;
	int32_t state;

	prwlock = *rwlock;

	if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
		return (EINVAL);

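	/*
	 * Snapshot the lock state before unlocking to tell a write
	 * unlock (URWLOCK_WRITE_OWNER set) from a read unlock, which
	 * must decrement this thread's rdlock count.
	 */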
	state = prwlock->lock.rw_state;
	if (state & URWLOCK_WRITE_OWNER) {
		if (__predict_false(prwlock->owner != curthread))
			return (EPERM);
		prwlock->owner = NULL;
	}

	ret = _thr_rwlock_unlock(&prwlock->lock);
	if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
		curthread->rdlock_count--;

	return (ret);
}