/*	$OpenBSD: rthread_rwlock.c,v 1.13 2019/03/03 18:39:10 visa Exp $ */
/*
 * Copyright (c) 2019 Martin Pieuchot <mpi@openbsd.org>
 * Copyright (c) 2012 Philip Guenther <guenther@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <limits.h>

#include <pthread.h>

#include "rthread.h"
#include "synch.h"

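/*
 * A rwlock is a single word: the low 31 bits hold the reader count,
 * with the special value WRITER (0x7fffffff) meaning "write locked",
 * and the high bit (WAITING) set when at least one thread is blocked
 * in the kernel waiting for the lock.
 */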
#define UNLOCKED	0
#define MAXREADER	0x7ffffffe
#define WRITER		0x7fffffff
#define WAITING		0x80000000
#define COUNT(v)	((v) & WRITER)

#define SPIN_COUNT	128
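/*
 * On x86, "pause" hints to the CPU that this is a spin-wait loop,
 * reducing power consumption and the cost of leaving the loop.
 */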
#if defined(__i386__) || defined(__amd64__)
#define SPIN_WAIT()	asm volatile("pause": : : "memory")
#else
#define SPIN_WAIT()	do { } while (0)
#endif

static _atomic_lock_t rwlock_init_lock = _SPINLOCK_UNLOCKED;

int
pthread_rwlock_init(pthread_rwlock_t *lockp,
    const pthread_rwlockattr_t *attrp __unused)
{
	pthread_rwlock_t rwlock;

	rwlock = calloc(1, sizeof(*rwlock));
	if (!rwlock)
		return (errno);

	*lockp = rwlock;

	return (0);
}
DEF_STD(pthread_rwlock_init);

int
pthread_rwlock_destroy(pthread_rwlock_t *lockp)
{
	pthread_rwlock_t rwlock;

	rwlock = *lockp;
	if (rwlock) {
		if (rwlock->value != UNLOCKED) {
#define MSG "pthread_rwlock_destroy on rwlock with waiters!\n"
			write(2, MSG, sizeof(MSG) - 1);
#undef MSG
			return (EBUSY);
		}
		free((void *)rwlock);
		*lockp = NULL;
	}

	return (0);
}

static int
_rthread_rwlock_ensure_init(pthread_rwlock_t *rwlockp)
{
	int ret = 0;

	/*
	 * If the rwlock is statically initialized, perform the dynamic
	 * initialization.
	 */
	if (*rwlockp == NULL) {
		_spinlock(&rwlock_init_lock);
		if (*rwlockp == NULL)
			ret = pthread_rwlock_init(rwlockp, NULL);
		_spinunlock(&rwlock_init_lock);
	}
	return (ret);
}

static int
_rthread_rwlock_tryrdlock(pthread_rwlock_t rwlock)
{
	unsigned int val;

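	/*
	 * Speculatively bump the reader count, retrying the CAS until
	 * no other thread has changed the lock word in between.  Fail
	 * if a writer holds the lock or the count would overflow.
	 */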
	do {
		val = rwlock->value;
		if (COUNT(val) == WRITER)
			return (EBUSY);
		if (COUNT(val) == MAXREADER)
			return (EAGAIN);
	} while (atomic_cas_uint(&rwlock->value, val, val + 1) != val);

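	/*
	 * Acquire barrier: memory accesses in the critical section
	 * must not be reordered before the lock is taken.
	 */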
	membar_enter_after_atomic();
	return (0);
}

static int
_rthread_rwlock_timedrdlock(pthread_rwlock_t *rwlockp, int trywait,
    const struct timespec *abs, int timed)
{
	pthread_t self = pthread_self();
	pthread_rwlock_t rwlock;
	unsigned int val, new;
	int i, error;

	if ((error = _rthread_rwlock_ensure_init(rwlockp)))
		return (error);

	rwlock = *rwlockp;
	_rthread_debug(5, "%p: rwlock_%srdlock %p (%u)\n", self,
	    (timed ? "timed" : (trywait ? "try" : "")), (void *)rwlock,
	    rwlock->value);

	error = _rthread_rwlock_tryrdlock(rwlock);
	if (error != EBUSY || trywait)
		return (error);

	/* Try hard to not enter the kernel. */
	for (i = 0; i < SPIN_COUNT; i++) {
		val = rwlock->value;
		if (val == UNLOCKED || (val & WAITING))
			break;

		SPIN_WAIT();
	}

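	/*
	 * Slow path: only sleep while a writer holds the lock.  Set
	 * WAITING so that the unlocking thread knows to wake us, then
	 * block in the kernel until woken or the timeout expires.
	 */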
	while ((error = _rthread_rwlock_tryrdlock(rwlock)) == EBUSY) {
		val = rwlock->value;
		if (val == UNLOCKED || COUNT(val) != WRITER)
			continue;
		new = val | WAITING;
		if (atomic_cas_uint(&rwlock->value, val, new) == val) {
			error = _twait(&rwlock->value, new, CLOCK_REALTIME,
			    abs);
		}
		if (error == ETIMEDOUT)
			break;
	}

	return (error);
}

int
pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlockp)
{
	return (_rthread_rwlock_timedrdlock(rwlockp, 1, NULL, 0));
}

int
pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlockp,
    const struct timespec *abs)
{
	return (_rthread_rwlock_timedrdlock(rwlockp, 0, abs, 1));
}

int
pthread_rwlock_rdlock(pthread_rwlock_t *rwlockp)
{
	return (_rthread_rwlock_timedrdlock(rwlockp, 0, NULL, 0));
}

static int
_rthread_rwlock_tryrwlock(pthread_rwlock_t rwlock)
{
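	/*
	 * A writer can only take an entirely unlocked rwlock: a single
	 * CAS from UNLOCKED to WRITER.
	 */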
	if (atomic_cas_uint(&rwlock->value, UNLOCKED, WRITER) != UNLOCKED)
		return (EBUSY);

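	/* Acquire barrier, as in the read path. */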
	membar_enter_after_atomic();
	return (0);
}

static int
_rthread_rwlock_timedwrlock(pthread_rwlock_t *rwlockp, int trywait,
    const struct timespec *abs, int timed)
{
	pthread_t self = pthread_self();
	pthread_rwlock_t rwlock;
	unsigned int val, new;
	int i, error;

	if ((error = _rthread_rwlock_ensure_init(rwlockp)))
		return (error);

	rwlock = *rwlockp;
	_rthread_debug(5, "%p: rwlock_%swrlock %p (%u)\n", self,
	    (timed ? "timed" : (trywait ? "try" : "")), (void *)rwlock,
	    rwlock->value);

	error = _rthread_rwlock_tryrwlock(rwlock);
	if (error != EBUSY || trywait)
		return (error);

	/* Try hard to not enter the kernel. */
	for (i = 0; i < SPIN_COUNT; i++) {
		val = rwlock->value;
		if (val == UNLOCKED || (val & WAITING))
			break;

		SPIN_WAIT();
	}

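	/*
	 * Slow path: a writer sleeps whenever the lock is held at all,
	 * by readers or by another writer.  If the lock went back to
	 * UNLOCKED, skip the sleep and retry the fast path at once.
	 */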
	while ((error = _rthread_rwlock_tryrwlock(rwlock)) == EBUSY) {
		val = rwlock->value;
		if (val == UNLOCKED)
			continue;
		new = val | WAITING;
		if (atomic_cas_uint(&rwlock->value, val, new) == val) {
			error = _twait(&rwlock->value, new, CLOCK_REALTIME,
			    abs);
		}
		if (error == ETIMEDOUT)
			break;
	}

	return (error);
}

int
pthread_rwlock_trywrlock(pthread_rwlock_t *rwlockp)
{
	return (_rthread_rwlock_timedwrlock(rwlockp, 1, NULL, 0));
}

int
pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlockp,
    const struct timespec *abs)
{
	return (_rthread_rwlock_timedwrlock(rwlockp, 0, abs, 1));
}

int
pthread_rwlock_wrlock(pthread_rwlock_t *rwlockp)
{
	return (_rthread_rwlock_timedwrlock(rwlockp, 0, NULL, 0));
}

int
pthread_rwlock_unlock(pthread_rwlock_t *rwlockp)
{
	pthread_t self = pthread_self();
	pthread_rwlock_t rwlock;
	unsigned int val, new;

	rwlock = *rwlockp;
	_rthread_debug(5, "%p: rwlock_unlock %p\n", self, (void *)rwlock);

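	/*
	 * Release barrier: all memory accesses in the critical section
	 * must complete before the lock word changes hands.
	 */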
	membar_exit_before_atomic();
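	/*
	 * Drop one reader, or release the lock entirely if we are the
	 * writer or the last reader.  Going back to UNLOCKED clears
	 * the WAITING bit as well.
	 */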
	do {
		val = rwlock->value;
		if (COUNT(val) == WRITER || COUNT(val) == 1)
			new = UNLOCKED;
		else
			new = val - 1;
	} while (atomic_cas_uint(&rwlock->value, val, new) != val);

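	/* If the lock became free, wake every thread blocked on it. */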
	if (new == UNLOCKED && (val & WAITING))
		_wake(&rwlock->value, INT_MAX);

	return (0);
}