/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include <limits.h>
#include "un-namespace.h"

#include "thr_private.h"

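/*
 * Process-shared condition variables are kept in a separate shared page
 * obtained via __thr_pshared_offpage() (see cond_init() below), so the
 * structure must fit within the smallest supported page size.
 */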
_Static_assert(sizeof(struct pthread_cond) <= THR_PAGE_SIZE_MIN,
    "pthread_cond too large");

/*
 * Prototypes
 */
int	__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
		       const struct timespec * abstime);
static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
		    const struct timespec *abstime, int cancel);
static int cond_signal_common(pthread_cond_t *cond);
static int cond_broadcast_common(pthread_cond_t *cond);

/*
 * Double underscore versions are cancellation points.  Single underscore
 * versions are not and are provided for libc internal usage (which
 * shouldn't introduce cancellation points).
 */
__weak_reference(__thr_cond_wait, pthread_cond_wait);
__weak_reference(__thr_cond_wait, __pthread_cond_wait);
__weak_reference(_thr_cond_wait, _pthread_cond_wait);
__weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);
__weak_reference(_thr_cond_timedwait, _pthread_cond_timedwait);
__weak_reference(_thr_cond_init, pthread_cond_init);
__weak_reference(_thr_cond_init, _pthread_cond_init);
__weak_reference(_thr_cond_destroy, pthread_cond_destroy);
__weak_reference(_thr_cond_destroy, _pthread_cond_destroy);
__weak_reference(_thr_cond_signal, pthread_cond_signal);
__weak_reference(_thr_cond_signal, _pthread_cond_signal);
__weak_reference(_thr_cond_broadcast, pthread_cond_broadcast);
__weak_reference(_thr_cond_broadcast, _pthread_cond_broadcast);
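
/*
 * Illustrative example (not part of this file): with the aliases above,
 * a typical application wait loop such as
 *
 *	pthread_mutex_lock(&m);
 *	while (!predicate)
 *		pthread_cond_wait(&cv, &m);
 *	pthread_mutex_unlock(&m);
 *
 * resolves pthread_cond_wait() to __thr_cond_wait() and is therefore a
 * cancellation point, while internal libc callers use the _pthread_*
 * names, which resolve to the non-cancellation variants.
 */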

#define CV_PSHARED(cvp)	(((cvp)->kcond.c_flags & USYNC_PROCESS_SHARED) != 0)

static void
cond_init_body(struct pthread_cond *cvp, const struct pthread_cond_attr *cattr)
{

	if (cattr == NULL) {
		cvp->kcond.c_clockid = CLOCK_REALTIME;
	} else {
		if (cattr->c_pshared)
			cvp->kcond.c_flags |= USYNC_PROCESS_SHARED;
		cvp->kcond.c_clockid = cattr->c_clockid;
	}
}

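/*
 * Allocate and initialize a condition variable.  A process-private
 * condvar is allocated from the heap and *cond points directly at it;
 * a process-shared condvar lives in the pshared off-page and *cond is
 * set to the THR_PSHARED_PTR marker instead.
 */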
static int
cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
	struct pthread_cond *cvp;
	const struct pthread_cond_attr *cattr;
	int pshared;

	cattr = cond_attr != NULL ? *cond_attr : NULL;
	if (cattr == NULL || cattr->c_pshared == PTHREAD_PROCESS_PRIVATE) {
		pshared = 0;
		cvp = calloc(1, sizeof(struct pthread_cond));
		if (cvp == NULL)
			return (ENOMEM);
	} else {
		pshared = 1;
		cvp = __thr_pshared_offpage(cond, 1);
		if (cvp == NULL)
			return (EFAULT);
	}

	/*
	 * Initialise the condition variable structure:
	 */
	cond_init_body(cvp, cattr);
	*cond = pshared ? THR_PSHARED_PTR : cvp;
	return (0);
}

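/*
 * Perform the deferred initialization of a statically initialized
 * condition variable (PTHREAD_COND_INITIALIZER).  The check and the
 * initialization are done under _cond_static_lock so that concurrent
 * first uses initialize the condvar only once.
 */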
static int
init_static(struct pthread *thread, pthread_cond_t *cond)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_cond_static_lock);

	if (*cond == NULL)
		ret = cond_init(cond, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_cond_static_lock);

	return (ret);
}

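/*
 * Resolve *cond into cvp, handling the process-shared marker, a
 * statically initialized condvar (initialize it on first use) and an
 * already destroyed condvar (EINVAL).
 */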
#define CHECK_AND_INIT_COND							\
	if (*cond == THR_PSHARED_PTR) {						\
		cvp = __thr_pshared_offpage(cond, 0);				\
		if (cvp == NULL)						\
			return (EINVAL);					\
	} else if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) {	\
		if (cvp == THR_COND_INITIALIZER) {				\
			int ret;						\
			ret = init_static(_get_curthread(), cond);		\
			if (ret)						\
				return (ret);					\
		} else if (cvp == THR_COND_DESTROYED) {				\
			return (EINVAL);					\
		}								\
		cvp = *cond;							\
	}

int
_thr_cond_init(pthread_cond_t * __restrict cond,
    const pthread_condattr_t * __restrict cond_attr)
{

	*cond = NULL;
	return (cond_init(cond, cond_attr));
}

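/*
 * Destroy a condition variable.  Destruction fails with EBUSY while
 * there are kernel or userland waiters; otherwise the storage is
 * released and *cond is marked THR_COND_DESTROYED so later use can be
 * detected.  A statically initialized but never used condvar needs no
 * cleanup.
 */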
int
_thr_cond_destroy(pthread_cond_t *cond)
{
	struct pthread_cond *cvp;
	int error;

	error = 0;
	if (*cond == THR_PSHARED_PTR) {
		cvp = __thr_pshared_offpage(cond, 0);
		if (cvp != NULL) {
			if (cvp->kcond.c_has_waiters)
				error = EBUSY;
			else
				__thr_pshared_destroy(cond);
		}
		if (error == 0)
			*cond = THR_COND_DESTROYED;
	} else if ((cvp = *cond) == THR_COND_INITIALIZER) {
		/* nothing */
	} else if (cvp == THR_COND_DESTROYED) {
		error = EINVAL;
	} else {
		cvp = *cond;
		if (cvp->__has_user_waiters || cvp->kcond.c_has_waiters)
			error = EBUSY;
		else {
			*cond = THR_COND_DESTROYED;
			free(cvp);
		}
	}
	return (error);
}

/*
 * Cancellation behavior:
 *   A thread may be canceled at the start of the wait.  If it is
 *   canceled, it did not consume a wakeup from pthread_cond_signal();
 *   otherwise it is not canceled.
 *   Thread cancellation never causes a wakeup from pthread_cond_signal()
 *   to be lost.
 */
static int
cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread;
	int error, error2, recurse, robust;

	curthread = _get_curthread();
	robust = _mutex_enter_robust(curthread, mp);

	error = _mutex_cv_detach(mp, &recurse);
	if (error != 0) {
		if (robust)
			_mutex_leave_robust(curthread, mp);
		return (error);
	}

	if (cancel)
		_thr_cancel_enter2(curthread, 0);
	error = _thr_ucond_wait(&cvp->kcond, &mp->m_lock, abstime,
	    CVWAIT_ABSTIME | CVWAIT_CLOCKID);
	if (cancel)
		_thr_cancel_leave(curthread, 0);

	/*
	 * Note that priority-protected (PP) and robust mutexes may
	 * return additional error codes from the relock below.
	 */
	if (error == 0) {
		error2 = _mutex_cv_lock(mp, recurse, true);
	} else if (error == EINTR || error == ETIMEDOUT) {
		error2 = _mutex_cv_lock(mp, recurse, true);
		/*
		 * Do not act on cancellation when EOWNERDEAD was
		 * returned here.  The cancellation cleanup handler
		 * would use the protected state and unlock the mutex
		 * without making the state consistent, leaving the
		 * state unrecoverable.
		 */
		if (error2 == 0 && cancel) {
			if (robust) {
				_mutex_leave_robust(curthread, mp);
				robust = false;
			}
			_thr_testcancel(curthread);
		}

		if (error == EINTR)
			error = 0;
	} else {
		/* We know that it didn't unlock the mutex. */
		_mutex_cv_attach(mp, recurse);
		if (cancel) {
			if (robust) {
				_mutex_leave_robust(curthread, mp);
				robust = false;
			}
			_thr_testcancel(curthread);
		}
		error2 = 0;
	}
	if (robust)
		_mutex_leave_robust(curthread, mp);
	return (error2 != 0 ? error2 : error);
}

/*
 * A waiting thread sleeps on a userland queue whenever possible.  When
 * it is signaled or broadcast it is removed from the queue; if the
 * signaling thread still owns the waiter's mutex, the wakeup address is
 * only recorded in that thread's defer_waiters[] buffer and the waiter
 * is not woken up until the mutex is unlocked.
 */

static int
cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread;
	struct sleepqueue *sq;
	int deferred, error, error2, recurse;

	curthread = _get_curthread();
	if (curthread->wchan != NULL)
		PANIC("thread %p was already on queue.", curthread);

	if (cancel)
		_thr_testcancel(curthread);

	_sleepq_lock(cvp);
	/*
	 * Set __has_user_waiters before unlocking the mutex; this
	 * allows us to check it without locking in
	 * pthread_cond_signal().
	 */
	cvp->__has_user_waiters = 1;
	deferred = 0;
	(void)_mutex_cv_unlock(mp, &recurse, &deferred);
	curthread->mutex_obj = mp;
	_sleepq_add(cvp, curthread);
	for (;;) {
		_thr_clear_wake(curthread);
		_sleepq_unlock(cvp);
		if (deferred) {
			deferred = 0;
			if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0)
				(void)_umtx_op_err(&mp->m_lock,
				    UMTX_OP_MUTEX_WAKE2, mp->m_lock.m_flags,
				    0, 0);
		}
		if (curthread->nwaiter_defer > 0) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}

		if (cancel)
			_thr_cancel_enter2(curthread, 0);
		error = _thr_sleep(curthread, cvp->kcond.c_clockid, abstime);
		if (cancel)
			_thr_cancel_leave(curthread, 0);

		_sleepq_lock(cvp);
		if (curthread->wchan == NULL) {
			error = 0;
			break;
		} else if (cancel && SHOULD_CANCEL(curthread)) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters = _sleepq_remove(sq, curthread);
			_sleepq_unlock(cvp);
			curthread->mutex_obj = NULL;
			error2 = _mutex_cv_lock(mp, recurse, false);
			if (!THR_IN_CRITICAL(curthread))
				_pthread_exit(PTHREAD_CANCELED);
			else /* this should not happen */
				return (error2);
		} else if (error == ETIMEDOUT) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters =
			    _sleepq_remove(sq, curthread);
			break;
		}
	}
	_sleepq_unlock(cvp);
	curthread->mutex_obj = NULL;
	error2 = _mutex_cv_lock(mp, recurse, false);
	if (error == 0)
		error = error2;
	return (error);
}

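/*
 * Common wait path.  The kernel-assisted wait is used for threads not
 * running under SCHED_OTHER and for priority-protected,
 * priority-inheritance or process-shared mutexes (and for process-shared
 * condvars); everything else uses the userland sleepqueue path.
 */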
static int
cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
	const struct timespec *abstime, int cancel)
{
	struct pthread	*curthread = _get_curthread();
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	int	error;

	CHECK_AND_INIT_COND

	if (*mutex == THR_PSHARED_PTR) {
		mp = __thr_pshared_offpage(mutex, 0);
		if (mp == NULL)
			return (EINVAL);
	} else {
		mp = *mutex;
	}

	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	if (curthread->attr.sched_policy != SCHED_OTHER ||
	    (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT |
	    USYNC_PROCESS_SHARED)) != 0 || CV_PSHARED(cvp))
		return (cond_wait_kernel(cvp, mp, abstime, cancel));
	else
		return (cond_wait_user(cvp, mp, abstime, cancel));
}

int
_thr_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 0));
}

int
__thr_cond_wait(pthread_cond_t * __restrict cond,
    pthread_mutex_t * __restrict mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 1));
}

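/*
 * Timed waits validate abstime up front: a NULL pointer, a negative
 * field or an out-of-range tv_nsec yields EINVAL without sleeping.
 */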
int
_thr_cond_timedwait(pthread_cond_t * __restrict cond,
    pthread_mutex_t * __restrict mutex,
    const struct timespec * __restrict abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 0));
}

int
__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
		       const struct timespec *abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 1));
}

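/*
 * Signal path: wake kernel waiters via _thr_ucond_signal() first, then
 * wake at most one userland waiter.  If the current thread still owns
 * that waiter's mutex, the wakeup is deferred (recorded in
 * defer_waiters[]) so the waiter does not wake up only to block on the
 * mutex immediately.
 */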
static int
cond_signal_common(pthread_cond_t *cond)
{
	struct pthread	*curthread = _get_curthread();
	struct pthread *td;
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	struct sleepqueue *sq;
	int	*waddr;
	int	pshared;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_signal(&cvp->kcond);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	curthread = _get_curthread();
	waddr = NULL;
	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}

	td = _sleepq_first(sq);
	mp = td->mutex_obj;
	cvp->__has_user_waiters = _sleepq_remove(sq, td);
	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
	} else {
		waddr = &td->wake_addr->value;
	}
	_sleepq_unlock(cvp);
	if (waddr != NULL)
		_thr_set_wake(waddr);
	return (0);
}

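/*
 * Broadcast support: drop_cb() is invoked for every thread dropped from
 * the sleepqueue.  Wakeups for waiters whose mutex the current thread
 * owns are deferred; the rest are batched in broadcast_arg and flushed
 * MAX_DEFER_WAITERS at a time.
 */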
struct broadcast_arg {
	struct pthread *curthread;
	unsigned int *waddrs[MAX_DEFER_WAITERS];
	int count;
};

static void
drop_cb(struct pthread *td, void *arg)
{
	struct broadcast_arg *ba = arg;
	struct pthread_mutex *mp;
	struct pthread *curthread = ba->curthread;

	mp = td->mutex_obj;
	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
	} else {
		if (ba->count >= MAX_DEFER_WAITERS) {
			_thr_wake_all(ba->waddrs, ba->count);
			ba->count = 0;
		}
		ba->waddrs[ba->count++] = &td->wake_addr->value;
	}
}

static int
cond_broadcast_common(pthread_cond_t *cond)
{
	int    pshared;
	struct pthread_cond *cvp;
	struct sleepqueue *sq;
	struct broadcast_arg ba;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_broadcast(&cvp->kcond);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	ba.curthread = _get_curthread();
	ba.count = 0;

	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}
	_sleepq_drop(sq, drop_cb, &ba);
	cvp->__has_user_waiters = 0;
	_sleepq_unlock(cvp);
	if (ba.count > 0)
		_thr_wake_all(ba.waddrs, ba.count);
	return (0);
}

int
_thr_cond_signal(pthread_cond_t * cond)
{

	return (cond_signal_common(cond));
}

int
_thr_cond_broadcast(pthread_cond_t * cond)
{

	return (cond_broadcast_common(cond));
}
