/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * 6.1 : Mutual Exclusion and Synchronisation
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/acpica/Osd/OsdSynch.c 150003 2005-09-11 18:39:03Z obrien $");

#include <contrib/dev/acpica/acpi.h>

#include "opt_acpi.h"
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#define _COMPONENT	ACPI_OS_SERVICES
ACPI_MODULE_NAME("SYNCH")

MALLOC_DEFINE(M_ACPISEM, "acpisem", "ACPI semaphore");

#define AS_LOCK(as)	mtx_lock(&(as)->as_mtx)
#define AS_UNLOCK(as)	mtx_unlock(&(as)->as_mtx)

/*
 * Simple counting semaphore implemented using a mutex.  (Subsequently used
 * in the OSI code to implement a mutex.  Go figure.)
 */
struct acpi_semaphore {
    struct mtx	as_mtx;		/* protects the counters below */
    UINT32	as_units;	/* units currently available */
    UINT32	as_maxunits;	/* maximum units, or ACPI_NO_UNIT_LIMIT */
    UINT32	as_pendings;	/* threads currently sleeping on the semaphore */
    UINT32	as_resetting;	/* reset in progress (see disabled code below) */
    UINT32	as_timeouts;	/* consecutive timed-out waits */
};
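
/*
 * Illustrative sketch (not compiled): how an ACPICA-side caller might use
 * the OSD semaphore interface implemented below.  The unit counts and the
 * 100 ms timeout are made-up example values.
 */
#if 0
static void
acpi_sem_example(void)
{
    ACPI_HANDLE	sem;

    /* Counting semaphore with a maximum of 4 units, 1 initially available. */
    if (ACPI_FAILURE(AcpiOsCreateSemaphore(4, 1, &sem)))
	return;

    /* Take one unit, waiting at most 100 ms; AE_TIME means we timed out. */
    if (ACPI_SUCCESS(AcpiOsWaitSemaphore(sem, 1, 100))) {
	/* ... work while holding the unit ... */
	AcpiOsSignalSemaphore(sem, 1);	/* return the unit */
    }

    AcpiOsDeleteSemaphore(sem);
}
#endif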

#ifndef ACPI_NO_SEMAPHORES
#ifndef ACPI_SEMAPHORES_MAX_PENDING
#define ACPI_SEMAPHORES_MAX_PENDING	4
#endif
static int	acpi_semaphore_debug = 0;
TUNABLE_INT("debug.acpi_semaphore_debug", &acpi_semaphore_debug);
SYSCTL_DECL(_debug_acpi);
SYSCTL_INT(_debug_acpi, OID_AUTO, semaphore_debug, CTLFLAG_RW,
	   &acpi_semaphore_debug, 0, "Enable ACPI semaphore debug messages");
#endif /* !ACPI_NO_SEMAPHORES */
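
/*
 * Usage note for the debug knob declared above: the extra printfs in the
 * wait/signal paths can be enabled at runtime with
 * "sysctl debug.acpi.semaphore_debug=1", or from boot by setting the
 * loader tunable debug.acpi_semaphore_debug=1 in loader.conf(5).
 */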

ACPI_STATUS
AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
    ACPI_HANDLE *OutHandle)
{
#ifndef ACPI_NO_SEMAPHORES
    struct acpi_semaphore	*as;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (OutHandle == NULL)
	return_ACPI_STATUS (AE_BAD_PARAMETER);
    if (InitialUnits > MaxUnits)
	return_ACPI_STATUS (AE_BAD_PARAMETER);

    if ((as = malloc(sizeof(*as), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
	return_ACPI_STATUS (AE_NO_MEMORY);

    mtx_init(&as->as_mtx, "ACPI semaphore", NULL, MTX_DEF);
    as->as_units = InitialUnits;
    as->as_maxunits = MaxUnits;
    as->as_pendings = as->as_resetting = as->as_timeouts = 0;

    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	"created semaphore %p max %d, initial %d\n",
	as, MaxUnits, InitialUnits));

    *OutHandle = (ACPI_HANDLE)as;
#else
    /* Semaphores disabled; hand back a dummy non-NULL handle. */
    *OutHandle = (ACPI_HANDLE)OutHandle;
#endif /* !ACPI_NO_SEMAPHORES */

    return_ACPI_STATUS (AE_OK);
}

ACPI_STATUS
AcpiOsDeleteSemaphore(ACPI_HANDLE Handle)
{
#ifndef ACPI_NO_SEMAPHORES
    struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (as == NULL)
	return_ACPI_STATUS (AE_BAD_PARAMETER);

    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "destroyed semaphore %p\n", as));
    mtx_destroy(&as->as_mtx);
    free(Handle, M_ACPISEM);
#endif /* !ACPI_NO_SEMAPHORES */

    return_ACPI_STATUS (AE_OK);
}

/*
 * The semaphore is built on a mutex and msleep(9).  Timeouts are expressed
 * in ticks; after every wakeup the remaining time is recomputed with
 * getmicrotime() so that a premature wakeup does not extend the caller's
 * total wait beyond the requested Timeout.
 */
ACPI_STATUS
AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout)
{
#ifndef ACPI_NO_SEMAPHORES
    ACPI_STATUS			result;
    struct acpi_semaphore	*as = (struct acpi_semaphore *)Handle;
    int				rv, tmo;
    struct timeval		timeouttv, currenttv, timelefttv;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (as == NULL)
	return_ACPI_STATUS (AE_BAD_PARAMETER);

    if (cold)
	return_ACPI_STATUS (AE_OK);

#if 0
    if (as->as_units < Units && as->as_timeouts > 10) {
	printf("%s: semaphore %p too many timeouts, resetting\n", __func__, as);
	AS_LOCK(as);
	as->as_units = as->as_maxunits;
	if (as->as_pendings)
	    as->as_resetting = 1;
	as->as_timeouts = 0;
	wakeup(as);
	AS_UNLOCK(as);
	return_ACPI_STATUS (AE_TIME);
    }

    if (as->as_resetting)
	return_ACPI_STATUS (AE_TIME);
#endif

    /* A Timeout of ACPI_WAIT_FOREVER (0xffff) means "wait forever". */
    if (Timeout == ACPI_WAIT_FOREVER) {
	tmo = 0;
	timeouttv.tv_sec = ((0xffff/1000) + 1);	/* just past the longest finite Timeout (0xffff ms) */
	timeouttv.tv_usec = 0;
    } else {
	/* compute timeout using microseconds per tick */
	tmo = (Timeout * 1000) / (1000000 / hz);
	if (tmo <= 0)
	    tmo = 1;
	timeouttv.tv_sec  = Timeout / 1000;
	timeouttv.tv_usec = (Timeout % 1000) * 1000;
    }
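    /*
     * Worked example (values assumed for illustration): with hz = 100 there
     * are 1000000 / 100 = 10000 microseconds per tick, so a 50 ms timeout
     * becomes tmo = (50 * 1000) / 10000 = 5 ticks.
     */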

    /* calculate timeout value in timeval */
    getmicrotime(&currenttv);
    timevaladd(&timeouttv, &currenttv);

    AS_LOCK(as);
    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	"get %d units from semaphore %p (has %d), timeout %d\n",
	Units, as, as->as_units, Timeout));
    for (;;) {
	if (as->as_maxunits == ACPI_NO_UNIT_LIMIT) {
	    result = AE_OK;
	    break;
	}
	if (as->as_units >= Units) {
	    as->as_units -= Units;
	    result = AE_OK;
	    break;
	}

	/* limit the number of pending threads */
	if (as->as_pendings >= ACPI_SEMAPHORES_MAX_PENDING) {
	    result = AE_TIME;
	    break;
	}

	/* if a timeout of zero was specified, return immediately */
	if (Timeout == 0) {
	    result = AE_TIME;
	    break;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	    "semaphore blocked, calling msleep(%p, %p, %d, \"acsem\", %d)\n",
	    as, &as->as_mtx, PCATCH, tmo));

	as->as_pendings++;

	if (acpi_semaphore_debug) {
	    printf("%s: Sleep %d, pending %d, semaphore %p, thread %d\n",
		__func__, Timeout, as->as_pendings, as, AcpiOsGetThreadId());
	}

	rv = msleep(as, &as->as_mtx, PCATCH, "acsem", tmo);

	as->as_pendings--;

#if 0
	if (as->as_resetting) {
	    /* semaphore reset, return immediately */
	    if (as->as_pendings == 0) {
		as->as_resetting = 0;
	    }
	    result = AE_TIME;
	    break;
	}
#endif

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "msleep(%d) returned %d\n", tmo, rv));
	if (rv == EWOULDBLOCK) {
	    result = AE_TIME;
	    break;
	}

	/* check whether we have already waited long enough */
	timelefttv = timeouttv;
	getmicrotime(&currenttv);
	timevalsub(&timelefttv, &currenttv);
	if (timelefttv.tv_sec < 0) {
	    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "await semaphore %p timeout\n",
		as));
	    result = AE_TIME;
	    break;
	}

	/* adjust timeout for the next sleep */
	tmo = (timelefttv.tv_sec * 1000000 + timelefttv.tv_usec) /
	    (1000000 / hz);
	if (tmo <= 0)
	    tmo = 1;

	if (acpi_semaphore_debug) {
	    printf("%s: Wakeup timeleft(%lu, %lu), tmo %d, sem %p, thread %d\n",
		__func__, timelefttv.tv_sec, timelefttv.tv_usec, tmo, as,
		AcpiOsGetThreadId());
	}
    }

    if (acpi_semaphore_debug) {
	if (result == AE_TIME && Timeout > 0) {
	    printf("%s: Timeout %d, pending %d, semaphore %p\n",
		__func__, Timeout, as->as_pendings, as);
	}
	if (result == AE_OK && (as->as_timeouts > 0 || as->as_pendings > 0)) {
	    printf("%s: Acquire %d, units %d, pending %d, sem %p, thread %d\n",
		__func__, Units, as->as_units, as->as_pendings, as,
		AcpiOsGetThreadId());
	}
    }

    if (result == AE_TIME)
	as->as_timeouts++;
    else
	as->as_timeouts = 0;

    AS_UNLOCK(as);
    return_ACPI_STATUS (result);
#else
    return_ACPI_STATUS (AE_OK);
#endif /* !ACPI_NO_SEMAPHORES */
}

ACPI_STATUS
AcpiOsSignalSemaphore(ACPI_HANDLE Handle, UINT32 Units)
{
#ifndef ACPI_NO_SEMAPHORES
    struct acpi_semaphore	*as = (struct acpi_semaphore *)Handle;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (as == NULL)
	return_ACPI_STATUS(AE_BAD_PARAMETER);

    AS_LOCK(as);
    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	"return %d units to semaphore %p (has %d)\n",
	Units, as, as->as_units));
    if (as->as_maxunits != ACPI_NO_UNIT_LIMIT) {
	as->as_units += Units;
	if (as->as_units > as->as_maxunits)
	    as->as_units = as->as_maxunits;
    }

    if (acpi_semaphore_debug && (as->as_timeouts > 0 || as->as_pendings > 0)) {
	printf("%s: Release %d, units %d, pending %d, semaphore %p, thread %d\n",
	    __func__, Units, as->as_units, as->as_pendings, as, AcpiOsGetThreadId());
    }

    wakeup(as);
    AS_UNLOCK(as);
#endif /* !ACPI_NO_SEMAPHORES */

    return_ACPI_STATUS (AE_OK);
}

ACPI_STATUS
AcpiOsCreateLock (ACPI_HANDLE *OutHandle)
{
    struct mtx *m;

    if (OutHandle == NULL)
	return (AE_BAD_PARAMETER);
    m = malloc(sizeof(*m), M_ACPISEM, M_NOWAIT | M_ZERO);
    if (m == NULL)
	return (AE_NO_MEMORY);

    mtx_init(m, "acpica subsystem lock", NULL, MTX_DEF);
    *OutHandle = (ACPI_HANDLE)m;
    return (AE_OK);
}

void
AcpiOsDeleteLock (ACPI_HANDLE Handle)
{
    struct mtx *m = (struct mtx *)Handle;

    if (Handle == NULL)
	return;
    mtx_destroy(m);
    free(m, M_ACPISEM);		/* release the storage from AcpiOsCreateLock() */
}

/*
 * The Flags parameter seems to state whether or not the caller is an ISR
 * (and thus can't block) but since we have ithreads, we don't worry
 * about potentially blocking.
 */
void
AcpiOsAcquireLock (ACPI_HANDLE Handle, UINT32 Flags)
{
    struct mtx *m = (struct mtx *)Handle;

    if (Handle == NULL)
	return;
    mtx_lock(m);
}

void
AcpiOsReleaseLock (ACPI_HANDLE Handle, UINT32 Flags)
{
    struct mtx *m = (struct mtx *)Handle;

    if (Handle == NULL)
	return;
    mtx_unlock(m);
}
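
/*
 * Illustrative sketch (not compiled): typical use of the OSD lock interface
 * above.  The Flags value is ignored by this implementation, so 0 is used
 * here purely for the example.
 */
#if 0
static void
acpi_lock_example(void)
{
    ACPI_HANDLE	lock;

    if (AcpiOsCreateLock(&lock) != AE_OK)
	return;
    AcpiOsAcquireLock(lock, 0);
    /* ... short critical section ... */
    AcpiOsReleaseLock(lock, 0);
    AcpiOsDeleteLock(lock);
}
#endif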

/* Section 5.2.9.1:  global lock acquire/release functions */
#define GL_ACQUIRED	(-1)
#define GL_BUSY		0
#define GL_BIT_PENDING	0x1
#define GL_BIT_OWNED	0x2
#define GL_BIT_MASK	(GL_BIT_PENDING | GL_BIT_OWNED)

/*
 * Acquire the global lock.  If busy, set the pending bit.  The caller
 * will wait for notification from the BIOS that the lock is available
 * and then attempt to acquire it again.
 */
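/*
 * Worked example of the bit manipulation below (illustrative values; only
 * the two low bits of the lock word are used):
 *   old = 0x0 (free)          -> new = 0x2 (owned)           -> GL_ACQUIRED
 *   old = 0x2 (already owned) -> new = 0x3 (owned | pending) -> GL_BUSY,
 *     and the caller waits for the BIOS notification before retrying.
 */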
int
acpi_acquire_global_lock(uint32_t *lock)
{
	uint32_t new, old;

	do {
		old = *lock;
		new = ((old & ~GL_BIT_MASK) | GL_BIT_OWNED) |
			((old >> 1) & GL_BIT_PENDING);
	} while (atomic_cmpset_acq_int(lock, old, new) == 0);

	return ((new < GL_BIT_MASK) ? GL_ACQUIRED : GL_BUSY);
}

/*
 * Release the global lock, returning whether there is a waiter pending.
 * If the BIOS set the pending bit, OSPM must notify the BIOS when it
 * releases the lock.
 */
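/*
 * Worked example (illustrative values): releasing with old = 0x3
 * (owned | pending) clears both bits and returns GL_BIT_PENDING, telling
 * the caller it must notify the BIOS; with old = 0x2 the return value is 0
 * and no notification is needed.
 */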
int
acpi_release_global_lock(uint32_t *lock)
{
	uint32_t new, old;

	do {
		old = *lock;
		new = old & ~GL_BIT_MASK;
	} while (atomic_cmpset_rel_int(lock, old, new) == 0);

	return (old & GL_BIT_PENDING);
}