/* OsdSynch.c revision 193530 */
/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * 6.1 : Mutual Exclusion and Synchronisation
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/acpica/Osd/OsdSynch.c 193530 2009-06-05 18:44:36Z jkim $");

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>

#include "opt_acpi.h"
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/* ACPICA debug-trace component identity for this translation unit. */
#define _COMPONENT	ACPI_OS_SERVICES
ACPI_MODULE_NAME("SYNCH")

/* malloc(9) type used for all semaphore and spinlock allocations below. */
MALLOC_DEFINE(M_ACPISEM, "acpisem", "ACPI semaphore");

/* Shorthand for taking/dropping a semaphore's internal mutex. */
#define AS_LOCK(as)	mtx_lock(&(as)->as_mtx)
#define AS_UNLOCK(as)	mtx_unlock(&(as)->as_mtx)

/*
 * Simple counting semaphore implemented using a mutex.  (Subsequently used
 * in the OSI code to implement a mutex.  Go figure.)
 */
struct acpi_semaphore {
    struct mtx	as_mtx;		/* protects every field below */
    UINT32	as_units;	/* units currently available */
    UINT32	as_maxunits;	/* cap on as_units (or ACPI_NO_UNIT_LIMIT) */
    UINT32	as_pendings;	/* threads currently sleeping on this sem */
    UINT32	as_resetting;	/* draining sleepers after a forced reset
				   (only used by #if 0 code in Wait) */
    UINT32	as_timeouts;	/* consecutive timed-out waits */
};

/* Default number of maximum pending threads. */
#ifndef ACPI_NO_SEMAPHORES
#ifndef ACPI_SEMAPHORES_MAX_PENDING
#define ACPI_SEMAPHORES_MAX_PENDING	4
#endif

/*
 * Verbose semaphore logging, switchable at boot (tunable) or runtime
 * (sysctl) via debug.acpi.semaphore_debug.
 */
static int	acpi_semaphore_debug = 0;
TUNABLE_INT("debug.acpi_semaphore_debug", &acpi_semaphore_debug);
SYSCTL_DECL(_debug_acpi);
SYSCTL_INT(_debug_acpi, OID_AUTO, semaphore_debug, CTLFLAG_RW,
	   &acpi_semaphore_debug, 0, "Enable ACPI semaphore debug messages");
#endif /* !ACPI_NO_SEMAPHORES */

79ACPI_STATUS
80AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
81    ACPI_SEMAPHORE *OutHandle)
82{
83#ifndef ACPI_NO_SEMAPHORES
84    struct acpi_semaphore	*as;
85
86    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
87
88    if (OutHandle == NULL)
89	return_ACPI_STATUS (AE_BAD_PARAMETER);
90    if (InitialUnits > MaxUnits)
91	return_ACPI_STATUS (AE_BAD_PARAMETER);
92
93    if ((as = malloc(sizeof(*as), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
94	return_ACPI_STATUS (AE_NO_MEMORY);
95
96    mtx_init(&as->as_mtx, "ACPI semaphore", NULL, MTX_DEF);
97    as->as_units = InitialUnits;
98    as->as_maxunits = MaxUnits;
99    as->as_pendings = as->as_resetting = as->as_timeouts = 0;
100
101    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
102	"created semaphore %p max %d, initial %d\n",
103	as, InitialUnits, MaxUnits));
104
105    *OutHandle = (ACPI_HANDLE)as;
106#else
107    *OutHandle = (ACPI_HANDLE)OutHandle;
108#endif /* !ACPI_NO_SEMAPHORES */
109
110    return_ACPI_STATUS (AE_OK);
111}
112
113ACPI_STATUS
114AcpiOsDeleteSemaphore(ACPI_SEMAPHORE Handle)
115{
116#ifndef ACPI_NO_SEMAPHORES
117    struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;
118
119    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
120
121    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "destroyed semaphore %p\n", as));
122    mtx_destroy(&as->as_mtx);
123    free(Handle, M_ACPISEM);
124#endif /* !ACPI_NO_SEMAPHORES */
125
126    return_ACPI_STATUS (AE_OK);
127}
128
/*
 * This implementation has a bug, in that it has to stall for the entire
 * timeout before it will return AE_TIME.  A better implementation would
 * use getmicrotime() to correctly adjust the timeout after being woken up.
 */
/*
 * Acquire Units from the semaphore, sleeping for at most Timeout
 * milliseconds.  Timeout == ACPI_WAIT_FOREVER means wait indefinitely;
 * Timeout == 0 means poll.  Returns AE_OK on success, AE_TIME on timeout
 * (or when too many threads are already pending), AE_BAD_PARAMETER for a
 * NULL handle.
 */
ACPI_STATUS
AcpiOsWaitSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units, UINT16 Timeout)
{
#ifndef ACPI_NO_SEMAPHORES
    ACPI_STATUS			result;
    struct acpi_semaphore	*as = (struct acpi_semaphore *)Handle;
    int				rv, tmo;
    struct timeval		timeouttv, currenttv, timelefttv;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (as == NULL)
	return_ACPI_STATUS (AE_BAD_PARAMETER);

    /* Cannot sleep during early boot; claim success instead of blocking. */
    if (cold)
	return_ACPI_STATUS (AE_OK);

#if 0
    /* Disabled recovery path: forcibly reset a repeatedly-timing-out sem. */
    if (as->as_units < Units && as->as_timeouts > 10) {
	printf("%s: semaphore %p too many timeouts, resetting\n", __func__, as);
	AS_LOCK(as);
	as->as_units = as->as_maxunits;
	if (as->as_pendings)
	    as->as_resetting = 1;
	as->as_timeouts = 0;
	wakeup(as);
	AS_UNLOCK(as);
	return_ACPI_STATUS (AE_TIME);
    }

    if (as->as_resetting)
	return_ACPI_STATUS (AE_TIME);
#endif

    /* a timeout of ACPI_WAIT_FOREVER means "forever" */
    if (Timeout == ACPI_WAIT_FOREVER) {
	tmo = 0;	/* msleep() treats a 0 tick count as "no timeout" */
	timeouttv.tv_sec = ((0xffff/1000) + 1);	/* cf. ACPI spec */
	timeouttv.tv_usec = 0;
    } else {
	/* compute timeout using microseconds per tick */
	tmo = (Timeout * 1000) / (1000000 / hz);
	if (tmo <= 0)
	    tmo = 1;
	timeouttv.tv_sec  = Timeout / 1000;
	timeouttv.tv_usec = (Timeout % 1000) * 1000;
    }

    /* calculate timeout value in timeval */
    getmicrotime(&currenttv);
    timevaladd(&timeouttv, &currenttv);	/* timeouttv becomes an absolute deadline */

    AS_LOCK(as);
    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	"get %d units from semaphore %p (has %d), timeout %d\n",
	Units, as, as->as_units, Timeout));
    for (;;) {
	/* Unlimited semaphores always grant the request. */
	if (as->as_maxunits == ACPI_NO_UNIT_LIMIT) {
	    result = AE_OK;
	    break;
	}
	/* Enough units available: take them and succeed. */
	if (as->as_units >= Units) {
	    as->as_units -= Units;
	    result = AE_OK;
	    break;
	}

	/* limit number of pending threads */
	if (as->as_pendings >= ACPI_SEMAPHORES_MAX_PENDING) {
	    result = AE_TIME;
	    break;
	}

	/* if timeout values of zero is specified, return immediately */
	if (Timeout == 0) {
	    result = AE_TIME;
	    break;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	    "semaphore blocked, calling msleep(%p, %p, %d, \"acsem\", %d)\n",
	    as, &as->as_mtx, PCATCH, tmo));

	as->as_pendings++;

	if (acpi_semaphore_debug) {
	    printf("%s: Sleep %d, pending %d, semaphore %p, thread %d\n",
		__func__, Timeout, as->as_pendings, as, AcpiOsGetThreadId());
	}

	/*
	 * Sleep (releasing as_mtx) until AcpiOsSignalSemaphore() wakes us
	 * or tmo ticks elapse; as_mtx is reacquired before msleep returns.
	 */
	rv = msleep(as, &as->as_mtx, PCATCH, "acsem", tmo);

	as->as_pendings--;

#if 0
	if (as->as_resetting) {
	    /* semaphore reset, return immediately */
	    if (as->as_pendings == 0) {
		as->as_resetting = 0;
	    }
	    result = AE_TIME;
	    break;
	}
#endif

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "msleep(%d) returned %d\n", tmo, rv));
	if (rv == EWOULDBLOCK) {
	    result = AE_TIME;
	    break;
	}

	/* check if we already awaited enough */
	timelefttv = timeouttv;
	getmicrotime(&currenttv);
	timevalsub(&timelefttv, &currenttv);
	if (timelefttv.tv_sec < 0) {
	    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "await semaphore %p timeout\n",
		as));
	    result = AE_TIME;
	    break;
	}

	/* adjust timeout for the next sleep */
	tmo = (timelefttv.tv_sec * 1000000 + timelefttv.tv_usec) /
	    (1000000 / hz);
	if (tmo <= 0)
	    tmo = 1;

	if (acpi_semaphore_debug) {
	    printf("%s: Wakeup timeleft(%jd, %lu), tmo %u, sem %p, thread %d\n",
		__func__, (intmax_t)timelefttv.tv_sec, timelefttv.tv_usec, tmo, as,
		AcpiOsGetThreadId());
	}
    }

    if (acpi_semaphore_debug) {
	if (result == AE_TIME && Timeout > 0) {
	    printf("%s: Timeout %d, pending %d, semaphore %p\n",
		__func__, Timeout, as->as_pendings, as);
	}
	if (result == AE_OK && (as->as_timeouts > 0 || as->as_pendings > 0)) {
	    printf("%s: Acquire %d, units %d, pending %d, sem %p, thread %d\n",
		__func__, Units, as->as_units, as->as_pendings, as,
		AcpiOsGetThreadId());
	}
    }

    /* Track consecutive timeouts (consumed by the disabled reset logic). */
    if (result == AE_TIME)
	as->as_timeouts++;
    else
	as->as_timeouts = 0;

    AS_UNLOCK(as);
    return_ACPI_STATUS (result);
#else
    return_ACPI_STATUS (AE_OK);
#endif /* !ACPI_NO_SEMAPHORES */
}

293ACPI_STATUS
294AcpiOsSignalSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units)
295{
296#ifndef ACPI_NO_SEMAPHORES
297    struct acpi_semaphore	*as = (struct acpi_semaphore *)Handle;
298
299    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
300
301    if (as == NULL)
302	return_ACPI_STATUS(AE_BAD_PARAMETER);
303
304    AS_LOCK(as);
305    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
306	"return %d units to semaphore %p (has %d)\n",
307	Units, as, as->as_units));
308    if (as->as_maxunits != ACPI_NO_UNIT_LIMIT) {
309	as->as_units += Units;
310	if (as->as_units > as->as_maxunits)
311	    as->as_units = as->as_maxunits;
312    }
313
314    if (acpi_semaphore_debug && (as->as_timeouts > 0 || as->as_pendings > 0)) {
315	printf("%s: Release %d, units %d, pending %d, semaphore %p, thread %d\n",
316	    __func__, Units, as->as_units, as->as_pendings, as, AcpiOsGetThreadId());
317    }
318
319    wakeup(as);
320    AS_UNLOCK(as);
321#endif /* !ACPI_NO_SEMAPHORES */
322
323    return_ACPI_STATUS (AE_OK);
324}
325
/* Combined mutex + mutex name storage since the latter must persist. */
struct acpi_spinlock {
    struct mtx	lock;		/* the lock itself (a sleep mutex, MTX_DEF) */
    char	name[32];	/* name passed to mtx_init(); must outlive the mutex */
};

332ACPI_STATUS
333AcpiOsCreateLock (ACPI_SPINLOCK *OutHandle)
334{
335    struct acpi_spinlock *h;
336
337    if (OutHandle == NULL)
338	return (AE_BAD_PARAMETER);
339    h = malloc(sizeof(*h), M_ACPISEM, M_NOWAIT | M_ZERO);
340    if (h == NULL)
341	return (AE_NO_MEMORY);
342
343    /* Build a unique name based on the address of the handle. */
344    if (OutHandle == &AcpiGbl_GpeLock)
345	snprintf(h->name, sizeof(h->name), "acpi subsystem GPE lock");
346    else if (OutHandle == &AcpiGbl_HardwareLock)
347	snprintf(h->name, sizeof(h->name), "acpi subsystem HW lock");
348    else
349	snprintf(h->name, sizeof(h->name), "acpi subsys %p", OutHandle);
350    mtx_init(&h->lock, h->name, NULL, MTX_DEF|MTX_RECURSE);
351    *OutHandle = (ACPI_SPINLOCK)h;
352    return (AE_OK);
353}
354
355void
356AcpiOsDeleteLock (ACPI_SPINLOCK Handle)
357{
358    struct acpi_spinlock *h = (struct acpi_spinlock *)Handle;
359
360    if (Handle == NULL)
361        return;
362    mtx_destroy(&h->lock);
363    free(h, M_ACPISEM);
364}
365
366/*
367 * The Flags parameter seems to state whether or not caller is an ISR
368 * (and thus can't block) but since we have ithreads, we don't worry
369 * about potentially blocking.
370 */
371ACPI_CPU_FLAGS
372AcpiOsAcquireLock (ACPI_SPINLOCK Handle)
373{
374    struct acpi_spinlock *h = (struct acpi_spinlock *)Handle;
375
376    if (Handle == NULL)
377	return (0);
378    mtx_lock(&h->lock);
379    return (0);
380}
381
382void
383AcpiOsReleaseLock (ACPI_SPINLOCK Handle, ACPI_CPU_FLAGS Flags)
384{
385    struct acpi_spinlock *h = (struct acpi_spinlock *)Handle;
386
387    if (Handle == NULL)
388	return;
389    mtx_unlock(&h->lock);
390}
391
/* Section 5.2.9.1:  global lock acquire/release functions */
#define GL_ACQUIRED	(-1)	/* lock was obtained by the caller */
#define GL_BUSY		0	/* lock is held elsewhere; pending bit was set */
#define GL_BIT_PENDING	0x1	/* a waiter wants notification on release */
#define GL_BIT_OWNED	0x2	/* lock is currently held */
#define GL_BIT_MASK	(GL_BIT_PENDING | GL_BIT_OWNED)

/*
 * Acquire the global lock.  If busy, set the pending bit.  The caller
 * will wait for notification from the BIOS that the lock is available
 * and then attempt to acquire it again.
 */
int
acpi_acquire_global_lock(uint32_t *lock)
{
	uint32_t new, old;

	do {
		old = *lock;
		/*
		 * Always set the owned bit.  If the lock was already owned,
		 * also set the pending bit: (old >> 1) & GL_BIT_PENDING
		 * copies the old owned bit into the pending position, so
		 * the current owner knows to notify us when it releases.
		 * All bits above the low two are preserved unchanged.
		 */
		new = ((old & ~GL_BIT_MASK) | GL_BIT_OWNED) |
			((old >> 1) & GL_BIT_PENDING);
	} while (atomic_cmpset_acq_int(lock, old, new) == 0);

	/*
	 * We own the lock only if we did not have to set the pending bit,
	 * i.e. new's low two bits are exactly GL_BIT_OWNED.  The `< mask'
	 * comparison mirrors the spec's cmp/sbb idiom and assumes the
	 * upper (reserved) bits are zero.
	 */
	return ((new < GL_BIT_MASK) ? GL_ACQUIRED : GL_BUSY);
}

418/*
419 * Release the global lock, returning whether there is a waiter pending.
420 * If the BIOS set the pending bit, OSPM must notify the BIOS when it
421 * releases the lock.
422 */
423int
424acpi_release_global_lock(uint32_t *lock)
425{
426	uint32_t new, old;
427
428	do {
429		old = *lock;
430		new = old & ~GL_BIT_MASK;
431	} while (atomic_cmpset_rel_int(lock, old, new) == 0);
432
433	return (old & GL_BIT_PENDING);
434}
435