/* OsdSynch.c, revision 128981 */
1/*-
2 * Copyright (c) 2000 Michael Smith
3 * Copyright (c) 2000 BSDi
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 *	$FreeBSD: head/sys/dev/acpica/Osd/OsdSynch.c 128981 2004-05-05 20:07:52Z njl $
28 */
29
30/*
31 * 6.1 : Mutual Exclusion and Synchronisation
32 */
33
34#include "acpi.h"
35
36#include "opt_acpi.h"
37#include <sys/kernel.h>
38#include <sys/malloc.h>
39#include <sys/sysctl.h>
40#if __FreeBSD_version >= 500000
41#include <sys/lock.h>
42#include <sys/mutex.h>
43#endif
44
45#define _COMPONENT	ACPI_OS_SERVICES
46ACPI_MODULE_NAME("SYNCH")
47
48MALLOC_DEFINE(M_ACPISEM, "acpisem", "ACPI semaphore");
49
/*
 * Locking for the semaphore state.  Pre-5.0 kernels have no mutexes, so
 * fall back to masking interrupts with splhigh()/splx() and emulate
 * msleep() with tsleep() (which takes no lock argument); on 5.x and
 * later each semaphore carries its own mutex.
 */
#if __FreeBSD_version < 500000
# define AS_LOCK(as)		s = splhigh()
# define AS_UNLOCK(as)		splx(s)
# define AS_LOCK_DECL		int s
# define msleep(a, b, c, d, e)	tsleep(a, c, d, e)
#else
# define AS_LOCK(as)		mtx_lock(&(as)->as_mtx)
# define AS_UNLOCK(as)		mtx_unlock(&(as)->as_mtx)
# define AS_LOCK_DECL
#endif
60
61/*
62 * Simple counting semaphore implemented using a mutex.  (Subsequently used
63 * in the OSI code to implement a mutex.  Go figure.)
64 */
65struct acpi_semaphore {
66#if __FreeBSD_version >= 500000
67    struct mtx	as_mtx;
68#endif
69    UINT32	as_units;
70    UINT32	as_maxunits;
71    UINT32	as_pendings;
72    UINT32	as_resetting;
73    UINT32	as_timeouts;
74};
75
#ifndef ACPI_NO_SEMAPHORES
/* Upper bound on threads allowed to sleep on one semaphore at a time. */
#ifndef ACPI_SEMAPHORES_MAX_PENDING
#define ACPI_SEMAPHORES_MAX_PENDING	4
#endif
/* Verbose semaphore tracing; settable as a loader tunable or via sysctl. */
static int	acpi_semaphore_debug = 0;
TUNABLE_INT("debug.acpi_semaphore_debug", &acpi_semaphore_debug);
SYSCTL_DECL(_debug_acpi);
SYSCTL_INT(_debug_acpi, OID_AUTO, semaphore_debug, CTLFLAG_RW,
	   &acpi_semaphore_debug, 0, "Enable ACPI semaphore debug messages");
#endif /* !ACPI_NO_SEMAPHORES */
86
87ACPI_STATUS
88AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
89    ACPI_HANDLE *OutHandle)
90{
91#ifndef ACPI_NO_SEMAPHORES
92    struct acpi_semaphore	*as;
93
94    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
95
96    if (OutHandle == NULL)
97	return_ACPI_STATUS (AE_BAD_PARAMETER);
98    if (InitialUnits > MaxUnits)
99	return_ACPI_STATUS (AE_BAD_PARAMETER);
100
101    if ((as = malloc(sizeof(*as), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
102	return_ACPI_STATUS (AE_NO_MEMORY);
103
104#if __FreeBSD_version >= 500000
105    mtx_init(&as->as_mtx, "ACPI semaphore", NULL, MTX_DEF);
106#endif
107    as->as_units = InitialUnits;
108    as->as_maxunits = MaxUnits;
109    as->as_pendings = as->as_resetting = as->as_timeouts = 0;
110
111    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
112	"created semaphore %p max %d, initial %d\n",
113	as, InitialUnits, MaxUnits));
114
115    *OutHandle = (ACPI_HANDLE)as;
116#else
117    *OutHandle = (ACPI_HANDLE)OutHandle;
118#endif /* !ACPI_NO_SEMAPHORES */
119
120    return_ACPI_STATUS (AE_OK);
121}
122
123ACPI_STATUS
124AcpiOsDeleteSemaphore(ACPI_HANDLE Handle)
125{
126#ifndef ACPI_NO_SEMAPHORES
127    struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;
128
129    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
130
131    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "destroyed semaphore %p\n", as));
132#if __FreeBSD_version >= 500000
133    mtx_destroy(&as->as_mtx);
134#endif
135    free(Handle, M_ACPISEM);
136#endif /* !ACPI_NO_SEMAPHORES */
137
138    return_ACPI_STATUS (AE_OK);
139}
140
141/*
142 * This implementation has a bug, in that it has to stall for the entire
143 * timeout before it will return AE_TIME.  A better implementation would
144 * use getmicrotime() to correctly adjust the timeout after being woken up.
145 */
146ACPI_STATUS
147AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout)
148{
149#ifndef ACPI_NO_SEMAPHORES
150    ACPI_STATUS			result;
151    struct acpi_semaphore	*as = (struct acpi_semaphore *)Handle;
152    int				rv, tmo;
153    struct timeval		timeouttv, currenttv, timelefttv;
154    AS_LOCK_DECL;
155
156    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
157
158    if (as == NULL)
159	return_ACPI_STATUS (AE_BAD_PARAMETER);
160
161    if (cold)
162	return_ACPI_STATUS (AE_OK);
163
164#if 0
165    if (as->as_units < Units && as->as_timeouts > 10) {
166	printf("%s: semaphore %p too many timeouts, resetting\n", __func__, as);
167	AS_LOCK(as);
168	as->as_units = as->as_maxunits;
169	if (as->as_pendings)
170	    as->as_resetting = 1;
171	as->as_timeouts = 0;
172	wakeup(as);
173	AS_UNLOCK(as);
174	return_ACPI_STATUS (AE_TIME);
175    }
176
177    if (as->as_resetting)
178	return_ACPI_STATUS (AE_TIME);
179#endif
180
181    /* a timeout of ACPI_WAIT_FOREVER means "forever" */
182    if (Timeout == ACPI_WAIT_FOREVER) {
183	tmo = 0;
184	timeouttv.tv_sec = ((0xffff/1000) + 1);	/* cf. ACPI spec */
185	timeouttv.tv_usec = 0;
186    } else {
187	/* compute timeout using microseconds per tick */
188	tmo = (Timeout * 1000) / (1000000 / hz);
189	if (tmo <= 0)
190	    tmo = 1;
191	timeouttv.tv_sec  = Timeout / 1000;
192	timeouttv.tv_usec = (Timeout % 1000) * 1000;
193    }
194
195    /* calculate timeout value in timeval */
196    getmicrotime(&currenttv);
197    timevaladd(&timeouttv, &currenttv);
198
199    AS_LOCK(as);
200    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
201	"get %d units from semaphore %p (has %d), timeout %d\n",
202	Units, as, as->as_units, Timeout));
203    for (;;) {
204	if (as->as_maxunits == ACPI_NO_UNIT_LIMIT) {
205	    result = AE_OK;
206	    break;
207	}
208	if (as->as_units >= Units) {
209	    as->as_units -= Units;
210	    result = AE_OK;
211	    break;
212	}
213
214	/* limit number of pending treads */
215	if (as->as_pendings >= ACPI_SEMAPHORES_MAX_PENDING) {
216	    result = AE_TIME;
217	    break;
218	}
219
220	/* if timeout values of zero is specified, return immediately */
221	if (Timeout == 0) {
222	    result = AE_TIME;
223	    break;
224	}
225
226#if __FreeBSD_version >= 500000
227	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
228	    "semaphore blocked, calling msleep(%p, %p, %d, \"acsem\", %d)\n",
229	    as, &as->as_mtx, PCATCH, tmo));
230#endif
231
232	as->as_pendings++;
233
234	if (acpi_semaphore_debug) {
235	    printf("%s: Sleep %d, pending %d, semaphore %p, thread %d\n",
236		__func__, Timeout, as->as_pendings, as, AcpiOsGetThreadId());
237	}
238
239	rv = msleep(as, &as->as_mtx, PCATCH, "acsem", tmo);
240
241	as->as_pendings--;
242
243#if 0
244	if (as->as_resetting) {
245	    /* semaphore reset, return immediately */
246	    if (as->as_pendings == 0) {
247		as->as_resetting = 0;
248	    }
249	    result = AE_TIME;
250	    break;
251	}
252#endif
253
254	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "msleep(%d) returned %d\n", tmo, rv));
255	if (rv == EWOULDBLOCK) {
256	    result = AE_TIME;
257	    break;
258	}
259
260	/* check if we already awaited enough */
261	timelefttv = timeouttv;
262	getmicrotime(&currenttv);
263	timevalsub(&timelefttv, &currenttv);
264	if (timelefttv.tv_sec < 0) {
265	    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "await semaphore %p timeout\n",
266		as));
267	    result = AE_TIME;
268	    break;
269	}
270
271	/* adjust timeout for the next sleep */
272	tmo = (timelefttv.tv_sec * 1000000 + timelefttv.tv_usec) /
273	    (1000000 / hz);
274	if (tmo <= 0)
275	    tmo = 1;
276
277	if (acpi_semaphore_debug) {
278	    printf("%s: Wakeup timeleft(%lu, %lu), tmo %u, sem %p, thread %d\n",
279		__func__, timelefttv.tv_sec, timelefttv.tv_usec, tmo, as,
280		AcpiOsGetThreadId());
281	}
282    }
283
284    if (acpi_semaphore_debug) {
285	if (result == AE_TIME && Timeout > 0) {
286	    printf("%s: Timeout %d, pending %d, semaphore %p\n",
287		__func__, Timeout, as->as_pendings, as);
288	}
289	if (result == AE_OK && (as->as_timeouts > 0 || as->as_pendings > 0)) {
290	    printf("%s: Acquire %d, units %d, pending %d, sem %p, thread %d\n",
291		__func__, Units, as->as_units, as->as_pendings, as,
292		AcpiOsGetThreadId());
293	}
294    }
295
296    if (result == AE_TIME)
297	as->as_timeouts++;
298    else
299	as->as_timeouts = 0;
300
301    AS_UNLOCK(as);
302    return_ACPI_STATUS (result);
303#else
304    return_ACPI_STATUS (AE_OK);
305#endif /* !ACPI_NO_SEMAPHORES */
306}
307
308ACPI_STATUS
309AcpiOsSignalSemaphore(ACPI_HANDLE Handle, UINT32 Units)
310{
311#ifndef ACPI_NO_SEMAPHORES
312    struct acpi_semaphore	*as = (struct acpi_semaphore *)Handle;
313    AS_LOCK_DECL;
314
315    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
316
317    if (as == NULL)
318	return_ACPI_STATUS(AE_BAD_PARAMETER);
319
320    AS_LOCK(as);
321    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
322	"return %d units to semaphore %p (has %d)\n",
323	Units, as, as->as_units));
324    if (as->as_maxunits != ACPI_NO_UNIT_LIMIT) {
325	as->as_units += Units;
326	if (as->as_units > as->as_maxunits)
327	    as->as_units = as->as_maxunits;
328    }
329
330    if (acpi_semaphore_debug && (as->as_timeouts > 0 || as->as_pendings > 0)) {
331	printf("%s: Release %d, units %d, pending %d, semaphore %p, thread %d\n",
332	    __func__, Units, as->as_units, as->as_pendings, as, AcpiOsGetThreadId());
333    }
334
335    wakeup(as);
336    AS_UNLOCK(as);
337#endif /* !ACPI_NO_SEMAPHORES */
338
339    return_ACPI_STATUS (AE_OK);
340}
341
342ACPI_STATUS
343AcpiOsCreateLock (ACPI_HANDLE *OutHandle)
344{
345    struct mtx *m;
346
347    if (OutHandle == NULL)
348	return (AE_BAD_PARAMETER);
349    m = malloc(sizeof(*m), M_ACPISEM, M_NOWAIT | M_ZERO);
350    if (m == NULL)
351	return (AE_NO_MEMORY);
352
353    mtx_init(m, "acpica subsystem lock", NULL, MTX_DEF);
354    *OutHandle = (ACPI_HANDLE)m;
355    return (AE_OK);
356}
357
358void
359AcpiOsDeleteLock (ACPI_HANDLE Handle)
360{
361    struct mtx *m = (struct mtx *)Handle;
362
363    if (Handle == NULL)
364        return;
365    mtx_destroy(m);
366}
367
368/*
369 * The Flags parameter seems to state whether or not caller is an ISR
370 * (and thus can't block) but since we have ithreads, we don't worry
371 * about potentially blocking.
372 */
373void
374AcpiOsAcquireLock (ACPI_HANDLE Handle, UINT32 Flags)
375{
376    struct mtx *m = (struct mtx *)Handle;
377
378    if (Handle == NULL)
379	return;
380    mtx_lock(m);
381}
382
383void
384AcpiOsReleaseLock (ACPI_HANDLE Handle, UINT32 Flags)
385{
386    struct mtx *m = (struct mtx *)Handle;
387
388    if (Handle == NULL)
389	return;
390    mtx_unlock(m);
391}
392
393/* Section 5.2.9.1:  global lock acquire/release functions */
394#define GL_ACQUIRED	(-1)
395#define GL_BUSY		0
396#define GL_BIT_PENDING	0x1
397#define GL_BIT_OWNED	0x2
398#define GL_BIT_MASK	(GL_BIT_PENDING | GL_BIT_OWNED)
399
400/*
401 * Acquire the global lock.  If busy, set the pending bit.  The caller
402 * will wait for notification from the BIOS that the lock is available
403 * and then attempt to acquire it again.
404 */
405int
406acpi_acquire_global_lock(uint32_t *lock)
407{
408	uint32_t new, old;
409
410	do {
411		old = *lock;
412		new = ((old & ~GL_BIT_MASK) | GL_BIT_OWNED) |
413			((old >> 1) & GL_BIT_PENDING);
414	} while (atomic_cmpset_acq_int(lock, old, new) == 0);
415
416	return ((new < GL_BIT_MASK) ? GL_ACQUIRED : GL_BUSY);
417}
418
419/*
420 * Release the global lock, returning whether there is a waiter pending.
421 * If the BIOS set the pending bit, OSPM must notify the BIOS when it
422 * releases the lock.
423 */
424int
425acpi_release_global_lock(uint32_t *lock)
426{
427	uint32_t new, old;
428
429	do {
430		old = *lock;
431		new = old & ~GL_BIT_MASK;
432	} while (atomic_cmpset_rel_int(lock, old, new) == 0);
433
434	return (old & GL_BIT_PENDING);
435}
436