1/*-
2 * Copyright (c) 2000 Takanori Watanabe <takawata@jp.freebsd.org>
3 * Copyright (c) 2000 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
4 * Copyright (c) 2000, 2001 Michael Smith
5 * Copyright (c) 2000 BSDi
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
#include <sys/cdefs.h>
#include "opt_acpi.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/reboot.h>
#include <sys/sysctl.h>
#include <sys/ctype.h>
#include <sys/linker.h>
#include <sys/mount.h>
#include <sys/power.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/timetc.h>
#include <sys/uuid.h>

#if defined(__i386__) || defined(__amd64__)
#include <machine/clock.h>
#include <machine/pci_cfgreg.h>
#endif
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <isa/isavar.h>
#include <isa/pnpvar.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <contrib/dev/acpica/include/acnamesp.h>

#include <dev/acpica/acpivar.h>
#include <dev/acpica/acpiio.h>

#include <dev/pci/pcivar.h>

#include <vm/vm_param.h>
75
76static MALLOC_DEFINE(M_ACPIDEV, "acpidev", "ACPI devices");
77
78/* Hooks for the ACPI CA debugging infrastructure */
79#define _COMPONENT	ACPI_BUS
80ACPI_MODULE_NAME("ACPI")
81
82static d_open_t		acpiopen;
83static d_close_t	acpiclose;
84static d_ioctl_t	acpiioctl;
85
86static struct cdevsw acpi_cdevsw = {
87	.d_version =	D_VERSION,
88	.d_open =	acpiopen,
89	.d_close =	acpiclose,
90	.d_ioctl =	acpiioctl,
91	.d_name =	"acpi",
92};
93
94struct acpi_interface {
95	ACPI_STRING	*data;
96	int		num;
97};
98
99static char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL };
100
101/* Global mutex for locking access to the ACPI subsystem. */
102struct mtx	acpi_mutex;
103struct callout	acpi_sleep_timer;
104
105/* Bitmap of device quirks. */
106int		acpi_quirks;
107
108/* Supported sleep states. */
109static BOOLEAN	acpi_sleep_states[ACPI_S_STATE_COUNT];
110
111static void	acpi_lookup(void *arg, const char *name, device_t *dev);
112static int	acpi_modevent(struct module *mod, int event, void *junk);
113
114static device_probe_t		acpi_probe;
115static device_attach_t		acpi_attach;
116static device_suspend_t		acpi_suspend;
117static device_resume_t		acpi_resume;
118static device_shutdown_t	acpi_shutdown;
119
120static bus_add_child_t		acpi_add_child;
121static bus_print_child_t	acpi_print_child;
122static bus_probe_nomatch_t	acpi_probe_nomatch;
123static bus_driver_added_t	acpi_driver_added;
124static bus_child_deleted_t	acpi_child_deleted;
125static bus_read_ivar_t		acpi_read_ivar;
126static bus_write_ivar_t		acpi_write_ivar;
127static bus_get_resource_list_t	acpi_get_rlist;
128static bus_get_rman_t		acpi_get_rman;
129static bus_set_resource_t	acpi_set_resource;
130static bus_alloc_resource_t	acpi_alloc_resource;
131static bus_adjust_resource_t	acpi_adjust_resource;
132static bus_release_resource_t	acpi_release_resource;
133static bus_delete_resource_t	acpi_delete_resource;
134static bus_activate_resource_t	acpi_activate_resource;
135static bus_deactivate_resource_t acpi_deactivate_resource;
136static bus_map_resource_t	acpi_map_resource;
137static bus_unmap_resource_t	acpi_unmap_resource;
138static bus_child_pnpinfo_t	acpi_child_pnpinfo_method;
139static bus_child_location_t	acpi_child_location_method;
140static bus_hint_device_unit_t	acpi_hint_device_unit;
141static bus_get_property_t	acpi_bus_get_prop;
142static bus_get_device_path_t	acpi_get_device_path;
143
144static acpi_id_probe_t		acpi_device_id_probe;
145static acpi_evaluate_object_t	acpi_device_eval_obj;
146static acpi_get_property_t	acpi_device_get_prop;
147static acpi_scan_children_t	acpi_device_scan_children;
148
149static isa_pnp_probe_t		acpi_isa_pnp_probe;
150
151static void	acpi_reserve_resources(device_t dev);
152static int	acpi_sysres_alloc(device_t dev);
153static uint32_t	acpi_isa_get_logicalid(device_t dev);
154static int	acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count);
155static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level,
156		    void *context, void **retval);
157static ACPI_STATUS acpi_find_dsd(struct acpi_device *ad);
158static void	acpi_platform_osc(device_t dev);
159static void	acpi_probe_children(device_t bus);
160static void	acpi_probe_order(ACPI_HANDLE handle, int *order);
161static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level,
162		    void *context, void **status);
163static void	acpi_sleep_enable(void *arg);
164static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc);
165static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state);
166static void	acpi_shutdown_final(void *arg, int howto);
167static void	acpi_enable_fixed_events(struct acpi_softc *sc);
168static void	acpi_resync_clock(struct acpi_softc *sc);
169static int	acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate);
170static int	acpi_wake_run_prep(ACPI_HANDLE handle, int sstate);
171static int	acpi_wake_prep_walk(int sstate);
172static int	acpi_wake_sysctl_walk(device_t dev);
173static int	acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS);
174static void	acpi_system_eventhandler_sleep(void *arg, int state);
175static void	acpi_system_eventhandler_wakeup(void *arg, int state);
176static int	acpi_sname2sstate(const char *sname);
177static const char *acpi_sstate2sname(int sstate);
178static int	acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
179static int	acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
180static int	acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS);
181static int	acpi_pm_func(u_long cmd, void *arg, ...);
182static void	acpi_enable_pcie(void);
183static void	acpi_reset_interfaces(device_t dev);
184
185static device_method_t acpi_methods[] = {
186    /* Device interface */
187    DEVMETHOD(device_probe,		acpi_probe),
188    DEVMETHOD(device_attach,		acpi_attach),
189    DEVMETHOD(device_shutdown,		acpi_shutdown),
190    DEVMETHOD(device_detach,		bus_generic_detach),
191    DEVMETHOD(device_suspend,		acpi_suspend),
192    DEVMETHOD(device_resume,		acpi_resume),
193
194    /* Bus interface */
195    DEVMETHOD(bus_add_child,		acpi_add_child),
196    DEVMETHOD(bus_print_child,		acpi_print_child),
197    DEVMETHOD(bus_probe_nomatch,	acpi_probe_nomatch),
198    DEVMETHOD(bus_driver_added,		acpi_driver_added),
199    DEVMETHOD(bus_child_deleted,	acpi_child_deleted),
200    DEVMETHOD(bus_read_ivar,		acpi_read_ivar),
201    DEVMETHOD(bus_write_ivar,		acpi_write_ivar),
202    DEVMETHOD(bus_get_resource_list,	acpi_get_rlist),
203    DEVMETHOD(bus_get_rman,		acpi_get_rman),
204    DEVMETHOD(bus_set_resource,		acpi_set_resource),
205    DEVMETHOD(bus_get_resource,		bus_generic_rl_get_resource),
206    DEVMETHOD(bus_alloc_resource,	acpi_alloc_resource),
207    DEVMETHOD(bus_adjust_resource,	acpi_adjust_resource),
208    DEVMETHOD(bus_release_resource,	acpi_release_resource),
209    DEVMETHOD(bus_delete_resource,	acpi_delete_resource),
210    DEVMETHOD(bus_activate_resource,	acpi_activate_resource),
211    DEVMETHOD(bus_deactivate_resource,	acpi_deactivate_resource),
212    DEVMETHOD(bus_map_resource,		acpi_map_resource),
    DEVMETHOD(bus_unmap_resource,	acpi_unmap_resource),
214    DEVMETHOD(bus_child_pnpinfo,	acpi_child_pnpinfo_method),
215    DEVMETHOD(bus_child_location,	acpi_child_location_method),
216    DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
217    DEVMETHOD(bus_teardown_intr,	bus_generic_teardown_intr),
218    DEVMETHOD(bus_hint_device_unit,	acpi_hint_device_unit),
219    DEVMETHOD(bus_get_cpus,		acpi_get_cpus),
220    DEVMETHOD(bus_get_domain,		acpi_get_domain),
221    DEVMETHOD(bus_get_property,		acpi_bus_get_prop),
222    DEVMETHOD(bus_get_device_path,	acpi_get_device_path),
223
224    /* ACPI bus */
225    DEVMETHOD(acpi_id_probe,		acpi_device_id_probe),
226    DEVMETHOD(acpi_evaluate_object,	acpi_device_eval_obj),
227    DEVMETHOD(acpi_get_property,	acpi_device_get_prop),
228    DEVMETHOD(acpi_pwr_for_sleep,	acpi_device_pwr_for_sleep),
229    DEVMETHOD(acpi_scan_children,	acpi_device_scan_children),
230
231    /* ISA emulation */
232    DEVMETHOD(isa_pnp_probe,		acpi_isa_pnp_probe),
233
234    DEVMETHOD_END
235};
236
237static driver_t acpi_driver = {
238    "acpi",
239    acpi_methods,
240    sizeof(struct acpi_softc),
241};
242
243EARLY_DRIVER_MODULE(acpi, nexus, acpi_driver, acpi_modevent, 0,
244    BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
245MODULE_VERSION(acpi, 1);
246
247ACPI_SERIAL_DECL(acpi, "ACPI root bus");
248
249/* Local pools for managing system resources for ACPI child devices. */
250static struct rman acpi_rman_io, acpi_rman_mem;
251
252#define ACPI_MINIMUM_AWAKETIME	5
253
254/* Holds the description of the acpi0 device. */
255static char acpi_desc[ACPI_OEM_ID_SIZE + ACPI_OEM_TABLE_ID_SIZE + 2];
256
257SYSCTL_NODE(_debug, OID_AUTO, acpi, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
258    "ACPI debugging");
259static char acpi_ca_version[12];
260SYSCTL_STRING(_debug_acpi, OID_AUTO, acpi_ca_version, CTLFLAG_RD,
261	      acpi_ca_version, 0, "Version of Intel ACPI-CA");
262
263/*
264 * Allow overriding _OSI methods.
265 */
266static char acpi_install_interface[256];
267TUNABLE_STR("hw.acpi.install_interface", acpi_install_interface,
268    sizeof(acpi_install_interface));
269static char acpi_remove_interface[256];
270TUNABLE_STR("hw.acpi.remove_interface", acpi_remove_interface,
271    sizeof(acpi_remove_interface));
272
273/* Allow users to dump Debug objects without ACPI debugger. */
274static int acpi_debug_objects;
275TUNABLE_INT("debug.acpi.enable_debug_objects", &acpi_debug_objects);
276SYSCTL_PROC(_debug_acpi, OID_AUTO, enable_debug_objects,
277    CTLFLAG_RW | CTLTYPE_INT | CTLFLAG_MPSAFE, NULL, 0,
278    acpi_debug_objects_sysctl, "I",
279    "Enable Debug objects");
280
281/* Allow the interpreter to ignore common mistakes in BIOS. */
282static int acpi_interpreter_slack = 1;
283TUNABLE_INT("debug.acpi.interpreter_slack", &acpi_interpreter_slack);
284SYSCTL_INT(_debug_acpi, OID_AUTO, interpreter_slack, CTLFLAG_RDTUN,
285    &acpi_interpreter_slack, 1, "Turn on interpreter slack mode.");
286
287/* Ignore register widths set by FADT and use default widths instead. */
288static int acpi_ignore_reg_width = 1;
289TUNABLE_INT("debug.acpi.default_register_width", &acpi_ignore_reg_width);
290SYSCTL_INT(_debug_acpi, OID_AUTO, default_register_width, CTLFLAG_RDTUN,
291    &acpi_ignore_reg_width, 1, "Ignore register widths set by FADT");
292
293/* Allow users to override quirks. */
294TUNABLE_INT("debug.acpi.quirks", &acpi_quirks);
295
296int acpi_susp_bounce;
297SYSCTL_INT(_debug_acpi, OID_AUTO, suspend_bounce, CTLFLAG_RW,
298    &acpi_susp_bounce, 0, "Don't actually suspend, just test devices.");
299
300/*
301 * ACPI standard UUID for Device Specific Data Package
302 * "Device Properties UUID for _DSD" Rev. 2.0
303 */
304static const struct uuid acpi_dsd_uuid = {
305	0xdaffd814, 0x6eba, 0x4d8c, 0x8a, 0x91,
306	{ 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01 }
307};
308
309/*
310 * ACPI can only be loaded as a module by the loader; activating it after
311 * system bootstrap time is not useful, and can be fatal to the system.
312 * It also cannot be unloaded, since the entire system bus hierarchy hangs
313 * off it.
314 */
315static int
316acpi_modevent(struct module *mod, int event, void *junk)
317{
318    switch (event) {
319    case MOD_LOAD:
320	if (!cold) {
321	    printf("The ACPI driver cannot be loaded after boot.\n");
322	    return (EPERM);
323	}
324	break;
325    case MOD_UNLOAD:
326	if (!cold && power_pm_get_type() == POWER_PM_TYPE_ACPI)
327	    return (EBUSY);
328	break;
329    default:
330	break;
331    }
332    return (0);
333}
334
335/*
336 * Perform early initialization.
337 */
338ACPI_STATUS
339acpi_Startup(void)
340{
341    static int started = 0;
342    ACPI_STATUS status;
343    int val;
344
345    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
346
347    /* Only run the startup code once.  The MADT driver also calls this. */
348    if (started)
349	return_VALUE (AE_OK);
350    started = 1;
351
352    /*
353     * Initialize the ACPICA subsystem.
354     */
355    if (ACPI_FAILURE(status = AcpiInitializeSubsystem())) {
356	printf("ACPI: Could not initialize Subsystem: %s\n",
357	    AcpiFormatException(status));
358	return_VALUE (status);
359    }
360
361    /*
362     * Pre-allocate space for RSDT/XSDT and DSDT tables and allow resizing
363     * if more tables exist.
364     */
365    if (ACPI_FAILURE(status = AcpiInitializeTables(NULL, 2, TRUE))) {
366	printf("ACPI: Table initialisation failed: %s\n",
367	    AcpiFormatException(status));
368	return_VALUE (status);
369    }
370
371    /* Set up any quirks we have for this system. */
372    if (acpi_quirks == ACPI_Q_OK)
373	acpi_table_quirks(&acpi_quirks);
374
375    /* If the user manually set the disabled hint to 0, force-enable ACPI. */
376    if (resource_int_value("acpi", 0, "disabled", &val) == 0 && val == 0)
377	acpi_quirks &= ~ACPI_Q_BROKEN;
378    if (acpi_quirks & ACPI_Q_BROKEN) {
379	printf("ACPI disabled by blacklist.  Contact your BIOS vendor.\n");
380	status = AE_SUPPORT;
381    }
382
383    return_VALUE (status);
384}
385
386/*
387 * Detect ACPI and perform early initialisation.
388 */
389int
390acpi_identify(void)
391{
392    ACPI_TABLE_RSDP	*rsdp;
393    ACPI_TABLE_HEADER	*rsdt;
394    ACPI_PHYSICAL_ADDRESS paddr;
395    struct sbuf		sb;
396
397    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
398
399    if (!cold)
400	return (ENXIO);
401
402    /* Check that we haven't been disabled with a hint. */
403    if (resource_disabled("acpi", 0))
404	return (ENXIO);
405
406    /* Check for other PM systems. */
407    if (power_pm_get_type() != POWER_PM_TYPE_NONE &&
408	power_pm_get_type() != POWER_PM_TYPE_ACPI) {
409	printf("ACPI identify failed, other PM system enabled.\n");
410	return (ENXIO);
411    }
412
413    /* Initialize root tables. */
414    if (ACPI_FAILURE(acpi_Startup())) {
415	printf("ACPI: Try disabling either ACPI or apic support.\n");
416	return (ENXIO);
417    }
418
419    if ((paddr = AcpiOsGetRootPointer()) == 0 ||
420	(rsdp = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_RSDP))) == NULL)
421	return (ENXIO);
422    if (rsdp->Revision > 1 && rsdp->XsdtPhysicalAddress != 0)
423	paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->XsdtPhysicalAddress;
424    else
425	paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->RsdtPhysicalAddress;
426    AcpiOsUnmapMemory(rsdp, sizeof(ACPI_TABLE_RSDP));
427
428    if ((rsdt = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_HEADER))) == NULL)
429	return (ENXIO);
430    sbuf_new(&sb, acpi_desc, sizeof(acpi_desc), SBUF_FIXEDLEN);
431    sbuf_bcat(&sb, rsdt->OemId, ACPI_OEM_ID_SIZE);
432    sbuf_trim(&sb);
433    sbuf_putc(&sb, ' ');
434    sbuf_bcat(&sb, rsdt->OemTableId, ACPI_OEM_TABLE_ID_SIZE);
435    sbuf_trim(&sb);
436    sbuf_finish(&sb);
437    sbuf_delete(&sb);
438    AcpiOsUnmapMemory(rsdt, sizeof(ACPI_TABLE_HEADER));
439
440    snprintf(acpi_ca_version, sizeof(acpi_ca_version), "%x", ACPI_CA_VERSION);
441
442    return (0);
443}
444
445/*
446 * Fetch some descriptive data from ACPI to put in our attach message.
447 */
448static int
449acpi_probe(device_t dev)
450{
451
452    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
453
454    device_set_desc(dev, acpi_desc);
455
456    return_VALUE (BUS_PROBE_NOWILDCARD);
457}
458
459static int
460acpi_attach(device_t dev)
461{
462    struct acpi_softc	*sc;
463    ACPI_STATUS		status;
464    int			error, state;
465    UINT32		flags;
466    UINT8		TypeA, TypeB;
467    char		*env;
468
469    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
470
471    sc = device_get_softc(dev);
472    sc->acpi_dev = dev;
473    callout_init(&sc->susp_force_to, 1);
474
475    error = ENXIO;
476
477    /* Initialize resource manager. */
478    acpi_rman_io.rm_type = RMAN_ARRAY;
479    acpi_rman_io.rm_start = 0;
480    acpi_rman_io.rm_end = 0xffff;
481    acpi_rman_io.rm_descr = "ACPI I/O ports";
482    if (rman_init(&acpi_rman_io) != 0)
483	panic("acpi rman_init IO ports failed");
484    acpi_rman_mem.rm_type = RMAN_ARRAY;
485    acpi_rman_mem.rm_descr = "ACPI I/O memory addresses";
486    if (rman_init(&acpi_rman_mem) != 0)
487	panic("acpi rman_init memory failed");
488
489    resource_list_init(&sc->sysres_rl);
490
491    /* Initialise the ACPI mutex */
492    mtx_init(&acpi_mutex, "ACPI global lock", NULL, MTX_DEF);
493
494    /*
495     * Set the globals from our tunables.  This is needed because ACPI-CA
496     * uses UINT8 for some values and we have no tunable_byte.
497     */
498    AcpiGbl_EnableInterpreterSlack = acpi_interpreter_slack ? TRUE : FALSE;
499    AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE;
500    AcpiGbl_UseDefaultRegisterWidths = acpi_ignore_reg_width ? TRUE : FALSE;
501
502#ifndef ACPI_DEBUG
503    /*
504     * Disable all debugging layers and levels.
505     */
506    AcpiDbgLayer = 0;
507    AcpiDbgLevel = 0;
508#endif
509
510    /* Override OS interfaces if the user requested. */
511    acpi_reset_interfaces(dev);
512
513    /* Load ACPI name space. */
514    status = AcpiLoadTables();
515    if (ACPI_FAILURE(status)) {
516	device_printf(dev, "Could not load Namespace: %s\n",
517		      AcpiFormatException(status));
518	goto out;
519    }
520
521    /* Handle MCFG table if present. */
522    acpi_enable_pcie();
523
524    /*
525     * Note that some systems (specifically, those with namespace evaluation
526     * issues that require the avoidance of parts of the namespace) must
527     * avoid running _INI and _STA on everything, as well as dodging the final
528     * object init pass.
529     *
     * For these devices, we set ACPI_NO_DEVICE_INIT and ACPI_NO_OBJECT_INIT.
531     *
532     * XXX We should arrange for the object init pass after we have attached
533     *     all our child devices, but on many systems it works here.
534     */
535    flags = 0;
536    if (testenv("debug.acpi.avoid"))
537	flags = ACPI_NO_DEVICE_INIT | ACPI_NO_OBJECT_INIT;
538
539    /* Bring the hardware and basic handlers online. */
540    if (ACPI_FAILURE(status = AcpiEnableSubsystem(flags))) {
541	device_printf(dev, "Could not enable ACPI: %s\n",
542		      AcpiFormatException(status));
543	goto out;
544    }
545
546    /*
547     * Call the ECDT probe function to provide EC functionality before
548     * the namespace has been evaluated.
549     *
550     * XXX This happens before the sysresource devices have been probed and
551     * attached so its resources come from nexus0.  In practice, this isn't
552     * a problem but should be addressed eventually.
553     */
554    acpi_ec_ecdt_probe(dev);
555
556    /* Bring device objects and regions online. */
557    if (ACPI_FAILURE(status = AcpiInitializeObjects(flags))) {
558	device_printf(dev, "Could not initialize ACPI objects: %s\n",
559		      AcpiFormatException(status));
560	goto out;
561    }
562
563    /*
564     * Setup our sysctl tree.
565     *
     * XXX: This doesn't check whether any of these calls fail.
567     */
568    sysctl_ctx_init(&sc->acpi_sysctl_ctx);
569    sc->acpi_sysctl_tree = SYSCTL_ADD_NODE(&sc->acpi_sysctl_ctx,
570        SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, device_get_name(dev),
571	CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
572    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
573	OID_AUTO, "supported_sleep_state",
574	CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
575	0, 0, acpi_supported_sleep_state_sysctl, "A",
576	"List supported ACPI sleep states.");
577    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
578	OID_AUTO, "power_button_state",
579	CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
580	&sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A",
581	"Power button ACPI sleep state.");
582    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
583	OID_AUTO, "sleep_button_state",
584	CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
585	&sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A",
586	"Sleep button ACPI sleep state.");
587    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
588	OID_AUTO, "lid_switch_state",
589	CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
590	&sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A",
	"Lid ACPI sleep state. Set to S3 if you want to suspend your laptop when the lid is closed.");
592    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
593	OID_AUTO, "standby_state",
594	CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
595	&sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A", "");
596    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
597	OID_AUTO, "suspend_state",
598	CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
599	&sc->acpi_suspend_sx, 0, acpi_sleep_state_sysctl, "A", "");
600    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
601	OID_AUTO, "sleep_delay", CTLFLAG_RW, &sc->acpi_sleep_delay, 0,
602	"sleep delay in seconds");
603    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
604	OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0, "S4BIOS mode");
605    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
606	OID_AUTO, "verbose", CTLFLAG_RW, &sc->acpi_verbose, 0, "verbose mode");
607    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
608	OID_AUTO, "disable_on_reboot", CTLFLAG_RW,
609	&sc->acpi_do_disable, 0, "Disable ACPI when rebooting/halting system");
610    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
611	OID_AUTO, "handle_reboot", CTLFLAG_RW,
612	&sc->acpi_handle_reboot, 0, "Use ACPI Reset Register to reboot");
613
614    /*
615     * Default to 1 second before sleeping to give some machines time to
616     * stabilize.
617     */
618    sc->acpi_sleep_delay = 1;
619    if (bootverbose)
620	sc->acpi_verbose = 1;
621    if ((env = kern_getenv("hw.acpi.verbose")) != NULL) {
622	if (strcmp(env, "0") != 0)
623	    sc->acpi_verbose = 1;
624	freeenv(env);
625    }
626
627    /* Only enable reboot by default if the FADT says it is available. */
628    if (AcpiGbl_FADT.Flags & ACPI_FADT_RESET_REGISTER)
629	sc->acpi_handle_reboot = 1;
630
631#if !ACPI_REDUCED_HARDWARE
632    /* Only enable S4BIOS by default if the FACS says it is available. */
633    if (AcpiGbl_FACS != NULL && AcpiGbl_FACS->Flags & ACPI_FACS_S4_BIOS_PRESENT)
634	sc->acpi_s4bios = 1;
635#endif
636
637    /* Probe all supported sleep states. */
638    acpi_sleep_states[ACPI_STATE_S0] = TRUE;
639    for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
640	if (ACPI_SUCCESS(AcpiEvaluateObject(ACPI_ROOT_OBJECT,
641	    __DECONST(char *, AcpiGbl_SleepStateNames[state]), NULL, NULL)) &&
642	    ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB)))
643	    acpi_sleep_states[state] = TRUE;
644
645    /*
646     * Dispatch the default sleep state to devices.  The lid switch is set
647     * to UNKNOWN by default to avoid surprising users.
648     */
649    sc->acpi_power_button_sx = acpi_sleep_states[ACPI_STATE_S5] ?
650	ACPI_STATE_S5 : ACPI_STATE_UNKNOWN;
651    sc->acpi_lid_switch_sx = ACPI_STATE_UNKNOWN;
652    sc->acpi_standby_sx = acpi_sleep_states[ACPI_STATE_S1] ?
653	ACPI_STATE_S1 : ACPI_STATE_UNKNOWN;
654    sc->acpi_suspend_sx = acpi_sleep_states[ACPI_STATE_S3] ?
655	ACPI_STATE_S3 : ACPI_STATE_UNKNOWN;
656
657    /* Pick the first valid sleep state for the sleep button default. */
658    sc->acpi_sleep_button_sx = ACPI_STATE_UNKNOWN;
659    for (state = ACPI_STATE_S1; state <= ACPI_STATE_S4; state++)
660	if (acpi_sleep_states[state]) {
661	    sc->acpi_sleep_button_sx = state;
662	    break;
663	}
664
665    acpi_enable_fixed_events(sc);
666
667    /*
668     * Scan the namespace and attach/initialise children.
669     */
670
671    /* Register our shutdown handler. */
672    EVENTHANDLER_REGISTER(shutdown_final, acpi_shutdown_final, sc,
673	SHUTDOWN_PRI_LAST + 150);
674
675    /*
676     * Register our acpi event handlers.
     * XXX should be configurable, e.g. via a userland policy manager.
678     */
679    EVENTHANDLER_REGISTER(acpi_sleep_event, acpi_system_eventhandler_sleep,
680	sc, ACPI_EVENT_PRI_LAST);
681    EVENTHANDLER_REGISTER(acpi_wakeup_event, acpi_system_eventhandler_wakeup,
682	sc, ACPI_EVENT_PRI_LAST);
683
684    /* Flag our initial states. */
685    sc->acpi_enabled = TRUE;
686    sc->acpi_sstate = ACPI_STATE_S0;
687    sc->acpi_sleep_disabled = TRUE;
688
689    /* Create the control device */
690    sc->acpi_dev_t = make_dev(&acpi_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0664,
691			      "acpi");
692    sc->acpi_dev_t->si_drv1 = sc;
693
694    if ((error = acpi_machdep_init(dev)))
695	goto out;
696
697    /* Register ACPI again to pass the correct argument of pm_func. */
698    power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc);
699
700    acpi_platform_osc(dev);
701
702    if (!acpi_disabled("bus")) {
703	EVENTHANDLER_REGISTER(dev_lookup, acpi_lookup, NULL, 1000);
704	acpi_probe_children(dev);
705    }
706
707    /* Update all GPEs and enable runtime GPEs. */
708    status = AcpiUpdateAllGpes();
709    if (ACPI_FAILURE(status))
710	device_printf(dev, "Could not update all GPEs: %s\n",
711	    AcpiFormatException(status));
712
    /* Allow sleep requests after a while. */
714    callout_init_mtx(&acpi_sleep_timer, &acpi_mutex, 0);
715    callout_reset(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME,
716	acpi_sleep_enable, sc);
717
718    error = 0;
719
720 out:
721    return_VALUE (error);
722}
723
724static void
725acpi_set_power_children(device_t dev, int state)
726{
727	device_t child;
728	device_t *devlist;
729	int dstate, i, numdevs;
730
731	if (device_get_children(dev, &devlist, &numdevs) != 0)
732		return;
733
734	/*
735	 * Retrieve and set D-state for the sleep state if _SxD is present.
736	 * Skip children who aren't attached since they are handled separately.
737	 */
738	for (i = 0; i < numdevs; i++) {
739		child = devlist[i];
740		dstate = state;
741		if (device_is_attached(child) &&
742		    acpi_device_pwr_for_sleep(dev, child, &dstate) == 0)
743			acpi_set_powerstate(child, dstate);
744	}
745	free(devlist, M_TEMP);
746}
747
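/*
 * Suspend all children via the generic method and, on success, move the
 * attached children to the appropriate D-state for sleep (D3 by default).
 */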
748static int
749acpi_suspend(device_t dev)
750{
751    int error;
752
753    bus_topo_assert();
754
755    error = bus_generic_suspend(dev);
756    if (error == 0)
757	acpi_set_power_children(dev, ACPI_STATE_D3);
758
759    return (error);
760}
761
762static int
763acpi_resume(device_t dev)
764{
765
766    bus_topo_assert();
767
768    acpi_set_power_children(dev, ACPI_STATE_D0);
769
770    return (bus_generic_resume(dev));
771}
772
773static int
774acpi_shutdown(device_t dev)
775{
776
777    bus_topo_assert();
778
779    /* Allow children to shutdown first. */
780    bus_generic_shutdown(dev);
781
782    /*
     * Enable any GPEs that are able to power on the system (e.g., the RTC).
784     * Also, disable any that are not valid for this state (most).
785     */
786    acpi_wake_prep_walk(ACPI_STATE_S5);
787
788    return (0);
789}
790
791/*
792 * Handle a new device being added
793 */
794static device_t
795acpi_add_child(device_t bus, u_int order, const char *name, int unit)
796{
797    struct acpi_device	*ad;
798    device_t		child;
799
800    if ((ad = malloc(sizeof(*ad), M_ACPIDEV, M_NOWAIT | M_ZERO)) == NULL)
801	return (NULL);
802
803    resource_list_init(&ad->ad_rl);
804
805    child = device_add_child_ordered(bus, order, name, unit);
806    if (child != NULL)
807	device_set_ivars(child, ad);
808    else
809	free(ad, M_ACPIDEV);
810    return (child);
811}
812
813static int
814acpi_print_child(device_t bus, device_t child)
815{
816    struct acpi_device	 *adev = device_get_ivars(child);
817    struct resource_list *rl = &adev->ad_rl;
818    int retval = 0;
819
820    retval += bus_print_child_header(bus, child);
821    retval += resource_list_print_type(rl, "port",  SYS_RES_IOPORT, "%#jx");
822    retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#jx");
823    retval += resource_list_print_type(rl, "irq",   SYS_RES_IRQ,    "%jd");
824    retval += resource_list_print_type(rl, "drq",   SYS_RES_DRQ,    "%jd");
825    if (device_get_flags(child))
826	retval += printf(" flags %#x", device_get_flags(child));
827    retval += bus_print_child_domain(bus, child);
828    retval += bus_print_child_footer(bus, child);
829
830    return (retval);
831}
832
833/*
834 * If this device is an ACPI child but no one claimed it, attempt
835 * to power it off.  We'll power it back up when a driver is added.
836 *
837 * XXX Disabled for now since many necessary devices (like fdc and
838 * ATA) don't claim the devices we created for them but still expect
839 * them to be powered up.
840 */
841static void
842acpi_probe_nomatch(device_t bus, device_t child)
843{
844#ifdef ACPI_ENABLE_POWERDOWN_NODRIVER
845    acpi_set_powerstate(child, ACPI_STATE_D3);
846#endif
847}
848
849/*
850 * If a new driver has a chance to probe a child, first power it up.
851 *
852 * XXX Disabled for now (see acpi_probe_nomatch for details).
853 */
854static void
855acpi_driver_added(device_t dev, driver_t *driver)
856{
857    device_t child, *devlist;
858    int i, numdevs;
859
860    DEVICE_IDENTIFY(driver, dev);
861    if (device_get_children(dev, &devlist, &numdevs))
862	    return;
863    for (i = 0; i < numdevs; i++) {
864	child = devlist[i];
865	if (device_get_state(child) == DS_NOTPRESENT) {
866#ifdef ACPI_ENABLE_POWERDOWN_NODRIVER
867	    acpi_set_powerstate(child, ACPI_STATE_D0);
868	    if (device_probe_and_attach(child) != 0)
869		acpi_set_powerstate(child, ACPI_STATE_D3);
870#else
871	    device_probe_and_attach(child);
872#endif
873	}
874    }
875    free(devlist, M_TEMP);
876}
877
878/* Location hint for devctl(8) */
879static int
880acpi_child_location_method(device_t cbdev, device_t child, struct sbuf *sb)
881{
882    struct acpi_device *dinfo = device_get_ivars(child);
883    int pxm;
884
885    if (dinfo->ad_handle) {
886        sbuf_printf(sb, "handle=%s", acpi_name(dinfo->ad_handle));
887        if (ACPI_SUCCESS(acpi_GetInteger(dinfo->ad_handle, "_PXM", &pxm))) {
888            sbuf_printf(sb, " _PXM=%d", pxm);
889	}
890    }
891    return (0);
892}
893
894/* PnP information for devctl(8) */
895int
896acpi_pnpinfo(ACPI_HANDLE handle, struct sbuf *sb)
897{
898    ACPI_DEVICE_INFO *adinfo;
899
900    if (ACPI_FAILURE(AcpiGetObjectInfo(handle, &adinfo))) {
901	sbuf_printf(sb, "unknown");
902	return (0);
903    }
904
905    sbuf_printf(sb, "_HID=%s _UID=%lu _CID=%s",
906	(adinfo->Valid & ACPI_VALID_HID) ?
907	adinfo->HardwareId.String : "none",
908	(adinfo->Valid & ACPI_VALID_UID) ?
909	strtoul(adinfo->UniqueId.String, NULL, 10) : 0UL,
910	((adinfo->Valid & ACPI_VALID_CID) &&
911	 adinfo->CompatibleIdList.Count > 0) ?
912	adinfo->CompatibleIdList.Ids[0].String : "none");
913    AcpiOsFree(adinfo);
914
915    return (0);
916}
917
918static int
919acpi_child_pnpinfo_method(device_t cbdev, device_t child, struct sbuf *sb)
920{
921    struct acpi_device *dinfo = device_get_ivars(child);
922
923    return (acpi_pnpinfo(dinfo->ad_handle, sb));
924}
925
/*
 * Note: the check for the ACPI locator may be redundant.  However, this
 * routine is suitable both for busses whose only locator is ACPI and as a
 * building block for busses that have multiple locators to cope with.
 */
931int
932acpi_get_acpi_device_path(device_t bus, device_t child, const char *locator, struct sbuf *sb)
933{
934	if (strcmp(locator, BUS_LOCATOR_ACPI) == 0) {
935		ACPI_HANDLE *handle = acpi_get_handle(child);
936
937		if (handle != NULL)
938			sbuf_printf(sb, "%s", acpi_name(handle));
939		return (0);
940	}
941
942	return (bus_generic_get_device_path(bus, child, locator, sb));
943}
944
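/*
 * Build the device path for a child for devctl(8).  ACPI locators are
 * handled by acpi_get_acpi_device_path(); UEFI locators are encoded from
 * the child's _HID/_UID following the UEFI text device node conventions.
 */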
945static int
946acpi_get_device_path(device_t bus, device_t child, const char *locator, struct sbuf *sb)
947{
948	struct acpi_device *dinfo = device_get_ivars(child);
949
950	if (strcmp(locator, BUS_LOCATOR_ACPI) == 0)
951		return (acpi_get_acpi_device_path(bus, child, locator, sb));
952
953	if (strcmp(locator, BUS_LOCATOR_UEFI) == 0) {
954		ACPI_DEVICE_INFO *adinfo;
955		if (!ACPI_FAILURE(AcpiGetObjectInfo(dinfo->ad_handle, &adinfo)) &&
956		    dinfo->ad_handle != 0 && (adinfo->Valid & ACPI_VALID_HID)) {
957			const char *hid = adinfo->HardwareId.String;
958			u_long uid = (adinfo->Valid & ACPI_VALID_UID) ?
959			    strtoul(adinfo->UniqueId.String, NULL, 10) : 0UL;
960			u_long hidval;
961
			/*
			 * In UEFI Standard Version 2.6, Section 9.6.1.6 Text
			 * Device Node Reference, there's an insanely long
			 * Table 98.  This implements the relevant bits from
			 * that table.  Newer versions do not appear to require
			 * anything new.  The EDK2 firmware presents both
			 * PciRoot and PcieRoot as PciRoot; follow the EDK2
			 * convention.
			 */
970			if (strncmp("PNP", hid, 3) != 0)
971				goto nomatch;
972			hidval = strtoul(hid + 3, NULL, 16);
973			switch (hidval) {
974			case 0x0301:
975				sbuf_printf(sb, "Keyboard(0x%lx)", uid);
976				break;
977			case 0x0401:
978				sbuf_printf(sb, "ParallelPort(0x%lx)", uid);
979				break;
980			case 0x0501:
981				sbuf_printf(sb, "Serial(0x%lx)", uid);
982				break;
983			case 0x0604:
984				sbuf_printf(sb, "Floppy(0x%lx)", uid);
985				break;
986			case 0x0a03:
987			case 0x0a08:
988				sbuf_printf(sb, "PciRoot(0x%lx)", uid);
989				break;
990			default: /* Everything else gets a generic encode */
991			nomatch:
992				sbuf_printf(sb, "Acpi(%s,0x%lx)", hid, uid);
993				break;
994			}
995		}
996		/* Not handled: AcpiAdr... unsure how to know it's one */
997	}
998
999	/* For the rest, punt to the default handler */
1000	return (bus_generic_get_device_path(bus, child, locator, sb));
1001}
1002
1003/*
1004 * Handle device deletion.
1005 */
1006static void
1007acpi_child_deleted(device_t dev, device_t child)
1008{
1009    struct acpi_device *dinfo = device_get_ivars(child);
1010
1011    if (acpi_get_device(dinfo->ad_handle) == child)
1012	AcpiDetachData(dinfo->ad_handle, acpi_fake_objhandler);
1013}
1014
1015/*
1016 * Handle per-device ivars
1017 */
1018static int
1019acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
1020{
1021    struct acpi_device	*ad;
1022
1023    if ((ad = device_get_ivars(child)) == NULL) {
1024	device_printf(child, "device has no ivars\n");
1025	return (ENOENT);
1026    }
1027
1028    /* ACPI and ISA compatibility ivars */
1029    switch(index) {
1030    case ACPI_IVAR_HANDLE:
1031	*(ACPI_HANDLE *)result = ad->ad_handle;
1032	break;
1033    case ACPI_IVAR_PRIVATE:
1034	*(void **)result = ad->ad_private;
1035	break;
1036    case ACPI_IVAR_FLAGS:
1037	*(int *)result = ad->ad_flags;
1038	break;
1039    case ISA_IVAR_VENDORID:
1040    case ISA_IVAR_SERIAL:
1041    case ISA_IVAR_COMPATID:
1042	*(int *)result = -1;
1043	break;
1044    case ISA_IVAR_LOGICALID:
1045	*(int *)result = acpi_isa_get_logicalid(child);
1046	break;
1047    case PCI_IVAR_CLASS:
1048	*(uint8_t*)result = (ad->ad_cls_class >> 16) & 0xff;
1049	break;
1050    case PCI_IVAR_SUBCLASS:
1051	*(uint8_t*)result = (ad->ad_cls_class >> 8) & 0xff;
1052	break;
1053    case PCI_IVAR_PROGIF:
1054	*(uint8_t*)result = (ad->ad_cls_class >> 0) & 0xff;
1055	break;
1056    default:
1057	return (ENOENT);
1058    }
1059
1060    return (0);
1061}
1062
1063static int
1064acpi_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
1065{
1066    struct acpi_device	*ad;
1067
1068    if ((ad = device_get_ivars(child)) == NULL) {
1069	device_printf(child, "device has no ivars\n");
1070	return (ENOENT);
1071    }
1072
1073    switch(index) {
1074    case ACPI_IVAR_HANDLE:
1075	ad->ad_handle = (ACPI_HANDLE)value;
1076	break;
1077    case ACPI_IVAR_PRIVATE:
1078	ad->ad_private = (void *)value;
1079	break;
1080    case ACPI_IVAR_FLAGS:
1081	ad->ad_flags = (int)value;
1082	break;
1083    default:
1084	panic("bad ivar write request (%d)", index);
1085	return (ENOENT);
1086    }
1087
1088    return (0);
1089}
1090
1091/*
1092 * Handle child resource allocation/removal
1093 */
1094static struct resource_list *
1095acpi_get_rlist(device_t dev, device_t child)
1096{
1097    struct acpi_device		*ad;
1098
1099    ad = device_get_ivars(child);
1100    return (&ad->ad_rl);
1101}
1102
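/*
 * Return true if the device's resource list contains a resource of the
 * given type whose range includes 'value'.
 */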
1103static int
1104acpi_match_resource_hint(device_t dev, int type, long value)
1105{
1106    struct acpi_device *ad = device_get_ivars(dev);
1107    struct resource_list *rl = &ad->ad_rl;
1108    struct resource_list_entry *rle;
1109
1110    STAILQ_FOREACH(rle, rl, link) {
1111	if (rle->type != type)
1112	    continue;
1113	if (rle->start <= value && rle->end >= value)
1114	    return (1);
1115    }
1116    return (0);
1117}
1118
1119/*
1120 * Does this device match because the resources match?
1121 */
1122static bool
1123acpi_hint_device_matches_resources(device_t child, const char *name,
1124    int unit)
1125{
1126	long value;
1127	bool matches;
1128
1129	/*
1130	 * Check for matching resources.  We must have at least one match.
1131	 * Since I/O and memory resources cannot be shared, if we get a
1132	 * match on either of those, ignore any mismatches in IRQs or DRQs.
1133	 *
1134	 * XXX: We may want to revisit this to be more lenient and wire
1135	 * as long as it gets one match.
1136	 */
1137	matches = false;
1138	if (resource_long_value(name, unit, "port", &value) == 0) {
1139		/*
1140		 * Floppy drive controllers are notorious for having a
1141		 * wide variety of resources not all of which include the
1142		 * first port that is specified by the hint (typically
1143		 * 0x3f0) (see the comment above fdc_isa_alloc_resources()
1144		 * in fdc_isa.c).  However, they do all seem to include
1145		 * port + 2 (e.g. 0x3f2) so for a floppy device, look for
1146		 * 'value + 2' in the port resources instead of the hint
1147		 * value.
1148		 */
1149		if (strcmp(name, "fdc") == 0)
1150			value += 2;
1151		if (acpi_match_resource_hint(child, SYS_RES_IOPORT, value))
1152			matches = true;
1153		else
1154			return false;
1155	}
1156	if (resource_long_value(name, unit, "maddr", &value) == 0) {
1157		if (acpi_match_resource_hint(child, SYS_RES_MEMORY, value))
1158			matches = true;
1159		else
1160			return false;
1161	}
1162
	/*
	 * If either the I/O address or the memory address matched, assume
	 * this device matches and that any mismatch in other resources will
	 * be resolved by silently ignoring those other resources.  Otherwise
	 * all further resources must match.
	 */
1169	if (matches) {
1170		return (true);
1171	}
1172	if (resource_long_value(name, unit, "irq", &value) == 0) {
1173		if (acpi_match_resource_hint(child, SYS_RES_IRQ, value))
1174			matches = true;
1175		else
1176			return false;
1177	}
1178	if (resource_long_value(name, unit, "drq", &value) == 0) {
1179		if (acpi_match_resource_hint(child, SYS_RES_DRQ, value))
1180			matches = true;
1181		else
1182			return false;
1183	}
1184	return matches;
}

/*
 * Wire device unit numbers based on resource matches in hints.
 */
1191static void
1192acpi_hint_device_unit(device_t acdev, device_t child, const char *name,
1193    int *unitp)
1194{
1195    device_location_cache_t *cache;
1196    const char *s;
1197    int line, unit;
1198    bool matches;
1199
1200    /*
1201     * Iterate over all the hints for the devices with the specified
     * name to see if one's resources are a subset of this device's resources.
1203     */
1204    line = 0;
1205    cache = dev_wired_cache_init();
1206    while (resource_find_dev(&line, name, &unit, "at", NULL) == 0) {
1207	/* Must have an "at" for acpi or isa. */
1208	resource_string_value(name, unit, "at", &s);
1209	matches = false;
1210	if (strcmp(s, "acpi0") == 0 || strcmp(s, "acpi") == 0 ||
1211	    strcmp(s, "isa0") == 0 || strcmp(s, "isa") == 0)
1212	    matches = acpi_hint_device_matches_resources(child, name, unit);
1213	else
1214	    matches = dev_wired_cache_match(cache, child, s);
1215
1216	if (matches) {
1217	    /* We have a winner! */
1218	    *unitp = unit;
1219	    break;
1220	}
1221    }
1222    dev_wired_cache_fini(cache);
1223}
1224
1225/*
1226 * Fetch the NUMA domain for a device by mapping the value returned by
1227 * _PXM to a NUMA domain.  If the device does not have a _PXM method,
1228 * -2 is returned.  If any other error occurs, -1 is returned.
1229 */
1230static int
1231acpi_parse_pxm(device_t dev)
1232{
1233#ifdef NUMA
1234#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
1235	ACPI_HANDLE handle;
1236	ACPI_STATUS status;
1237	int pxm;
1238
1239	handle = acpi_get_handle(dev);
1240	if (handle == NULL)
1241		return (-2);
1242	status = acpi_GetInteger(handle, "_PXM", &pxm);
1243	if (ACPI_SUCCESS(status))
1244		return (acpi_map_pxm_to_vm_domainid(pxm));
1245	if (status == AE_NOT_FOUND)
1246		return (-2);
1247#endif
1248#endif
1249	return (-1);
1250}
1251
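/*
 * Resolve bus_get_cpus() requests using the child's _PXM-derived NUMA
 * domain when available, falling back to the generic bus method otherwise.
 */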
1252int
1253acpi_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize,
1254    cpuset_t *cpuset)
1255{
1256	int d, error;
1257
1258	d = acpi_parse_pxm(child);
1259	if (d < 0)
1260		return (bus_generic_get_cpus(dev, child, op, setsize, cpuset));
1261
1262	switch (op) {
1263	case LOCAL_CPUS:
1264		if (setsize != sizeof(cpuset_t))
1265			return (EINVAL);
1266		*cpuset = cpuset_domain[d];
1267		return (0);
1268	case INTR_CPUS:
1269		error = bus_generic_get_cpus(dev, child, op, setsize, cpuset);
1270		if (error != 0)
1271			return (error);
1272		if (setsize != sizeof(cpuset_t))
1273			return (EINVAL);
1274		CPU_AND(cpuset, cpuset, &cpuset_domain[d]);
1275		return (0);
1276	default:
1277		return (bus_generic_get_cpus(dev, child, op, setsize, cpuset));
1278	}
1279}
1280
1281/*
1282 * Fetch the NUMA domain for the given device 'dev'.
1283 *
1284 * If a device has a _PXM method, map that to a NUMA domain.
1285 * Otherwise, pass the request up to the parent.
1286 * If there's no matching domain or the domain cannot be
1287 * determined, return ENOENT.
1288 */
1289int
1290acpi_get_domain(device_t dev, device_t child, int *domain)
1291{
1292	int d;
1293
1294	d = acpi_parse_pxm(child);
1295	if (d >= 0) {
1296		*domain = d;
1297		return (0);
1298	}
1299	if (d == -1)
1300		return (ENOENT);
1301
1302	/* No _PXM node; go up a level */
1303	return (bus_generic_get_domain(dev, child, domain));
1304}
1305
1306static struct rman *
1307acpi_get_rman(device_t bus, int type, u_int flags)
1308{
1309	/* Only memory and IO resources are managed. */
1310	switch (type) {
1311	case SYS_RES_IOPORT:
1312		return (&acpi_rman_io);
1313	case SYS_RES_MEMORY:
1314		return (&acpi_rman_mem);
1315	default:
1316		return (NULL);
1317	}
1318}
1319
1320/*
1321 * Pre-allocate/manage all memory and IO resources.  Since rman can't handle
1322 * duplicates, we merge any in the sysresource attach routine.
1323 */
1324static int
1325acpi_sysres_alloc(device_t dev)
1326{
1327    struct acpi_softc *sc = device_get_softc(dev);
1328    struct resource *res;
1329    struct resource_list_entry *rle;
1330    struct rman *rm;
1331    device_t *children;
1332    int child_count, i;
1333
1334    /*
1335     * Probe/attach any sysresource devices.  This would be unnecessary if we
1336     * had multi-pass probe/attach.
1337     */
1338    if (device_get_children(dev, &children, &child_count) != 0)
1339	return (ENXIO);
1340    for (i = 0; i < child_count; i++) {
1341	if (ACPI_ID_PROBE(dev, children[i], sysres_ids, NULL) <= 0)
1342	    device_probe_and_attach(children[i]);
1343    }
1344    free(children, M_TEMP);
1345
1346    STAILQ_FOREACH(rle, &sc->sysres_rl, link) {
1347	if (rle->res != NULL) {
1348	    device_printf(dev, "duplicate resource for %jx\n", rle->start);
1349	    continue;
1350	}
1351
1352	/* Only memory and IO resources are valid here. */
1353	rm = acpi_get_rman(dev, rle->type, 0);
1354	if (rm == NULL)
1355	    continue;
1356
1357	/* Pre-allocate resource and add to our rman pool. */
1358	res = bus_alloc_resource(dev, rle->type,
1359	    &rle->rid, rle->start, rle->start + rle->count - 1, rle->count,
1360	    RF_ACTIVE | RF_UNMAPPED);
1361	if (res != NULL) {
1362	    rman_manage_region(rm, rman_get_start(res), rman_get_end(res));
1363	    rle->res = res;
1364	} else if (bootverbose)
1365	    device_printf(dev, "reservation of %jx, %jx (%d) failed\n",
1366		rle->start, rle->count, rle->type);
1367    }
1368    return (0);
1369}
1370
1371/*
1372 * Reserve declared resources for active devices found during the
1373 * namespace scan once the boot-time attach of devices has completed.
1374 *
1375 * Ideally reserving firmware-assigned resources would work in a
1376 * depth-first traversal of the device namespace, but this is
1377 * complicated.  In particular, not all resources are enumerated by
1378 * ACPI (e.g. PCI bridges and devices enumerate their resources via
 * other means).  Some systems also enumerate devices via ACPI behind
 * PCI bridges but without a matching PCI device_t enumerated via PCI
 * bus scanning; those device_t's end up as direct children of acpi0.
 * Doing this scan late is not ideal, but works for now.
1383 */
1384static void
1385acpi_reserve_resources(device_t dev)
1386{
1387    struct resource_list_entry *rle;
1388    struct resource_list *rl;
1389    struct acpi_device *ad;
1390    device_t *children;
1391    int child_count, i;
1392
1393    if (device_get_children(dev, &children, &child_count) != 0)
1394	return;
1395    for (i = 0; i < child_count; i++) {
1396	ad = device_get_ivars(children[i]);
1397	rl = &ad->ad_rl;
1398
1399	/* Don't reserve system resources. */
1400	if (ACPI_ID_PROBE(dev, children[i], sysres_ids, NULL) <= 0)
1401	    continue;
1402
1403	STAILQ_FOREACH(rle, rl, link) {
1404	    /*
1405	     * Don't reserve IRQ resources.  There are many sticky things
1406	     * to get right otherwise (e.g. IRQs for psm, atkbd, and HPET
1407	     * when using legacy routing).
1408	     */
1409	    if (rle->type == SYS_RES_IRQ)
1410		continue;
1411
1412	    /*
1413	     * Don't reserve the resource if it is already allocated.
1414	     * The acpi_ec(4) driver can allocate its resources early
1415	     * if ECDT is present.
1416	     */
1417	    if (rle->res != NULL)
1418		continue;
1419
1420	    /*
1421	     * Try to reserve the resource from our parent.  If this
1422	     * fails because the resource is a system resource, just
1423	     * let it be.  The resource range is already reserved so
1424	     * that other devices will not use it.  If the driver
1425	     * needs to allocate the resource, then
1426	     * acpi_alloc_resource() will sub-alloc from the system
1427	     * resource.
1428	     */
1429	    resource_list_reserve(rl, dev, children[i], rle->type, &rle->rid,
1430		rle->start, rle->end, rle->count, 0);
1431	}
1432    }
1433    free(children, M_TEMP);
1434}
1435
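/*
 * Add (or update) a resource range in the child's resource list, dropping
 * any existing reservation for the rid first.  Fails if the resource is
 * currently allocated by the child.
 */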
1436static int
1437acpi_set_resource(device_t dev, device_t child, int type, int rid,
1438    rman_res_t start, rman_res_t count)
1439{
1440    struct acpi_device *ad = device_get_ivars(child);
1441    struct resource_list *rl = &ad->ad_rl;
1442    rman_res_t end;
1443
1444#ifdef INTRNG
1445    /* map with default for now */
1446    if (type == SYS_RES_IRQ)
1447	start = (rman_res_t)acpi_map_intr(child, (u_int)start,
1448			acpi_get_handle(child));
1449#endif
1450
1451    /* If the resource is already allocated, fail. */
1452    if (resource_list_busy(rl, type, rid))
1453	return (EBUSY);
1454
1455    /* If the resource is already reserved, release it. */
1456    if (resource_list_reserved(rl, type, rid))
1457	resource_list_unreserve(rl, dev, child, type, rid);
1458
1459    /* Add the resource. */
1460    end = (start + count - 1);
1461    resource_list_add(rl, type, rid, start, end, count);
1462    return (0);
1463}
1464
1465static struct resource *
1466acpi_alloc_resource(device_t bus, device_t child, int type, int *rid,
1467    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
1468{
1469#ifndef INTRNG
1470    ACPI_RESOURCE ares;
1471#endif
1472    struct acpi_device *ad;
1473    struct resource_list_entry *rle;
1474    struct resource_list *rl;
1475    struct resource *res;
1476    int isdefault = RMAN_IS_DEFAULT_RANGE(start, end);
1477
1478    /*
1479     * First attempt at allocating the resource.  For direct children,
1480     * use resource_list_alloc() to handle reserved resources.  For
1481     * other devices, pass the request up to our parent.
1482     */
1483    if (bus == device_get_parent(child)) {
1484	ad = device_get_ivars(child);
1485	rl = &ad->ad_rl;
1486
1487	/*
1488	 * Simulate the behavior of the ISA bus for direct children
1489	 * devices.  That is, if a non-default range is specified for
1490	 * a resource that doesn't exist, use bus_set_resource() to
1491	 * add the resource before allocating it.  Note that these
1492	 * resources will not be reserved.
1493	 */
1494	if (!isdefault && resource_list_find(rl, type, *rid) == NULL)
1495		resource_list_add(rl, type, *rid, start, end, count);
1496	res = resource_list_alloc(rl, bus, child, type, rid, start, end, count,
1497	    flags);
1498#ifndef INTRNG
1499	if (res != NULL && type == SYS_RES_IRQ) {
1500	    /*
1501	     * Since bus_config_intr() takes immediate effect, we cannot
1502	     * configure the interrupt associated with a device when we
1503	     * parse the resources but have to defer it until a driver
1504	     * actually allocates the interrupt via bus_alloc_resource().
1505	     *
1506	     * XXX: Should we handle the lookup failing?
1507	     */
1508	    if (ACPI_SUCCESS(acpi_lookup_irq_resource(child, *rid, res, &ares)))
1509		acpi_config_intr(child, &ares);
1510	}
1511#endif
1512
1513	/*
1514	 * If this is an allocation of the "default" range for a given
1515	 * RID, fetch the exact bounds for this resource from the
1516	 * resource list entry to try to allocate the range from the
1517	 * system resource regions.
1518	 */
1519	if (res == NULL && isdefault) {
1520	    rle = resource_list_find(rl, type, *rid);
1521	    if (rle != NULL) {
1522		start = rle->start;
1523		end = rle->end;
1524		count = rle->count;
1525	    }
1526	}
1527    } else
1528	res = bus_generic_alloc_resource(bus, child, type, rid,
1529	    start, end, count, flags);
1530
1531    /*
1532     * If the first attempt failed and this is an allocation of a
1533     * specific range, try to satisfy the request via a suballocation
1534     * from our system resource regions.
1535     */
1536    if (res == NULL && start + count - 1 == end)
1537	res = bus_generic_rman_alloc_resource(bus, child, type, rid, start, end,
1538	    count, flags);
1539    return (res);
1540}
1541
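/*
 * Return true if 'r' was suballocated from one of our local rman pools
 * (acpi_rman_io or acpi_rman_mem).
 */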
1542static bool
1543acpi_is_resource_managed(device_t bus, struct resource *r)
1544{
1545	struct rman *rm;
1546
1547	rm = acpi_get_rman(bus, rman_get_type(r), rman_get_flags(r));
1548	if (rm == NULL)
1549		return (false);
1550	return (rman_is_region_manager(r, rm));
1551}
1552
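/*
 * Find the system resource in sysres_rl whose range contains the
 * suballocated resource 'r'.
 */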
1553static struct resource *
1554acpi_managed_resource(device_t bus, struct resource *r)
1555{
1556	struct acpi_softc *sc = device_get_softc(bus);
1557	struct resource_list_entry *rle;
1558
1559	KASSERT(acpi_is_resource_managed(bus, r),
1560	    ("resource %p is not suballocated", r));
1561
1562	STAILQ_FOREACH(rle, &sc->sysres_rl, link) {
1563		if (rle->type != rman_get_type(r) || rle->res == NULL)
1564			continue;
1565		if (rman_get_start(r) >= rman_get_start(rle->res) &&
1566		    rman_get_end(r) <= rman_get_end(rle->res))
1567			return (rle->res);
1568	}
1569	return (NULL);
1570}
1571
1572static int
1573acpi_adjust_resource(device_t bus, device_t child, struct resource *r,
1574    rman_res_t start, rman_res_t end)
1575{
1576
1577    if (acpi_is_resource_managed(bus, r))
1578	return (rman_adjust_resource(r, start, end));
1579    return (bus_generic_adjust_resource(bus, child, r, start, end));
1580}
1581
1582static int
1583acpi_release_resource(device_t bus, device_t child, struct resource *r)
1584{
1585    /*
1586     * If this resource belongs to one of our internal managers,
1587     * deactivate it and release it to the local pool.
1588     */
1589    if (acpi_is_resource_managed(bus, r))
1590	return (bus_generic_rman_release_resource(bus, child, r));
1591
1592    return (bus_generic_rl_release_resource(bus, child, r));
1593}
1594
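/*
 * Delete a resource from the child's resource list, refusing to do so if
 * the child still has the resource allocated.
 */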
1595static void
1596acpi_delete_resource(device_t bus, device_t child, int type, int rid)
1597{
1598    struct resource_list *rl;
1599
1600    rl = acpi_get_rlist(bus, child);
1601    if (resource_list_busy(rl, type, rid)) {
1602	device_printf(bus, "delete_resource: Resource still owned by child"
1603	    " (type=%d, rid=%d)\n", type, rid);
1604	return;
1605    }
1606    if (resource_list_reserved(rl, type, rid))
1607	resource_list_unreserve(rl, bus, child, type, rid);
1608    resource_list_delete(rl, type, rid);
1609}
1610
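/*
 * Activate and deactivate requests for suballocated resources are handled
 * by the local rman helpers; everything else goes to the generic methods.
 */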
1611static int
1612acpi_activate_resource(device_t bus, device_t child, struct resource *r)
1613{
1614	if (acpi_is_resource_managed(bus, r))
1615		return (bus_generic_rman_activate_resource(bus, child, r));
1616	return (bus_generic_activate_resource(bus, child, r));
1617}
1618
1619static int
1620acpi_deactivate_resource(device_t bus, device_t child, struct resource *r)
1621{
1622	if (acpi_is_resource_managed(bus, r))
1623		return (bus_generic_rman_deactivate_resource(bus, child, r));
1624	return (bus_generic_deactivate_resource(bus, child, r));
1625}
1626
1627static int
1628acpi_map_resource(device_t bus, device_t child, struct resource *r,
1629    struct resource_map_request *argsp, struct resource_map *map)
1630{
1631	struct resource_map_request args;
1632	struct resource *sysres;
1633	rman_res_t length, start;
1634	int error;
1635
1636	if (!acpi_is_resource_managed(bus, r))
1637		return (bus_generic_map_resource(bus, child, r, argsp, map));
1638
1639	/* Resources must be active to be mapped. */
1640	if (!(rman_get_flags(r) & RF_ACTIVE))
1641		return (ENXIO);
1642
1643	resource_init_map_request(&args);
1644	error = resource_validate_map_request(r, argsp, &args, &start, &length);
1645	if (error)
1646		return (error);
1647
1648	sysres = acpi_managed_resource(bus, r);
1649	if (sysres == NULL)
1650		return (ENOENT);
1651
1652	args.offset = start - rman_get_start(sysres);
1653	args.length = length;
1654	return (bus_map_resource(bus, sysres, &args, map));
1655}
1656
1657static int
1658acpi_unmap_resource(device_t bus, device_t child, struct resource *r,
1659    struct resource_map *map)
1660{
1661	struct resource *sysres;
1662
1663	if (!acpi_is_resource_managed(bus, r))
1664		return (bus_generic_unmap_resource(bus, child, r, map));
1665
1666	sysres = acpi_managed_resource(bus, r);
1667	if (sysres == NULL)
1668		return (ENOENT);
1669	return (bus_unmap_resource(bus, sysres, map));
1670}
1671
1672/* Allocate an IO port or memory resource, given its GAS. */
1673int
1674acpi_bus_alloc_gas(device_t dev, int *type, int *rid, ACPI_GENERIC_ADDRESS *gas,
1675    struct resource **res, u_int flags)
1676{
1677    int error, res_type;
1678
1679    error = ENOMEM;
1680    if (type == NULL || rid == NULL || gas == NULL || res == NULL)
1681	return (EINVAL);
1682
1683    /* We only support memory and IO spaces. */
1684    switch (gas->SpaceId) {
1685    case ACPI_ADR_SPACE_SYSTEM_MEMORY:
1686	res_type = SYS_RES_MEMORY;
1687	break;
1688    case ACPI_ADR_SPACE_SYSTEM_IO:
1689	res_type = SYS_RES_IOPORT;
1690	break;
1691    default:
1692	return (EOPNOTSUPP);
1693    }
1694
1695    /*
1696     * If the register width is less than 8, assume the BIOS author means
1697     * it is a bit field and just allocate a byte.
1698     */
1699    if (gas->BitWidth && gas->BitWidth < 8)
1700	gas->BitWidth = 8;
1701
1702    /* Validate the address after we're sure we support the space. */
1703    if (gas->Address == 0 || gas->BitWidth == 0)
1704	return (EINVAL);
1705
1706    bus_set_resource(dev, res_type, *rid, gas->Address,
1707	gas->BitWidth / 8);
1708    *res = bus_alloc_resource_any(dev, res_type, rid, RF_ACTIVE | flags);
1709    if (*res != NULL) {
1710	*type = res_type;
1711	error = 0;
1712    } else
1713	bus_delete_resource(dev, res_type, *rid);
1714
1715    return (error);
1716}
1717
1718/* Probe _HID and _CID for compatible ISA PNP ids. */
1719static uint32_t
1720acpi_isa_get_logicalid(device_t dev)
1721{
1722    ACPI_DEVICE_INFO	*devinfo;
1723    ACPI_HANDLE		h;
1724    uint32_t		pnpid;
1725
1726    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
1727
1728    /* Fetch and validate the HID. */
1729    if ((h = acpi_get_handle(dev)) == NULL ||
1730	ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
1731	return_VALUE (0);
1732
1733    pnpid = (devinfo->Valid & ACPI_VALID_HID) != 0 &&
1734	devinfo->HardwareId.Length >= ACPI_EISAID_STRING_SIZE ?
1735	PNP_EISAID(devinfo->HardwareId.String) : 0;
1736    AcpiOsFree(devinfo);
1737
1738    return_VALUE (pnpid);
1739}
1740
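/* Probe _CID for compatible ISA PNP ids, returning how many were copied. */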
1741static int
1742acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count)
1743{
1744    ACPI_DEVICE_INFO	*devinfo;
1745    ACPI_PNP_DEVICE_ID	*ids;
1746    ACPI_HANDLE		h;
1747    uint32_t		*pnpid;
1748    int			i, valid;
1749
1750    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
1751
1752    pnpid = cids;
1753
1754    /* Fetch and validate the CID */
1755    if ((h = acpi_get_handle(dev)) == NULL ||
1756	ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
1757	return_VALUE (0);
1758
1759    if ((devinfo->Valid & ACPI_VALID_CID) == 0) {
1760	AcpiOsFree(devinfo);
1761	return_VALUE (0);
1762    }
1763
1764    if (devinfo->CompatibleIdList.Count < count)
1765	count = devinfo->CompatibleIdList.Count;
1766    ids = devinfo->CompatibleIdList.Ids;
1767    for (i = 0, valid = 0; i < count; i++)
1768	if (ids[i].Length >= ACPI_EISAID_STRING_SIZE &&
1769	    strncmp(ids[i].String, "PNP", 3) == 0) {
1770	    *pnpid++ = PNP_EISAID(ids[i].String);
1771	    valid++;
1772	}
1773    AcpiOsFree(devinfo);
1774
1775    return_VALUE (valid);
1776}
1777
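/*
 * Match a device's _HID or _CID against a NULL-terminated array of ids.
 * A _HID match probes at default priority, a _CID match at low priority.
 */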
1778static int
1779acpi_device_id_probe(device_t bus, device_t dev, char **ids, char **match)
1780{
1781    ACPI_HANDLE h;
1782    ACPI_OBJECT_TYPE t;
1783    int rv;
1784    int i;
1785
1786    h = acpi_get_handle(dev);
1787    if (ids == NULL || h == NULL)
1788	return (ENXIO);
1789    t = acpi_get_type(dev);
1790    if (t != ACPI_TYPE_DEVICE && t != ACPI_TYPE_PROCESSOR)
1791	return (ENXIO);
1792
1793    /* Try to match one of the array of IDs with a HID or CID. */
1794    for (i = 0; ids[i] != NULL; i++) {
1795	rv = acpi_MatchHid(h, ids[i]);
1796	if (rv == ACPI_MATCHHID_NOMATCH)
1797	    continue;
1798
1799	if (match != NULL) {
1800	    *match = ids[i];
1801	}
1802	return ((rv == ACPI_MATCHHID_HID)?
1803		    BUS_PROBE_DEFAULT : BUS_PROBE_LOW_PRIORITY);
1804    }
1805    return (ENXIO);
1806}
1807
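/*
 * Evaluate an ACPI object on behalf of a child device, relative to its
 * handle (or to the namespace root if no device is given).
 */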
1808static ACPI_STATUS
1809acpi_device_eval_obj(device_t bus, device_t dev, ACPI_STRING pathname,
1810    ACPI_OBJECT_LIST *parameters, ACPI_BUFFER *ret)
1811{
1812    ACPI_HANDLE h;
1813
1814    if (dev == NULL)
1815	h = ACPI_ROOT_OBJECT;
1816    else if ((h = acpi_get_handle(dev)) == NULL)
1817	return (AE_BAD_PARAMETER);
1818    return (AcpiEvaluateObject(h, pathname, parameters, ret));
1819}
1820
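/*
 * Look up a device property by name in the _DSD device properties package,
 * evaluating and caching _DSD on first use.
 */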
1821static ACPI_STATUS
1822acpi_device_get_prop(device_t bus, device_t dev, ACPI_STRING propname,
1823    const ACPI_OBJECT **value)
1824{
1825	const ACPI_OBJECT *pkg, *name, *val;
1826	struct acpi_device *ad;
1827	ACPI_STATUS status;
1828	int i;
1829
1830	ad = device_get_ivars(dev);
1831
1832	if (ad == NULL || propname == NULL)
1833		return (AE_BAD_PARAMETER);
1834	if (ad->dsd_pkg == NULL) {
1835		if (ad->dsd.Pointer == NULL) {
1836			status = acpi_find_dsd(ad);
1837			if (ACPI_FAILURE(status))
1838				return (status);
1839		} else {
1840			return (AE_NOT_FOUND);
1841		}
1842	}
1843
1844	for (i = 0; i < ad->dsd_pkg->Package.Count; i ++) {
1845		pkg = &ad->dsd_pkg->Package.Elements[i];
1846		if (pkg->Type != ACPI_TYPE_PACKAGE || pkg->Package.Count != 2)
1847			continue;
1848
1849		name = &pkg->Package.Elements[0];
1850		val = &pkg->Package.Elements[1];
1851		if (name->Type != ACPI_TYPE_STRING)
1852			continue;
1853		if (strncmp(propname, name->String.Pointer, name->String.Length) == 0) {
1854			if (value != NULL)
1855				*value = val;
1856
1857			return (AE_OK);
1858		}
1859	}
1860
1861	return (AE_NOT_FOUND);
1862}
1863
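/*
 * Evaluate _DSD and cache its properties package if the package is tagged
 * with the expected device-properties UUID.
 */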
1864static ACPI_STATUS
1865acpi_find_dsd(struct acpi_device *ad)
1866{
1867	const ACPI_OBJECT *dsd, *guid, *pkg;
1868	ACPI_STATUS status;
1869
1870	ad->dsd.Length = ACPI_ALLOCATE_BUFFER;
1871	ad->dsd.Pointer = NULL;
1872	ad->dsd_pkg = NULL;
1873
1874	status = AcpiEvaluateObject(ad->ad_handle, "_DSD", NULL, &ad->dsd);
1875	if (ACPI_FAILURE(status))
1876		return (status);
1877
1878	dsd = ad->dsd.Pointer;
1879	guid = &dsd->Package.Elements[0];
1880	pkg = &dsd->Package.Elements[1];
1881
1882	if (guid->Type != ACPI_TYPE_BUFFER || pkg->Type != ACPI_TYPE_PACKAGE ||
1883		guid->Buffer.Length != sizeof(acpi_dsd_uuid))
1884		return (AE_NOT_FOUND);
1885	if (memcmp(guid->Buffer.Pointer, &acpi_dsd_uuid,
1886		sizeof(acpi_dsd_uuid)) == 0) {
1888		ad->dsd_pkg = pkg;
1889		return (AE_OK);
1890	}
1891
1892	return (AE_NOT_FOUND);
1893}
1894
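/*
 * Decode a single-element package containing an object reference and return
 * it to the caller as an ACPI_HANDLE property value.
 */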
1895static ssize_t
1896acpi_bus_get_prop_handle(const ACPI_OBJECT *hobj, void *propvalue, size_t size)
1897{
1898	ACPI_OBJECT *pobj;
1899	ACPI_HANDLE h;
1900
1901	if (hobj->Type != ACPI_TYPE_PACKAGE)
1902		goto err;
1903	if (hobj->Package.Count != 1)
1904		goto err;
1905
1906	pobj = &hobj->Package.Elements[0];
1907	if (pobj == NULL)
1908		goto err;
1909	if (pobj->Type != ACPI_TYPE_LOCAL_REFERENCE)
1910		goto err;
1911
1912	h = acpi_GetReference(NULL, pobj);
1913	if (h == NULL)
1914		goto err;
1915
1916	if (propvalue != NULL && size >= sizeof(ACPI_HANDLE))
1917		*(ACPI_HANDLE *)propvalue = h;
1918	return (sizeof(ACPI_HANDLE));
1919
1920err:
1921	return (-1);
1922}
1923
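/*
 * Bus property accessor: translate a _DSD property into the representation
 * requested by the device_property_type_t argument.
 */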
1924static ssize_t
1925acpi_bus_get_prop(device_t bus, device_t child, const char *propname,
1926    void *propvalue, size_t size, device_property_type_t type)
1927{
1928	ACPI_STATUS status;
1929	const ACPI_OBJECT *obj;
1930
1931	status = acpi_device_get_prop(bus, child, __DECONST(char *, propname),
1932		&obj);
1933	if (ACPI_FAILURE(status))
1934		return (-1);
1935
1936	switch (type) {
1937	case DEVICE_PROP_ANY:
1938	case DEVICE_PROP_BUFFER:
1939	case DEVICE_PROP_UINT32:
1940	case DEVICE_PROP_UINT64:
1941		break;
1942	case DEVICE_PROP_HANDLE:
1943		return (acpi_bus_get_prop_handle(obj, propvalue, size));
1944	default:
1945		return (-1);
1946	}
1947
1948	switch (obj->Type) {
1949	case ACPI_TYPE_INTEGER:
1950		if (type == DEVICE_PROP_UINT32) {
1951			if (propvalue != NULL && size >= sizeof(uint32_t))
1952				*((uint32_t *)propvalue) = obj->Integer.Value;
1953			return (sizeof(uint32_t));
1954		}
1955		if (propvalue != NULL && size >= sizeof(uint64_t))
1956			*((uint64_t *) propvalue) = obj->Integer.Value;
1957		return (sizeof(uint64_t));
1958
1959	case ACPI_TYPE_STRING:
1960		if (type != DEVICE_PROP_ANY &&
1961		    type != DEVICE_PROP_BUFFER)
1962			return (-1);
1963
1964		if (propvalue != NULL && size > 0)
1965			memcpy(propvalue, obj->String.Pointer,
1966			    MIN(size, obj->String.Length));
1967		return (obj->String.Length);
1968
1969	case ACPI_TYPE_BUFFER:
1970		if (propvalue != NULL && size > 0)
1971			memcpy(propvalue, obj->Buffer.Pointer,
1972			    MIN(size, obj->Buffer.Length));
1973		return (obj->Buffer.Length);
1974
1975	case ACPI_TYPE_PACKAGE:
1976		if (propvalue != NULL && size >= sizeof(ACPI_OBJECT *)) {
1977			*((ACPI_OBJECT **) propvalue) =
1978			    __DECONST(ACPI_OBJECT *, obj);
1979		}
1980		return (sizeof(ACPI_OBJECT *));
1981
1982	case ACPI_TYPE_LOCAL_REFERENCE:
1983		if (propvalue != NULL && size >= sizeof(ACPI_HANDLE)) {
1984			ACPI_HANDLE h;
1985
1986			h = acpi_GetReference(NULL,
1987			    __DECONST(ACPI_OBJECT *, obj));
1988			memcpy(propvalue, h, sizeof(ACPI_HANDLE));
1989		}
1990		return (sizeof(ACPI_HANDLE));
1991	default:
1992		return (0);
1993	}
1994}
1995
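/*
 * Look up the _SxD method for the current sleep state and, if present, use
 * its value to override the D-state proposed by the caller.
 */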
1996int
1997acpi_device_pwr_for_sleep(device_t bus, device_t dev, int *dstate)
1998{
1999    struct acpi_softc *sc;
2000    ACPI_HANDLE handle;
2001    ACPI_STATUS status;
2002    char sxd[8];
2003
2004    handle = acpi_get_handle(dev);
2005
2006    /*
2007     * XXX If we find these devices, don't try to power them down.
2008     * The serial and IRDA ports on my T23 hang the system when
2009     * set to D3 and it appears that such legacy devices may
2010     * need special handling in their drivers.
2011     */
2012    if (dstate == NULL || handle == NULL ||
2013	acpi_MatchHid(handle, "PNP0500") ||
2014	acpi_MatchHid(handle, "PNP0501") ||
2015	acpi_MatchHid(handle, "PNP0502") ||
2016	acpi_MatchHid(handle, "PNP0510") ||
2017	acpi_MatchHid(handle, "PNP0511"))
2018	return (ENXIO);
2019
2020    /*
     * Override the next state with the value from _SxD, if present.  Note
     * that the illegal _S0D is also evaluated since some systems expect it.
2023     */
2024    sc = device_get_softc(bus);
2025    snprintf(sxd, sizeof(sxd), "_S%dD", sc->acpi_sstate);
2026    status = acpi_GetInteger(handle, sxd, dstate);
2027    if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
2028	    device_printf(dev, "failed to get %s on %s: %s\n", sxd,
2029		acpi_name(handle), AcpiFormatException(status));
2030	    return (ENXIO);
2031    }
2032
2033    return (0);
2034}
2035
2036/* Callback arg for our implementation of walking the namespace. */
2037struct acpi_device_scan_ctx {
2038    acpi_scan_cb_t	user_fn;
2039    void		*arg;
2040    ACPI_HANDLE		parent;
2041};
2042
2043static ACPI_STATUS
2044acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *arg, void **retval)
2045{
2046    struct acpi_device_scan_ctx *ctx;
2047    device_t dev, old_dev;
2048    ACPI_STATUS status;
2049    ACPI_OBJECT_TYPE type;
2050
2051    /*
2052     * Skip this device if we think we'll have trouble with it or it is
2053     * the parent where the scan began.
2054     */
2055    ctx = (struct acpi_device_scan_ctx *)arg;
2056    if (acpi_avoid(h) || h == ctx->parent)
2057	return (AE_OK);
2058
2059    /* If this is not a valid device type (e.g., a method), skip it. */
2060    if (ACPI_FAILURE(AcpiGetType(h, &type)))
2061	return (AE_OK);
2062    if (type != ACPI_TYPE_DEVICE && type != ACPI_TYPE_PROCESSOR &&
2063	type != ACPI_TYPE_THERMAL && type != ACPI_TYPE_POWER)
2064	return (AE_OK);
2065
2066    /*
2067     * Call the user function with the current device.  If it is unchanged
2068     * afterwards, return.  Otherwise, we update the handle to the new dev.
2069     */
2070    old_dev = acpi_get_device(h);
2071    dev = old_dev;
2072    status = ctx->user_fn(h, &dev, level, ctx->arg);
2073    if (ACPI_FAILURE(status) || old_dev == dev)
2074	return (status);
2075
2076    /* Remove the old child and its connection to the handle. */
2077    if (old_dev != NULL)
2078	device_delete_child(device_get_parent(old_dev), old_dev);
2079
2080    /* Recreate the handle association if the user created a device. */
2081    if (dev != NULL)
2082	AcpiAttachData(h, acpi_fake_objhandler, dev);
2083
2084    return (AE_OK);
2085}
2086
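/*
 * Walk the namespace below the given device (or the root if dev is NULL),
 * invoking the user callback for each eligible node.
 */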
2087static ACPI_STATUS
2088acpi_device_scan_children(device_t bus, device_t dev, int max_depth,
2089    acpi_scan_cb_t user_fn, void *arg)
2090{
2091    ACPI_HANDLE h;
2092    struct acpi_device_scan_ctx ctx;
2093
2094    if (acpi_disabled("children"))
2095	return (AE_OK);
2096
2097    if (dev == NULL)
2098	h = ACPI_ROOT_OBJECT;
2099    else if ((h = acpi_get_handle(dev)) == NULL)
2100	return (AE_BAD_PARAMETER);
2101    ctx.user_fn = user_fn;
2102    ctx.arg = arg;
2103    ctx.parent = h;
2104    return (AcpiWalkNamespace(ACPI_TYPE_ANY, h, max_depth,
2105	acpi_device_scan_cb, NULL, &ctx, NULL));
2106}
2107
2108/*
2109 * Even though ACPI devices are not PCI, we use the PCI approach for setting
2110 * device power states since it's close enough to ACPI.
2111 */
2112int
2113acpi_set_powerstate(device_t child, int state)
2114{
2115    ACPI_HANDLE h;
2116    ACPI_STATUS status;
2117
2118    h = acpi_get_handle(child);
2119    if (state < ACPI_STATE_D0 || state > ACPI_D_STATES_MAX)
2120	return (EINVAL);
2121    if (h == NULL)
2122	return (0);
2123
2124    /* Ignore errors if the power methods aren't present. */
2125    status = acpi_pwr_switch_consumer(h, state);
2126    if (ACPI_SUCCESS(status)) {
2127	if (bootverbose)
2128	    device_printf(child, "set ACPI power state D%d on %s\n",
2129		state, acpi_name(h));
2130    } else if (status != AE_NOT_FOUND)
2131	device_printf(child,
2132	    "failed to set ACPI power state D%d on %s: %s\n", state,
2133	    acpi_name(h), AcpiFormatException(status));
2134
2135    return (0);
2136}
2137
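/* Match the device's _HID/_CID PNP ids against a table of ISA PNP ids. */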
2138static int
2139acpi_isa_pnp_probe(device_t bus, device_t child, struct isa_pnp_id *ids)
2140{
2141    int			result, cid_count, i;
2142    uint32_t		lid, cids[8];
2143
2144    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
2145
2146    /*
2147     * ISA-style drivers attached to ACPI may persist and
2148     * probe manually if we return ENOENT.  We never want
2149     * that to happen, so don't ever return it.
2150     */
2151    result = ENXIO;
2152
2153    /* Scan the supplied IDs for a match */
2154    lid = acpi_isa_get_logicalid(child);
2155    cid_count = acpi_isa_get_compatid(child, cids, 8);
2156    while (ids && ids->ip_id) {
2157	if (lid == ids->ip_id) {
2158	    result = 0;
2159	    goto out;
2160	}
2161	for (i = 0; i < cid_count; i++) {
2162	    if (cids[i] == ids->ip_id) {
2163		result = 0;
2164		goto out;
2165	    }
2166	}
2167	ids++;
2168    }
2169
2170 out:
2171    if (result == 0 && ids->ip_desc)
2172	device_set_desc(child, ids->ip_desc);
2173
2174    return_VALUE (result);
2175}
2176
2177/*
2178 * Look for a MCFG table.  If it is present, use the settings for
2179 * domain (segment) 0 to setup PCI config space access via the memory
2180 * map.
2181 *
2182 * On non-x86 architectures (arm64 for now), this will be done from the
2183 * PCI host bridge driver.
2184 */
2185static void
2186acpi_enable_pcie(void)
2187{
2188#if defined(__i386__) || defined(__amd64__)
2189	ACPI_TABLE_HEADER *hdr;
2190	ACPI_MCFG_ALLOCATION *alloc, *end;
2191	ACPI_STATUS status;
2192
2193	status = AcpiGetTable(ACPI_SIG_MCFG, 1, &hdr);
2194	if (ACPI_FAILURE(status))
2195		return;
2196
2197	end = (ACPI_MCFG_ALLOCATION *)((char *)hdr + hdr->Length);
2198	alloc = (ACPI_MCFG_ALLOCATION *)((ACPI_TABLE_MCFG *)hdr + 1);
2199	while (alloc < end) {
2200		pcie_cfgregopen(alloc->Address, alloc->PciSegment,
2201		    alloc->StartBusNumber, alloc->EndBusNumber);
2202		alloc++;
2203	}
2204#endif
2205}
2206
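/*
 * Evaluate the platform-wide _OSC method under \_SB_ to advertise OSPM
 * capabilities (currently just APEI support) to the firmware.
 */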
2207static void
2208acpi_platform_osc(device_t dev)
2209{
2210	ACPI_HANDLE sb_handle;
2211	ACPI_STATUS status;
2212	uint32_t cap_set[2];
2213
2214	/* 0811B06E-4A27-44F9-8D60-3CBBC22E7B48 */
2215	static uint8_t acpi_platform_uuid[ACPI_UUID_LENGTH] = {
2216		0x6e, 0xb0, 0x11, 0x08, 0x27, 0x4a, 0xf9, 0x44,
2217		0x8d, 0x60, 0x3c, 0xbb, 0xc2, 0x2e, 0x7b, 0x48
2218	};
2219
2220	if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle)))
2221		return;
2222
2223	cap_set[1] = 0x10;	/* APEI Support */
2224	status = acpi_EvaluateOSC(sb_handle, acpi_platform_uuid, 1,
2225	    nitems(cap_set), cap_set, cap_set, false);
2226	if (ACPI_FAILURE(status)) {
2227		if (status == AE_NOT_FOUND)
2228			return;
2229		device_printf(dev, "_OSC failed: %s\n",
2230		    AcpiFormatException(status));
2231		return;
2232	}
2233}
2234
2235/*
2236 * Scan all of the ACPI namespace and attach child devices.
2237 *
2238 * We should only expect to find devices in the \_PR, \_TZ, \_SI, and
2239 * \_SB scopes, and \_PR and \_TZ became obsolete in the ACPI 2.0 spec.
2240 * However, in violation of the spec, some systems place their PCI link
2241 * devices in \, so we have to walk the whole namespace.  We check the
2242 * type of namespace nodes, so this should be ok.
2243 */
2244static void
2245acpi_probe_children(device_t bus)
2246{
2247
2248    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
2249
2250    /*
2251     * Scan the namespace and insert placeholders for all the devices that
2252     * we find.  We also probe/attach any early devices.
2253     *
2254     * Note that we use AcpiWalkNamespace rather than AcpiGetDevices because
2255     * we want to create nodes for all devices, not just those that are
2256     * currently present. (This assumes that we don't want to create/remove
2257     * devices as they appear, which might be smarter.)
2258     */
2259    ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "namespace scan\n"));
2260    AcpiWalkNamespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, 100, acpi_probe_child,
2261	NULL, bus, NULL);
2262
2263    /* Pre-allocate resources for our rman from any sysresource devices. */
2264    acpi_sysres_alloc(bus);
2265
2266    /* Create any static children by calling device identify methods. */
2267    ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "device identify routines\n"));
2268    bus_generic_probe(bus);
2269
2270    /* Probe/attach all children, created statically and from the namespace. */
2271    ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "acpi bus_generic_attach\n"));
2272    bus_generic_attach(bus);
2273
2274    /*
2275     * Reserve resources allocated to children but not yet allocated
2276     * by a driver.
2277     */
2278    acpi_reserve_resources(bus);
2279
2280    /* Attach wake sysctls. */
2281    acpi_wake_sysctl_walk(bus);
2282
2283    ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "done attaching children\n"));
2284    return_VOID;
2285}
2286
2287/*
2288 * Determine the probe order for a given device.
2289 */
2290static void
2291acpi_probe_order(ACPI_HANDLE handle, int *order)
2292{
2293	ACPI_OBJECT_TYPE type;
2294
2295	/*
2296	 * 0. CPUs
2297	 * 1. I/O port and memory system resource holders
2298	 * 2. Clocks and timers (to handle early accesses)
2299	 * 3. Embedded controllers (to handle early accesses)
2300	 * 4. PCI Link Devices
2301	 */
2302	AcpiGetType(handle, &type);
2303	if (type == ACPI_TYPE_PROCESSOR)
2304		*order = 0;
2305	else if (acpi_MatchHid(handle, "PNP0C01") ||
2306	    acpi_MatchHid(handle, "PNP0C02"))
2307		*order = 1;
2308	else if (acpi_MatchHid(handle, "PNP0100") ||
2309	    acpi_MatchHid(handle, "PNP0103") ||
2310	    acpi_MatchHid(handle, "PNP0B00"))
2311		*order = 2;
2312	else if (acpi_MatchHid(handle, "PNP0C09"))
2313		*order = 3;
2314	else if (acpi_MatchHid(handle, "PNP0C0F"))
2315		*order = 4;
2316}
2317
2318/*
2319 * Evaluate a child device and determine whether we might attach a device to
2320 * it.
2321 */
2322static ACPI_STATUS
2323acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status)
2324{
2325    ACPI_DEVICE_INFO *devinfo;
2326    struct acpi_device	*ad;
2327    struct acpi_prw_data prw;
2328    ACPI_OBJECT_TYPE type;
2329    ACPI_HANDLE h;
2330    device_t bus, child;
2331    char *handle_str;
2332    int order;
2333
2334    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
2335
2336    if (acpi_disabled("children"))
2337	return_ACPI_STATUS (AE_OK);
2338
2339    /* Skip this device if we think we'll have trouble with it. */
2340    if (acpi_avoid(handle))
2341	return_ACPI_STATUS (AE_OK);
2342
2343    bus = (device_t)context;
2344    if (ACPI_SUCCESS(AcpiGetType(handle, &type))) {
2345	handle_str = acpi_name(handle);
2346	switch (type) {
2347	case ACPI_TYPE_DEVICE:
2348	    /*
2349	     * Since we scan from \, be sure to skip system scope objects.
2350	     * \_SB_ and \_TZ_ are defined in ACPICA as devices to work around
	     * BIOS bugs.  For example, \_SB_ exists so \_SB_._INI can be run
	     * during initialization and \_TZ_ to support Notify() on it.
2353	     */
2354	    if (strcmp(handle_str, "\\_SB_") == 0 ||
2355		strcmp(handle_str, "\\_TZ_") == 0)
2356		break;
2357	    if (acpi_parse_prw(handle, &prw) == 0)
2358		AcpiSetupGpeForWake(handle, prw.gpe_handle, prw.gpe_bit);
2359
2360	    /*
2361	     * Ignore devices that do not have a _HID or _CID.  They should
2362	     * be discovered by other buses (e.g. the PCI bus driver).
2363	     */
2364	    if (!acpi_has_hid(handle))
2365		break;
2366	    /* FALLTHROUGH */
2367	case ACPI_TYPE_PROCESSOR:
2368	case ACPI_TYPE_THERMAL:
2369	case ACPI_TYPE_POWER:
2370	    /*
2371	     * Create a placeholder device for this node.  Sort the
2372	     * placeholder so that the probe/attach passes will run
2373	     * breadth-first.  Orders less than ACPI_DEV_BASE_ORDER
2374	     * are reserved for special objects (i.e., system
2375	     * resources).
2376	     */
2377	    ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "scanning '%s'\n", handle_str));
2378	    order = level * 10 + ACPI_DEV_BASE_ORDER;
2379	    acpi_probe_order(handle, &order);
2380	    child = BUS_ADD_CHILD(bus, order, NULL, -1);
2381	    if (child == NULL)
2382		break;
2383
2384	    /* Associate the handle with the device_t and vice versa. */
2385	    acpi_set_handle(child, handle);
2386	    AcpiAttachData(handle, acpi_fake_objhandler, child);
2387
2388	    /*
2389	     * Check that the device is present.  If it's not present,
2390	     * leave it disabled (so that we have a device_t attached to
2391	     * the handle, but we don't probe it).
2392	     *
2393	     * XXX PCI link devices sometimes report "present" but not
2394	     * "functional" (i.e. if disabled).  Go ahead and probe them
2395	     * anyway since we may enable them later.
2396	     */
2397	    if (type == ACPI_TYPE_DEVICE && !acpi_DeviceIsPresent(child)) {
2398		/* Never disable PCI link devices. */
2399		if (acpi_MatchHid(handle, "PNP0C0F"))
2400		    break;
2401
2402		/*
		 * The RTC device should be left enabled so its CMOS register
		 * space stays accessible, unless the FADT indicates that it
		 * is not present (checked in the RTC probe routine).
2406		 */
2407		if (acpi_MatchHid(handle, "PNP0B00"))
2408		    break;
2409
2410		/*
2411		 * Docking stations should remain enabled since the system
2412		 * may be undocked at boot.
2413		 */
2414		if (ACPI_SUCCESS(AcpiGetHandle(handle, "_DCK", &h)))
2415		    break;
2416
2417		device_disable(child);
2418		break;
2419	    }
2420
2421	    /*
2422	     * Get the device's resource settings and attach them.
2423	     * Note that if the device has _PRS but no _CRS, we need
2424	     * to decide when it's appropriate to try to configure the
2425	     * device.  Ignore the return value here; it's OK for the
2426	     * device not to have any resources.
2427	     */
2428	    acpi_parse_resources(child, handle, &acpi_res_parse_set, NULL);
2429
2430	    ad = device_get_ivars(child);
2431	    ad->ad_cls_class = 0xffffff;
2432	    if (ACPI_SUCCESS(AcpiGetObjectInfo(handle, &devinfo))) {
2433		if ((devinfo->Valid & ACPI_VALID_CLS) != 0 &&
2434		    devinfo->ClassCode.Length >= ACPI_PCICLS_STRING_SIZE) {
2435		    ad->ad_cls_class = strtoul(devinfo->ClassCode.String,
2436			NULL, 16);
2437		}
2438		AcpiOsFree(devinfo);
2439	    }
2440	    break;
2441	}
2442    }
2443
2444    return_ACPI_STATUS (AE_OK);
2445}
2446
2447/*
2448 * AcpiAttachData() requires an object handler but never uses it.  This is a
2449 * placeholder object handler so we can store a device_t in an ACPI_HANDLE.
2450 */
2451void
2452acpi_fake_objhandler(ACPI_HANDLE h, void *data)
2453{
2454}
2455
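/*
 * Final shutdown hook: enter S5 on power-off, use the ACPI reset register
 * for reboot if enabled, or terminate ACPI if the user asked for that.
 */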
2456static void
2457acpi_shutdown_final(void *arg, int howto)
2458{
2459    struct acpi_softc *sc = (struct acpi_softc *)arg;
2460    register_t intr;
2461    ACPI_STATUS status;
2462
2463    /*
2464     * XXX Shutdown code should only run on the BSP (cpuid 0).
2465     * Some chipsets do not power off the system correctly if called from
2466     * an AP.
2467     */
2468    if ((howto & RB_POWEROFF) != 0) {
2469	status = AcpiEnterSleepStatePrep(ACPI_STATE_S5);
2470	if (ACPI_FAILURE(status)) {
2471	    device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n",
2472		AcpiFormatException(status));
2473	    return;
2474	}
2475	device_printf(sc->acpi_dev, "Powering system off\n");
2476	intr = intr_disable();
2477	status = AcpiEnterSleepState(ACPI_STATE_S5);
2478	if (ACPI_FAILURE(status)) {
2479	    intr_restore(intr);
2480	    device_printf(sc->acpi_dev, "power-off failed - %s\n",
2481		AcpiFormatException(status));
2482	} else {
2483	    DELAY(1000000);
2484	    intr_restore(intr);
2485	    device_printf(sc->acpi_dev, "power-off failed - timeout\n");
2486	}
2487    } else if ((howto & RB_HALT) == 0 && sc->acpi_handle_reboot) {
2488	/* Reboot using the reset register. */
2489	status = AcpiReset();
2490	if (ACPI_SUCCESS(status)) {
2491	    DELAY(1000000);
2492	    device_printf(sc->acpi_dev, "reset failed - timeout\n");
2493	} else if (status != AE_NOT_EXIST)
2494	    device_printf(sc->acpi_dev, "reset failed - %s\n",
2495		AcpiFormatException(status));
2496    } else if (sc->acpi_do_disable && !KERNEL_PANICKED()) {
2497	/*
2498	 * Only disable ACPI if the user requested.  On some systems, writing
2499	 * the disable value to SMI_CMD hangs the system.
2500	 */
2501	device_printf(sc->acpi_dev, "Shutting down\n");
2502	AcpiTerminate();
2503    }
2504}
2505
2506static void
2507acpi_enable_fixed_events(struct acpi_softc *sc)
2508{
2509    static int	first_time = 1;
2510
2511    /* Enable and clear fixed events and install handlers. */
2512    if ((AcpiGbl_FADT.Flags & ACPI_FADT_POWER_BUTTON) == 0) {
2513	AcpiClearEvent(ACPI_EVENT_POWER_BUTTON);
2514	AcpiInstallFixedEventHandler(ACPI_EVENT_POWER_BUTTON,
2515				     acpi_event_power_button_sleep, sc);
2516	if (first_time)
2517	    device_printf(sc->acpi_dev, "Power Button (fixed)\n");
2518    }
2519    if ((AcpiGbl_FADT.Flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
2520	AcpiClearEvent(ACPI_EVENT_SLEEP_BUTTON);
2521	AcpiInstallFixedEventHandler(ACPI_EVENT_SLEEP_BUTTON,
2522				     acpi_event_sleep_button_sleep, sc);
2523	if (first_time)
2524	    device_printf(sc->acpi_dev, "Sleep Button (fixed)\n");
2525    }
2526
2527    first_time = 0;
2528}
2529
2530/*
2531 * Returns true if the device is actually present and should
2532 * be attached to.  This requires the present, enabled, UI-visible
2533 * and diagnostics-passed bits to be set.
2534 */
2535BOOLEAN
2536acpi_DeviceIsPresent(device_t dev)
2537{
2538	ACPI_HANDLE h;
2539	UINT32 s;
2540	ACPI_STATUS status;
2541
2542	h = acpi_get_handle(dev);
2543	if (h == NULL)
2544		return (FALSE);
2545
2546#ifdef ACPI_EARLY_EPYC_WAR
2547	/*
	 * Certain Threadripper boards always return 0 for _STA on FreeBSD
	 * because the AML only returns non-zero for the OS string
	 * "Windows 2015".  Force these devices to always be treated as
	 * present.  Beta BIOS versions were worse: they always returned 0.
2552	 */
2553	if (acpi_MatchHid(h, "AMDI0020") || acpi_MatchHid(h, "AMDI0010"))
2554		return (TRUE);
2555#endif
2556
2557	status = acpi_GetInteger(h, "_STA", &s);
2558
2559	/*
	 * If there is no _STA method or evaluating it failed, assume that
	 * the device is present.
2562	 */
2563	if (ACPI_FAILURE(status))
2564		return (TRUE);
2565
2566	return (ACPI_DEVICE_PRESENT(s) ? TRUE : FALSE);
2567}
2568
2569/*
2570 * Returns true if the battery is actually present and inserted.
2571 */
2572BOOLEAN
2573acpi_BatteryIsPresent(device_t dev)
2574{
2575	ACPI_HANDLE h;
2576	UINT32 s;
2577	ACPI_STATUS status;
2578
2579	h = acpi_get_handle(dev);
2580	if (h == NULL)
2581		return (FALSE);
2582	status = acpi_GetInteger(h, "_STA", &s);
2583
2584	/*
	 * If there is no _STA method or evaluating it failed, assume that
	 * the device is present.
2587	 */
2588	if (ACPI_FAILURE(status))
2589		return (TRUE);
2590
2591	return (ACPI_BATTERY_PRESENT(s) ? TRUE : FALSE);
2592}
2593
2594/*
2595 * Returns true if a device has at least one valid device ID.
2596 */
2597BOOLEAN
2598acpi_has_hid(ACPI_HANDLE h)
2599{
2600    ACPI_DEVICE_INFO	*devinfo;
2601    BOOLEAN		ret;
2602
2603    if (h == NULL ||
2604	ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
2605	return (FALSE);
2606
2607    ret = FALSE;
2608    if ((devinfo->Valid & ACPI_VALID_HID) != 0)
2609	ret = TRUE;
2610    else if ((devinfo->Valid & ACPI_VALID_CID) != 0)
2611	if (devinfo->CompatibleIdList.Count > 0)
2612	    ret = TRUE;
2613
2614    AcpiOsFree(devinfo);
2615    return (ret);
2616}
2617
2618/*
 * Match a HID string against a handle.
 * Returns ACPI_MATCHHID_HID if the _HID matches,
 *         ACPI_MATCHHID_CID if a _CID matches but the _HID does not, or
 *         ACPI_MATCHHID_NOMATCH (0) if neither matches.
2623 */
2624int
2625acpi_MatchHid(ACPI_HANDLE h, const char *hid)
2626{
2627    ACPI_DEVICE_INFO	*devinfo;
    int			ret;
2629    int			i;
2630
2631    if (hid == NULL || h == NULL ||
2632	ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
2633	return (ACPI_MATCHHID_NOMATCH);
2634
2635    ret = ACPI_MATCHHID_NOMATCH;
2636    if ((devinfo->Valid & ACPI_VALID_HID) != 0 &&
2637	strcmp(hid, devinfo->HardwareId.String) == 0)
2638	    ret = ACPI_MATCHHID_HID;
2639    else if ((devinfo->Valid & ACPI_VALID_CID) != 0)
2640	for (i = 0; i < devinfo->CompatibleIdList.Count; i++) {
2641	    if (strcmp(hid, devinfo->CompatibleIdList.Ids[i].String) == 0) {
2642		ret = ACPI_MATCHHID_CID;
2643		break;
2644	    }
2645	}
2646
2647    AcpiOsFree(devinfo);
2648    return (ret);
2649}
2650
2651/*
 * Return the handle of a named object within our scope, i.e. that of (parent)
 * or one of its parents.
2654 */
2655ACPI_STATUS
2656acpi_GetHandleInScope(ACPI_HANDLE parent, char *path, ACPI_HANDLE *result)
2657{
2658    ACPI_HANDLE		r;
2659    ACPI_STATUS		status;
2660
2661    /* Walk back up the tree to the root */
2662    for (;;) {
2663	status = AcpiGetHandle(parent, path, &r);
2664	if (ACPI_SUCCESS(status)) {
2665	    *result = r;
2666	    return (AE_OK);
2667	}
2668	/* XXX Return error here? */
2669	if (status != AE_NOT_FOUND)
2670	    return (AE_OK);
2671	if (ACPI_FAILURE(AcpiGetParent(parent, &r)))
2672	    return (AE_NOT_FOUND);
2673	parent = r;
2674    }
2675}
2676
2677ACPI_STATUS
2678acpi_GetProperty(device_t dev, ACPI_STRING propname,
2679    const ACPI_OBJECT **value)
2680{
2681	device_t bus = device_get_parent(dev);
2682
2683	return (ACPI_GET_PROPERTY(bus, dev, propname, value));
2684}
2685
2686/*
2687 * Allocate a buffer with a preset data size.
2688 */
2689ACPI_BUFFER *
2690acpi_AllocBuffer(int size)
2691{
2692    ACPI_BUFFER	*buf;
2693
2694    if ((buf = malloc(size + sizeof(*buf), M_ACPIDEV, M_NOWAIT)) == NULL)
2695	return (NULL);
2696    buf->Length = size;
2697    buf->Pointer = (void *)(buf + 1);
2698    return (buf);
2699}
2700
2701ACPI_STATUS
2702acpi_SetInteger(ACPI_HANDLE handle, char *path, UINT32 number)
2703{
2704    ACPI_OBJECT arg1;
2705    ACPI_OBJECT_LIST args;
2706
2707    arg1.Type = ACPI_TYPE_INTEGER;
2708    arg1.Integer.Value = number;
2709    args.Count = 1;
2710    args.Pointer = &arg1;
2711
2712    return (AcpiEvaluateObject(handle, path, &args, NULL));
2713}
2714
2715/*
2716 * Evaluate a path that should return an integer.
2717 */
2718ACPI_STATUS
2719acpi_GetInteger(ACPI_HANDLE handle, char *path, UINT32 *number)
2720{
2721    ACPI_STATUS	status;
2722    ACPI_BUFFER	buf;
2723    ACPI_OBJECT	param;
2724
2725    if (handle == NULL)
2726	handle = ACPI_ROOT_OBJECT;
2727
2728    /*
2729     * Assume that what we've been pointed at is an Integer object, or
2730     * a method that will return an Integer.
2731     */
2732    buf.Pointer = &param;
2733    buf.Length = sizeof(param);
2734    status = AcpiEvaluateObject(handle, path, NULL, &buf);
2735    if (ACPI_SUCCESS(status)) {
2736	if (param.Type == ACPI_TYPE_INTEGER)
2737	    *number = param.Integer.Value;
2738	else
2739	    status = AE_TYPE;
2740    }
2741
2742    /*
2743     * In some applications, a method that's expected to return an Integer
2744     * may instead return a Buffer (probably to simplify some internal
2745     * arithmetic).  We'll try to fetch whatever it is, and if it's a Buffer,
2746     * convert it into an Integer as best we can.
2747     *
2748     * This is a hack.
2749     */
2750    if (status == AE_BUFFER_OVERFLOW) {
2751	if ((buf.Pointer = AcpiOsAllocate(buf.Length)) == NULL) {
2752	    status = AE_NO_MEMORY;
2753	} else {
2754	    status = AcpiEvaluateObject(handle, path, NULL, &buf);
2755	    if (ACPI_SUCCESS(status))
2756		status = acpi_ConvertBufferToInteger(&buf, number);
2757	    AcpiOsFree(buf.Pointer);
2758	}
2759    }
2760    return (status);
2761}
2762
2763ACPI_STATUS
2764acpi_ConvertBufferToInteger(ACPI_BUFFER *bufp, UINT32 *number)
2765{
2766    ACPI_OBJECT	*p;
2767    UINT8	*val;
2768    int		i;
2769
2770    p = (ACPI_OBJECT *)bufp->Pointer;
2771    if (p->Type == ACPI_TYPE_INTEGER) {
2772	*number = p->Integer.Value;
2773	return (AE_OK);
2774    }
2775    if (p->Type != ACPI_TYPE_BUFFER)
2776	return (AE_TYPE);
2777    if (p->Buffer.Length > sizeof(int))
2778	return (AE_BAD_DATA);
2779
2780    *number = 0;
2781    val = p->Buffer.Pointer;
2782    for (i = 0; i < p->Buffer.Length; i++)
2783	*number += val[i] << (i * 8);
2784    return (AE_OK);
2785}
2786
2787/*
 * Iterate over the elements of a package object, calling the supplied
 * function for each element.
 *
 * XXX A possible enhancement would be to abort traversal on error.
2792 */
2793ACPI_STATUS
2794acpi_ForeachPackageObject(ACPI_OBJECT *pkg,
2795	void (*func)(ACPI_OBJECT *comp, void *arg), void *arg)
2796{
2797    ACPI_OBJECT	*comp;
2798    int		i;
2799
2800    if (pkg == NULL || pkg->Type != ACPI_TYPE_PACKAGE)
2801	return (AE_BAD_PARAMETER);
2802
2803    /* Iterate over components */
2804    i = 0;
2805    comp = pkg->Package.Elements;
2806    for (; i < pkg->Package.Count; i++, comp++)
2807	func(comp, arg);
2808
2809    return (AE_OK);
2810}
2811
2812/*
2813 * Find the (index)th resource object in a set.
2814 */
2815ACPI_STATUS
2816acpi_FindIndexedResource(ACPI_BUFFER *buf, int index, ACPI_RESOURCE **resp)
2817{
2818    ACPI_RESOURCE	*rp;
2819    int			i;
2820
2821    rp = (ACPI_RESOURCE *)buf->Pointer;
2822    i = index;
2823    while (i-- > 0) {
2824	/* Range check */
2825	if (rp > (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length))
2826	    return (AE_BAD_PARAMETER);
2827
2828	/* Check for terminator */
2829	if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0)
2830	    return (AE_NOT_FOUND);
2831	rp = ACPI_NEXT_RESOURCE(rp);
2832    }
2833    if (resp != NULL)
2834	*resp = rp;
2835
2836    return (AE_OK);
2837}
2838
2839/*
2840 * Append an ACPI_RESOURCE to an ACPI_BUFFER.
2841 *
2842 * Given a pointer to an ACPI_RESOURCE structure, expand the ACPI_BUFFER
2843 * provided to contain it.  If the ACPI_BUFFER is empty, allocate a sensible
2844 * backing block.  If the ACPI_RESOURCE is NULL, return an empty set of
2845 * resources.
2846 */
2847#define ACPI_INITIAL_RESOURCE_BUFFER_SIZE	512
2848
2849ACPI_STATUS
2850acpi_AppendBufferResource(ACPI_BUFFER *buf, ACPI_RESOURCE *res)
2851{
2852    ACPI_RESOURCE	*rp;
2853    void		*newp;
2854
2855    /* Initialise the buffer if necessary. */
2856    if (buf->Pointer == NULL) {
2857	buf->Length = ACPI_INITIAL_RESOURCE_BUFFER_SIZE;
2858	if ((buf->Pointer = AcpiOsAllocate(buf->Length)) == NULL)
2859	    return (AE_NO_MEMORY);
2860	rp = (ACPI_RESOURCE *)buf->Pointer;
2861	rp->Type = ACPI_RESOURCE_TYPE_END_TAG;
2862	rp->Length = ACPI_RS_SIZE_MIN;
2863    }
2864    if (res == NULL)
2865	return (AE_OK);
2866
2867    /*
2868     * Scan the current buffer looking for the terminator.
2869     * This will either find the terminator or hit the end
2870     * of the buffer and return an error.
2871     */
2872    rp = (ACPI_RESOURCE *)buf->Pointer;
2873    for (;;) {
2874	/* Range check, don't go outside the buffer */
2875	if (rp >= (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length))
2876	    return (AE_BAD_PARAMETER);
2877	if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0)
2878	    break;
2879	rp = ACPI_NEXT_RESOURCE(rp);
2880    }
2881
2882    /*
2883     * Check the size of the buffer and expand if required.
2884     *
2885     * Required size is:
2886     *	size of existing resources before terminator +
2887     *	size of new resource and header +
2888     * 	size of terminator.
2889     *
2890     * Note that this loop should really only run once, unless
2891     * for some reason we are stuffing a *really* huge resource.
2892     */
2893    while ((((u_int8_t *)rp - (u_int8_t *)buf->Pointer) +
2894	    res->Length + ACPI_RS_SIZE_NO_DATA +
2895	    ACPI_RS_SIZE_MIN) >= buf->Length) {
2896	if ((newp = AcpiOsAllocate(buf->Length * 2)) == NULL)
2897	    return (AE_NO_MEMORY);
2898	bcopy(buf->Pointer, newp, buf->Length);
2899	rp = (ACPI_RESOURCE *)((u_int8_t *)newp +
2900			       ((u_int8_t *)rp - (u_int8_t *)buf->Pointer));
2901	AcpiOsFree(buf->Pointer);
2902	buf->Pointer = newp;
2903	buf->Length += buf->Length;
2904    }
2905
2906    /* Insert the new resource. */
2907    bcopy(res, rp, res->Length + ACPI_RS_SIZE_NO_DATA);
2908
2909    /* And add the terminator. */
2910    rp = ACPI_NEXT_RESOURCE(rp);
2911    rp->Type = ACPI_RESOURCE_TYPE_END_TAG;
2912    rp->Length = ACPI_RS_SIZE_MIN;
2913
2914    return (AE_OK);
2915}
2916
2917UINT64
2918acpi_DSMQuery(ACPI_HANDLE h, const uint8_t *uuid, int revision)
2919{
2920    /*
2921     * ACPI spec 9.1.1 defines this.
2922     *
2923     * "Arg2: Function Index Represents a specific function whose meaning is
2924     * specific to the UUID and Revision ID. Function indices should start
2925     * with 1. Function number zero is a query function (see the special
2926     * return code defined below)."
2927     */
2928    ACPI_BUFFER buf;
2929    ACPI_OBJECT *obj;
2930    UINT64 ret = 0;
2931    int i;
2932
2933    if (!ACPI_SUCCESS(acpi_EvaluateDSM(h, uuid, revision, 0, NULL, &buf))) {
2934	ACPI_INFO(("Failed to enumerate DSM functions\n"));
2935	return (0);
2936    }
2937
2938    obj = (ACPI_OBJECT *)buf.Pointer;
2939    KASSERT(obj, ("Object not allowed to be NULL\n"));
2940
2941    /*
2942     * From ACPI 6.2 spec 9.1.1:
2943     * If Function Index = 0, a Buffer containing a function index bitfield.
2944     * Otherwise, the return value and type depends on the UUID and revision
2945     * ID (see below).
2946     */
2947    switch (obj->Type) {
2948    case ACPI_TYPE_BUFFER:
2949	for (i = 0; i < MIN(obj->Buffer.Length, sizeof(ret)); i++)
2950	    ret |= (((uint64_t)obj->Buffer.Pointer[i]) << (i * 8));
2951	break;
2952    case ACPI_TYPE_INTEGER:
2953	ACPI_BIOS_WARNING((AE_INFO,
2954	    "Possibly buggy BIOS with ACPI_TYPE_INTEGER for function enumeration\n"));
2955	ret = obj->Integer.Value;
2956	break;
2957    default:
2958	ACPI_WARNING((AE_INFO, "Unexpected return type %u\n", obj->Type));
    }
2960
2961    AcpiOsFree(obj);
    return (ret);
2963}
2964
2965/*
 * _DSM may return multiple types depending on the function.  It is therefore
 * unsafe to use a typed evaluation here.  Callers are strongly encouraged to
 * check the type of the returned object.
2969 */
2970ACPI_STATUS
2971acpi_EvaluateDSM(ACPI_HANDLE handle, const uint8_t *uuid, int revision,
2972    UINT64 function, ACPI_OBJECT *package, ACPI_BUFFER *out_buf)
2973{
2974	return (acpi_EvaluateDSMTyped(handle, uuid, revision, function,
2975	    package, out_buf, ACPI_TYPE_ANY));
2976}
2977
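/*
 * Evaluate _DSM for the given UUID, revision and function index, requiring
 * the return object to be of the given type (ACPI_TYPE_ANY accepts any).
 * On success the caller must free out_buf's pointer with AcpiOsFree().
 */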
2978ACPI_STATUS
2979acpi_EvaluateDSMTyped(ACPI_HANDLE handle, const uint8_t *uuid, int revision,
2980    UINT64 function, ACPI_OBJECT *package, ACPI_BUFFER *out_buf,
2981    ACPI_OBJECT_TYPE type)
2982{
2983    ACPI_OBJECT arg[4];
2984    ACPI_OBJECT_LIST arglist;
2985    ACPI_BUFFER buf;
2986    ACPI_STATUS status;
2987
2988    if (out_buf == NULL)
2989	return (AE_NO_MEMORY);
2990
2991    arg[0].Type = ACPI_TYPE_BUFFER;
2992    arg[0].Buffer.Length = ACPI_UUID_LENGTH;
2993    arg[0].Buffer.Pointer = __DECONST(uint8_t *, uuid);
2994    arg[1].Type = ACPI_TYPE_INTEGER;
2995    arg[1].Integer.Value = revision;
2996    arg[2].Type = ACPI_TYPE_INTEGER;
2997    arg[2].Integer.Value = function;
2998    if (package) {
2999	arg[3] = *package;
3000    } else {
3001	arg[3].Type = ACPI_TYPE_PACKAGE;
3002	arg[3].Package.Count = 0;
3003	arg[3].Package.Elements = NULL;
3004    }
3005
3006    arglist.Pointer = arg;
3007    arglist.Count = 4;
3008    buf.Pointer = NULL;
3009    buf.Length = ACPI_ALLOCATE_BUFFER;
3010    status = AcpiEvaluateObjectTyped(handle, "_DSM", &arglist, &buf, type);
3011    if (ACPI_FAILURE(status))
3012	return (status);
3013
3014    KASSERT(ACPI_SUCCESS(status), ("Unexpected status"));
3015
3016    *out_buf = buf;
3017    return (status);
3018}
3019
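/*
 * Evaluate _OSC with the given UUID, revision and capability DWORDs.  The
 * query flag is set in the first DWORD as requested, and the returned
 * capabilities are copied into caps_out if the caller asked for them.
 */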
3020ACPI_STATUS
3021acpi_EvaluateOSC(ACPI_HANDLE handle, uint8_t *uuid, int revision, int count,
3022    uint32_t *caps_in, uint32_t *caps_out, bool query)
3023{
3024	ACPI_OBJECT arg[4], *ret;
3025	ACPI_OBJECT_LIST arglist;
3026	ACPI_BUFFER buf;
3027	ACPI_STATUS status;
3028
3029	arglist.Pointer = arg;
3030	arglist.Count = 4;
3031	arg[0].Type = ACPI_TYPE_BUFFER;
3032	arg[0].Buffer.Length = ACPI_UUID_LENGTH;
3033	arg[0].Buffer.Pointer = uuid;
3034	arg[1].Type = ACPI_TYPE_INTEGER;
3035	arg[1].Integer.Value = revision;
3036	arg[2].Type = ACPI_TYPE_INTEGER;
3037	arg[2].Integer.Value = count;
3038	arg[3].Type = ACPI_TYPE_BUFFER;
3039	arg[3].Buffer.Length = count * sizeof(*caps_in);
3040	arg[3].Buffer.Pointer = (uint8_t *)caps_in;
3041	caps_in[0] = query ? 1 : 0;
3042	buf.Pointer = NULL;
3043	buf.Length = ACPI_ALLOCATE_BUFFER;
3044	status = AcpiEvaluateObjectTyped(handle, "_OSC", &arglist, &buf,
3045	    ACPI_TYPE_BUFFER);
3046	if (ACPI_FAILURE(status))
3047		return (status);
3048	if (caps_out != NULL) {
3049		ret = buf.Pointer;
3050		if (ret->Buffer.Length != count * sizeof(*caps_out)) {
3051			AcpiOsFree(buf.Pointer);
3052			return (AE_BUFFER_OVERFLOW);
3053		}
3054		bcopy(ret->Buffer.Pointer, caps_out, ret->Buffer.Length);
3055	}
3056	AcpiOsFree(buf.Pointer);
3057	return (status);
3058}
3059
3060/*
3061 * Set interrupt model.
3062 */
3063ACPI_STATUS
3064acpi_SetIntrModel(int model)
3065{
3066
3067    return (acpi_SetInteger(ACPI_ROOT_OBJECT, "_PIC", model));
3068}
3069
3070/*
3071 * Walk subtables of a table and call a callback routine for each
3072 * subtable.  The caller should provide the first subtable and a
3073 * pointer to the end of the table.  This can be used to walk tables
3074 * such as MADT and SRAT that use subtable entries.
3075 */
3076void
3077acpi_walk_subtables(void *first, void *end, acpi_subtable_handler *handler,
3078    void *arg)
3079{
3080    ACPI_SUBTABLE_HEADER *entry;
3081
3082    for (entry = first; (void *)entry < end; ) {
3083	/* Avoid an infinite loop if we hit a bogus entry. */
3084	if (entry->Length < sizeof(ACPI_SUBTABLE_HEADER))
3085	    return;
3086
3087	handler(entry, arg);
3088	entry = ACPI_ADD_PTR(ACPI_SUBTABLE_HEADER, entry, entry->Length);
3089    }
3090}
3091
3092/*
3093 * DEPRECATED.  This interface has serious deficiencies and will be
3094 * removed.
3095 *
3096 * Immediately enter the sleep state.  In the old model, acpiconf(8) ran
3097 * rc.suspend and rc.resume so we don't have to notify devd(8) to do this.
3098 */
3099ACPI_STATUS
3100acpi_SetSleepState(struct acpi_softc *sc, int state)
3101{
3102    static int once;
3103
3104    if (!once) {
3105	device_printf(sc->acpi_dev,
3106"warning: acpi_SetSleepState() deprecated, need to update your software\n");
3107	once = 1;
3108    }
3109    return (acpi_EnterSleepState(sc, state));
3110}
3111
3112#if defined(__amd64__) || defined(__i386__)
3113static void
3114acpi_sleep_force_task(void *context)
3115{
3116    struct acpi_softc *sc = (struct acpi_softc *)context;
3117
3118    if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate)))
3119	device_printf(sc->acpi_dev, "force sleep state S%d failed\n",
3120	    sc->acpi_next_sstate);
3121}
3122
3123static void
3124acpi_sleep_force(void *arg)
3125{
3126    struct acpi_softc *sc = (struct acpi_softc *)arg;
3127
3128    device_printf(sc->acpi_dev,
3129	"suspend request timed out, forcing sleep now\n");
3130    /*
3131     * XXX Suspending from callout causes freezes in DEVICE_SUSPEND().
3132     * Suspend from acpi_task thread instead.
3133     */
3134    if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
3135	acpi_sleep_force_task, sc)))
3136	device_printf(sc->acpi_dev, "AcpiOsExecute() for sleeping failed\n");
3137}
3138#endif
3139
3140/*
3141 * Request that the system enter the given suspend state.  All /dev/apm
3142 * devices and devd(8) will be notified.  Userland then has a chance to
3143 * save state and acknowledge the request.  The system sleeps once all
3144 * acks are in.
3145 */
3146int
3147acpi_ReqSleepState(struct acpi_softc *sc, int state)
3148{
3149#if defined(__amd64__) || defined(__i386__)
3150    struct apm_clone_data *clone;
3151    ACPI_STATUS status;
3152
3153    if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
3154	return (EINVAL);
3155    if (!acpi_sleep_states[state])
3156	return (EOPNOTSUPP);
3157
3158    /*
3159     * If a reboot/shutdown/suspend request is already in progress or
3160     * suspend is blocked due to an upcoming shutdown, just return.
3161     */
3162    if (rebooting || sc->acpi_next_sstate != 0 || suspend_blocked) {
3163	return (0);
3164    }
3165
3166    /* Wait until sleep is enabled. */
3167    while (sc->acpi_sleep_disabled) {
3168	AcpiOsSleep(1000);
3169    }
3170
3171    ACPI_LOCK(acpi);
3172
3173    sc->acpi_next_sstate = state;
3174
3175    /* S5 (soft-off) should be entered directly with no waiting. */
3176    if (state == ACPI_STATE_S5) {
3177    	ACPI_UNLOCK(acpi);
3178	status = acpi_EnterSleepState(sc, state);
3179	return (ACPI_SUCCESS(status) ? 0 : ENXIO);
3180    }
3181
3182    /* Record the pending state and notify all apm devices. */
3183    STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) {
3184	clone->notify_status = APM_EV_NONE;
3185	if ((clone->flags & ACPI_EVF_DEVD) == 0) {
3186	    selwakeuppri(&clone->sel_read, PZERO);
3187	    KNOTE_LOCKED(&clone->sel_read.si_note, 0);
3188	}
3189    }
3190
3191    /* If devd(8) is not running, immediately enter the sleep state. */
3192    if (!devctl_process_running()) {
3193	ACPI_UNLOCK(acpi);
3194	status = acpi_EnterSleepState(sc, state);
3195	return (ACPI_SUCCESS(status) ? 0 : ENXIO);
3196    }
3197
3198    /*
3199     * Set a timeout to fire if userland doesn't ack the suspend request
3200     * in time.  This way we still eventually go to sleep if we were
3201     * overheating or running low on battery, even if userland is hung.
3202     * We cancel this timeout once all userland acks are in or the
3203     * suspend request is aborted.
3204     */
3205    callout_reset(&sc->susp_force_to, 10 * hz, acpi_sleep_force, sc);
3206    ACPI_UNLOCK(acpi);
3207
3208    /* Now notify devd(8) also. */
3209    acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, state);
3210
3211    return (0);
3212#else
    /* This platform does not support ACPI suspend/resume. */
3214    return (EOPNOTSUPP);
3215#endif
3216}
3217
3218/*
3219 * Acknowledge (or reject) a pending sleep state.  The caller has
3220 * prepared for suspend and is now ready for it to proceed.  If the
3221 * error argument is non-zero, it indicates suspend should be cancelled
3222 * and gives an errno value describing why.  Once all votes are in,
3223 * we suspend the system.
3224 */
3225int
3226acpi_AckSleepState(struct apm_clone_data *clone, int error)
3227{
3228#if defined(__amd64__) || defined(__i386__)
3229    struct acpi_softc *sc;
3230    int ret, sleeping;
3231
3232    /* If no pending sleep state, return an error. */
3233    ACPI_LOCK(acpi);
3234    sc = clone->acpi_sc;
3235    if (sc->acpi_next_sstate == 0) {
3236    	ACPI_UNLOCK(acpi);
3237	return (ENXIO);
3238    }
3239
3240    /* Caller wants to abort suspend process. */
3241    if (error) {
3242	sc->acpi_next_sstate = 0;
3243	callout_stop(&sc->susp_force_to);
3244	device_printf(sc->acpi_dev,
3245	    "listener on %s cancelled the pending suspend\n",
3246	    devtoname(clone->cdev));
3247    	ACPI_UNLOCK(acpi);
3248	return (0);
3249    }
3250
3251    /*
3252     * Mark this device as acking the suspend request.  Then, walk through
3253     * all devices, seeing if they agree yet.  We only count devices that
3254     * are writable since read-only devices couldn't ack the request.
3255     */
3256    sleeping = TRUE;
3257    clone->notify_status = APM_EV_ACKED;
3258    STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) {
3259	if ((clone->flags & ACPI_EVF_WRITE) != 0 &&
3260	    clone->notify_status != APM_EV_ACKED) {
3261	    sleeping = FALSE;
3262	    break;
3263	}
3264    }
3265
3266    /* If all devices have voted "yes", we will suspend now. */
3267    if (sleeping)
3268	callout_stop(&sc->susp_force_to);
3269    ACPI_UNLOCK(acpi);
3270    ret = 0;
3271    if (sleeping) {
3272	if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate)))
3273		ret = ENODEV;
3274    }
3275    return (ret);
3276#else
    /* This platform does not support ACPI suspend/resume. */
3278    return (EOPNOTSUPP);
3279#endif
3280}
3281
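/*
 * Callout handler that re-enables sleep requests once the system is fully
 * up and running.
 */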
3282static void
3283acpi_sleep_enable(void *arg)
3284{
3285    struct acpi_softc	*sc = (struct acpi_softc *)arg;
3286
3287    ACPI_LOCK_ASSERT(acpi);
3288
3289    /* Reschedule if the system is not fully up and running. */
3290    if (!AcpiGbl_SystemAwakeAndRunning) {
3291	callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME);
3292	return;
3293    }
3294
3295    sc->acpi_sleep_disabled = FALSE;
3296}
3297
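/*
 * Disallow further sleep requests; fails if the system is not yet fully up
 * or if sleep has already been disabled.
 */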
3298static ACPI_STATUS
3299acpi_sleep_disable(struct acpi_softc *sc)
3300{
3301    ACPI_STATUS		status;
3302
3303    /* Fail if the system is not fully up and running. */
3304    if (!AcpiGbl_SystemAwakeAndRunning)
3305	return (AE_ERROR);
3306
3307    ACPI_LOCK(acpi);
3308    status = sc->acpi_sleep_disabled ? AE_ERROR : AE_OK;
3309    sc->acpi_sleep_disabled = TRUE;
3310    ACPI_UNLOCK(acpi);
3311
3312    return (status);
3313}
3314
3315enum acpi_sleep_state {
3316    ACPI_SS_NONE,
3317    ACPI_SS_GPE_SET,
3318    ACPI_SS_DEV_SUSPEND,
3319    ACPI_SS_SLP_PREP,
3320    ACPI_SS_SLEPT,
3321};
3322
3323/*
3324 * Enter the desired system sleep state.
3325 *
 * Currently we support S1-S5, but S4 is only supported via S4BIOS.
3327 */
3328static ACPI_STATUS
3329acpi_EnterSleepState(struct acpi_softc *sc, int state)
3330{
3331    register_t intr;
3332    ACPI_STATUS status;
3333    ACPI_EVENT_STATUS power_button_status;
3334    enum acpi_sleep_state slp_state;
3335    int sleep_result;
3336
3337    ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
3338
3339    if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
3340	return_ACPI_STATUS (AE_BAD_PARAMETER);
3341    if (!acpi_sleep_states[state]) {
3342	device_printf(sc->acpi_dev, "Sleep state S%d not supported by BIOS\n",
3343	    state);
3344	return (AE_SUPPORT);
3345    }
3346
3347    /* Re-entry once we're suspending is not allowed. */
3348    status = acpi_sleep_disable(sc);
3349    if (ACPI_FAILURE(status)) {
3350	device_printf(sc->acpi_dev,
3351	    "suspend request ignored (not ready yet)\n");
3352	return (status);
3353    }
3354
3355    if (state == ACPI_STATE_S5) {
3356	/*
3357	 * Shut down cleanly and power off.  This will call us back through the
3358	 * shutdown handlers.
3359	 */
3360	shutdown_nice(RB_POWEROFF);
3361	return_ACPI_STATUS (AE_OK);
3362    }
3363
3364    EVENTHANDLER_INVOKE(power_suspend_early);
3365    stop_all_proc();
3366    suspend_all_fs();
3367    EVENTHANDLER_INVOKE(power_suspend);
3368
3369#ifdef EARLY_AP_STARTUP
3370    MPASS(mp_ncpus == 1 || smp_started);
3371    thread_lock(curthread);
3372    sched_bind(curthread, 0);
3373    thread_unlock(curthread);
3374#else
3375    if (smp_started) {
3376	thread_lock(curthread);
3377	sched_bind(curthread, 0);
3378	thread_unlock(curthread);
3379    }
3380#endif
3381
3382    /*
3383     * Be sure to hold Giant across DEVICE_SUSPEND/RESUME
3384     */
3385    bus_topo_lock();
3386
3387    slp_state = ACPI_SS_NONE;
3388
3389    sc->acpi_sstate = state;
3390
3391    /* Enable any GPEs as appropriate and requested by the user. */
3392    acpi_wake_prep_walk(state);
3393    slp_state = ACPI_SS_GPE_SET;
3394
3395    /*
3396     * Inform all devices that we are going to sleep.  If at least one
3397     * device fails, DEVICE_SUSPEND() automatically resumes the tree.
3398     *
     * XXX A two-pass approach, with a 'veto' pass followed by the real
     * suspend pass, would be better, but the current bus interface does
     * not provide for this.
3402     */
3403    if (DEVICE_SUSPEND(root_bus) != 0) {
3404	device_printf(sc->acpi_dev, "device_suspend failed\n");
3405	goto backout;
3406    }
3407    slp_state = ACPI_SS_DEV_SUSPEND;
3408
3409    status = AcpiEnterSleepStatePrep(state);
3410    if (ACPI_FAILURE(status)) {
3411	device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n",
3412		      AcpiFormatException(status));
3413	goto backout;
3414    }
3415    slp_state = ACPI_SS_SLP_PREP;
3416
3417    if (sc->acpi_sleep_delay > 0)
3418	DELAY(sc->acpi_sleep_delay * 1000000);
3419
3420    suspendclock();
3421    intr = intr_disable();
3422    if (state != ACPI_STATE_S1) {
3423	sleep_result = acpi_sleep_machdep(sc, state);
3424	acpi_wakeup_machdep(sc, state, sleep_result, 0);
3425
3426	/*
	 * XXX According to the ACPI specification, the SCI_EN bit should be
	 * restored to its pre-sleep state by the platform (BIOS, firmware).
	 * Unfortunately some BIOSes fail to do so, which can lead to
	 * unexpected and serious consequences during wakeup, such as the
	 * system getting stuck in SMI handlers.
3432	 * This hack is picked up from Linux, which claims that it follows
3433	 * Windows behavior.
3434	 */
3435	if (sleep_result == 1 && state != ACPI_STATE_S4)
3436	    AcpiWriteBitRegister(ACPI_BITREG_SCI_ENABLE, ACPI_ENABLE_EVENT);
3437
3438	if (sleep_result == 1 && state == ACPI_STATE_S3) {
3439	    /*
3440	     * Prevent mis-interpretation of the wakeup by power button
3441	     * as a request for power off.
3442	     * Ideally we should post an appropriate wakeup event,
3443	     * perhaps using acpi_event_power_button_wake or alike.
3444	     *
3445	     * Clearing of power button status after wakeup is mandated
3446	     * by ACPI specification in section "Fixed Power Button".
3447	     *
	     * XXX As of ACPICA 20121114, AcpiGetEventStatus provides the
	     * status as 0/1 corresponding to inactive/active despite its
	     * type being ACPI_EVENT_STATUS.  In other words, we should not
	     * test for ACPI_EVENT_FLAG_SET for the time being.
3452	     */
3453	    if (ACPI_SUCCESS(AcpiGetEventStatus(ACPI_EVENT_POWER_BUTTON,
3454		&power_button_status)) && power_button_status != 0) {
3455		AcpiClearEvent(ACPI_EVENT_POWER_BUTTON);
3456		device_printf(sc->acpi_dev,
3457		    "cleared fixed power button status\n");
3458	    }
3459	}
3460
3461	intr_restore(intr);
3462
3463	/* call acpi_wakeup_machdep() again with interrupt enabled */
3464	acpi_wakeup_machdep(sc, state, sleep_result, 1);
3465
3466	AcpiLeaveSleepStatePrep(state);
3467
3468	if (sleep_result == -1)
3469		goto backout;
3470
3471	/* Re-enable ACPI hardware on wakeup from sleep state 4. */
3472	if (state == ACPI_STATE_S4)
3473	    AcpiEnable();
3474    } else {
3475	status = AcpiEnterSleepState(state);
3476	intr_restore(intr);
3477	AcpiLeaveSleepStatePrep(state);
3478	if (ACPI_FAILURE(status)) {
3479	    device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n",
3480			  AcpiFormatException(status));
3481	    goto backout;
3482	}
3483    }
3484    slp_state = ACPI_SS_SLEPT;
3485
3486    /*
3487     * Back out state according to how far along we got in the suspend
3488     * process.  This handles both the error and success cases.
3489     */
3490backout:
3491    if (slp_state >= ACPI_SS_SLP_PREP)
3492	resumeclock();
3493    if (slp_state >= ACPI_SS_GPE_SET) {
3494	acpi_wake_prep_walk(state);
3495	sc->acpi_sstate = ACPI_STATE_S0;
3496    }
3497    if (slp_state >= ACPI_SS_DEV_SUSPEND)
3498	DEVICE_RESUME(root_bus);
3499    if (slp_state >= ACPI_SS_SLP_PREP)
3500	AcpiLeaveSleepState(state);
3501    if (slp_state >= ACPI_SS_SLEPT) {
3502#if defined(__i386__) || defined(__amd64__)
3503	/* NB: we are still using ACPI timecounter at this point. */
3504	resume_TSC();
3505#endif
3506	acpi_resync_clock(sc);
3507	acpi_enable_fixed_events(sc);
3508    }
3509    sc->acpi_next_sstate = 0;
3510
3511    bus_topo_unlock();
3512
3513#ifdef EARLY_AP_STARTUP
3514    thread_lock(curthread);
3515    sched_unbind(curthread);
3516    thread_unlock(curthread);
3517#else
3518    if (smp_started) {
3519	thread_lock(curthread);
3520	sched_unbind(curthread);
3521	thread_unlock(curthread);
3522    }
3523#endif
3524
3525    resume_all_fs();
3526    resume_all_proc();
3527
3528    EVENTHANDLER_INVOKE(power_resume);
3529
3530    /* Allow another sleep request after a while. */
3531    callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME);
3532
3533    /* Run /etc/rc.resume after we are back. */
3534    if (devctl_process_running())
3535	acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, state);
3536
3537    return_ACPI_STATUS (status);
3538}
3539
3540static void
3541acpi_resync_clock(struct acpi_softc *sc)
3542{
3543
    /*
     * Warm up the timecounter again and reset the system clock.
     */
3547    (void)timecounter->tc_get_timecount(timecounter);
3548    inittodr(time_second + sc->acpi_sleep_delay);
3549}
3550
3551/* Enable or disable the device's wake GPE. */
3552int
3553acpi_wake_set_enable(device_t dev, int enable)
3554{
3555    struct acpi_prw_data prw;
3556    ACPI_STATUS status;
3557    int flags;
3558
3559    /* Make sure the device supports waking the system and get the GPE. */
3560    if (acpi_parse_prw(acpi_get_handle(dev), &prw) != 0)
3561	return (ENXIO);
3562
3563    flags = acpi_get_flags(dev);
3564    if (enable) {
3565	status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit,
3566	    ACPI_GPE_ENABLE);
3567	if (ACPI_FAILURE(status)) {
3568	    device_printf(dev, "enable wake failed\n");
3569	    return (ENXIO);
3570	}
3571	acpi_set_flags(dev, flags | ACPI_FLAG_WAKE_ENABLED);
3572    } else {
3573	status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit,
3574	    ACPI_GPE_DISABLE);
3575	if (ACPI_FAILURE(status)) {
3576	    device_printf(dev, "disable wake failed\n");
3577	    return (ENXIO);
3578	}
3579	acpi_set_flags(dev, flags & ~ACPI_FLAG_WAKE_ENABLED);
3580    }
3581
3582    return (0);
3583}
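
/*
 * A minimal usage sketch (illustrative only, not taken from any in-tree
 * driver): an ACPI-enumerated driver that wants its device to wake the
 * system could arm and disarm the wake GPE around suspend, e.g.
 *
 *	if (acpi_wake_set_enable(dev, 1) != 0)
 *		device_printf(dev, "failed to arm wake GPE\n");
 *	...
 *	(void)acpi_wake_set_enable(dev, 0);
 *
 * where "dev" is the driver's own device_t.  In practice this is usually
 * driven through the per-device "wake" sysctl attached below.
 */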
3584
3585static int
3586acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate)
3587{
3588    struct acpi_prw_data prw;
3589    device_t dev;
3590
3591    /* Check that this is a wake-capable device and get its GPE. */
3592    if (acpi_parse_prw(handle, &prw) != 0)
3593	return (ENXIO);
3594    dev = acpi_get_device(handle);
3595
3596    /*
3597     * The destination sleep state must be less than (i.e., higher power)
3598     * or equal to the value specified by _PRW.  If this GPE cannot be
3599     * enabled for the next sleep state, then disable it.  If it can and
3600     * the user requested it be enabled, turn on any required power resources
3601     * and set _PSW.
3602     */
3603    if (sstate > prw.lowest_wake) {
3604	AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE);
3605	if (bootverbose)
3606	    device_printf(dev, "wake_prep disabled wake for %s (S%d)\n",
3607		acpi_name(handle), sstate);
3608    } else if (dev && (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) != 0) {
3609	acpi_pwr_wake_enable(handle, 1);
3610	acpi_SetInteger(handle, "_PSW", 1);
3611	if (bootverbose)
3612	    device_printf(dev, "wake_prep enabled for %s (S%d)\n",
3613		acpi_name(handle), sstate);
3614    }
3615
3616    return (0);
3617}
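
/*
 * Worked example (hypothetical values): if a device's _PRW reports a
 * lowest wake state of S3 (prw.lowest_wake == 3), its GPE may stay armed
 * for a transition to S1-S3, but for S4 the "sstate > prw.lowest_wake"
 * test above disables the GPE for the duration of the sleep;
 * acpi_wake_run_prep() then re-enables it on resume.
 */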
3618
3619static int
3620acpi_wake_run_prep(ACPI_HANDLE handle, int sstate)
3621{
3622    struct acpi_prw_data prw;
3623    device_t dev;
3624
3625    /*
3626     * Check that this is a wake-capable device and get its GPE.  Return
3627     * now if the user didn't enable this device for wake.
3628     */
3629    if (acpi_parse_prw(handle, &prw) != 0)
3630	return (ENXIO);
3631    dev = acpi_get_device(handle);
3632    if (dev == NULL || (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) == 0)
3633	return (0);
3634
3635    /*
3636     * If this GPE couldn't be enabled for the previous sleep state, it was
3637     * disabled before going to sleep so re-enable it.  If it was enabled,
3638     * clear _PSW and turn off any power resources it used.
3639     */
3640    if (sstate > prw.lowest_wake) {
3641	AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_ENABLE);
3642	if (bootverbose)
3643	    device_printf(dev, "run_prep re-enabled %s\n", acpi_name(handle));
3644    } else {
3645	acpi_SetInteger(handle, "_PSW", 0);
3646	acpi_pwr_wake_enable(handle, 0);
3647	if (bootverbose)
3648	    device_printf(dev, "run_prep cleaned up for %s\n",
3649		acpi_name(handle));
3650    }
3651
3652    return (0);
3653}
3654
3655static ACPI_STATUS
3656acpi_wake_prep(ACPI_HANDLE handle, UINT32 level, void *context, void **status)
3657{
3658    int sstate;
3659
3660    /* If suspending, run the sleep prep function, otherwise wake. */
3661    sstate = *(int *)context;
3662    if (AcpiGbl_SystemAwakeAndRunning)
3663	acpi_wake_sleep_prep(handle, sstate);
3664    else
3665	acpi_wake_run_prep(handle, sstate);
3666    return (AE_OK);
3667}
3668
3669/* Walk the tree rooted at acpi0 to prep devices for suspend/resume. */
3670static int
3671acpi_wake_prep_walk(int sstate)
3672{
3673    ACPI_HANDLE sb_handle;
3674
3675    if (ACPI_SUCCESS(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle)))
3676	AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle, 100,
3677	    acpi_wake_prep, NULL, &sstate, NULL);
3678    return (0);
3679}
3680
3681/* Walk the tree rooted at acpi0 to attach per-device wake sysctls. */
3682static int
3683acpi_wake_sysctl_walk(device_t dev)
3684{
3685    int error, i, numdevs;
3686    device_t *devlist;
3687    device_t child;
3688    ACPI_STATUS status;
3689
3690    error = device_get_children(dev, &devlist, &numdevs);
3691    if (error != 0 || numdevs == 0) {
3692	if (numdevs == 0)
3693	    free(devlist, M_TEMP);
3694	return (error);
3695    }
3696    for (i = 0; i < numdevs; i++) {
3697	child = devlist[i];
3698	acpi_wake_sysctl_walk(child);
3699	if (!device_is_attached(child))
3700	    continue;
3701	status = AcpiEvaluateObject(acpi_get_handle(child), "_PRW", NULL, NULL);
3702	if (ACPI_SUCCESS(status)) {
3703	    SYSCTL_ADD_PROC(device_get_sysctl_ctx(child),
3704		SYSCTL_CHILDREN(device_get_sysctl_tree(child)), OID_AUTO,
3705		"wake", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, child, 0,
3706		acpi_wake_set_sysctl, "I", "Device set to wake the system");
3707	}
3708    }
3709    free(devlist, M_TEMP);
3710
3711    return (0);
3712}
3713
3714/* Enable or disable wake from userland. */
3715static int
3716acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS)
3717{
3718    int enable, error;
3719    device_t dev;
3720
3721    dev = (device_t)arg1;
3722    enable = (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) ? 1 : 0;
3723
3724    error = sysctl_handle_int(oidp, &enable, 0, req);
3725    if (error != 0 || req->newptr == NULL)
3726	return (error);
3727    if (enable != 0 && enable != 1)
3728	return (EINVAL);
3729
3730    return (acpi_wake_set_enable(dev, enable));
3731}
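
/*
 * From userland the same toggle is reached through the per-device "wake"
 * sysctl created in acpi_wake_sysctl_walk() above.  A hypothetical example
 * (driver and unit names are placeholders):
 *
 *	# sysctl dev.DRIVER.UNIT.wake=1
 *
 * which ends up calling acpi_wake_set_enable() for that device.
 */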
3732
3733/* Parse a device's _PRW into a structure. */
3734int
3735acpi_parse_prw(ACPI_HANDLE h, struct acpi_prw_data *prw)
3736{
3737    ACPI_STATUS			status;
3738    ACPI_BUFFER			prw_buffer;
3739    ACPI_OBJECT			*res, *res2;
3740    int				error, i, power_count;
3741
3742    if (h == NULL || prw == NULL)
3743	return (EINVAL);
3744
3745    /*
3746     * The _PRW object (7.2.9) is only required for devices that have the
3747     * ability to wake the system from a sleeping state.
3748     */
3749    error = EINVAL;
3750    prw_buffer.Pointer = NULL;
3751    prw_buffer.Length = ACPI_ALLOCATE_BUFFER;
3752    status = AcpiEvaluateObject(h, "_PRW", NULL, &prw_buffer);
3753    if (ACPI_FAILURE(status))
3754	return (ENOENT);
3755    res = (ACPI_OBJECT *)prw_buffer.Pointer;
3756    if (res == NULL)
3757	return (ENOENT);
3758    if (!ACPI_PKG_VALID(res, 2))
3759	goto out;
3760
3761    /*
3762     * Element 1 of the _PRW object:
3763     * The lowest power system sleeping state that can be entered while still
3764     * providing wake functionality.  The sleeping state being entered must
3765     * be less than (i.e., higher power) or equal to this value.
3766     */
3767    if (acpi_PkgInt32(res, 1, &prw->lowest_wake) != 0)
3768	goto out;
3769
3770    /*
3771     * Element 0 of the _PRW object:
3772     */
3773    switch (res->Package.Elements[0].Type) {
3774    case ACPI_TYPE_INTEGER:
3775	/*
3776	 * If the data type of this package element is numeric, then this
3777	 * _PRW package element is the bit index in the GPEx_EN, in the
3778	 * GPE blocks described in the FADT, of the enable bit that is
3779	 * enabled for the wake event.
3780	 */
3781	prw->gpe_handle = NULL;
3782	prw->gpe_bit = res->Package.Elements[0].Integer.Value;
3783	error = 0;
3784	break;
3785    case ACPI_TYPE_PACKAGE:
3786	/*
3787	 * If the data type of this package element is a package, then this
3788	 * _PRW package element is itself a package containing two
3789	 * elements.  The first is an object reference to the GPE Block
3790	 * device that contains the GPE that will be triggered by the wake
3791	 * event.  The second element is numeric and it contains the bit
3792	 * index in the GPEx_EN, in the GPE Block referenced by the
3793	 * first element in the package, of the enable bit that is enabled for
3794	 * the wake event.
3795	 *
3796	 * For example, if this field is a package then it is of the form:
3797	 * Package() {\_SB.PCI0.ISA.GPE, 2}
3798	 */
3799	res2 = &res->Package.Elements[0];
3800	if (!ACPI_PKG_VALID(res2, 2))
3801	    goto out;
3802	prw->gpe_handle = acpi_GetReference(NULL, &res2->Package.Elements[0]);
3803	if (prw->gpe_handle == NULL)
3804	    goto out;
3805	if (acpi_PkgInt32(res2, 1, &prw->gpe_bit) != 0)
3806	    goto out;
3807	error = 0;
3808	break;
3809    default:
3810	goto out;
3811    }
3812
3813    /* Elements 2 to N of the _PRW object are power resources. */
3814    power_count = res->Package.Count - 2;
3815    if (power_count > ACPI_PRW_MAX_POWERRES) {
3816	printf("ACPI device %s has too many power resources\n", acpi_name(h));
3817	power_count = 0;
3818    }
3819    prw->power_res_count = power_count;
3820    for (i = 0; i < power_count; i++)
3821	prw->power_res[i] = res->Package.Elements[i];
3822
3823out:
3824    if (prw_buffer.Pointer != NULL)
3825	AcpiOsFree(prw_buffer.Pointer);
3826    return (error);
3827}
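
/*
 * To illustrate the mapping above (a hypothetical ASL fragment, not taken
 * from any particular firmware):
 *
 *	Name (_PRW, Package () { 0x0B, 0x03, PWRR })
 *
 * would be parsed into prw->gpe_handle == NULL, prw->gpe_bit == 0x0B (a
 * bit index in the FADT-described GPE blocks), prw->lowest_wake == 3 (S3),
 * prw->power_res_count == 1 and prw->power_res[0] referencing the power
 * resource PWRR.
 */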
3828
3829/*
3830 * ACPI Event Handlers
3831 */
3832
3833/* System Event Handlers (registered by EVENTHANDLER_REGISTER) */
3834
3835static void
3836acpi_system_eventhandler_sleep(void *arg, int state)
3837{
3838    struct acpi_softc *sc = (struct acpi_softc *)arg;
3839    int ret;
3840
3841    ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
3842
3843    /* Check if button action is disabled or unknown. */
3844    if (state == ACPI_STATE_UNKNOWN)
3845	return;
3846
3847    /* Request that the system prepare to enter the given suspend state. */
3848    ret = acpi_ReqSleepState(sc, state);
3849    if (ret != 0)
3850	device_printf(sc->acpi_dev,
3851	    "request to enter state S%d failed (err %d)\n", state, ret);
3852
3853    return_VOID;
3854}
3855
3856static void
3857acpi_system_eventhandler_wakeup(void *arg, int state)
3858{
3859
3860    ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
3861
3862    /* Currently, nothing to do for wakeup. */
3863
3864    return_VOID;
3865}
3866
3867/*
3868 * ACPICA Event Handlers (FixedEvent, also called from button notify handler)
3869 */
3870static void
3871acpi_invoke_sleep_eventhandler(void *context)
3872{
3873
3874    EVENTHANDLER_INVOKE(acpi_sleep_event, *(int *)context);
3875}
3876
3877static void
3878acpi_invoke_wake_eventhandler(void *context)
3879{
3880
3881    EVENTHANDLER_INVOKE(acpi_wakeup_event, *(int *)context);
3882}
3883
3884UINT32
3885acpi_event_power_button_sleep(void *context)
3886{
3887    struct acpi_softc	*sc = (struct acpi_softc *)context;
3888
3889    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
3890
3891    if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
3892	acpi_invoke_sleep_eventhandler, &sc->acpi_power_button_sx)))
3893	return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
3894    return_VALUE (ACPI_INTERRUPT_HANDLED);
3895}
3896
3897UINT32
3898acpi_event_power_button_wake(void *context)
3899{
3900    struct acpi_softc	*sc = (struct acpi_softc *)context;
3901
3902    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
3903
3904    if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
3905	acpi_invoke_wake_eventhandler, &sc->acpi_power_button_sx)))
3906	return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
3907    return_VALUE (ACPI_INTERRUPT_HANDLED);
3908}
3909
3910UINT32
3911acpi_event_sleep_button_sleep(void *context)
3912{
3913    struct acpi_softc	*sc = (struct acpi_softc *)context;
3914
3915    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
3916
3917    if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
3918	acpi_invoke_sleep_eventhandler, &sc->acpi_sleep_button_sx)))
3919	return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
3920    return_VALUE (ACPI_INTERRUPT_HANDLED);
3921}
3922
3923UINT32
3924acpi_event_sleep_button_wake(void *context)
3925{
3926    struct acpi_softc	*sc = (struct acpi_softc *)context;
3927
3928    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
3929
3930    if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
3931	acpi_invoke_wake_eventhandler, &sc->acpi_sleep_button_sx)))
3932	return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
3933    return_VALUE (ACPI_INTERRUPT_HANDLED);
3934}
3935
3936/*
3937 * XXX This static buffer is suboptimal.  There is no locking so only
3938 * use this for single-threaded callers.
3939 */
3940char *
3941acpi_name(ACPI_HANDLE handle)
3942{
3943    ACPI_BUFFER buf;
3944    static char data[256];
3945
3946    buf.Length = sizeof(data);
3947    buf.Pointer = data;
3948
3949    if (handle && ACPI_SUCCESS(AcpiGetName(handle, ACPI_FULL_PATHNAME, &buf)))
3950	return (data);
3951    return ("(unknown)");
3952}
3953
3954/*
3955 * Debugging/bug-avoidance.  Avoid trying to fetch info on various
3956 * parts of the namespace.
3957 */
3958int
3959acpi_avoid(ACPI_HANDLE handle)
3960{
3961    char	*cp, *env, *np;
3962    int		len;
3963
3964    np = acpi_name(handle);
3965    if (*np == '\\')
3966	np++;
3967    if ((env = kern_getenv("debug.acpi.avoid")) == NULL)
3968	return (0);
3969
3970    /* Scan the avoid list checking for a match */
3971    cp = env;
3972    for (;;) {
3973	while (*cp != 0 && isspace(*cp))
3974	    cp++;
3975	if (*cp == 0)
3976	    break;
3977	len = 0;
3978	while (cp[len] != 0 && !isspace(cp[len]))
3979	    len++;
3980	if (!strncmp(cp, np, len)) {
3981	    freeenv(env);
3982	    return(1);
3983	}
3984	cp += len;
3985    }
3986    freeenv(env);
3987
3988    return (0);
3989}
3990
3991/*
3992 * Debugging/bug-avoidance.  Disable ACPI subsystem components.
3993 */
3994int
3995acpi_disabled(char *subsys)
3996{
3997    char	*cp, *env;
3998    int		len;
3999
4000    if ((env = kern_getenv("debug.acpi.disabled")) == NULL)
4001	return (0);
4002    if (strcmp(env, "all") == 0) {
4003	freeenv(env);
4004	return (1);
4005    }
4006
4007    /* Scan the disable list, checking for a match. */
4008    cp = env;
4009    for (;;) {
4010	while (*cp != '\0' && isspace(*cp))
4011	    cp++;
4012	if (*cp == '\0')
4013	    break;
4014	len = 0;
4015	while (cp[len] != '\0' && !isspace(cp[len]))
4016	    len++;
4017	if (strncmp(cp, subsys, len) == 0) {
4018	    freeenv(env);
4019	    return (1);
4020	}
4021	cp += len;
4022    }
4023    freeenv(env);
4024
4025    return (0);
4026}
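
/*
 * Both knobs are plain kernel environment variables and can be set from
 * loader.conf(5).  Hypothetical examples (values chosen for illustration
 * only):
 *
 *	debug.acpi.disabled="all"
 *	debug.acpi.avoid="_SB_.PCI0.LPC0"
 *
 * acpi_disabled() accepts a space-separated list of subsystem names (or
 * "all"), and acpi_avoid() a space-separated list of namespace paths,
 * each compared against the handle's name with the leading backslash
 * stripped.
 */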
4027
4028static void
4029acpi_lookup(void *arg, const char *name, device_t *dev)
4030{
4031    ACPI_HANDLE handle;
4032
4033    if (*dev != NULL)
4034	return;
4035
4036    /*
4037     * Allow any handle name that is specified as an absolute path and
4038     * starts with '\'.  We could restrict this to \_SB and friends,
4039     * but see acpi_probe_children() for notes on why we scan the entire
4040     * namespace for devices.
4041     *
4042     * XXX: The pathname argument to AcpiGetHandle() should be fixed to
4043     * be const.
4044     */
4045    if (name[0] != '\\')
4046	return;
4047    if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, __DECONST(char *, name),
4048	&handle)))
4049	return;
4050    *dev = acpi_get_device(handle);
4051}
4052
4053/*
4054 * Control interface.
4055 *
4056 * We multiplex ioctls for all participating ACPI devices here.  Individual
4057 * drivers wanting to be accessible via /dev/acpi should use the
4058 * register/deregister interface to make their handlers visible.
4059 */
4060struct acpi_ioctl_hook
4061{
4062    TAILQ_ENTRY(acpi_ioctl_hook) link;
4063    u_long			 cmd;
4064    acpi_ioctl_fn		 fn;
4065    void			 *arg;
4066};
4067
4068static TAILQ_HEAD(,acpi_ioctl_hook)	acpi_ioctl_hooks;
4069static int				acpi_ioctl_hooks_initted;
4070
4071int
4072acpi_register_ioctl(u_long cmd, acpi_ioctl_fn fn, void *arg)
4073{
4074    struct acpi_ioctl_hook	*hp;
4075
4076    if ((hp = malloc(sizeof(*hp), M_ACPIDEV, M_NOWAIT)) == NULL)
4077	return (ENOMEM);
4078    hp->cmd = cmd;
4079    hp->fn = fn;
4080    hp->arg = arg;
4081
4082    ACPI_LOCK(acpi);
4083    if (acpi_ioctl_hooks_initted == 0) {
4084	TAILQ_INIT(&acpi_ioctl_hooks);
4085	acpi_ioctl_hooks_initted = 1;
4086    }
4087    TAILQ_INSERT_TAIL(&acpi_ioctl_hooks, hp, link);
4088    ACPI_UNLOCK(acpi);
4089
4090    return (0);
4091}
4092
4093void
4094acpi_deregister_ioctl(u_long cmd, acpi_ioctl_fn fn)
4095{
4096    struct acpi_ioctl_hook	*hp;
4097
4098    ACPI_LOCK(acpi);
4099    TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link)
4100	if (hp->cmd == cmd && hp->fn == fn)
4101	    break;
4102
4103    if (hp != NULL) {
4104	TAILQ_REMOVE(&acpi_ioctl_hooks, hp, link);
4105	free(hp, M_ACPIDEV);
4106    }
4107    ACPI_UNLOCK(acpi);
4108}
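
/*
 * A registration sketch (the command and handler names here are
 * hypothetical): a driver that wants to service its own /dev/acpi ioctl
 * would provide a handler matching acpi_ioctl_fn and hook it up roughly
 * as follows.
 *
 *	static int
 *	foo_ioctl(u_long cmd, caddr_t addr, void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *		...
 *		return (0);
 *	}
 *
 *	acpi_register_ioctl(ACPIIO_FOO_CMD, foo_ioctl, sc);
 *
 * and undo it with acpi_deregister_ioctl(ACPIIO_FOO_CMD, foo_ioctl) at
 * detach time.  The handler is called from acpiioctl() below when the
 * command number matches.
 */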
4109
4110static int
4111acpiopen(struct cdev *dev, int flag, int fmt, struct thread *td)
4112{
4113    return (0);
4114}
4115
4116static int
4117acpiclose(struct cdev *dev, int flag, int fmt, struct thread *td)
4118{
4119    return (0);
4120}
4121
4122static int
4123acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
4124{
4125    struct acpi_softc		*sc;
4126    struct acpi_ioctl_hook	*hp;
4127    int				error, state;
4128
4129    error = 0;
4130    hp = NULL;
4131    sc = dev->si_drv1;
4132
4133    /*
4134     * Scan the list of registered ioctls, looking for handlers.
4135     */
4136    ACPI_LOCK(acpi);
4137    if (acpi_ioctl_hooks_initted)
4138	TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) {
4139	    if (hp->cmd == cmd)
4140		break;
4141	}
4142    ACPI_UNLOCK(acpi);
4143    if (hp)
4144	return (hp->fn(cmd, addr, hp->arg));
4145
    /*
     * Core ioctls are not permitted unless the device was opened for
     * writing.  The hook ioctls handled above currently just fetch
     * information and do not change system behavior, so they are not
     * subject to this check.
     */
4151    if ((flag & FWRITE) == 0)
4152	return (EPERM);
4153
4154    /* Core system ioctls. */
4155    switch (cmd) {
4156    case ACPIIO_REQSLPSTATE:
4157	state = *(int *)addr;
4158	if (state != ACPI_STATE_S5)
4159	    return (acpi_ReqSleepState(sc, state));
4160	device_printf(sc->acpi_dev, "power off via acpi ioctl not supported\n");
4161	error = EOPNOTSUPP;
4162	break;
4163    case ACPIIO_ACKSLPSTATE:
4164	error = *(int *)addr;
4165	error = acpi_AckSleepState(sc->acpi_clone, error);
4166	break;
4167    case ACPIIO_SETSLPSTATE:	/* DEPRECATED */
4168	state = *(int *)addr;
4169	if (state < ACPI_STATE_S0 || state > ACPI_S_STATES_MAX)
4170	    return (EINVAL);
4171	if (!acpi_sleep_states[state])
4172	    return (EOPNOTSUPP);
4173	if (ACPI_FAILURE(acpi_SetSleepState(sc, state)))
4174	    error = ENXIO;
4175	break;
4176    default:
4177	error = ENXIO;
4178	break;
4179    }
4180
4181    return (error);
4182}
4183
4184static int
4185acpi_sname2sstate(const char *sname)
4186{
4187    int sstate;
4188
4189    if (toupper(sname[0]) == 'S') {
4190	sstate = sname[1] - '0';
4191	if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5 &&
4192	    sname[2] == '\0')
4193	    return (sstate);
4194    } else if (strcasecmp(sname, "NONE") == 0)
4195	return (ACPI_STATE_UNKNOWN);
4196    return (-1);
4197}
4198
4199static const char *
4200acpi_sstate2sname(int sstate)
4201{
4202    static const char *snames[] = { "S0", "S1", "S2", "S3", "S4", "S5" };
4203
4204    if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5)
4205	return (snames[sstate]);
4206    else if (sstate == ACPI_STATE_UNKNOWN)
4207	return ("NONE");
4208    return (NULL);
4209}
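
/*
 * Examples of the mapping implemented by the two helpers above:
 * "S3" <-> ACPI_STATE_S3, "S5" <-> ACPI_STATE_S5, and the special name
 * "NONE" <-> ACPI_STATE_UNKNOWN.  acpi_sname2sstate() returns -1 for any
 * other string, and acpi_sstate2sname() returns NULL for any other state.
 */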
4210
4211static int
4212acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
4213{
4214    int error;
4215    struct sbuf sb;
4216    UINT8 state;
4217
4218    sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
4219    for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
4220	if (acpi_sleep_states[state])
4221	    sbuf_printf(&sb, "%s ", acpi_sstate2sname(state));
4222    sbuf_trim(&sb);
4223    sbuf_finish(&sb);
4224    error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
4225    sbuf_delete(&sb);
4226    return (error);
4227}
4228
4229static int
4230acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
4231{
4232    char sleep_state[10];
4233    int error, new_state, old_state;
4234
4235    old_state = *(int *)oidp->oid_arg1;
4236    strlcpy(sleep_state, acpi_sstate2sname(old_state), sizeof(sleep_state));
4237    error = sysctl_handle_string(oidp, sleep_state, sizeof(sleep_state), req);
4238    if (error == 0 && req->newptr != NULL) {
4239	new_state = acpi_sname2sstate(sleep_state);
4240	if (new_state < ACPI_STATE_S1)
4241	    return (EINVAL);
4242	if (new_state < ACPI_S_STATE_COUNT && !acpi_sleep_states[new_state])
4243	    return (EOPNOTSUPP);
4244	if (new_state != old_state)
4245	    *(int *)oidp->oid_arg1 = new_state;
4246    }
4247    return (error);
4248}
4249
4250/* Inform devctl(4) when we receive a Notify. */
4251void
4252acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify)
4253{
4254    char		notify_buf[16];
4255    ACPI_BUFFER		handle_buf;
4256    ACPI_STATUS		status;
4257
4258    if (subsystem == NULL)
4259	return;
4260
4261    handle_buf.Pointer = NULL;
4262    handle_buf.Length = ACPI_ALLOCATE_BUFFER;
4263    status = AcpiNsHandleToPathname(h, &handle_buf, FALSE);
4264    if (ACPI_FAILURE(status))
4265	return;
4266    snprintf(notify_buf, sizeof(notify_buf), "notify=0x%02x", notify);
4267    devctl_notify("ACPI", subsystem, handle_buf.Pointer, notify_buf);
4268    AcpiOsFree(handle_buf.Pointer);
4269}
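
/*
 * For example (hypothetical values), a battery driver reacting to a
 * status-change Notify of 0x80 could call
 *
 *	acpi_UserNotify("CMBAT", handle, 0x80);
 *
 * which devctl(4) consumers such as devd(8) then see as an event with
 * system "ACPI", subsystem "CMBAT", the handle's full pathname, and the
 * string "notify=0x80".
 */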
4270
4271#ifdef ACPI_DEBUG
4272/*
4273 * Support for parsing debug options from the kernel environment.
4274 *
4275 * Bits may be set in the AcpiDbgLayer and AcpiDbgLevel debug registers
4276 * by specifying the names of the bits in the debug.acpi.layer and
4277 * debug.acpi.level environment variables.  Bits may be unset by
4278 * prefixing the bit name with !.
4279 */
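
/*
 * Hypothetical loader.conf(5) examples using the tag names from the
 * tables below:
 *
 *	debug.acpi.layer="ACPI_EC ACPI_BUS"
 *	debug.acpi.level="ACPI_LV_INIT !ACPI_LV_INFO"
 *
 * The first enables debug output for the EC and bus components only; the
 * second sets the init-time message bit and explicitly clears the
 * informational one.
 */
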
4280struct debugtag
4281{
4282    char	*name;
4283    UINT32	value;
4284};
4285
4286static struct debugtag	dbg_layer[] = {
4287    {"ACPI_UTILITIES",		ACPI_UTILITIES},
4288    {"ACPI_HARDWARE",		ACPI_HARDWARE},
4289    {"ACPI_EVENTS",		ACPI_EVENTS},
4290    {"ACPI_TABLES",		ACPI_TABLES},
4291    {"ACPI_NAMESPACE",		ACPI_NAMESPACE},
4292    {"ACPI_PARSER",		ACPI_PARSER},
4293    {"ACPI_DISPATCHER",		ACPI_DISPATCHER},
4294    {"ACPI_EXECUTER",		ACPI_EXECUTER},
4295    {"ACPI_RESOURCES",		ACPI_RESOURCES},
4296    {"ACPI_CA_DEBUGGER",	ACPI_CA_DEBUGGER},
4297    {"ACPI_OS_SERVICES",	ACPI_OS_SERVICES},
4298    {"ACPI_CA_DISASSEMBLER",	ACPI_CA_DISASSEMBLER},
4299    {"ACPI_ALL_COMPONENTS",	ACPI_ALL_COMPONENTS},
4300
4301    {"ACPI_AC_ADAPTER",		ACPI_AC_ADAPTER},
4302    {"ACPI_BATTERY",		ACPI_BATTERY},
4303    {"ACPI_BUS",		ACPI_BUS},
4304    {"ACPI_BUTTON",		ACPI_BUTTON},
4305    {"ACPI_EC", 		ACPI_EC},
4306    {"ACPI_FAN",		ACPI_FAN},
4307    {"ACPI_POWERRES",		ACPI_POWERRES},
4308    {"ACPI_PROCESSOR",		ACPI_PROCESSOR},
4309    {"ACPI_THERMAL",		ACPI_THERMAL},
4310    {"ACPI_TIMER",		ACPI_TIMER},
4311    {"ACPI_ALL_DRIVERS",	ACPI_ALL_DRIVERS},
4312    {NULL, 0}
4313};
4314
4315static struct debugtag dbg_level[] = {
4316    {"ACPI_LV_INIT",		ACPI_LV_INIT},
4317    {"ACPI_LV_DEBUG_OBJECT",	ACPI_LV_DEBUG_OBJECT},
4318    {"ACPI_LV_INFO",		ACPI_LV_INFO},
4319    {"ACPI_LV_REPAIR",		ACPI_LV_REPAIR},
4320    {"ACPI_LV_ALL_EXCEPTIONS",	ACPI_LV_ALL_EXCEPTIONS},
4321
4322    /* Trace verbosity level 1 [Standard Trace Level] */
4323    {"ACPI_LV_INIT_NAMES",	ACPI_LV_INIT_NAMES},
4324    {"ACPI_LV_PARSE",		ACPI_LV_PARSE},
4325    {"ACPI_LV_LOAD",		ACPI_LV_LOAD},
4326    {"ACPI_LV_DISPATCH",	ACPI_LV_DISPATCH},
4327    {"ACPI_LV_EXEC",		ACPI_LV_EXEC},
4328    {"ACPI_LV_NAMES",		ACPI_LV_NAMES},
4329    {"ACPI_LV_OPREGION",	ACPI_LV_OPREGION},
4330    {"ACPI_LV_BFIELD",		ACPI_LV_BFIELD},
4331    {"ACPI_LV_TABLES",		ACPI_LV_TABLES},
4332    {"ACPI_LV_VALUES",		ACPI_LV_VALUES},
4333    {"ACPI_LV_OBJECTS",		ACPI_LV_OBJECTS},
4334    {"ACPI_LV_RESOURCES",	ACPI_LV_RESOURCES},
4335    {"ACPI_LV_USER_REQUESTS",	ACPI_LV_USER_REQUESTS},
4336    {"ACPI_LV_PACKAGE",		ACPI_LV_PACKAGE},
4337    {"ACPI_LV_VERBOSITY1",	ACPI_LV_VERBOSITY1},
4338
4339    /* Trace verbosity level 2 [Function tracing and memory allocation] */
4340    {"ACPI_LV_ALLOCATIONS",	ACPI_LV_ALLOCATIONS},
4341    {"ACPI_LV_FUNCTIONS",	ACPI_LV_FUNCTIONS},
4342    {"ACPI_LV_OPTIMIZATIONS",	ACPI_LV_OPTIMIZATIONS},
4343    {"ACPI_LV_VERBOSITY2",	ACPI_LV_VERBOSITY2},
4344    {"ACPI_LV_ALL",		ACPI_LV_ALL},
4345
4346    /* Trace verbosity level 3 [Threading, I/O, and Interrupts] */
4347    {"ACPI_LV_MUTEX",		ACPI_LV_MUTEX},
4348    {"ACPI_LV_THREADS",		ACPI_LV_THREADS},
4349    {"ACPI_LV_IO",		ACPI_LV_IO},
4350    {"ACPI_LV_INTERRUPTS",	ACPI_LV_INTERRUPTS},
4351    {"ACPI_LV_VERBOSITY3",	ACPI_LV_VERBOSITY3},
4352
4353    /* Exceptionally verbose output -- also used in the global "DebugLevel"  */
4354    {"ACPI_LV_AML_DISASSEMBLE",	ACPI_LV_AML_DISASSEMBLE},
4355    {"ACPI_LV_VERBOSE_INFO",	ACPI_LV_VERBOSE_INFO},
4356    {"ACPI_LV_FULL_TABLES",	ACPI_LV_FULL_TABLES},
4357    {"ACPI_LV_EVENTS",		ACPI_LV_EVENTS},
4358    {"ACPI_LV_VERBOSE",		ACPI_LV_VERBOSE},
4359    {NULL, 0}
4360};
4361
4362static void
4363acpi_parse_debug(char *cp, struct debugtag *tag, UINT32 *flag)
4364{
4365    char	*ep;
4366    int		i, l;
4367    int		set;
4368
4369    while (*cp) {
4370	if (isspace(*cp)) {
4371	    cp++;
4372	    continue;
4373	}
4374	ep = cp;
4375	while (*ep && !isspace(*ep))
4376	    ep++;
4377	if (*cp == '!') {
4378	    set = 0;
4379	    cp++;
4380	    if (cp == ep)
4381		continue;
4382	} else {
4383	    set = 1;
4384	}
4385	l = ep - cp;
4386	for (i = 0; tag[i].name != NULL; i++) {
4387	    if (!strncmp(cp, tag[i].name, l)) {
4388		if (set)
4389		    *flag |= tag[i].value;
4390		else
4391		    *flag &= ~tag[i].value;
4392	    }
4393	}
4394	cp = ep;
4395    }
4396}
4397
4398static void
4399acpi_set_debugging(void *junk)
4400{
4401    char	*layer, *level;
4402
4403    if (cold) {
4404	AcpiDbgLayer = 0;
4405	AcpiDbgLevel = 0;
4406    }
4407
4408    layer = kern_getenv("debug.acpi.layer");
4409    level = kern_getenv("debug.acpi.level");
4410    if (layer == NULL && level == NULL)
4411	return;
4412
4413    printf("ACPI set debug");
4414    if (layer != NULL) {
4415	if (strcmp("NONE", layer) != 0)
4416	    printf(" layer '%s'", layer);
4417	acpi_parse_debug(layer, &dbg_layer[0], &AcpiDbgLayer);
4418	freeenv(layer);
4419    }
4420    if (level != NULL) {
4421	if (strcmp("NONE", level) != 0)
4422	    printf(" level '%s'", level);
4423	acpi_parse_debug(level, &dbg_level[0], &AcpiDbgLevel);
4424	freeenv(level);
4425    }
4426    printf("\n");
4427}
4428
4429SYSINIT(acpi_debugging, SI_SUB_TUNABLES, SI_ORDER_ANY, acpi_set_debugging,
4430	NULL);
4431
4432static int
4433acpi_debug_sysctl(SYSCTL_HANDLER_ARGS)
4434{
4435    int		 error, *dbg;
4436    struct	 debugtag *tag;
4437    struct	 sbuf sb;
4438    char	 temp[128];
4439
4440    if (sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND) == NULL)
4441	return (ENOMEM);
4442    if (strcmp(oidp->oid_arg1, "debug.acpi.layer") == 0) {
4443	tag = &dbg_layer[0];
4444	dbg = &AcpiDbgLayer;
4445    } else {
4446	tag = &dbg_level[0];
4447	dbg = &AcpiDbgLevel;
4448    }
4449
4450    /* Get old values if this is a get request. */
4451    ACPI_SERIAL_BEGIN(acpi);
4452    if (*dbg == 0) {
4453	sbuf_cpy(&sb, "NONE");
4454    } else if (req->newptr == NULL) {
4455	for (; tag->name != NULL; tag++) {
4456	    if ((*dbg & tag->value) == tag->value)
4457		sbuf_printf(&sb, "%s ", tag->name);
4458	}
4459    }
4460    sbuf_trim(&sb);
4461    sbuf_finish(&sb);
4462    strlcpy(temp, sbuf_data(&sb), sizeof(temp));
4463    sbuf_delete(&sb);
4464
4465    error = sysctl_handle_string(oidp, temp, sizeof(temp), req);
4466
    /* On a successful set request, store the new value and re-apply it. */
4468    if (error == 0 && req->newptr != NULL) {
4469	*dbg = 0;
4470	kern_setenv((char *)oidp->oid_arg1, temp);
4471	acpi_set_debugging(NULL);
4472    }
4473    ACPI_SERIAL_END(acpi);
4474
4475    return (error);
4476}
4477
4478SYSCTL_PROC(_debug_acpi, OID_AUTO, layer,
4479    CTLFLAG_RW | CTLTYPE_STRING | CTLFLAG_MPSAFE, "debug.acpi.layer", 0,
4480    acpi_debug_sysctl, "A",
4481    "");
4482SYSCTL_PROC(_debug_acpi, OID_AUTO, level,
4483    CTLFLAG_RW | CTLTYPE_STRING | CTLFLAG_MPSAFE, "debug.acpi.level", 0,
4484    acpi_debug_sysctl, "A",
4485    "");
4486#endif /* ACPI_DEBUG */
4487
4488static int
4489acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS)
4490{
4491	int	error;
4492	int	old;
4493
4494	old = acpi_debug_objects;
4495	error = sysctl_handle_int(oidp, &acpi_debug_objects, 0, req);
4496	if (error != 0 || req->newptr == NULL)
4497		return (error);
4498	if (old == acpi_debug_objects || (old && acpi_debug_objects))
4499		return (0);
4500
4501	ACPI_SERIAL_BEGIN(acpi);
4502	AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE;
4503	ACPI_SERIAL_END(acpi);
4504
4505	return (0);
4506}
4507
4508static int
4509acpi_parse_interfaces(char *str, struct acpi_interface *iface)
4510{
4511	char *p;
4512	size_t len;
4513	int i, j;
4514
4515	p = str;
4516	while (isspace(*p) || *p == ',')
4517		p++;
4518	len = strlen(p);
4519	if (len == 0)
4520		return (0);
4521	p = strdup(p, M_TEMP);
4522	for (i = 0; i < len; i++)
4523		if (p[i] == ',')
4524			p[i] = '\0';
4525	i = j = 0;
4526	while (i < len)
4527		if (isspace(p[i]) || p[i] == '\0')
4528			i++;
4529		else {
4530			i += strlen(p + i) + 1;
4531			j++;
4532		}
4533	if (j == 0) {
4534		free(p, M_TEMP);
4535		return (0);
4536	}
4537	iface->data = malloc(sizeof(*iface->data) * j, M_TEMP, M_WAITOK);
4538	iface->num = j;
4539	i = j = 0;
4540	while (i < len)
4541		if (isspace(p[i]) || p[i] == '\0')
4542			i++;
4543		else {
4544			iface->data[j] = p + i;
4545			i += strlen(p + i) + 1;
4546			j++;
4547		}
4548
4549	return (j);
4550}
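
/*
 * Parsing example (hypothetical input): given the string
 * "Windows 2009, Windows 2012", acpi_parse_interfaces() duplicates it,
 * turns the ',' into a NUL, skips the separating whitespace and returns 2,
 * with iface->data[0] pointing at "Windows 2009" and iface->data[1] at
 * "Windows 2012" inside the duplicated buffer (which is why
 * acpi_free_interfaces() below frees iface->data[0]).
 */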
4551
4552static void
4553acpi_free_interfaces(struct acpi_interface *iface)
4554{
4555
4556	free(iface->data[0], M_TEMP);
4557	free(iface->data, M_TEMP);
4558}
4559
4560static void
4561acpi_reset_interfaces(device_t dev)
4562{
4563	struct acpi_interface list;
4564	ACPI_STATUS status;
4565	int i;
4566
4567	if (acpi_parse_interfaces(acpi_install_interface, &list) > 0) {
4568		for (i = 0; i < list.num; i++) {
4569			status = AcpiInstallInterface(list.data[i]);
4570			if (ACPI_FAILURE(status))
4571				device_printf(dev,
4572				    "failed to install _OSI(\"%s\"): %s\n",
4573				    list.data[i], AcpiFormatException(status));
4574			else if (bootverbose)
4575				device_printf(dev, "installed _OSI(\"%s\")\n",
4576				    list.data[i]);
4577		}
4578		acpi_free_interfaces(&list);
4579	}
4580	if (acpi_parse_interfaces(acpi_remove_interface, &list) > 0) {
4581		for (i = 0; i < list.num; i++) {
4582			status = AcpiRemoveInterface(list.data[i]);
4583			if (ACPI_FAILURE(status))
4584				device_printf(dev,
4585				    "failed to remove _OSI(\"%s\"): %s\n",
4586				    list.data[i], AcpiFormatException(status));
4587			else if (bootverbose)
4588				device_printf(dev, "removed _OSI(\"%s\")\n",
4589				    list.data[i]);
4590		}
4591		acpi_free_interfaces(&list);
4592	}
4593}
4594
4595static int
4596acpi_pm_func(u_long cmd, void *arg, ...)
4597{
4598	int	state, acpi_state;
4599	int	error;
4600	struct	acpi_softc *sc;
4601	va_list	ap;
4602
4603	error = 0;
4604	switch (cmd) {
4605	case POWER_CMD_SUSPEND:
4606		sc = (struct acpi_softc *)arg;
4607		if (sc == NULL) {
4608			error = EINVAL;
4609			goto out;
4610		}
4611
4612		va_start(ap, arg);
4613		state = va_arg(ap, int);
4614		va_end(ap);
4615
4616		switch (state) {
4617		case POWER_SLEEP_STATE_STANDBY:
4618			acpi_state = sc->acpi_standby_sx;
4619			break;
4620		case POWER_SLEEP_STATE_SUSPEND:
4621			acpi_state = sc->acpi_suspend_sx;
4622			break;
4623		case POWER_SLEEP_STATE_HIBERNATE:
4624			acpi_state = ACPI_STATE_S4;
4625			break;
4626		default:
4627			error = EINVAL;
4628			goto out;
4629		}
4630
4631		if (ACPI_FAILURE(acpi_EnterSleepState(sc, acpi_state)))
4632			error = ENXIO;
4633		break;
4634	default:
4635		error = EINVAL;
4636		goto out;
4637	}
4638
4639out:
4640	return (error);
4641}
4642
4643static void
4644acpi_pm_register(void *arg)
4645{
4646    if (!cold || resource_disabled("acpi", 0))
4647	return;
4648
4649    power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL);
4650}
4651
4652SYSINIT(power, SI_SUB_KLD, SI_ORDER_ANY, acpi_pm_register, NULL);
4653