summaryrefslogtreecommitdiffstats
path: root/sys/dev/acpica
diff options
context:
space:
mode:
Diffstat (limited to 'sys/dev/acpica')
-rw-r--r--sys/dev/acpica/Osd/OsdDebug.c114
-rw-r--r--sys/dev/acpica/Osd/OsdHardware.c120
-rw-r--r--sys/dev/acpica/Osd/OsdInterrupt.c220
-rw-r--r--sys/dev/acpica/Osd/OsdMemory.c145
-rw-r--r--sys/dev/acpica/Osd/OsdSchedule.c288
-rw-r--r--sys/dev/acpica/Osd/OsdStream.c51
-rw-r--r--sys/dev/acpica/Osd/OsdSynch.c608
-rw-r--r--sys/dev/acpica/Osd/OsdTable.c106
-rw-r--r--sys/dev/acpica/acpi.c3895
-rw-r--r--sys/dev/acpica/acpi_acad.c283
-rw-r--r--sys/dev/acpica/acpi_battery.c513
-rw-r--r--sys/dev/acpica/acpi_button.c279
-rw-r--r--sys/dev/acpica/acpi_cmbat.c483
-rw-r--r--sys/dev/acpica/acpi_cpu.c1275
-rw-r--r--sys/dev/acpica/acpi_dock.c537
-rw-r--r--sys/dev/acpica/acpi_ec.c1023
-rw-r--r--sys/dev/acpica/acpi_hpet.c861
-rw-r--r--sys/dev/acpica/acpi_hpet.h67
-rw-r--r--sys/dev/acpica/acpi_if.m224
-rw-r--r--sys/dev/acpica/acpi_isab.c130
-rw-r--r--sys/dev/acpica/acpi_lid.c200
-rw-r--r--sys/dev/acpica/acpi_package.c152
-rw-r--r--sys/dev/acpica/acpi_pci.c310
-rw-r--r--sys/dev/acpica/acpi_pci_link.c1113
-rw-r--r--sys/dev/acpica/acpi_pcib.c288
-rw-r--r--sys/dev/acpica/acpi_pcib_acpi.c562
-rw-r--r--sys/dev/acpica/acpi_pcib_pci.c154
-rw-r--r--sys/dev/acpica/acpi_pcibvar.h46
-rw-r--r--sys/dev/acpica/acpi_perf.c596
-rw-r--r--sys/dev/acpica/acpi_powerres.c757
-rw-r--r--sys/dev/acpica/acpi_quirk.c185
-rw-r--r--sys/dev/acpica/acpi_quirks497
-rw-r--r--sys/dev/acpica/acpi_resource.c711
-rw-r--r--sys/dev/acpica/acpi_smbat.c493
-rw-r--r--sys/dev/acpica/acpi_smbus.h285
-rw-r--r--sys/dev/acpica/acpi_thermal.c1195
-rw-r--r--sys/dev/acpica/acpi_throttle.c443
-rw-r--r--sys/dev/acpica/acpi_timer.c456
-rw-r--r--sys/dev/acpica/acpi_video.c1081
-rw-r--r--sys/dev/acpica/acpiio.h125
-rw-r--r--sys/dev/acpica/acpivar.h493
41 files changed, 21364 insertions, 0 deletions
diff --git a/sys/dev/acpica/Osd/OsdDebug.c b/sys/dev/acpica/Osd/OsdDebug.c
new file mode 100644
index 0000000..0547f75
--- /dev/null
+++ b/sys/dev/acpica/Osd/OsdDebug.c
@@ -0,0 +1,114 @@
+/*-
+ * Copyright (c) 2000 Michael Smith
+ * Copyright (c) 2000 BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * 6.8 : Debugging support
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_ddb.h"
+#include <sys/param.h>
+#include <sys/kdb.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <machine/bus.h>
+#include <ddb/ddb.h>
+#include <ddb/db_output.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+#include <contrib/dev/acpica/include/acdebug.h>
+
+#include <dev/acpica/acpivar.h>
+
+/*
+ * Read one line of input for the ACPICA debugger.
+ *
+ * With DDB compiled in, pull a line from the kernel debugger input,
+ * strip the trailing CR/LF, and report the number of bytes read via
+ * *BytesRead (callers may pass NULL).  Without DDB there is no input
+ * source, so fail with AE_NOT_EXIST.
+ */
+ACPI_STATUS
+AcpiOsGetLine(char *Buffer, UINT32 BufferLength, UINT32 *BytesRead)
+{
+#ifdef DDB
+ char *cp;
+
+ cp = Buffer;
+ if (db_readline(Buffer, BufferLength) > 0) {
+ /* Terminate the string at the first newline or carriage return. */
+ while (*cp != '\0' && *cp != '\n' && *cp != '\r')
+ cp++;
+ }
+ *cp = '\0';
+ if (BytesRead != NULL)
+ *BytesRead = cp - Buffer;
+ return (AE_OK);
+#else
+ /* Fixed: message previously lacked a trailing newline. */
+ printf("AcpiOsGetLine called but no input support\n");
+ return (AE_NOT_EXIST);
+#endif /* DDB */
+}
+
+/*
+ * Handle a signal raised from the AML interpreter (Fatal or Breakpoint
+ * opcode).  Fatal signals are logged; with ACPI_DEBUG both kinds drop
+ * into the kernel debugger.  Unknown signal types are rejected.
+ */
+ACPI_STATUS
+AcpiOsSignal(UINT32 Function, void *Info)
+{
+ ACPI_SIGNAL_FATAL_INFO *fatal;
+
+ switch (Function) {
+ case ACPI_SIGNAL_FATAL:
+ fatal = (ACPI_SIGNAL_FATAL_INFO *)Info;
+ /* Fixed: log message previously lacked a trailing newline. */
+ printf("ACPI fatal signal, type 0x%x code 0x%x argument 0x%x\n",
+ fatal->Type, fatal->Code, fatal->Argument);
+#ifdef ACPI_DEBUG
+ kdb_enter(KDB_WHY_ACPI, "AcpiOsSignal");
+#endif
+ break;
+
+ case ACPI_SIGNAL_BREAKPOINT:
+#ifdef ACPI_DEBUG
+ /* For breakpoints, Info is the message string from the AML. */
+ kdb_enter(KDB_WHY_ACPI, (char *)Info);
+#endif
+ break;
+
+ default:
+ return (AE_BAD_PARAMETER);
+ }
+
+ return (AE_OK);
+}
+
+#ifdef ACPI_DEBUGGER
+/*
+ * Drop into the ACPICA debugger, performing one-time initialization of
+ * the debugger core on the first call.
+ */
+void
+acpi_EnterDebugger(void)
+{
+ static int db_initialized = 0;
+ ACPI_PARSE_OBJECT obj;
+
+ if (db_initialized == 0) {
+ printf("Initialising ACPICA debugger...\n");
+ AcpiDbInitialize();
+ db_initialized = 1;
+ }
+
+ printf("Entering ACPICA debugger...\n");
+ AcpiDbUserCommands('A', &obj);
+}
+#endif /* ACPI_DEBUGGER */
diff --git a/sys/dev/acpica/Osd/OsdHardware.c b/sys/dev/acpica/Osd/OsdHardware.c
new file mode 100644
index 0000000..1761670
--- /dev/null
+++ b/sys/dev/acpica/Osd/OsdHardware.c
@@ -0,0 +1,120 @@
+/*-
+ * Copyright (c) 2000, 2001 Michael Smith
+ * Copyright (c) 2000 BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * 6.7 : Hardware Abstraction
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <contrib/dev/acpica/include/acpi.h>
+
+#include <machine/iodev.h>
+#include <machine/pci_cfgreg.h>
+
+/*
+ * ACPICA's rather gung-ho approach to hardware resource ownership is a little
+ * troublesome insofar as there is no easy way for us to know in advance
+ * exactly which I/O resources it's going to want to use.
+ *
+ * In order to deal with this, we ignore resource ownership entirely, and simply
+ * use the native I/O space accessor functionality. This is Evil, but it works.
+ */
+
+/*
+ * Read an I/O port of the given width (8, 16 or 32 bits) into *Value.
+ *
+ * Fixed: previously an unsupported Width fell through the switch and
+ * returned AE_OK with *Value left uninitialized.  Reject it instead.
+ */
+ACPI_STATUS
+AcpiOsReadPort(ACPI_IO_ADDRESS InPort, UINT32 *Value, UINT32 Width)
+{
+
+ switch (Width) {
+ case 8:
+ *Value = iodev_read_1(InPort);
+ break;
+ case 16:
+ *Value = iodev_read_2(InPort);
+ break;
+ case 32:
+ *Value = iodev_read_4(InPort);
+ break;
+ default:
+ /* Don't hand back stack garbage for an unsupported width. */
+ *Value = 0;
+ return (AE_BAD_PARAMETER);
+ }
+
+ return (AE_OK);
+}
+
+/*
+ * Write Value to an I/O port with the given width (8, 16 or 32 bits).
+ *
+ * Fixed: previously an unsupported Width silently wrote nothing and
+ * still returned AE_OK.  Reject it instead.
+ */
+ACPI_STATUS
+AcpiOsWritePort(ACPI_IO_ADDRESS OutPort, UINT32 Value, UINT32 Width)
+{
+
+ switch (Width) {
+ case 8:
+ iodev_write_1(OutPort, Value);
+ break;
+ case 16:
+ iodev_write_2(OutPort, Value);
+ break;
+ case 32:
+ iodev_write_4(OutPort, Value);
+ break;
+ default:
+ return (AE_BAD_PARAMETER);
+ }
+
+ return (AE_OK);
+}
+
+/*
+ * Read a PCI configuration register for the device identified by PciId.
+ * 64-bit accesses are not supported by the cfgreg backend; the register
+ * subsystem must be available (pci_cfgregopen) or we report AE_NOT_EXIST.
+ */
+ACPI_STATUS
+AcpiOsReadPciConfiguration(ACPI_PCI_ID *PciId, UINT32 Register, UINT64 *Value,
+ UINT32 Width)
+{
+
+ if (Width == 64)
+ return (AE_SUPPORT);
+
+ if (!pci_cfgregopen())
+ return (AE_NOT_EXIST);
+
+ /* Value is already a UINT64 *; the old (UINT64 *) cast was redundant. */
+ *Value = pci_cfgregread(PciId->Bus, PciId->Device,
+ PciId->Function, Register, Width / 8);
+
+ return (AE_OK);
+}
+
+
+/*
+ * Write a PCI configuration register for the device identified by PciId.
+ * 64-bit accesses are unsupported; the cfgreg subsystem must be open.
+ */
+ACPI_STATUS
+AcpiOsWritePciConfiguration (ACPI_PCI_ID *PciId, UINT32 Register,
+ UINT64 Value, UINT32 Width)
+{
+
+ /* The cfgreg backend handles at most 32-bit accesses. */
+ if (Width == 64)
+ return (AE_SUPPORT);
+
+ /* Make sure the config-register subsystem is usable. */
+ if (!pci_cfgregopen())
+ return (AE_NOT_EXIST);
+
+ pci_cfgregwrite(PciId->Bus, PciId->Device, PciId->Function, Register,
+ Value, Width / 8);
+
+ return (AE_OK);
+}
diff --git a/sys/dev/acpica/Osd/OsdInterrupt.c b/sys/dev/acpica/Osd/OsdInterrupt.c
new file mode 100644
index 0000000..3fb1e70
--- /dev/null
+++ b/sys/dev/acpica/Osd/OsdInterrupt.c
@@ -0,0 +1,220 @@
+/*-
+ * Copyright (c) 2000 Michael Smith
+ * Copyright (c) 2000 BSDi
+ * Copyright (c) 2011 Jung-uk Kim <jkim@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * 6.5 : Interrupt handling
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/rman.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+
+#include <dev/acpica/acpivar.h>
+
+#define _COMPONENT ACPI_OS_SERVICES
+ACPI_MODULE_NAME("INTERRUPT")
+
+static MALLOC_DEFINE(M_ACPIINTR, "acpiintr", "ACPI interrupt");
+
+/* Bookkeeping for one interrupt handler registered by ACPICA. */
+struct acpi_intr {
+ SLIST_ENTRY(acpi_intr) ai_link; /* entry on acpi_intr_list */
+ struct resource *ai_irq; /* allocated IRQ resource */
+ int ai_rid; /* resource ID on the acpi device */
+ void *ai_handle; /* cookie from bus_setup_intr() */
+ int ai_number; /* interrupt number ACPICA asked for */
+ ACPI_OSD_HANDLER ai_handler; /* ACPICA service routine */
+ void *ai_context; /* opaque argument for ai_handler */
+};
+/* All registered handlers; protected by acpi_intr_lock. */
+static SLIST_HEAD(, acpi_intr) acpi_intr_list =
+ SLIST_HEAD_INITIALIZER(acpi_intr_list);
+static struct mtx acpi_intr_lock;
+
+/* Non-zero if the MADT overrode the SCI interrupt number (0 = no override). */
+static UINT32 InterruptOverride;
+
+/* One-time setup of the lock protecting acpi_intr_list. */
+static void
+acpi_intr_init(struct mtx *lock)
+{
+
+ mtx_init(lock, "ACPI interrupt lock", NULL, MTX_DEF);
+}
+
+/* Run early in driver setup so the lock exists before any registration. */
+SYSINIT(acpi_intr, SI_SUB_DRIVERS, SI_ORDER_FIRST, acpi_intr_init,
+ &acpi_intr_lock);
+
+/*
+ * Interrupt filter glue: forward the interrupt to the ACPICA service
+ * routine recorded for this IRQ and translate its verdict into the
+ * FreeBSD filter return codes.
+ */
+static int
+acpi_intr_handler(void *arg)
+{
+ struct acpi_intr *ai = arg;
+
+ KASSERT(ai != NULL && ai->ai_handler != NULL,
+ ("invalid ACPI interrupt handler"));
+ return (ai->ai_handler(ai->ai_context) == ACPI_INTERRUPT_HANDLED ?
+ FILTER_HANDLED : FILTER_STRAY);
+}
+
+/*
+ * Tear down a (possibly partially set up) interrupt registration and
+ * free it.  Order matters: the handler must be removed before the IRQ
+ * resource is released, and the resource entry is deleted last.
+ * Each step is guarded so this also works for the error paths in
+ * AcpiOsInstallInterruptHandler() where setup stopped early.
+ */
+static void
+acpi_intr_destroy(device_t dev, struct acpi_intr *ai)
+{
+
+ if (ai->ai_handle != NULL)
+ bus_teardown_intr(dev, ai->ai_irq, ai->ai_handle);
+ if (ai->ai_irq != NULL)
+ bus_release_resource(dev, SYS_RES_IRQ, ai->ai_rid, ai->ai_irq);
+ bus_delete_resource(dev, SYS_RES_IRQ, ai->ai_rid);
+ free(ai, M_ACPIINTR);
+}
+
+/*
+ * Register an interrupt handler on behalf of ACPICA (typically the SCI).
+ * The handler is hung off the system "acpi" device, honoring any MADT
+ * SCI interrupt override recorded via acpi_OverrideInterruptLevel().
+ */
+ACPI_STATUS
+AcpiOsInstallInterruptHandler(UINT32 InterruptNumber,
+ ACPI_OSD_HANDLER ServiceRoutine, void *Context)
+{
+ struct acpi_softc *sc;
+ struct acpi_intr *ai, *ap;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ sc = devclass_get_softc(devclass_find("acpi"), 0);
+ KASSERT(sc != NULL && sc->acpi_dev != NULL,
+ ("can't find ACPI device to register interrupt"));
+
+ if (InterruptNumber > 255 || ServiceRoutine == NULL)
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
+
+ /* Allocate before taking the lock; malloc(M_WAITOK) may sleep. */
+ ai = malloc(sizeof(*ai), M_ACPIINTR, M_WAITOK | M_ZERO);
+ mtx_lock(&acpi_intr_lock);
+ /*
+ * Reject duplicates (including a request that would collide with the
+ * overridden SCI) and pick a resource ID one past the largest in use.
+ */
+ SLIST_FOREACH(ap, &acpi_intr_list, ai_link) {
+ if (InterruptNumber == ap->ai_number ||
+ (InterruptNumber == InterruptOverride &&
+ InterruptNumber != AcpiGbl_FADT.SciInterrupt)) {
+ mtx_unlock(&acpi_intr_lock);
+ free(ai, M_ACPIINTR);
+ return_ACPI_STATUS (AE_ALREADY_EXISTS);
+ }
+ if (ai->ai_rid <= ap->ai_rid)
+ ai->ai_rid = ap->ai_rid + 1;
+ }
+ ai->ai_number = InterruptNumber;
+ ai->ai_handler = ServiceRoutine;
+ ai->ai_context = Context;
+ /* Publish on the list first so concurrent installs see our rid/number. */
+ SLIST_INSERT_HEAD(&acpi_intr_list, ai, ai_link);
+ mtx_unlock(&acpi_intr_lock);
+
+ /*
+ * If the MADT contained an interrupt override directive for the SCI,
+ * we use that value instead of the one from the FADT.
+ */
+ if (InterruptOverride != 0 &&
+ InterruptNumber == AcpiGbl_FADT.SciInterrupt) {
+ device_printf(sc->acpi_dev,
+ "Overriding SCI from IRQ %u to IRQ %u\n",
+ InterruptNumber, InterruptOverride);
+ InterruptNumber = InterruptOverride;
+ }
+
+ /* Set up the interrupt resource. */
+ bus_set_resource(sc->acpi_dev, SYS_RES_IRQ, ai->ai_rid,
+ InterruptNumber, 1);
+ ai->ai_irq = bus_alloc_resource_any(sc->acpi_dev, SYS_RES_IRQ,
+ &ai->ai_rid, RF_SHAREABLE | RF_ACTIVE);
+ if (ai->ai_irq == NULL) {
+ device_printf(sc->acpi_dev, "could not allocate interrupt\n");
+ goto error;
+ }
+ if (bus_setup_intr(sc->acpi_dev, ai->ai_irq,
+ INTR_TYPE_MISC | INTR_MPSAFE, acpi_intr_handler, NULL, ai,
+ &ai->ai_handle) != 0) {
+ device_printf(sc->acpi_dev, "could not set up interrupt\n");
+ goto error;
+ }
+ return_ACPI_STATUS (AE_OK);
+
+error:
+ mtx_lock(&acpi_intr_lock);
+ SLIST_REMOVE(&acpi_intr_list, ai, acpi_intr, ai_link);
+ mtx_unlock(&acpi_intr_lock);
+ acpi_intr_destroy(sc->acpi_dev, ai);
+ /*
+ * NOTE(review): AE_ALREADY_EXISTS is a misleading status for a
+ * resource allocation/setup failure; AE_ERROR would describe these
+ * paths better.  Kept as-is to preserve the existing contract.
+ */
+ return_ACPI_STATUS (AE_ALREADY_EXISTS);
+}
+
+/*
+ * Deregister an interrupt handler previously installed by
+ * AcpiOsInstallInterruptHandler().  Both the interrupt number and the
+ * service routine must match the registered entry.
+ */
+ACPI_STATUS
+AcpiOsRemoveInterruptHandler(UINT32 InterruptNumber,
+ ACPI_OSD_HANDLER ServiceRoutine)
+{
+ struct acpi_softc *sc;
+ struct acpi_intr *ai;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ sc = devclass_get_softc(devclass_find("acpi"), 0);
+ KASSERT(sc != NULL && sc->acpi_dev != NULL,
+ ("can't find ACPI device to deregister interrupt"));
+
+ if (InterruptNumber > 255 || ServiceRoutine == NULL)
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
+ mtx_lock(&acpi_intr_lock);
+ /* Unlink under the lock; actual teardown happens after unlock. */
+ SLIST_FOREACH(ai, &acpi_intr_list, ai_link)
+ if (InterruptNumber == ai->ai_number) {
+ if (ServiceRoutine != ai->ai_handler) {
+ mtx_unlock(&acpi_intr_lock);
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
+ }
+ SLIST_REMOVE(&acpi_intr_list, ai, acpi_intr, ai_link);
+ break;
+ }
+ mtx_unlock(&acpi_intr_lock);
+ /* SLIST_FOREACH leaves ai == NULL if the loop ran off the end. */
+ if (ai == NULL)
+ return_ACPI_STATUS (AE_NOT_EXIST);
+ acpi_intr_destroy(sc->acpi_dev, ai);
+ return_ACPI_STATUS (AE_OK);
+}
+
+/*
+ * Record an MADT interrupt override for the SCI.  Only one override may
+ * be recorded for the lifetime of the system; later attempts fail.
+ */
+ACPI_STATUS
+acpi_OverrideInterruptLevel(UINT32 InterruptNumber)
+{
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ if (InterruptOverride == 0) {
+ InterruptOverride = InterruptNumber;
+ return_ACPI_STATUS (AE_OK);
+ }
+ return_ACPI_STATUS (AE_ALREADY_EXISTS);
+}
diff --git a/sys/dev/acpica/Osd/OsdMemory.c b/sys/dev/acpica/Osd/OsdMemory.c
new file mode 100644
index 0000000..b806642
--- /dev/null
+++ b/sys/dev/acpica/Osd/OsdMemory.c
@@ -0,0 +1,145 @@
+/*-
+ * Copyright (c) 2000 Mitsaru Iwasaki
+ * Copyright (c) 2000 Michael Smith
+ * Copyright (c) 2000 BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * 6.2 : Memory Management
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <contrib/dev/acpica/include/acpi.h>
+
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+static MALLOC_DEFINE(M_ACPICA, "acpica", "ACPI CA memory pool");
+
+/*
+ * Allocate Size bytes for ACPICA from the M_ACPICA pool.  Uses M_NOWAIT
+ * and may therefore return NULL, which ACPICA handles.
+ */
+void *
+AcpiOsAllocate(ACPI_SIZE Size)
+{
+ return (malloc(Size, M_ACPICA, M_NOWAIT));
+}
+
+/* Release a buffer previously obtained from AcpiOsAllocate(). */
+void
+AcpiOsFree(void *Memory)
+{
+ free(Memory, M_ACPICA);
+}
+
+/* Map Length bytes of physical memory (e.g. ACPI tables) into KVA. */
+void *
+AcpiOsMapMemory(ACPI_PHYSICAL_ADDRESS PhysicalAddress, ACPI_SIZE Length)
+{
+ return (pmap_mapbios((vm_offset_t)PhysicalAddress, Length));
+}
+
+/* Undo a mapping created by AcpiOsMapMemory(). */
+void
+AcpiOsUnmapMemory(void *LogicalAddress, ACPI_SIZE Length)
+{
+ pmap_unmapbios((vm_offset_t)LogicalAddress, Length);
+}
+
+/*
+ * Translate a kernel virtual address to a physical one for ACPICA.
+ * There is no general, safe way to do this here, so always fail.
+ */
+ACPI_STATUS
+AcpiOsGetPhysicalAddress(void *LogicalAddress,
+ ACPI_PHYSICAL_ADDRESS *PhysicalAddress)
+{
+ return (AE_BAD_ADDRESS);
+}
+
+/*
+ * Memory accessibility probes.  The kernel has no cheap way to test a
+ * mapping here, so optimistically claim everything is accessible.
+ */
+BOOLEAN
+AcpiOsReadable (void *Pointer, ACPI_SIZE Length)
+{
+ return (TRUE);
+}
+
+BOOLEAN
+AcpiOsWritable (void *Pointer, ACPI_SIZE Length)
+{
+ return (TRUE);
+}
+
+/*
+ * Read Width bits (8/16/32/64) from physical memory into *Value via a
+ * temporary device mapping.
+ *
+ * Fixed: an unsupported Width previously mapped Width/8 (possibly 0)
+ * bytes, read nothing, and returned AE_OK with *Value uninitialized.
+ * Validate the width before mapping instead.
+ */
+ACPI_STATUS
+AcpiOsReadMemory(ACPI_PHYSICAL_ADDRESS Address, UINT64 *Value, UINT32 Width)
+{
+ void *LogicalAddress;
+
+ if (Width != 8 && Width != 16 && Width != 32 && Width != 64)
+ return (AE_BAD_PARAMETER);
+
+ LogicalAddress = pmap_mapdev(Address, Width / 8);
+ if (LogicalAddress == NULL)
+ return (AE_NOT_EXIST);
+
+ switch (Width) {
+ case 8:
+ *Value = *(volatile uint8_t *)LogicalAddress;
+ break;
+ case 16:
+ *Value = *(volatile uint16_t *)LogicalAddress;
+ break;
+ case 32:
+ *Value = *(volatile uint32_t *)LogicalAddress;
+ break;
+ case 64:
+ *Value = *(volatile uint64_t *)LogicalAddress;
+ break;
+ }
+
+ pmap_unmapdev((vm_offset_t)LogicalAddress, Width / 8);
+
+ return (AE_OK);
+}
+
+/*
+ * Write Width bits (8/16/32/64) of Value to physical memory via a
+ * temporary device mapping.
+ *
+ * Fixed: an unsupported Width previously mapped Width/8 (possibly 0)
+ * bytes, wrote nothing, and returned AE_OK.  Validate the width first.
+ */
+ACPI_STATUS
+AcpiOsWriteMemory(ACPI_PHYSICAL_ADDRESS Address, UINT64 Value, UINT32 Width)
+{
+ void *LogicalAddress;
+
+ if (Width != 8 && Width != 16 && Width != 32 && Width != 64)
+ return (AE_BAD_PARAMETER);
+
+ LogicalAddress = pmap_mapdev(Address, Width / 8);
+ if (LogicalAddress == NULL)
+ return (AE_NOT_EXIST);
+
+ switch (Width) {
+ case 8:
+ *(volatile uint8_t *)LogicalAddress = Value;
+ break;
+ case 16:
+ *(volatile uint16_t *)LogicalAddress = Value;
+ break;
+ case 32:
+ *(volatile uint32_t *)LogicalAddress = Value;
+ break;
+ case 64:
+ *(volatile uint64_t *)LogicalAddress = Value;
+ break;
+ }
+
+ pmap_unmapdev((vm_offset_t)LogicalAddress, Width / 8);
+
+ return (AE_OK);
+}
diff --git a/sys/dev/acpica/Osd/OsdSchedule.c b/sys/dev/acpica/Osd/OsdSchedule.c
new file mode 100644
index 0000000..6d517cb
--- /dev/null
+++ b/sys/dev/acpica/Osd/OsdSchedule.c
@@ -0,0 +1,288 @@
+/*-
+ * Copyright (c) 2000 Michael Smith
+ * Copyright (c) 2000 BSDi
+ * Copyright (c) 2007-2012 Jung-uk Kim <jkim@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * 6.3 : Scheduling services
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_acpi.h"
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/interrupt.h>
+#include <sys/kernel.h>
+#include <sys/kthread.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/taskqueue.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+
+#include <dev/acpica/acpivar.h>
+
+#define _COMPONENT ACPI_OS_SERVICES
+ACPI_MODULE_NAME("SCHEDULE")
+
+/*
+ * Allow the user to tune the maximum number of tasks we may enqueue.
+ */
+static int acpi_max_tasks = ACPI_MAX_TASKS;
+TUNABLE_INT("debug.acpi.max_tasks", &acpi_max_tasks);
+
+/*
+ * Allow the user to tune the number of task threads we start. It seems
+ * some systems have problems with increased parallelism.
+ */
+static int acpi_max_threads = ACPI_MAX_THREADS;
+TUNABLE_INT("debug.acpi.max_threads", &acpi_max_threads);
+
+static MALLOC_DEFINE(M_ACPITASK, "acpitask", "ACPI deferred task");
+
+/* One slot in the preallocated acpi_tasks[] array. */
+struct acpi_task_ctx {
+ struct task at_task; /* taskqueue(9) task descriptor */
+ ACPI_OSD_EXEC_CALLBACK at_function; /* ACPICA callback to run */
+ void *at_context; /* opaque argument for at_function */
+ int at_flag; /* slot state, managed atomically */
+#define ACPI_TASK_FREE 0
+#define ACPI_TASK_USED 1
+#define ACPI_TASK_ENQUEUED 2
+};
+
+struct taskqueue *acpi_taskq;
+static struct acpi_task_ctx *acpi_tasks; /* array of acpi_max_tasks slots */
+static int acpi_task_count; /* tasks claimed but not yet completed */
+static int acpi_taskq_started; /* set once acpi_taskq_init() has run */
+
+/*
+ * Preallocate some memory for tasks early enough.
+ * malloc(9) cannot be used with spin lock held.
+ */
+static void
+acpi_task_init(void *arg)
+{
+
+ /* Zeroed allocation leaves every slot in the ACPI_TASK_FREE state. */
+ acpi_tasks = malloc(sizeof(*acpi_tasks) * acpi_max_tasks, M_ACPITASK,
+ M_WAITOK | M_ZERO);
+}
+
+SYSINIT(acpi_tasks, SI_SUB_DRIVERS, SI_ORDER_FIRST, acpi_task_init, NULL);
+
+/*
+ * Initialize ACPI task queue.
+ */
+static void
+acpi_taskq_init(void *arg)
+{
+ int i;
+
+ /*
+ * NOTE(review): taskqueue_create_fast() is called with M_NOWAIT but
+ * its return value is not checked; a (very early, unlikely) failure
+ * would leave acpi_taskq NULL — TODO confirm this is acceptable.
+ */
+ acpi_taskq = taskqueue_create_fast("acpi_task", M_NOWAIT,
+ &taskqueue_thread_enqueue, &acpi_taskq);
+ taskqueue_start_threads(&acpi_taskq, acpi_max_threads, PWAIT, "acpi_task");
+ /* Flush tasks that were queued before the taskqueue existed. */
+ if (acpi_task_count > 0) {
+ if (bootverbose)
+ printf("AcpiOsExecute: enqueue %d pending tasks\n",
+ acpi_task_count);
+ for (i = 0; i < acpi_max_tasks; i++)
+ if (atomic_cmpset_int(&acpi_tasks[i].at_flag, ACPI_TASK_USED,
+ ACPI_TASK_USED | ACPI_TASK_ENQUEUED))
+ taskqueue_enqueue(acpi_taskq, &acpi_tasks[i].at_task);
+ }
+ acpi_taskq_started = 1;
+}
+
+SYSINIT(acpi_taskq, SI_SUB_CONFIGURE, SI_ORDER_SECOND, acpi_taskq_init, NULL);
+
+/*
+ * Bounce through this wrapper function since ACPI-CA doesn't understand
+ * the pending argument for its callbacks.
+ */
+static void
+acpi_task_execute(void *context, int pending)
+{
+ struct acpi_task_ctx *at;
+
+ at = (struct acpi_task_ctx *)context;
+ at->at_function(at->at_context);
+ /* Clearing both flags returns the slot to the ACPI_TASK_FREE pool. */
+ atomic_clear_int(&at->at_flag, ACPI_TASK_USED | ACPI_TASK_ENQUEUED);
+ /* NOTE(review): non-atomic decrement; appears to be best-effort only. */
+ acpi_task_count--;
+}
+
+/*
+ * Claim a free slot from acpi_tasks[] and hand the callback to the
+ * taskqueue (or leave it pending if the queue has not started yet —
+ * acpi_taskq_init() will flush it).  Lock-free: slots are claimed with
+ * an atomic FREE -> USED transition, so this is safe from interrupt
+ * context where ACPICA may call AcpiOsExecute().
+ */
+static ACPI_STATUS
+acpi_task_enqueue(int priority, ACPI_OSD_EXEC_CALLBACK Function, void *Context)
+{
+ struct acpi_task_ctx *at;
+ int i;
+
+ /* Scan for the first free slot; cmpset makes the claim atomic. */
+ for (at = NULL, i = 0; i < acpi_max_tasks; i++)
+ if (atomic_cmpset_int(&acpi_tasks[i].at_flag, ACPI_TASK_FREE,
+ ACPI_TASK_USED)) {
+ at = &acpi_tasks[i];
+ acpi_task_count++;
+ break;
+ }
+ if (at == NULL) {
+ printf("AcpiOsExecute: failed to enqueue task, consider increasing "
+ "the debug.acpi.max_tasks tunable\n");
+ return (AE_NO_MEMORY);
+ }
+
+ TASK_INIT(&at->at_task, priority, acpi_task_execute, at);
+ at->at_function = Function;
+ at->at_context = Context;
+
+ /*
+ * If the task queue is ready, enqueue it now.
+ */
+ if (acpi_taskq_started) {
+ atomic_set_int(&at->at_flag, ACPI_TASK_ENQUEUED);
+ taskqueue_enqueue(acpi_taskq, &at->at_task);
+ return (AE_OK);
+ }
+ if (bootverbose)
+ printf("AcpiOsExecute: task queue not started\n");
+
+ /* Slot stays USED (not ENQUEUED); acpi_taskq_init() picks it up. */
+ return (AE_OK);
+}
+
+/*
+ * This function may be called in interrupt context, i.e. when a GPE fires.
+ * We allocate and queue a task for one of our taskqueue threads to process.
+ */
+ACPI_STATUS
+AcpiOsExecute(ACPI_EXECUTE_TYPE Type, ACPI_OSD_EXEC_CALLBACK Function,
+ void *Context)
+{
+ int pri;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ if (Function == NULL)
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
+
+ /*
+ * Map the callback type to a queueing priority.  GPEs and Notifies
+ * share the highest priority so a Notify generated from within a
+ * GPE method (e.g., _L00) is not pre-empted by a later GPE arriving
+ * while the Notify handler runs.
+ */
+ if (Type == OSL_GPE_HANDLER || Type == OSL_NOTIFY_HANDLER)
+ pri = 10;
+ else if (Type == OSL_GLOBAL_LOCK_HANDLER || Type == OSL_EC_POLL_HANDLER ||
+ Type == OSL_EC_BURST_HANDLER)
+ pri = 5;
+ else if (Type == OSL_DEBUGGER_THREAD)
+ pri = 0;
+ else
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
+
+ return_ACPI_STATUS (acpi_task_enqueue(pri, Function, Context));
+}
+
+/*
+ * Block until every task currently handed to the taskqueue has finished.
+ */
+void
+AcpiOsWaitEventsComplete(void)
+{
+ int idx;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ /* Drain each slot that has actually been enqueued. */
+ for (idx = 0; idx < acpi_max_tasks; idx++) {
+ if (atomic_load_acq_int(&acpi_tasks[idx].at_flag) &
+ ACPI_TASK_ENQUEUED)
+ taskqueue_drain(acpi_taskq, &acpi_tasks[idx].at_task);
+ }
+ return_VOID;
+}
+
+/* Sleep (voluntarily yield) for at least Milliseconds. */
+void
+AcpiOsSleep(UINT64 Milliseconds)
+{
+ int timo;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ /* NOTE(review): UINT64 -> int; could overflow for huge arguments. */
+ timo = Milliseconds * hz / 1000;
+
+ /*
+ * If requested sleep time is less than our hz resolution, use
+ * DELAY instead for better granularity.
+ */
+ if (timo > 0)
+ pause("acpislp", timo);
+ else
+ DELAY(Milliseconds * 1000);
+
+ return_VOID;
+}
+
+/*
+ * Return the current time in 100 nanosecond units
+ */
+UINT64
+AcpiOsGetTimer(void)
+{
+ struct bintime bt;
+ UINT64 t;
+
+ /* XXX During early boot there is no (decent) timer available yet. */
+ KASSERT(cold == 0, ("acpi: timer op not yet supported during boot"));
+
+ binuptime(&bt);
+ /* Whole seconds: 10^7 hundred-nanosecond units per second. */
+ t = (uint64_t)bt.sec * 10000000;
+ /* Fraction: scale the top 32 bits of the 64-bit binary fraction. */
+ t += ((uint64_t)10000000 * (uint32_t)(bt.frac >> 32)) >> 32;
+
+ return (t);
+}
+
+/*
+ * Busy-wait for Microseconds without yielding the CPU; ACPICA uses this
+ * for short delays where sleeping is not possible.
+ */
+void
+AcpiOsStall(UINT32 Microseconds)
+{
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+ DELAY(Microseconds);
+ return_VOID;
+}
+
+/* Return a unique, non-zero identifier for the current thread. */
+ACPI_THREAD_ID
+AcpiOsGetThreadId(void)
+{
+
+ /* XXX do not add ACPI_FUNCTION_TRACE here, results in recursive call. */
+
+ /* Returning 0 is not allowed. */
+ return (curthread->td_tid);
+}
diff --git a/sys/dev/acpica/Osd/OsdStream.c b/sys/dev/acpica/Osd/OsdStream.c
new file mode 100644
index 0000000..81181db
--- /dev/null
+++ b/sys/dev/acpica/Osd/OsdStream.c
@@ -0,0 +1,51 @@
+/*-
+ * Copyright (c) 2000 Michael Smith
+ * Copyright (c) 2000 BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * 6.6 : Stream I/O
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <contrib/dev/acpica/include/acpi.h>
+
+/* printf-style output for ACPICA, routed to the kernel console. */
+void
+AcpiOsPrintf(const char *Format, ...)
+{
+ va_list args;
+
+ va_start(args, Format);
+ vprintf(Format, args);
+ va_end(args);
+}
+
+/* vprintf-style output for ACPICA, routed to the kernel console. */
+void
+AcpiOsVprintf(const char *Format, va_list Args)
+{
+ vprintf(Format, Args);
+}
diff --git a/sys/dev/acpica/Osd/OsdSynch.c b/sys/dev/acpica/Osd/OsdSynch.c
new file mode 100644
index 0000000..b70a120
--- /dev/null
+++ b/sys/dev/acpica/Osd/OsdSynch.c
@@ -0,0 +1,608 @@
+/*-
+ * Copyright (c) 2000 Michael Smith
+ * Copyright (c) 2000 BSDi
+ * Copyright (c) 2007-2009 Jung-uk Kim <jkim@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * 6.1 : Mutual Exclusion and Synchronisation
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+
+#include <sys/condvar.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+
+#define _COMPONENT ACPI_OS_SERVICES
+ACPI_MODULE_NAME("SYNCH")
+
+static MALLOC_DEFINE(M_ACPISEM, "acpisem", "ACPI semaphore");
+
+/*
+ * Convert a timeout expressed in milliseconds into a tick count
+ * suitable for the kernel sleep primitives.
+ */
+static int
+timeout2hz(UINT16 Timeout)
+{
+	struct timeval tv = {
+		.tv_sec = (time_t)(Timeout / 1000),
+		.tv_usec = (suseconds_t)(Timeout % 1000) * 1000,
+	};
+
+	return (tvtohz(&tv));
+}
+
+/*
+ * ACPI_SEMAPHORE
+ *
+ * Counting semaphore built from a mutex and a condition variable.
+ */
+struct acpi_sema {
+ struct mtx as_lock; /* protects all fields below */
+ char as_name[32]; /* name for the mutex and cv, embeds pointer */
+ struct cv as_cv; /* waiters sleep here */
+ UINT32 as_maxunits; /* unit limit, or ACPI_NO_UNIT_LIMIT */
+ UINT32 as_units; /* units currently available */
+ int as_waiters; /* number of threads blocked in wait */
+ int as_reset; /* set during delete to fail waiters out */
+};
+
+/*
+ * Create a counting semaphore with the given maximum and initial unit
+ * counts.  The new handle is returned through OutHandle.  Returns
+ * AE_BAD_PARAMETER for invalid arguments and AE_NO_MEMORY if the
+ * (non-sleeping) allocation fails.
+ */
+ACPI_STATUS
+AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
+    ACPI_SEMAPHORE *OutHandle)
+{
+	struct acpi_sema *sema;
+
+	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+	/* Reject a NULL result pointer, a zero limit, or initial > limit. */
+	if (OutHandle == NULL || MaxUnits == 0 || InitialUnits > MaxUnits)
+		return_ACPI_STATUS (AE_BAD_PARAMETER);
+
+	sema = malloc(sizeof(*sema), M_ACPISEM, M_NOWAIT | M_ZERO);
+	if (sema == NULL)
+		return_ACPI_STATUS (AE_NO_MEMORY);
+
+	/* Name the lock and condvar after the semaphore's address. */
+	snprintf(sema->as_name, sizeof(sema->as_name), "ACPI sema (%p)", sema);
+	mtx_init(&sema->as_lock, sema->as_name, NULL, MTX_DEF);
+	cv_init(&sema->as_cv, sema->as_name);
+	sema->as_maxunits = MaxUnits;
+	sema->as_units = InitialUnits;
+
+	*OutHandle = (ACPI_SEMAPHORE)sema;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "created %s, max %u, initial %u\n",
+	    sema->as_name, MaxUnits, InitialUnits));
+
+	return_ACPI_STATUS (AE_OK);
+}
+
+/*
+ * Destroy a semaphore created by AcpiOsCreateSemaphore().
+ *
+ * If threads are still blocked in AcpiOsWaitSemaphore(), set as_reset
+ * and broadcast so they fail out with AE_ERROR, then wait for them to
+ * drain before tearing the semaphore down.  Returns AE_ERROR if that
+ * drain is interrupted by a signal.
+ */
+ACPI_STATUS
+AcpiOsDeleteSemaphore(ACPI_SEMAPHORE Handle)
+{
+ struct acpi_sema *as = (struct acpi_sema *)Handle;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ if (as == NULL)
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
+
+ mtx_lock(&as->as_lock);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete %s\n", as->as_name));
+
+ if (as->as_waiters > 0) {
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "reset %s, units %u, waiters %d\n",
+ as->as_name, as->as_units, as->as_waiters));
+ /* Flag the reset and kick every waiter out of its cv sleep. */
+ as->as_reset = 1;
+ cv_broadcast(&as->as_cv);
+ /*
+ * Waiters do not signal &as->as_reset, so poll once per
+ * second (hz ticks) until they have all left the wait path.
+ */
+ while (as->as_waiters > 0) {
+ if (mtx_sleep(&as->as_reset, &as->as_lock,
+ PCATCH, "acsrst", hz) == EINTR) {
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "failed to reset %s, waiters %d\n",
+ as->as_name, as->as_waiters));
+ /* Interrupted: leave the semaphore intact. */
+ mtx_unlock(&as->as_lock);
+ return_ACPI_STATUS (AE_ERROR);
+ }
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "wait %s, units %u, waiters %d\n",
+ as->as_name, as->as_units, as->as_waiters));
+ }
+ }
+
+ mtx_unlock(&as->as_lock);
+
+ /* No waiters remain; safe to destroy the lock, cv and memory. */
+ mtx_destroy(&as->as_lock);
+ cv_destroy(&as->as_cv);
+ free(as, M_ACPISEM);
+
+ return_ACPI_STATUS (AE_OK);
+}
+
+#define ACPISEM_AVAIL(s, u) ((s)->as_units >= (u))
+
+/*
+ * Take Units units from the semaphore, blocking according to Timeout:
+ * ACPI_DO_NOT_WAIT polls once, ACPI_WAIT_FOREVER sleeps until enough
+ * units are available, any other value sleeps for at most that many
+ * milliseconds.  Returns AE_TIME on timeout, AE_LIMIT if Units exceeds
+ * a fixed maximum, and AE_ERROR if the sleep is interrupted by a
+ * signal or the semaphore is being deleted (as_reset).
+ */
+ACPI_STATUS
+AcpiOsWaitSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units, UINT16 Timeout)
+{
+ struct acpi_sema *as = (struct acpi_sema *)Handle;
+ int error, prevtick, slptick, tmo;
+ ACPI_STATUS status = AE_OK;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ if (as == NULL || Units == 0)
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
+
+ mtx_lock(&as->as_lock);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "get %u unit(s) from %s, units %u, waiters %d, timeout %u\n",
+ Units, as->as_name, as->as_units, as->as_waiters, Timeout));
+
+ /* A request larger than the hard limit can never be satisfied. */
+ if (as->as_maxunits != ACPI_NO_UNIT_LIMIT && as->as_maxunits < Units) {
+ mtx_unlock(&as->as_lock);
+ return_ACPI_STATUS (AE_LIMIT);
+ }
+
+ switch (Timeout) {
+ case ACPI_DO_NOT_WAIT:
+ /* Non-blocking: fail immediately if not available. */
+ if (!ACPISEM_AVAIL(as, Units))
+ status = AE_TIME;
+ break;
+ case ACPI_WAIT_FOREVER:
+ while (!ACPISEM_AVAIL(as, Units)) {
+ as->as_waiters++;
+ error = cv_wait_sig(&as->as_cv, &as->as_lock);
+ as->as_waiters--;
+ if (error == EINTR || as->as_reset) {
+ status = AE_ERROR;
+ break;
+ }
+ }
+ break;
+ default:
+ /* Sleep in bounded chunks, re-arming the remaining timeout. */
+ tmo = timeout2hz(Timeout);
+ while (!ACPISEM_AVAIL(as, Units)) {
+ prevtick = ticks;
+ as->as_waiters++;
+ error = cv_timedwait_sig(&as->as_cv, &as->as_lock, tmo);
+ as->as_waiters--;
+ if (error == EINTR || as->as_reset) {
+ status = AE_ERROR;
+ break;
+ }
+ if (ACPISEM_AVAIL(as, Units))
+ break;
+ /* Charge the time actually slept against the budget. */
+ slptick = ticks - prevtick;
+ if (slptick >= tmo || slptick < 0) {
+ status = AE_TIME;
+ break;
+ }
+ tmo -= slptick;
+ }
+ }
+ /* On success, consume the units while still holding the lock. */
+ if (ACPI_SUCCESS(status))
+ as->as_units -= Units;
+
+ mtx_unlock(&as->as_lock);
+
+ return_ACPI_STATUS (status);
+}
+
+/*
+ * Return Units units to the semaphore, waking waiters if the post
+ * makes the semaphore available.  Returns AE_LIMIT if the post would
+ * push the count past the semaphore's fixed maximum.
+ */
+ACPI_STATUS
+AcpiOsSignalSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units)
+{
+ struct acpi_sema *as = (struct acpi_sema *)Handle;
+ UINT32 i;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ if (as == NULL || Units == 0)
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
+
+ mtx_lock(&as->as_lock);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "return %u units to %s, units %u, waiters %d\n",
+ Units, as->as_name, as->as_units, as->as_waiters));
+
+ /*
+ * Overflow-safe limit check: written as a subtraction so that
+ * as_units + Units is never computed directly.
+ */
+ if (as->as_maxunits != ACPI_NO_UNIT_LIMIT &&
+ (as->as_maxunits < Units ||
+ as->as_maxunits - Units < as->as_units)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "exceeded max units %u\n", as->as_maxunits));
+ mtx_unlock(&as->as_lock);
+ return_ACPI_STATUS (AE_LIMIT);
+ }
+
+ as->as_units += Units;
+ /* Wake one waiter per unit posted, if anything became available. */
+ if (as->as_waiters > 0 && ACPISEM_AVAIL(as, Units))
+ for (i = 0; i < Units; i++)
+ cv_signal(&as->as_cv);
+
+ mtx_unlock(&as->as_lock);
+
+ return_ACPI_STATUS (AE_OK);
+}
+
+#undef ACPISEM_AVAIL
+
+/*
+ * ACPI_MUTEX
+ *
+ * Recursive mutex built from a mutex plus explicit owner tracking,
+ * since ACPICA may acquire the same mutex from the owning thread.
+ */
+struct acpi_mutex {
+ struct mtx am_lock; /* protects all fields below */
+ char am_name[32]; /* lock name, embeds pointer */
+ struct thread *am_owner; /* owning thread, NULL when free */
+ int am_nested; /* recursion depth beyond first acquire */
+ int am_waiters; /* number of threads blocked in acquire */
+ int am_reset; /* set during delete to fail waiters out */
+};
+
+/*
+ * Create an ACPI mutex and return its handle through OutHandle.
+ * Returns AE_BAD_PARAMETER for a NULL result pointer and AE_NO_MEMORY
+ * if the (non-sleeping) allocation fails.
+ */
+ACPI_STATUS
+AcpiOsCreateMutex(ACPI_MUTEX *OutHandle)
+{
+	struct acpi_mutex *mutex;
+
+	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+	if (OutHandle == NULL)
+		return_ACPI_STATUS (AE_BAD_PARAMETER);
+
+	mutex = malloc(sizeof(*mutex), M_ACPISEM, M_NOWAIT | M_ZERO);
+	if (mutex == NULL)
+		return_ACPI_STATUS (AE_NO_MEMORY);
+
+	/* Name the underlying lock after the mutex's address. */
+	snprintf(mutex->am_name, sizeof(mutex->am_name), "ACPI mutex (%p)",
+	    mutex);
+	mtx_init(&mutex->am_lock, mutex->am_name, NULL, MTX_DEF);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "created %s\n", mutex->am_name));
+
+	*OutHandle = (ACPI_MUTEX)mutex;
+
+	return_ACPI_STATUS (AE_OK);
+}
+
+#define ACPIMTX_AVAIL(m) ((m)->am_owner == NULL)
+#define ACPIMTX_OWNED(m) ((m)->am_owner == curthread)
+
+/*
+ * Destroy a mutex created by AcpiOsCreateMutex().
+ *
+ * Mirrors AcpiOsDeleteSemaphore(): if threads are blocked in
+ * AcpiOsAcquireMutex(), set am_reset, wake them all, and poll until
+ * they drain.  A NULL handle, or a signal during the drain, is logged
+ * and the function simply returns (the interface is void).
+ */
+void
+AcpiOsDeleteMutex(ACPI_MUTEX Handle)
+{
+ struct acpi_mutex *am = (struct acpi_mutex *)Handle;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ if (am == NULL) {
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "cannot delete null mutex\n"));
+ return_VOID;
+ }
+
+ mtx_lock(&am->am_lock);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete %s\n", am->am_name));
+
+ if (am->am_waiters > 0) {
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "reset %s, owner %p\n", am->am_name, am->am_owner));
+ /* Flag the reset and wake every waiter sleeping on am. */
+ am->am_reset = 1;
+ wakeup(am);
+ /* Poll once per second until all waiters have left. */
+ while (am->am_waiters > 0) {
+ if (mtx_sleep(&am->am_reset, &am->am_lock,
+ PCATCH, "acmrst", hz) == EINTR) {
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "failed to reset %s, waiters %d\n",
+ am->am_name, am->am_waiters));
+ /* Interrupted: abandon the delete, mutex stays live. */
+ mtx_unlock(&am->am_lock);
+ return_VOID;
+ }
+ if (ACPIMTX_AVAIL(am))
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "wait %s, waiters %d\n",
+ am->am_name, am->am_waiters));
+ else
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "wait %s, owner %p, waiters %d\n",
+ am->am_name, am->am_owner, am->am_waiters));
+ }
+ }
+
+ mtx_unlock(&am->am_lock);
+
+ mtx_destroy(&am->am_lock);
+ free(am, M_ACPISEM);
+}
+
+/*
+ * Acquire the mutex, honoring the same Timeout convention as
+ * AcpiOsWaitSemaphore() (ACPI_DO_NOT_WAIT / ACPI_WAIT_FOREVER /
+ * milliseconds).  Recursive acquisition by the owning thread just
+ * bumps am_nested.  Returns AE_TIME on timeout and AE_ERROR if the
+ * sleep is interrupted or the mutex is being deleted.
+ */
+ACPI_STATUS
+AcpiOsAcquireMutex(ACPI_MUTEX Handle, UINT16 Timeout)
+{
+ struct acpi_mutex *am = (struct acpi_mutex *)Handle;
+ int error, prevtick, slptick, tmo;
+ ACPI_STATUS status = AE_OK;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ if (am == NULL)
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
+
+ mtx_lock(&am->am_lock);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "acquire %s\n", am->am_name));
+
+ /* Recursive acquire by the owner: count the nesting and succeed. */
+ if (ACPIMTX_OWNED(am)) {
+ am->am_nested++;
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "acquire nested %s, depth %d\n",
+ am->am_name, am->am_nested));
+ mtx_unlock(&am->am_lock);
+ return_ACPI_STATUS (AE_OK);
+ }
+
+ switch (Timeout) {
+ case ACPI_DO_NOT_WAIT:
+ /* Non-blocking: fail immediately if someone owns it. */
+ if (!ACPIMTX_AVAIL(am))
+ status = AE_TIME;
+ break;
+ case ACPI_WAIT_FOREVER:
+ while (!ACPIMTX_AVAIL(am)) {
+ am->am_waiters++;
+ error = mtx_sleep(am, &am->am_lock, PCATCH, "acmtx", 0);
+ am->am_waiters--;
+ if (error == EINTR || am->am_reset) {
+ status = AE_ERROR;
+ break;
+ }
+ }
+ break;
+ default:
+ /* Sleep in bounded chunks, re-arming the remaining timeout. */
+ tmo = timeout2hz(Timeout);
+ while (!ACPIMTX_AVAIL(am)) {
+ prevtick = ticks;
+ am->am_waiters++;
+ error = mtx_sleep(am, &am->am_lock, PCATCH,
+ "acmtx", tmo);
+ am->am_waiters--;
+ if (error == EINTR || am->am_reset) {
+ status = AE_ERROR;
+ break;
+ }
+ if (ACPIMTX_AVAIL(am))
+ break;
+ /* Charge the time actually slept against the budget. */
+ slptick = ticks - prevtick;
+ if (slptick >= tmo || slptick < 0) {
+ status = AE_TIME;
+ break;
+ }
+ tmo -= slptick;
+ }
+ }
+ /* On success, take ownership while still holding am_lock. */
+ if (ACPI_SUCCESS(status))
+ am->am_owner = curthread;
+
+ mtx_unlock(&am->am_lock);
+
+ return_ACPI_STATUS (status);
+}
+
+/*
+ * Release the mutex.  A nested acquisition just decrements the depth;
+ * the final release clears the owner and wakes one waiter.  Releasing
+ * a NULL, unowned, or already-free mutex is logged and ignored.
+ */
+void
+AcpiOsReleaseMutex(ACPI_MUTEX Handle)
+{
+ struct acpi_mutex *am = (struct acpi_mutex *)Handle;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ if (am == NULL) {
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "cannot release null mutex\n"));
+ return_VOID;
+ }
+
+ mtx_lock(&am->am_lock);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "release %s\n", am->am_name));
+
+ if (ACPIMTX_OWNED(am)) {
+ if (am->am_nested > 0) {
+ /* Unwind one level of recursive acquisition. */
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "release nested %s, depth %d\n",
+ am->am_name, am->am_nested));
+ am->am_nested--;
+ } else
+ am->am_owner = NULL;
+ } else {
+ /* Caller does not own the mutex: diagnose but do nothing. */
+ if (ACPIMTX_AVAIL(am))
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "release already available %s\n", am->am_name));
+ else
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "release unowned %s from %p, depth %d\n",
+ am->am_name, am->am_owner, am->am_nested));
+ }
+ /* Hand the mutex to one waiter if it is now free. */
+ if (am->am_waiters > 0 && ACPIMTX_AVAIL(am))
+ wakeup_one(am);
+
+ mtx_unlock(&am->am_lock);
+}
+
+#undef ACPIMTX_AVAIL
+#undef ACPIMTX_OWNED
+
+/*
+ * ACPI_SPINLOCK
+ *
+ * Thin wrapper around a spin mutex, with a nesting counter because
+ * ACPICA may acquire a lock it already holds (see AcpiOsAcquireLock).
+ */
+struct acpi_spinlock {
+ struct mtx al_lock; /* the underlying spin mutex */
+ char al_name[32]; /* lock name, embeds pointer (or GPE/HW tag) */
+ int al_nested; /* recursion depth beyond first acquire */
+};
+
+/*
+ * Create a spin lock and return its handle through OutHandle.
+ * Returns AE_BAD_PARAMETER for a NULL result pointer and AE_NO_MEMORY
+ * if the (non-sleeping) allocation fails.
+ */
+ACPI_STATUS
+AcpiOsCreateLock(ACPI_SPINLOCK *OutHandle)
+{
+ struct acpi_spinlock *al;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ if (OutHandle == NULL)
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
+
+ if ((al = malloc(sizeof(*al), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
+ return_ACPI_STATUS (AE_NO_MEMORY);
+
+#ifdef ACPI_DEBUG
+ /* Give the two well-known ACPICA global spinlocks readable names. */
+ if (OutHandle == &AcpiGbl_GpeLock)
+ snprintf(al->al_name, sizeof(al->al_name), "ACPI lock (GPE)");
+ else if (OutHandle == &AcpiGbl_HardwareLock)
+ snprintf(al->al_name, sizeof(al->al_name), "ACPI lock (HW)");
+ else
+#endif
+ snprintf(al->al_name, sizeof(al->al_name), "ACPI lock (%p)", al);
+ mtx_init(&al->al_lock, al->al_name, NULL, MTX_SPIN);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "created %s\n", al->al_name));
+
+ *OutHandle = (ACPI_SPINLOCK)al;
+
+ return_ACPI_STATUS (AE_OK);
+}
+
+/*
+ * Destroy a spin lock created by AcpiOsCreateLock().  A NULL handle
+ * is logged and ignored.
+ */
+void
+AcpiOsDeleteLock(ACPI_SPINLOCK Handle)
+{
+	struct acpi_spinlock *lock = (struct acpi_spinlock *)Handle;
+
+	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+	if (lock == NULL) {
+		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+		    "cannot delete null spinlock\n"));
+		return_VOID;
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete %s\n", lock->al_name));
+
+	mtx_destroy(&lock->al_lock);
+	free(lock, M_ACPISEM);
+}
+
+/*
+ * Acquire a spin lock.  If the current thread already owns the lock,
+ * count the nesting instead of self-deadlocking on mtx_lock_spin().
+ * Interrupt state is managed by mtx_lock_spin() itself, so no CPU
+ * flags need to be carried; the return value is always 0.
+ */
+ACPI_CPU_FLAGS
+AcpiOsAcquireLock(ACPI_SPINLOCK Handle)
+{
+ struct acpi_spinlock *al = (struct acpi_spinlock *)Handle;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ if (al == NULL) {
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "cannot acquire null spinlock\n"));
+ return (0);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "acquire %s\n", al->al_name));
+
+ if (mtx_owned(&al->al_lock)) {
+ /* Recursive acquire: just record the extra depth. */
+ al->al_nested++;
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "acquire nested %s, depth %d\n",
+ al->al_name, al->al_nested));
+ } else
+ mtx_lock_spin(&al->al_lock);
+
+ return (0);
+}
+
+/*
+ * Release a spin lock taken with AcpiOsAcquireLock().  A nested
+ * acquisition just decrements the depth; the final release unlocks
+ * the spin mutex.  The Flags argument is unused (acquire always
+ * returns 0).  NULL or unowned handles are logged and ignored.
+ */
+void
+AcpiOsReleaseLock(ACPI_SPINLOCK Handle, ACPI_CPU_FLAGS Flags)
+{
+ struct acpi_spinlock *al = (struct acpi_spinlock *)Handle;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ if (al == NULL) {
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "cannot release null spinlock\n"));
+ return_VOID;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "release %s\n", al->al_name));
+
+ if (mtx_owned(&al->al_lock)) {
+ if (al->al_nested > 0) {
+ /* Unwind one level of recursive acquisition. */
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "release nested %s, depth %d\n",
+ al->al_name, al->al_nested));
+ al->al_nested--;
+ } else
+ mtx_unlock_spin(&al->al_lock);
+ } else
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "cannot release unowned %s\n", al->al_name));
+}
+
+/* Section 5.2.10.1: global lock acquire/release functions */
+#define GL_BIT_PENDING 0x01
+#define GL_BIT_OWNED 0x02
+
+/*
+ * Try to take the ACPI global lock.  Set the owned bit; if the lock
+ * was already owned, set the pending bit instead so the firmware will
+ * notify us when the lock becomes free.  Returns non-zero when the
+ * lock was acquired outright, zero when the caller must wait for the
+ * release notification from the BIOS and retry.
+ */
+int
+acpi_acquire_global_lock(uint32_t *lock)
+{
+	uint32_t newval, oldval;
+
+	for (;;) {
+		oldval = *lock;
+		newval = (oldval & ~GL_BIT_PENDING) | GL_BIT_OWNED;
+		if ((oldval & GL_BIT_OWNED) != 0)
+			newval |= GL_BIT_PENDING;
+		if (atomic_cmpset_acq_int(lock, oldval, newval) != 0)
+			break;
+	}
+
+	return ((newval & GL_BIT_PENDING) == 0);
+}
+
+/*
+ * Drop the ACPI global lock by clearing both the owned and pending
+ * bits.  Returns non-zero when the pending bit was set, in which case
+ * the caller must signal the BIOS that the lock has been released.
+ */
+int
+acpi_release_global_lock(uint32_t *lock)
+{
+	uint32_t newval, oldval;
+
+	for (;;) {
+		oldval = *lock;
+		newval = oldval & ~(GL_BIT_PENDING | GL_BIT_OWNED);
+		if (atomic_cmpset_rel_int(lock, oldval, newval) != 0)
+			break;
+	}
+
+	return ((oldval & GL_BIT_PENDING) != 0);
+}
diff --git a/sys/dev/acpica/Osd/OsdTable.c b/sys/dev/acpica/Osd/OsdTable.c
new file mode 100644
index 0000000..da220e5
--- /dev/null
+++ b/sys/dev/acpica/Osd/OsdTable.c
@@ -0,0 +1,106 @@
+/*-
+ * Copyright (c) 2002 Mitsaru Iwasaki
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * ACPI Table interfaces
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/endian.h>
+#include <sys/kernel.h>
+#include <sys/linker.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/actables.h>
+
+#undef _COMPONENT
+#define _COMPONENT ACPI_TABLES
+
+static char acpi_osname[128];
+TUNABLE_STR("hw.acpi.osname", acpi_osname, sizeof(acpi_osname));
+
+/*
+ * Let the hw.acpi.osname tunable override the value of the _OS_
+ * predefined object.  Any other predefined name, or an unset tunable,
+ * leaves *NewVal as NULL so ACPICA keeps the built-in value.
+ */
+ACPI_STATUS
+AcpiOsPredefinedOverride(const ACPI_PREDEFINED_NAMES *InitVal,
+    ACPI_STRING *NewVal)
+{
+
+	if (InitVal == NULL || NewVal == NULL)
+		return (AE_BAD_PARAMETER);
+
+	*NewVal = NULL;
+	if (strncmp(InitVal->Name, "_OS_", ACPI_NAME_SIZE) != 0 ||
+	    strlen(acpi_osname) == 0)
+		return (AE_OK);
+
+	printf("ACPI: Overriding _OS definition with \"%s\"\n", acpi_osname);
+	*NewVal = acpi_osname;
+	return (AE_OK);
+}
+
+/*
+ * Optionally replace an ACPI table with one preloaded by the boot
+ * loader.  Currently only the DSDT can be overridden, using a
+ * preloaded module of type "acpi_dsdt"; *NewTable is left NULL when
+ * no override applies.
+ */
+ACPI_STATUS
+AcpiOsTableOverride(ACPI_TABLE_HEADER *ExistingTable,
+ ACPI_TABLE_HEADER **NewTable)
+{
+ char modname[] = "acpi_dsdt";
+ caddr_t acpi_table;
+ ACPI_TABLE_HEADER *hdr;
+ size_t sz;
+
+ if (ExistingTable == NULL || NewTable == NULL)
+ return (AE_BAD_PARAMETER);
+
+ *NewTable = NULL;
+#ifdef notyet
+ /* Overwrite the "dsdt" suffix with the lowercased table signature. */
+ for (int i = 0; i < ACPI_NAME_SIZE; i++)
+ modname[i + 5] = tolower(ExistingTable->Signature[i]);
+#else
+ /* If we're not overriding the DSDT, just return. */
+ if (strncmp(ExistingTable->Signature, ACPI_SIG_DSDT, ACPI_NAME_SIZE) != 0)
+ return (AE_OK);
+#endif
+
+ /* Look for a matching module staged by the loader. */
+ acpi_table = preload_search_by_type(modname);
+ if (acpi_table == NULL)
+ return (AE_OK);
+
+ hdr = preload_fetch_addr(acpi_table);
+ sz = preload_fetch_size(acpi_table);
+ if (hdr != NULL && sz != 0)
+ *NewTable = hdr;
+
+ return (AE_OK);
+}
+
+/*
+ * Physical-address table override is not implemented; AE_SUPPORT
+ * tells ACPICA to keep the firmware-provided table.
+ */
+ACPI_STATUS
+AcpiOsPhysicalTableOverride(ACPI_TABLE_HEADER *ExistingTable,
+ ACPI_PHYSICAL_ADDRESS *NewAddress, UINT32 *NewTableLength)
+{
+
+ return (AE_SUPPORT);
+}
diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c
new file mode 100644
index 0000000..cfe08ed
--- /dev/null
+++ b/sys/dev/acpica/acpi.c
@@ -0,0 +1,3895 @@
+/*-
+ * Copyright (c) 2000 Takanori Watanabe <takawata@jp.freebsd.org>
+ * Copyright (c) 2000 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
+ * Copyright (c) 2000, 2001 Michael Smith
+ * Copyright (c) 2000 BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_acpi.h"
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+#include <sys/fcntl.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/ioccom.h>
+#include <sys/reboot.h>
+#include <sys/sysctl.h>
+#include <sys/ctype.h>
+#include <sys/linker.h>
+#include <sys/power.h>
+#include <sys/sbuf.h>
+#include <sys/sched.h>
+#include <sys/smp.h>
+#include <sys/timetc.h>
+
+#if defined(__i386__) || defined(__amd64__)
+#include <machine/pci_cfgreg.h>
+#endif
+#include <machine/resource.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <isa/isavar.h>
+#include <isa/pnpvar.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+#include <contrib/dev/acpica/include/acnamesp.h>
+
+#include <dev/acpica/acpivar.h>
+#include <dev/acpica/acpiio.h>
+
+#include <vm/vm_param.h>
+
+static MALLOC_DEFINE(M_ACPIDEV, "acpidev", "ACPI devices");
+
+/* Hooks for the ACPI CA debugging infrastructure */
+#define _COMPONENT ACPI_BUS
+ACPI_MODULE_NAME("ACPI")
+
+static d_open_t acpiopen;
+static d_close_t acpiclose;
+static d_ioctl_t acpiioctl;
+
+/* Character device switch for the /dev/acpi control device. */
+static struct cdevsw acpi_cdevsw = {
+ .d_version = D_VERSION,
+ .d_open = acpiopen,
+ .d_close = acpiclose,
+ .d_ioctl = acpiioctl,
+ .d_name = "acpi",
+};
+
+/* An array of interface name strings and its entry count. */
+struct acpi_interface {
+ ACPI_STRING *data; /* interface name strings */
+ int num; /* number of entries in data */
+};
+
+/* Global mutex for locking access to the ACPI subsystem. */
+struct mtx acpi_mutex;
+
+/* Bitmap of device quirks. */
+int acpi_quirks;
+
+/* Supported sleep states. */
+static BOOLEAN acpi_sleep_states[ACPI_S_STATE_COUNT];
+
+static int acpi_modevent(struct module *mod, int event, void *junk);
+static int acpi_probe(device_t dev);
+static int acpi_attach(device_t dev);
+static int acpi_suspend(device_t dev);
+static int acpi_resume(device_t dev);
+static int acpi_shutdown(device_t dev);
+static device_t acpi_add_child(device_t bus, u_int order, const char *name,
+ int unit);
+static int acpi_print_child(device_t bus, device_t child);
+static void acpi_probe_nomatch(device_t bus, device_t child);
+static void acpi_driver_added(device_t dev, driver_t *driver);
+static int acpi_read_ivar(device_t dev, device_t child, int index,
+ uintptr_t *result);
+static int acpi_write_ivar(device_t dev, device_t child, int index,
+ uintptr_t value);
+static struct resource_list *acpi_get_rlist(device_t dev, device_t child);
+static void acpi_reserve_resources(device_t dev);
+static int acpi_sysres_alloc(device_t dev);
+static int acpi_set_resource(device_t dev, device_t child, int type,
+ int rid, u_long start, u_long count);
+static struct resource *acpi_alloc_resource(device_t bus, device_t child,
+ int type, int *rid, u_long start, u_long end,
+ u_long count, u_int flags);
+static int acpi_adjust_resource(device_t bus, device_t child, int type,
+ struct resource *r, u_long start, u_long end);
+static int acpi_release_resource(device_t bus, device_t child, int type,
+ int rid, struct resource *r);
+static void acpi_delete_resource(device_t bus, device_t child, int type,
+ int rid);
+static uint32_t acpi_isa_get_logicalid(device_t dev);
+static int acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count);
+static char *acpi_device_id_probe(device_t bus, device_t dev, char **ids);
+static ACPI_STATUS acpi_device_eval_obj(device_t bus, device_t dev,
+ ACPI_STRING pathname, ACPI_OBJECT_LIST *parameters,
+ ACPI_BUFFER *ret);
+static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level,
+ void *context, void **retval);
+static ACPI_STATUS acpi_device_scan_children(device_t bus, device_t dev,
+ int max_depth, acpi_scan_cb_t user_fn, void *arg);
+static int acpi_set_powerstate(device_t child, int state);
+static int acpi_isa_pnp_probe(device_t bus, device_t child,
+ struct isa_pnp_id *ids);
+static void acpi_probe_children(device_t bus);
+static void acpi_probe_order(ACPI_HANDLE handle, int *order);
+static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level,
+ void *context, void **status);
+static void acpi_sleep_enable(void *arg);
+static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc);
+static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state);
+static void acpi_shutdown_final(void *arg, int howto);
+static void acpi_enable_fixed_events(struct acpi_softc *sc);
+static BOOLEAN acpi_has_hid(ACPI_HANDLE handle);
+static void acpi_resync_clock(struct acpi_softc *sc);
+static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate);
+static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate);
+static int acpi_wake_prep_walk(int sstate);
+static int acpi_wake_sysctl_walk(device_t dev);
+static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS);
+static void acpi_system_eventhandler_sleep(void *arg, int state);
+static void acpi_system_eventhandler_wakeup(void *arg, int state);
+static int acpi_sname2sstate(const char *sname);
+static const char *acpi_sstate2sname(int sstate);
+static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
+static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
+static int acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS);
+static int acpi_pm_func(u_long cmd, void *arg, ...);
+static int acpi_child_location_str_method(device_t acdev, device_t child,
+ char *buf, size_t buflen);
+static int acpi_child_pnpinfo_str_method(device_t acdev, device_t child,
+ char *buf, size_t buflen);
+#if defined(__i386__) || defined(__amd64__)
+static void acpi_enable_pcie(void);
+#endif
+static void acpi_hint_device_unit(device_t acdev, device_t child,
+ const char *name, int *unitp);
+static void acpi_reset_interfaces(device_t dev);
+
+/*
+ * Method dispatch table for the acpi0 root bus driver: standard
+ * device and newbus bus interfaces plus the ACPI-specific methods
+ * and ISA PnP emulation.
+ */
+static device_method_t acpi_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, acpi_probe),
+ DEVMETHOD(device_attach, acpi_attach),
+ DEVMETHOD(device_shutdown, acpi_shutdown),
+ DEVMETHOD(device_detach, bus_generic_detach),
+ DEVMETHOD(device_suspend, acpi_suspend),
+ DEVMETHOD(device_resume, acpi_resume),
+
+ /* Bus interface */
+ DEVMETHOD(bus_add_child, acpi_add_child),
+ DEVMETHOD(bus_print_child, acpi_print_child),
+ DEVMETHOD(bus_probe_nomatch, acpi_probe_nomatch),
+ DEVMETHOD(bus_driver_added, acpi_driver_added),
+ DEVMETHOD(bus_read_ivar, acpi_read_ivar),
+ DEVMETHOD(bus_write_ivar, acpi_write_ivar),
+ DEVMETHOD(bus_get_resource_list, acpi_get_rlist),
+ DEVMETHOD(bus_set_resource, acpi_set_resource),
+ DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
+ DEVMETHOD(bus_alloc_resource, acpi_alloc_resource),
+ DEVMETHOD(bus_adjust_resource, acpi_adjust_resource),
+ DEVMETHOD(bus_release_resource, acpi_release_resource),
+ DEVMETHOD(bus_delete_resource, acpi_delete_resource),
+ DEVMETHOD(bus_child_pnpinfo_str, acpi_child_pnpinfo_str_method),
+ DEVMETHOD(bus_child_location_str, acpi_child_location_str_method),
+ DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
+ DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
+ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+ DEVMETHOD(bus_hint_device_unit, acpi_hint_device_unit),
+
+ /* ACPI bus */
+ DEVMETHOD(acpi_id_probe, acpi_device_id_probe),
+ DEVMETHOD(acpi_evaluate_object, acpi_device_eval_obj),
+ DEVMETHOD(acpi_pwr_for_sleep, acpi_device_pwr_for_sleep),
+ DEVMETHOD(acpi_scan_children, acpi_device_scan_children),
+
+ /* ISA emulation */
+ DEVMETHOD(isa_pnp_probe, acpi_isa_pnp_probe),
+
+ {0, 0}
+};
+
+/* Driver definition for the acpi0 root bus device. */
+static driver_t acpi_driver = {
+ "acpi",
+ acpi_methods,
+ sizeof(struct acpi_softc),
+};
+
+static devclass_t acpi_devclass;
+DRIVER_MODULE(acpi, nexus, acpi_driver, acpi_devclass, acpi_modevent, 0);
+MODULE_VERSION(acpi, 1);
+
+ACPI_SERIAL_DECL(acpi, "ACPI root bus");
+
+/* Local pools for managing system resources for ACPI child devices. */
+static struct rman acpi_rman_io, acpi_rman_mem;
+
+#define ACPI_MINIMUM_AWAKETIME 5
+
+/* Holds the description of the acpi0 device. */
+static char acpi_desc[ACPI_OEM_ID_SIZE + ACPI_OEM_TABLE_ID_SIZE + 2];
+
+SYSCTL_NODE(_debug, OID_AUTO, acpi, CTLFLAG_RD, NULL, "ACPI debugging");
+static char acpi_ca_version[12];
+SYSCTL_STRING(_debug_acpi, OID_AUTO, acpi_ca_version, CTLFLAG_RD,
+ acpi_ca_version, 0, "Version of Intel ACPI-CA");
+
+/*
+ * Allow overriding _OSI methods.
+ */
+static char acpi_install_interface[256];
+TUNABLE_STR("hw.acpi.install_interface", acpi_install_interface,
+ sizeof(acpi_install_interface));
+static char acpi_remove_interface[256];
+TUNABLE_STR("hw.acpi.remove_interface", acpi_remove_interface,
+ sizeof(acpi_remove_interface));
+
+/*
+ * Allow override of whether methods execute in parallel or not.
+ * Enable this for serial behavior, which fixes "AE_ALREADY_EXISTS"
+ * errors for AML that really can't handle parallel method execution.
+ * It is off by default since this breaks recursive methods and
+ * some IBMs use such code.
+ */
+static int acpi_serialize_methods;
+TUNABLE_INT("hw.acpi.serialize_methods", &acpi_serialize_methods);
+
+/* Allow users to dump Debug objects without ACPI debugger. */
+static int acpi_debug_objects;
+TUNABLE_INT("debug.acpi.enable_debug_objects", &acpi_debug_objects);
+SYSCTL_PROC(_debug_acpi, OID_AUTO, enable_debug_objects,
+ CTLFLAG_RW | CTLTYPE_INT, NULL, 0, acpi_debug_objects_sysctl, "I",
+ "Enable Debug objects");
+
+/* Allow the interpreter to ignore common mistakes in BIOS. */
+static int acpi_interpreter_slack = 1;
+TUNABLE_INT("debug.acpi.interpreter_slack", &acpi_interpreter_slack);
+SYSCTL_INT(_debug_acpi, OID_AUTO, interpreter_slack, CTLFLAG_RDTUN,
+ &acpi_interpreter_slack, 1, "Turn on interpreter slack mode.");
+
+#ifdef __amd64__
+/* Reset system clock while resuming. XXX Remove once tested. */
+static int acpi_reset_clock = 1;
+TUNABLE_INT("debug.acpi.reset_clock", &acpi_reset_clock);
+SYSCTL_INT(_debug_acpi, OID_AUTO, reset_clock, CTLFLAG_RW,
+ &acpi_reset_clock, 1, "Reset system clock while resuming.");
+#endif
+
+/* Allow users to override quirks. */
+TUNABLE_INT("debug.acpi.quirks", &acpi_quirks);
+
+static int acpi_susp_bounce;
+SYSCTL_INT(_debug_acpi, OID_AUTO, suspend_bounce, CTLFLAG_RW,
+ &acpi_susp_bounce, 0, "Don't actually suspend, just test devices.");
+
+/*
+ * Module event handler.  ACPI must come in via the loader before the
+ * system finishes booting -- activating it later is useless and can
+ * be fatal -- and it can never be unloaded while it is the active
+ * power-management provider, since the whole bus hierarchy hangs
+ * off it.
+ */
+static int
+acpi_modevent(struct module *mod, int event, void *junk)
+{
+	int error;
+
+	error = 0;
+	switch (event) {
+	case MOD_LOAD:
+		if (!cold) {
+			printf("The ACPI driver cannot be loaded after boot.\n");
+			error = EPERM;
+		}
+		break;
+	case MOD_UNLOAD:
+		if (!cold && power_pm_get_type() == POWER_PM_TYPE_ACPI)
+			error = EBUSY;
+		break;
+	default:
+		break;
+	}
+	return (error);
+}
+
+/*
+ * Perform early initialization: bring up the ACPICA table manager,
+ * apply platform quirks, and decide whether ACPI is usable at all.
+ * Safe to call more than once; only the first call does any work.
+ */
+ACPI_STATUS
+acpi_Startup(void)
+{
+ static int started = 0;
+ ACPI_STATUS status;
+ int val;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ /* Only run the startup code once. The MADT driver also calls this. */
+ if (started)
+ return_VALUE (AE_OK);
+ started = 1;
+
+ /*
+ * Pre-allocate space for RSDT/XSDT and DSDT tables and allow resizing
+ * if more tables exist.
+ */
+ if (ACPI_FAILURE(status = AcpiInitializeTables(NULL, 2, TRUE))) {
+ printf("ACPI: Table initialisation failed: %s\n",
+ AcpiFormatException(status));
+ return_VALUE (status);
+ }
+
+ /* Set up any quirks we have for this system. */
+ if (acpi_quirks == ACPI_Q_OK)
+ acpi_table_quirks(&acpi_quirks);
+
+ /* If the user manually set the disabled hint to 0, force-enable ACPI. */
+ if (resource_int_value("acpi", 0, "disabled", &val) == 0 && val == 0)
+ acpi_quirks &= ~ACPI_Q_BROKEN;
+ if (acpi_quirks & ACPI_Q_BROKEN) {
+ printf("ACPI disabled by blacklist. Contact your BIOS vendor.\n");
+ status = AE_SUPPORT;
+ }
+
+ return_VALUE (status);
+}
+
+/*
+ * Detect ACPI and perform early initialisation.
+ *
+ * Locates the RSDP, maps the RSDT/XSDT header to build the acpi0
+ * device description ("<OemId> <OemTableId>"), and records the
+ * ACPI-CA version string.  Returns ENXIO when ACPI is disabled by a
+ * hint, another PM system is active, startup failed, or the tables
+ * cannot be mapped.
+ */
+int
+acpi_identify(void)
+{
+ ACPI_TABLE_RSDP *rsdp;
+ ACPI_TABLE_HEADER *rsdt;
+ ACPI_PHYSICAL_ADDRESS paddr;
+ struct sbuf sb;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ /* Identification only makes sense during boot. */
+ if (!cold)
+ return (ENXIO);
+
+ /* Check that we haven't been disabled with a hint. */
+ if (resource_disabled("acpi", 0))
+ return (ENXIO);
+
+ /* Check for other PM systems. */
+ if (power_pm_get_type() != POWER_PM_TYPE_NONE &&
+ power_pm_get_type() != POWER_PM_TYPE_ACPI) {
+ printf("ACPI identify failed, other PM system enabled.\n");
+ return (ENXIO);
+ }
+
+ /* Initialize root tables. */
+ if (ACPI_FAILURE(acpi_Startup())) {
+ printf("ACPI: Try disabling either ACPI or apic support.\n");
+ return (ENXIO);
+ }
+
+ /* Map the RSDP and pick the XSDT (ACPI 2.0+) or RSDT address. */
+ if ((paddr = AcpiOsGetRootPointer()) == 0 ||
+ (rsdp = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_RSDP))) == NULL)
+ return (ENXIO);
+ if (rsdp->Revision > 1 && rsdp->XsdtPhysicalAddress != 0)
+ paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->XsdtPhysicalAddress;
+ else
+ paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->RsdtPhysicalAddress;
+ AcpiOsUnmapMemory(rsdp, sizeof(ACPI_TABLE_RSDP));
+
+ /* Build "<OemId> <OemTableId>" into acpi_desc via a fixed sbuf. */
+ if ((rsdt = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_HEADER))) == NULL)
+ return (ENXIO);
+ sbuf_new(&sb, acpi_desc, sizeof(acpi_desc), SBUF_FIXEDLEN);
+ sbuf_bcat(&sb, rsdt->OemId, ACPI_OEM_ID_SIZE);
+ sbuf_trim(&sb);
+ sbuf_putc(&sb, ' ');
+ sbuf_bcat(&sb, rsdt->OemTableId, ACPI_OEM_TABLE_ID_SIZE);
+ sbuf_trim(&sb);
+ sbuf_finish(&sb);
+ sbuf_delete(&sb);
+ AcpiOsUnmapMemory(rsdt, sizeof(ACPI_TABLE_HEADER));
+
+ snprintf(acpi_ca_version, sizeof(acpi_ca_version), "%x", ACPI_CA_VERSION);
+
+ return (0);
+}
+
+/*
+ * Fetch some descriptive data from ACPI to put in our attach message.
+ * The description string was already assembled by acpi_identify().
+ */
+static int
+acpi_probe(device_t dev)
+{
+
+    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+    device_set_desc(dev, acpi_desc);
+
+    return_VALUE (0);
+}
+
+/*
+ * Attach the ACPI bus: bring up ACPICA, load the namespace, create the
+ * sysctl tree and control device, probe sleep states, and attach child
+ * devices found in the namespace.  The order of steps below matters;
+ * e.g. the ECDT probe must run before object initialisation.
+ */
+static int
+acpi_attach(device_t dev)
+{
+    struct acpi_softc *sc;
+    ACPI_STATUS status;
+    int error, state;
+    UINT32 flags;
+    UINT8 TypeA, TypeB;
+    char *env;
+
+    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+    sc = device_get_softc(dev);
+    sc->acpi_dev = dev;
+    callout_init(&sc->susp_force_to, TRUE);
+
+    error = ENXIO;
+
+    /* Initialize resource manager. */
+    acpi_rman_io.rm_type = RMAN_ARRAY;
+    acpi_rman_io.rm_start = 0;
+    acpi_rman_io.rm_end = 0xffff;
+    acpi_rman_io.rm_descr = "ACPI I/O ports";
+    if (rman_init(&acpi_rman_io) != 0)
+	panic("acpi rman_init IO ports failed");
+    acpi_rman_mem.rm_type = RMAN_ARRAY;
+    acpi_rman_mem.rm_start = 0;
+    acpi_rman_mem.rm_end = ~0ul;
+    acpi_rman_mem.rm_descr = "ACPI I/O memory addresses";
+    if (rman_init(&acpi_rman_mem) != 0)
+	panic("acpi rman_init memory failed");
+
+    /* Initialise the ACPI mutex */
+    mtx_init(&acpi_mutex, "ACPI global lock", NULL, MTX_DEF);
+
+    /*
+     * Set the globals from our tunables. This is needed because ACPI-CA
+     * uses UINT8 for some values and we have no tunable_byte.
+     */
+    AcpiGbl_AllMethodsSerialized = acpi_serialize_methods ? TRUE : FALSE;
+    AcpiGbl_EnableInterpreterSlack = acpi_interpreter_slack ? TRUE : FALSE;
+    AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE;
+
+#ifndef ACPI_DEBUG
+    /*
+     * Disable all debugging layers and levels.
+     */
+    AcpiDbgLayer = 0;
+    AcpiDbgLevel = 0;
+#endif
+
+    /* Start up the ACPI CA subsystem. */
+    status = AcpiInitializeSubsystem();
+    if (ACPI_FAILURE(status)) {
+	device_printf(dev, "Could not initialize Subsystem: %s\n",
+	    AcpiFormatException(status));
+	goto out;
+    }
+
+    /* Override OS interfaces if the user requested. */
+    acpi_reset_interfaces(dev);
+
+    /* Load ACPI name space. */
+    status = AcpiLoadTables();
+    if (ACPI_FAILURE(status)) {
+	device_printf(dev, "Could not load Namespace: %s\n",
+	    AcpiFormatException(status));
+	goto out;
+    }
+
+#if defined(__i386__) || defined(__amd64__)
+    /* Handle MCFG table if present. */
+    acpi_enable_pcie();
+#endif
+
+    /*
+     * Note that some systems (specifically, those with namespace evaluation
+     * issues that require the avoidance of parts of the namespace) must
+     * avoid running _INI and _STA on everything, as well as dodging the final
+     * object init pass.
+     *
+     * For these devices, we set ACPI_NO_DEVICE_INIT and ACPI_NO_OBJECT_INIT).
+     *
+     * XXX We should arrange for the object init pass after we have attached
+     * all our child devices, but on many systems it works here.
+     */
+    flags = 0;
+    if (testenv("debug.acpi.avoid"))
+	flags = ACPI_NO_DEVICE_INIT | ACPI_NO_OBJECT_INIT;
+
+    /* Bring the hardware and basic handlers online. */
+    if (ACPI_FAILURE(status = AcpiEnableSubsystem(flags))) {
+	device_printf(dev, "Could not enable ACPI: %s\n",
+	    AcpiFormatException(status));
+	goto out;
+    }
+
+    /*
+     * Call the ECDT probe function to provide EC functionality before
+     * the namespace has been evaluated.
+     *
+     * XXX This happens before the sysresource devices have been probed and
+     * attached so its resources come from nexus0.  In practice, this isn't
+     * a problem but should be addressed eventually.
+     */
+    acpi_ec_ecdt_probe(dev);
+
+    /* Bring device objects and regions online. */
+    if (ACPI_FAILURE(status = AcpiInitializeObjects(flags))) {
+	device_printf(dev, "Could not initialize ACPI objects: %s\n",
+	    AcpiFormatException(status));
+	goto out;
+    }
+
+    /*
+     * Setup our sysctl tree.
+     *
+     * XXX: This doesn't check to make sure that none of these fail.
+     */
+    sysctl_ctx_init(&sc->acpi_sysctl_ctx);
+    sc->acpi_sysctl_tree = SYSCTL_ADD_NODE(&sc->acpi_sysctl_ctx,
+			       SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
+			       device_get_name(dev), CTLFLAG_RD, 0, "");
+    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
+	OID_AUTO, "supported_sleep_state", CTLTYPE_STRING | CTLFLAG_RD,
+	0, 0, acpi_supported_sleep_state_sysctl, "A", "");
+    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
+	OID_AUTO, "power_button_state", CTLTYPE_STRING | CTLFLAG_RW,
+	&sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A", "");
+    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
+	OID_AUTO, "sleep_button_state", CTLTYPE_STRING | CTLFLAG_RW,
+	&sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A", "");
+    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
+	OID_AUTO, "lid_switch_state", CTLTYPE_STRING | CTLFLAG_RW,
+	&sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A", "");
+    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
+	OID_AUTO, "standby_state", CTLTYPE_STRING | CTLFLAG_RW,
+	&sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A", "");
+    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
+	OID_AUTO, "suspend_state", CTLTYPE_STRING | CTLFLAG_RW,
+	&sc->acpi_suspend_sx, 0, acpi_sleep_state_sysctl, "A", "");
+    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
+	OID_AUTO, "sleep_delay", CTLFLAG_RW, &sc->acpi_sleep_delay, 0,
+	"sleep delay in seconds");
+    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
+	OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0, "S4BIOS mode");
+    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
+	OID_AUTO, "verbose", CTLFLAG_RW, &sc->acpi_verbose, 0, "verbose mode");
+    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
+	OID_AUTO, "disable_on_reboot", CTLFLAG_RW,
+	&sc->acpi_do_disable, 0, "Disable ACPI when rebooting/halting system");
+    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
+	OID_AUTO, "handle_reboot", CTLFLAG_RW,
+	&sc->acpi_handle_reboot, 0, "Use ACPI Reset Register to reboot");
+
+    /*
+     * Default to 1 second before sleeping to give some machines time to
+     * stabilize.
+     */
+    sc->acpi_sleep_delay = 1;
+    if (bootverbose)
+	sc->acpi_verbose = 1;
+    if ((env = getenv("hw.acpi.verbose")) != NULL) {
+	if (strcmp(env, "0") != 0)
+	    sc->acpi_verbose = 1;
+	freeenv(env);
+    }
+
+    /* Only enable reboot by default if the FADT says it is available. */
+    if (AcpiGbl_FADT.Flags & ACPI_FADT_RESET_REGISTER)
+	sc->acpi_handle_reboot = 1;
+
+    /* Only enable S4BIOS by default if the FACS says it is available. */
+    if (AcpiGbl_FACS->Flags & ACPI_FACS_S4_BIOS_PRESENT)
+	sc->acpi_s4bios = 1;
+
+    /* Probe all supported sleep states. */
+    acpi_sleep_states[ACPI_STATE_S0] = TRUE;
+    for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
+	if (ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB)))
+	    acpi_sleep_states[state] = TRUE;
+
+    /*
+     * Dispatch the default sleep state to devices.  The lid switch is set
+     * to UNKNOWN by default to avoid surprising users.
+     */
+    sc->acpi_power_button_sx = acpi_sleep_states[ACPI_STATE_S5] ?
+	ACPI_STATE_S5 : ACPI_STATE_UNKNOWN;
+    sc->acpi_lid_switch_sx = ACPI_STATE_UNKNOWN;
+    sc->acpi_standby_sx = acpi_sleep_states[ACPI_STATE_S1] ?
+	ACPI_STATE_S1 : ACPI_STATE_UNKNOWN;
+    sc->acpi_suspend_sx = acpi_sleep_states[ACPI_STATE_S3] ?
+	ACPI_STATE_S3 : ACPI_STATE_UNKNOWN;
+
+    /* Pick the first valid sleep state for the sleep button default. */
+    sc->acpi_sleep_button_sx = ACPI_STATE_UNKNOWN;
+    for (state = ACPI_STATE_S1; state <= ACPI_STATE_S4; state++)
+	if (acpi_sleep_states[state]) {
+	    sc->acpi_sleep_button_sx = state;
+	    break;
+	}
+
+    acpi_enable_fixed_events(sc);
+
+    /*
+     * Scan the namespace and attach/initialise children.
+     */
+
+    /* Register our shutdown handler. */
+    EVENTHANDLER_REGISTER(shutdown_final, acpi_shutdown_final, sc,
+	SHUTDOWN_PRI_LAST);
+
+    /*
+     * Register our acpi event handlers.
+     * XXX should be configurable eg. via userland policy manager.
+     */
+    EVENTHANDLER_REGISTER(acpi_sleep_event, acpi_system_eventhandler_sleep,
+	sc, ACPI_EVENT_PRI_LAST);
+    EVENTHANDLER_REGISTER(acpi_wakeup_event, acpi_system_eventhandler_wakeup,
+	sc, ACPI_EVENT_PRI_LAST);
+
+    /* Flag our initial states. */
+    sc->acpi_enabled = TRUE;
+    sc->acpi_sstate = ACPI_STATE_S0;
+    sc->acpi_sleep_disabled = TRUE;
+
+    /* Create the control device */
+    sc->acpi_dev_t = make_dev(&acpi_cdevsw, 0, UID_ROOT, GID_WHEEL, 0644,
+			      "acpi");
+    sc->acpi_dev_t->si_drv1 = sc;
+
+    if ((error = acpi_machdep_init(dev)))
+	goto out;
+
+    /* Register ACPI again to pass the correct argument of pm_func. */
+    power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc);
+
+    if (!acpi_disabled("bus"))
+	acpi_probe_children(dev);
+
+    /* Update all GPEs and enable runtime GPEs. */
+    status = AcpiUpdateAllGpes();
+    if (ACPI_FAILURE(status))
+	device_printf(dev, "Could not update all GPEs: %s\n",
+	    AcpiFormatException(status));
+
+    /* Allow sleep request after a while. */
+    timeout(acpi_sleep_enable, sc, hz * ACPI_MINIMUM_AWAKETIME);
+
+    error = 0;
+
+ out:
+    return_VALUE (error);
+}
+
+/*
+ * Set the power (D-) state of all attached children of 'dev' for the
+ * current sleep state.  If a child supplies _SxD, that value overrides
+ * the requested default.  Unattached children are skipped; they are
+ * handled separately.
+ */
+static void
+acpi_set_power_children(device_t dev, int state)
+{
+    device_t child, parent;
+    device_t *devlist;
+    int dstate, i, numdevs;
+
+    if (device_get_children(dev, &devlist, &numdevs) != 0)
+	return;
+
+    /*
+     * Retrieve and set D-state for the sleep state if _SxD is present.
+     * Skip children who aren't attached since they are handled separately.
+     */
+    parent = device_get_parent(dev);
+    for (i = 0; i < numdevs; i++) {
+	child = devlist[i];
+	dstate = state;
+	if (device_is_attached(child) &&
+	    acpi_device_pwr_for_sleep(parent, dev, &dstate) == 0)
+	    acpi_set_powerstate(child, dstate);
+    }
+    free(devlist, M_TEMP);
+}
+
+/*
+ * Bus suspend method: let the children suspend themselves first, then
+ * drop every attached child into D3 for the pending sleep state.
+ */
+static int
+acpi_suspend(device_t dev)
+{
+    int error;
+
+    GIANT_REQUIRED;
+
+    error = bus_generic_suspend(dev);
+    if (error != 0)
+	return (error);
+    acpi_set_power_children(dev, ACPI_STATE_D3);
+    return (0);
+}
+
+/*
+ * Bus resume method: restore every child to D0 before asking it to
+ * resume.
+ */
+static int
+acpi_resume(device_t dev)
+{
+    int error;
+
+    GIANT_REQUIRED;
+
+    acpi_set_power_children(dev, ACPI_STATE_D0);
+    error = bus_generic_resume(dev);
+
+    return (error);
+}
+
+/*
+ * Bus shutdown method.  Children shut down first; afterwards the GPEs
+ * are prepared for S5: only those able to power the system back on
+ * (i.e., RTC) stay enabled, the rest are disabled.
+ */
+static int
+acpi_shutdown(device_t dev)
+{
+
+    GIANT_REQUIRED;
+
+    /* Give children their chance before touching wake GPEs. */
+    bus_generic_shutdown(dev);
+
+    acpi_wake_prep_walk(ACPI_STATE_S5);
+
+    return (0);
+}
+
+/*
+ * Create a new child on the ACPI bus.  Each child carries a private
+ * struct acpi_device in its ivars holding the handle, flags, and the
+ * resource list; it is freed again if the child cannot be created.
+ */
+static device_t
+acpi_add_child(device_t bus, u_int order, const char *name, int unit)
+{
+    struct acpi_device *ad;
+    device_t child;
+
+    ad = malloc(sizeof(*ad), M_ACPIDEV, M_NOWAIT | M_ZERO);
+    if (ad == NULL)
+	return (NULL);
+
+    resource_list_init(&ad->ad_rl);
+
+    child = device_add_child_ordered(bus, order, name, unit);
+    if (child == NULL) {
+	free(ad, M_ACPIDEV);
+	return (NULL);
+    }
+    device_set_ivars(child, ad);
+    return (child);
+}
+
+/*
+ * Print a one-line description of a child device (resources and
+ * flags) for the boot messages; returns the character count printed.
+ */
+static int
+acpi_print_child(device_t bus, device_t child)
+{
+    struct acpi_device *ad = device_get_ivars(child);
+    struct resource_list *rl = &ad->ad_rl;
+    int chars;
+
+    chars = bus_print_child_header(bus, child);
+    chars += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
+    chars += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#lx");
+    chars += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
+    chars += resource_list_print_type(rl, "drq", SYS_RES_DRQ, "%ld");
+    if (device_get_flags(child) != 0)
+	chars += printf(" flags %#x", device_get_flags(child));
+    chars += bus_print_child_footer(bus, child);
+
+    return (chars);
+}
+
+/*
+ * If this device is an ACPI child but no one claimed it, attempt
+ * to power it off.  We'll power it back up when a driver is added.
+ *
+ * XXX Disabled for now since many necessary devices (like fdc and
+ * ATA) don't claim the devices we created for them but still expect
+ * them to be powered up.  Compiled out unless
+ * ACPI_ENABLE_POWERDOWN_NODRIVER is defined.
+ */
+static void
+acpi_probe_nomatch(device_t bus, device_t child)
+{
+#ifdef ACPI_ENABLE_POWERDOWN_NODRIVER
+    acpi_set_powerstate(child, ACPI_STATE_D3);
+#endif
+}
+
+/*
+ * A new driver was registered: let it identify devices on our bus and
+ * try to attach it to any child that currently has no driver.
+ *
+ * XXX Powering unclaimed devices up/down is disabled for now (see
+ * acpi_probe_nomatch for details).
+ */
+static void
+acpi_driver_added(device_t dev, driver_t *driver)
+{
+    device_t *children;
+    int count, idx;
+
+    DEVICE_IDENTIFY(driver, dev);
+    if (device_get_children(dev, &children, &count) != 0)
+	return;
+    for (idx = 0; idx < count; idx++) {
+	device_t child = children[idx];
+
+	if (device_get_state(child) != DS_NOTPRESENT)
+	    continue;
+#ifdef ACPI_ENABLE_POWERDOWN_NODRIVER
+	/* Power up for the probe; back down again if nothing attached. */
+	acpi_set_powerstate(child, ACPI_STATE_D0);
+	if (device_probe_and_attach(child) != 0)
+	    acpi_set_powerstate(child, ACPI_STATE_D3);
+#else
+	device_probe_and_attach(child);
+#endif
+    }
+    free(children, M_TEMP);
+}
+
+/* Location hint for devctl(8): the child's ACPI handle path, if any. */
+static int
+acpi_child_location_str_method(device_t cbdev, device_t child, char *buf,
+    size_t buflen)
+{
+    struct acpi_device *ad = device_get_ivars(child);
+
+    if (ad->ad_handle == NULL)
+	snprintf(buf, buflen, "unknown");
+    else
+	snprintf(buf, buflen, "handle=%s", acpi_name(ad->ad_handle));
+    return (0);
+}
+
+/* PnP information (_HID and _UID) for devctl(8). */
+static int
+acpi_child_pnpinfo_str_method(device_t cbdev, device_t child, char *buf,
+    size_t buflen)
+{
+    struct acpi_device *ad = device_get_ivars(child);
+    ACPI_DEVICE_INFO *info;
+    const char *hid;
+    unsigned long uid;
+
+    if (ACPI_FAILURE(AcpiGetObjectInfo(ad->ad_handle, &info))) {
+	snprintf(buf, buflen, "unknown");
+	return (0);
+    }
+
+    hid = (info->Valid & ACPI_VALID_HID) ? info->HardwareId.String : "none";
+    uid = (info->Valid & ACPI_VALID_UID) ?
+	strtoul(info->UniqueId.String, NULL, 10) : 0UL;
+    snprintf(buf, buflen, "_HID=%s _UID=%lu", hid, uid);
+    AcpiOsFree(info);
+
+    return (0);
+}
+
+/*
+ * Handle per-device ivars.  Reads either ACPI-specific ivars (handle,
+ * private pointer, flags) or the ISA-compatibility ivars used when an
+ * ACPI child masquerades as an ISA PnP device.
+ */
+static int
+acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
+{
+    struct acpi_device *ad;
+
+    if ((ad = device_get_ivars(child)) == NULL) {
+	device_printf(child, "device has no ivars\n");
+	return (ENOENT);
+    }
+
+    /* ACPI and ISA compatibility ivars */
+    switch(index) {
+    case ACPI_IVAR_HANDLE:
+	*(ACPI_HANDLE *)result = ad->ad_handle;
+	break;
+    case ACPI_IVAR_PRIVATE:
+	*(void **)result = ad->ad_private;
+	break;
+    case ACPI_IVAR_FLAGS:
+	*(int *)result = ad->ad_flags;
+	break;
+    case ISA_IVAR_VENDORID:
+    case ISA_IVAR_SERIAL:
+    case ISA_IVAR_COMPATID:
+	/* Not applicable to ACPI devices; report "none". */
+	*(int *)result = -1;
+	break;
+    case ISA_IVAR_LOGICALID:
+	*(int *)result = acpi_isa_get_logicalid(child);
+	break;
+    default:
+	return (ENOENT);
+    }
+
+    return (0);
+}
+
+/*
+ * Write per-device ivars.  Only the ACPI-specific ivars are writable;
+ * an unknown index is a programming error and panics.
+ */
+static int
+acpi_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
+{
+    struct acpi_device *ad;
+
+    if ((ad = device_get_ivars(child)) == NULL) {
+	device_printf(child, "device has no ivars\n");
+	return (ENOENT);
+    }
+
+    switch(index) {
+    case ACPI_IVAR_HANDLE:
+	ad->ad_handle = (ACPI_HANDLE)value;
+	break;
+    case ACPI_IVAR_PRIVATE:
+	ad->ad_private = (void *)value;
+	break;
+    case ACPI_IVAR_FLAGS:
+	ad->ad_flags = (int)value;
+	break;
+    default:
+	panic("bad ivar write request (%d)", index);
+	return (ENOENT);
+    }
+
+    return (0);
+}
+
+/*
+ * Handle child resource allocation/removal: hand back the resource
+ * list stored in the child's ivars.
+ */
+static struct resource_list *
+acpi_get_rlist(device_t dev, device_t child)
+{
+    struct acpi_device *ad = device_get_ivars(child);
+
+    return (&ad->ad_rl);
+}
+
+/*
+ * Return non-zero when 'value' lies inside one of the device's
+ * resource ranges of the given type.
+ */
+static int
+acpi_match_resource_hint(device_t dev, int type, long value)
+{
+    struct acpi_device *ad = device_get_ivars(dev);
+    struct resource_list_entry *entry;
+
+    STAILQ_FOREACH(entry, &ad->ad_rl, link) {
+	if (entry->type == type &&
+	    entry->start <= value && value <= entry->end)
+	    return (1);
+    }
+    return (0);
+}
+
+/*
+ * Wire device unit numbers based on resource matches in hints.
+ * Scans every "<name>N at acpi0/isa0" hint and, if its declared
+ * resources match this child's resources, assigns that hint's unit
+ * number via *unitp.
+ */
+static void
+acpi_hint_device_unit(device_t acdev, device_t child, const char *name,
+    int *unitp)
+{
+    const char *s;
+    long value;
+    int line, matches, unit;
+
+    /*
+     * Iterate over all the hints for the devices with the specified
+     * name to see if one's resources are a subset of this device.
+     */
+    line = 0;
+    for (;;) {
+	if (resource_find_dev(&line, name, &unit, "at", NULL) != 0)
+	    break;
+
+	/* Must have an "at" for acpi or isa. */
+	resource_string_value(name, unit, "at", &s);
+	if (!(strcmp(s, "acpi0") == 0 || strcmp(s, "acpi") == 0 ||
+	    strcmp(s, "isa0") == 0 || strcmp(s, "isa") == 0))
+	    continue;
+
+	/*
+	 * Check for matching resources.  We must have at least one match.
+	 * Since I/O and memory resources cannot be shared, if we get a
+	 * match on either of those, ignore any mismatches in IRQs or DRQs.
+	 *
+	 * XXX: We may want to revisit this to be more lenient and wire
+	 * as long as it gets one match.
+	 */
+	matches = 0;
+	if (resource_long_value(name, unit, "port", &value) == 0) {
+	    /*
+	     * Floppy drive controllers are notorious for having a
+	     * wide variety of resources not all of which include the
+	     * first port that is specified by the hint (typically
+	     * 0x3f0) (see the comment above fdc_isa_alloc_resources()
+	     * in fdc_isa.c).  However, they do all seem to include
+	     * port + 2 (e.g. 0x3f2) so for a floppy device, look for
+	     * 'value + 2' in the port resources instead of the hint
+	     * value.
+	     */
+	    if (strcmp(name, "fdc") == 0)
+		value += 2;
+	    if (acpi_match_resource_hint(child, SYS_RES_IOPORT, value))
+		matches++;
+	    else
+		continue;
+	}
+	if (resource_long_value(name, unit, "maddr", &value) == 0) {
+	    if (acpi_match_resource_hint(child, SYS_RES_MEMORY, value))
+		matches++;
+	    else
+		continue;
+	}
+	/* An I/O or memory match suffices; skip the IRQ/DRQ checks. */
+	if (matches > 0)
+	    goto matched;
+	if (resource_long_value(name, unit, "irq", &value) == 0) {
+	    if (acpi_match_resource_hint(child, SYS_RES_IRQ, value))
+		matches++;
+	    else
+		continue;
+	}
+	if (resource_long_value(name, unit, "drq", &value) == 0) {
+	    if (acpi_match_resource_hint(child, SYS_RES_DRQ, value))
+		matches++;
+	    else
+		continue;
+	}
+
+    matched:
+	if (matches > 0) {
+	    /* We have a winner! */
+	    *unitp = unit;
+	    break;
+	}
+    }
+}
+
+/*
+ * Pre-allocate/manage all memory and IO resources.  Since rman can't handle
+ * duplicates, we merge any in the sysresource attach routine.
+ * First attaches the PNP0C01/PNP0C02 sysresource children, then pulls
+ * each of their I/O and memory ranges from our parent and hands the
+ * range to the matching local rman pool for sub-allocation.
+ */
+static int
+acpi_sysres_alloc(device_t dev)
+{
+    struct resource *res;
+    struct resource_list *rl;
+    struct resource_list_entry *rle;
+    struct rman *rm;
+    char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL };
+    device_t *children;
+    int child_count, i;
+
+    /*
+     * Probe/attach any sysresource devices.  This would be unnecessary if we
+     * had multi-pass probe/attach.
+     */
+    if (device_get_children(dev, &children, &child_count) != 0)
+	return (ENXIO);
+    for (i = 0; i < child_count; i++) {
+	if (ACPI_ID_PROBE(dev, children[i], sysres_ids) != NULL)
+	    device_probe_and_attach(children[i]);
+    }
+    free(children, M_TEMP);
+
+    rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
+    STAILQ_FOREACH(rle, rl, link) {
+	if (rle->res != NULL) {
+	    device_printf(dev, "duplicate resource for %lx\n", rle->start);
+	    continue;
+	}
+
+	/* Only memory and IO resources are valid here. */
+	switch (rle->type) {
+	case SYS_RES_IOPORT:
+	    rm = &acpi_rman_io;
+	    break;
+	case SYS_RES_MEMORY:
+	    rm = &acpi_rman_mem;
+	    break;
+	default:
+	    continue;
+	}
+
+	/* Pre-allocate resource and add to our rman pool. */
+	res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, rle->type,
+	    &rle->rid, rle->start, rle->start + rle->count - 1, rle->count, 0);
+	if (res != NULL) {
+	    rman_manage_region(rm, rman_get_start(res), rman_get_end(res));
+	    rle->res = res;
+	} else
+	    device_printf(dev, "reservation of %lx, %lx (%d) failed\n",
+		rle->start, rle->count, rle->type);
+    }
+    return (0);
+}
+
+/* PNP ids for PCI interrupt link and system resource devices. */
+static char *pcilink_ids[] = { "PNP0C0F", NULL };
+static char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL };
+
+/*
+ * Reserve declared resources for devices found during attach once system
+ * resources have been allocated.  Skips sysresource children, IRQ
+ * resources, and entries that were already allocated, then marks the
+ * softc so acpi_set_resource() reserves future additions itself.
+ */
+static void
+acpi_reserve_resources(device_t dev)
+{
+    struct resource_list_entry *rle;
+    struct resource_list *rl;
+    struct acpi_device *ad;
+    struct acpi_softc *sc;
+    device_t *children;
+    int child_count, i;
+
+    sc = device_get_softc(dev);
+    if (device_get_children(dev, &children, &child_count) != 0)
+	return;
+    for (i = 0; i < child_count; i++) {
+	ad = device_get_ivars(children[i]);
+	rl = &ad->ad_rl;
+
+	/* Don't reserve system resources. */
+	if (ACPI_ID_PROBE(dev, children[i], sysres_ids) != NULL)
+	    continue;
+
+	STAILQ_FOREACH(rle, rl, link) {
+	    /*
+	     * Don't reserve IRQ resources.  There are many sticky things
+	     * to get right otherwise (e.g. IRQs for psm, atkbd, and HPET
+	     * when using legacy routing).
+	     */
+	    if (rle->type == SYS_RES_IRQ)
+		continue;
+
+	    /*
+	     * Don't reserve the resource if it is already allocated.
+	     * The acpi_ec(4) driver can allocate its resources early
+	     * if ECDT is present.
+	     */
+	    if (rle->res != NULL)
+		continue;
+
+	    /*
+	     * Try to reserve the resource from our parent.  If this
+	     * fails because the resource is a system resource, just
+	     * let it be.  The resource range is already reserved so
+	     * that other devices will not use it.  If the driver
+	     * needs to allocate the resource, then
+	     * acpi_alloc_resource() will sub-alloc from the system
+	     * resource.
+	     */
+	    resource_list_reserve(rl, dev, children[i], rle->type, &rle->rid,
+		rle->start, rle->end, rle->count, 0);
+	}
+    }
+    free(children, M_TEMP);
+    sc->acpi_resources_reserved = 1;
+}
+
+/*
+ * Add (and, after initial reservation has run, reserve) a resource
+ * range on a child's resource list.  Returns EBUSY if the rid is
+ * currently allocated by the child, 0 otherwise.
+ */
+static int
+acpi_set_resource(device_t dev, device_t child, int type, int rid,
+    u_long start, u_long count)
+{
+    struct acpi_softc *sc = device_get_softc(dev);
+    struct acpi_device *ad = device_get_ivars(child);
+    struct resource_list *rl = &ad->ad_rl;
+    u_long end;
+
+    /* Ignore IRQ resources for PCI link devices. */
+    if (type == SYS_RES_IRQ && ACPI_ID_PROBE(dev, child, pcilink_ids) != NULL)
+	return (0);
+
+    /* If the resource is already allocated, fail. */
+    if (resource_list_busy(rl, type, rid))
+	return (EBUSY);
+
+    /* If the resource is already reserved, release it. */
+    if (resource_list_reserved(rl, type, rid))
+	resource_list_unreserve(rl, dev, child, type, rid);
+
+    /* Add the resource. */
+    end = (start + count - 1);
+    resource_list_add(rl, type, rid, start, end, count);
+
+    /* Don't reserve resources until the system resources are allocated. */
+    if (!sc->acpi_resources_reserved)
+	return (0);
+
+    /* Don't reserve system resources. */
+    if (ACPI_ID_PROBE(dev, child, sysres_ids) != NULL)
+	return (0);
+
+    /*
+     * Don't reserve IRQ resources.  There are many sticky things to
+     * get right otherwise (e.g. IRQs for psm, atkbd, and HPET when
+     * using legacy routing).
+     */
+    if (type == SYS_RES_IRQ)
+	return (0);
+
+    /*
+     * Reserve the resource.
+     *
+     * XXX: Ignores failure for now.  Failure here is probably a
+     * BIOS/firmware bug?
+     */
+    resource_list_reserve(rl, dev, child, type, &rid, start, end, count, 0);
+    return (0);
+}
+
+/*
+ * Bus alloc_resource method.  Direct children are served from their
+ * resource lists (with ISA-like implicit addition of unknown ranges
+ * and deferred interrupt configuration); other requests pass up to the
+ * parent.  A failed specific-range request falls back to
+ * sub-allocation from our system resource pools.
+ */
+static struct resource *
+acpi_alloc_resource(device_t bus, device_t child, int type, int *rid,
+    u_long start, u_long end, u_long count, u_int flags)
+{
+    ACPI_RESOURCE ares;
+    struct acpi_device *ad;
+    struct resource_list_entry *rle;
+    struct resource_list *rl;
+    struct resource *res;
+    int isdefault = (start == 0UL && end == ~0UL);
+
+    /*
+     * First attempt at allocating the resource.  For direct children,
+     * use resource_list_alloc() to handle reserved resources.  For
+     * other devices, pass the request up to our parent.
+     */
+    if (bus == device_get_parent(child)) {
+	ad = device_get_ivars(child);
+	rl = &ad->ad_rl;
+
+	/*
+	 * Simulate the behavior of the ISA bus for direct children
+	 * devices.  That is, if a non-default range is specified for
+	 * a resource that doesn't exist, use bus_set_resource() to
+	 * add the resource before allocating it.  Note that these
+	 * resources will not be reserved.
+	 */
+	if (!isdefault && resource_list_find(rl, type, *rid) == NULL)
+		resource_list_add(rl, type, *rid, start, end, count);
+	res = resource_list_alloc(rl, bus, child, type, rid, start, end, count,
+	    flags);
+	if (res != NULL && type == SYS_RES_IRQ) {
+	    /*
+	     * Since bus_config_intr() takes immediate effect, we cannot
+	     * configure the interrupt associated with a device when we
+	     * parse the resources but have to defer it until a driver
+	     * actually allocates the interrupt via bus_alloc_resource().
+	     *
+	     * XXX: Should we handle the lookup failing?
+	     */
+	    if (ACPI_SUCCESS(acpi_lookup_irq_resource(child, *rid, res, &ares)))
+		acpi_config_intr(child, &ares);
+	}
+
+	/*
+	 * If this is an allocation of the "default" range for a given
+	 * RID, fetch the exact bounds for this resource from the
+	 * resource list entry to try to allocate the range from the
+	 * system resource regions.
+	 */
+	if (res == NULL && isdefault) {
+	    rle = resource_list_find(rl, type, *rid);
+	    if (rle != NULL) {
+		start = rle->start;
+		end = rle->end;
+		count = rle->count;
+	    }
+	}
+    } else
+	res = BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid,
+	    start, end, count, flags);
+
+    /*
+     * If the first attempt failed and this is an allocation of a
+     * specific range, try to satisfy the request via a suballocation
+     * from our system resource regions.
+     */
+    if (res == NULL && start + count - 1 == end)
+	res = acpi_alloc_sysres(child, type, rid, start, end, count, flags);
+    return (res);
+}
+
+/*
+ * Attempt to allocate a specific resource range from the system
+ * resource ranges.  Note that we only handle memory and I/O port
+ * system resources.  The caller must pass a specific (non-wildcard)
+ * range; activation is done via the normal bus method when RF_ACTIVE
+ * is requested.
+ */
+struct resource *
+acpi_alloc_sysres(device_t child, int type, int *rid, u_long start, u_long end,
+    u_long count, u_int flags)
+{
+    struct rman *rm;
+    struct resource *res;
+
+    switch (type) {
+    case SYS_RES_IOPORT:
+	rm = &acpi_rman_io;
+	break;
+    case SYS_RES_MEMORY:
+	rm = &acpi_rman_mem;
+	break;
+    default:
+	return (NULL);
+    }
+
+    KASSERT(start + count - 1 == end, ("wildcard resource range"));
+    /* Reserve without RF_ACTIVE; activation is handled below. */
+    res = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE,
+	child);
+    if (res == NULL)
+	return (NULL);
+
+    rman_set_rid(res, *rid);
+
+    /* If requested, activate the resource using the parent's method. */
+    if (flags & RF_ACTIVE)
+	if (bus_activate_resource(child, type, *rid, res) != 0) {
+	    rman_release_resource(res);
+	    return (NULL);
+	}
+
+    return (res);
+}
+
+/*
+ * Report whether a resource came out of one of our local rman pools.
+ * Only memory and I/O port resources are managed through rman.
+ */
+static int
+acpi_is_resource_managed(int type, struct resource *r)
+{
+    struct rman *rm;
+
+    if (type == SYS_RES_IOPORT)
+	rm = &acpi_rman_io;
+    else if (type == SYS_RES_MEMORY)
+	rm = &acpi_rman_mem;
+    else
+	return (0);
+    return (rman_is_region_manager(r, rm));
+}
+
+/*
+ * Adjust a resource's range: directly via rman for ranges from our
+ * own pools, otherwise through the generic bus method.
+ */
+static int
+acpi_adjust_resource(device_t bus, device_t child, int type, struct resource *r,
+    u_long start, u_long end)
+{
+
+    if (!acpi_is_resource_managed(type, r))
+	return (bus_generic_adjust_resource(bus, child, type, r, start, end));
+    return (rman_adjust_resource(r, start, end));
+}
+
+/*
+ * Release a resource.  Ranges from our internal managers are
+ * deactivated (if active) and returned to the local pool; everything
+ * else goes through the generic resource-list release path.
+ */
+static int
+acpi_release_resource(device_t bus, device_t child, int type, int rid,
+    struct resource *r)
+{
+    int error;
+
+    if (!acpi_is_resource_managed(type, r))
+	return (bus_generic_rl_release_resource(bus, child, type, rid, r));
+
+    if ((rman_get_flags(r) & RF_ACTIVE) != 0) {
+	error = bus_deactivate_resource(child, type, rid, r);
+	if (error != 0)
+	    return (error);
+    }
+    return (rman_release_resource(r));
+}
+
+/*
+ * Remove a resource entry from a child's list, unreserving it first.
+ * Entries the child still holds allocated are left alone.
+ */
+static void
+acpi_delete_resource(device_t bus, device_t child, int type, int rid)
+{
+    struct resource_list *rl = acpi_get_rlist(bus, child);
+
+    if (resource_list_busy(rl, type, rid)) {
+	device_printf(bus, "delete_resource: Resource still owned by child"
+	    " (type=%d, rid=%d)\n", type, rid);
+	return;
+    }
+    resource_list_unreserve(rl, bus, child, type, rid);
+    resource_list_delete(rl, type, rid);
+}
+
+/*
+ * Allocate an IO port or memory resource, given its GAS (Generic
+ * Address Structure).  On success, *type and *res describe the
+ * allocated resource; on failure the temporary bus resource entry is
+ * removed again.  Returns 0, EINVAL, EOPNOTSUPP, or ENOMEM.
+ */
+int
+acpi_bus_alloc_gas(device_t dev, int *type, int *rid, ACPI_GENERIC_ADDRESS *gas,
+    struct resource **res, u_int flags)
+{
+    int error, res_type;
+
+    error = ENOMEM;
+    if (type == NULL || rid == NULL || gas == NULL || res == NULL)
+	return (EINVAL);
+
+    /* We only support memory and IO spaces. */
+    switch (gas->SpaceId) {
+    case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+	res_type = SYS_RES_MEMORY;
+	break;
+    case ACPI_ADR_SPACE_SYSTEM_IO:
+	res_type = SYS_RES_IOPORT;
+	break;
+    default:
+	return (EOPNOTSUPP);
+    }
+
+    /*
+     * If the register width is less than 8, assume the BIOS author means
+     * it is a bit field and just allocate a byte.
+     */
+    if (gas->BitWidth && gas->BitWidth < 8)
+	gas->BitWidth = 8;
+
+    /* Validate the address after we're sure we support the space. */
+    if (gas->Address == 0 || gas->BitWidth == 0)
+	return (EINVAL);
+
+    bus_set_resource(dev, res_type, *rid, gas->Address,
+	gas->BitWidth / 8);
+    *res = bus_alloc_resource_any(dev, res_type, rid, RF_ACTIVE | flags);
+    if (*res != NULL) {
+	*type = res_type;
+	error = 0;
+    } else
+	bus_delete_resource(dev, res_type, *rid);
+
+    return (error);
+}
+
+/*
+ * Probe _HID for a compatible ISA PNP id.  Returns the packed EISA id
+ * from the device's _HID string, or 0 when the device has no handle,
+ * no object info, or no valid EISA-format hardware id.
+ */
+static uint32_t
+acpi_isa_get_logicalid(device_t dev)
+{
+    ACPI_DEVICE_INFO *info;
+    ACPI_HANDLE h;
+    uint32_t id;
+
+    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+    h = acpi_get_handle(dev);
+    if (h == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &info)))
+	return_VALUE (0);
+
+    id = 0;
+    if ((info->Valid & ACPI_VALID_HID) != 0 &&
+	info->HardwareId.Length >= ACPI_EISAID_STRING_SIZE)
+	id = PNP_EISAID(info->HardwareId.String);
+    AcpiOsFree(info);
+
+    return_VALUE (id);
+}
+
+/*
+ * Fill 'cids' with up to 'count' packed EISA ids taken from the
+ * device's _CID list; only "PNP"-prefixed EISA-format ids qualify.
+ * Returns the number of ids stored.
+ */
+static int
+acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count)
+{
+    ACPI_DEVICE_INFO *devinfo;
+    ACPI_DEVICE_ID *ids;
+    ACPI_HANDLE h;
+    uint32_t *pnpid;
+    int i, valid;
+
+    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+    pnpid = cids;
+
+    /* Fetch and validate the CID */
+    if ((h = acpi_get_handle(dev)) == NULL ||
+	ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
+	return_VALUE (0);
+
+    if ((devinfo->Valid & ACPI_VALID_CID) == 0) {
+	AcpiOsFree(devinfo);
+	return_VALUE (0);
+    }
+
+    /* Clamp to the number of ids the firmware actually supplies. */
+    if (devinfo->CompatibleIdList.Count < count)
+	count = devinfo->CompatibleIdList.Count;
+    ids = devinfo->CompatibleIdList.Ids;
+    for (i = 0, valid = 0; i < count; i++)
+	if (ids[i].Length >= ACPI_EISAID_STRING_SIZE &&
+	    strncmp(ids[i].String, "PNP", 3) == 0) {
+	    *pnpid++ = PNP_EISAID(ids[i].String);
+	    valid++;
+	}
+    AcpiOsFree(devinfo);
+
+    return_VALUE (valid);
+}
+
+/*
+ * Match a device against a NULL-terminated array of PNP id strings.
+ * Returns the first id matching the device's _HID or _CID, or NULL if
+ * none match (or the device is not a Device/Processor object).
+ */
+static char *
+acpi_device_id_probe(device_t bus, device_t dev, char **ids)
+{
+    ACPI_HANDLE h;
+    ACPI_OBJECT_TYPE type;
+    int i;
+
+    h = acpi_get_handle(dev);
+    if (ids == NULL || h == NULL)
+	return (NULL);
+    type = acpi_get_type(dev);
+    if (type != ACPI_TYPE_DEVICE && type != ACPI_TYPE_PROCESSOR)
+	return (NULL);
+
+    for (i = 0; ids[i] != NULL; i++) {
+	if (acpi_MatchHid(h, ids[i]))
+	    return (ids[i]);
+    }
+    return (NULL);
+}
+
+static ACPI_STATUS
+acpi_device_eval_obj(device_t bus, device_t dev, ACPI_STRING pathname,
+ ACPI_OBJECT_LIST *parameters, ACPI_BUFFER *ret)
+{
+ ACPI_HANDLE h;
+
+ if (dev == NULL)
+ h = ACPI_ROOT_OBJECT;
+ else if ((h = acpi_get_handle(dev)) == NULL)
+ return (AE_BAD_PARAMETER);
+ return (AcpiEvaluateObject(h, pathname, parameters, ret));
+}
+
+int
+acpi_device_pwr_for_sleep(device_t bus, device_t dev, int *dstate)
+{
+ struct acpi_softc *sc;
+ ACPI_HANDLE handle;
+ ACPI_STATUS status;
+ char sxd[8];
+
+ handle = acpi_get_handle(dev);
+
+ /*
+ * XXX If we find these devices, don't try to power them down.
+ * The serial and IRDA ports on my T23 hang the system when
+ * set to D3 and it appears that such legacy devices may
+ * need special handling in their drivers.
+ */
+ if (dstate == NULL || handle == NULL ||
+ acpi_MatchHid(handle, "PNP0500") ||
+ acpi_MatchHid(handle, "PNP0501") ||
+ acpi_MatchHid(handle, "PNP0502") ||
+ acpi_MatchHid(handle, "PNP0510") ||
+ acpi_MatchHid(handle, "PNP0511"))
+ return (ENXIO);
+
+ /*
+ * Override next state with the value from _SxD, if present.
+ * Note illegal _S0D is evaluated because some systems expect this.
+ */
+ sc = device_get_softc(bus);
+ snprintf(sxd, sizeof(sxd), "_S%dD", sc->acpi_sstate);
+ status = acpi_GetInteger(handle, sxd, dstate);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ device_printf(dev, "failed to get %s on %s: %s\n", sxd,
+ acpi_name(handle), AcpiFormatException(status));
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
/* Callback arg for our implementation of walking the namespace. */
struct acpi_device_scan_ctx {
    acpi_scan_cb_t user_fn;	/* user callback run for each candidate handle */
    void *arg;			/* opaque argument passed through to user_fn */
    ACPI_HANDLE parent;		/* handle where the scan began; never visited */
};
+
/*
 * AcpiWalkNamespace() callback: run the user's scan function on each
 * device-like handle and, if the user replaced the device_t, rebind the
 * handle <-> device_t association accordingly.
 */
static ACPI_STATUS
acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *arg, void **retval)
{
    struct acpi_device_scan_ctx *ctx;
    device_t dev, old_dev;
    ACPI_STATUS status;
    ACPI_OBJECT_TYPE type;

    /*
     * Skip this device if we think we'll have trouble with it or it is
     * the parent where the scan began.
     */
    ctx = (struct acpi_device_scan_ctx *)arg;
    if (acpi_avoid(h) || h == ctx->parent)
	return (AE_OK);

    /* If this is not a valid device type (e.g., a method), skip it. */
    if (ACPI_FAILURE(AcpiGetType(h, &type)))
	return (AE_OK);
    if (type != ACPI_TYPE_DEVICE && type != ACPI_TYPE_PROCESSOR &&
	type != ACPI_TYPE_THERMAL && type != ACPI_TYPE_POWER)
	return (AE_OK);

    /*
     * Call the user function with the current device.  If it is unchanged
     * afterwards, return.  Otherwise, we update the handle to the new dev.
     */
    old_dev = acpi_get_device(h);
    dev = old_dev;
    status = ctx->user_fn(h, &dev, level, ctx->arg);
    if (ACPI_FAILURE(status) || old_dev == dev)
	return (status);

    /* Remove the old child and its connection to the handle. */
    if (old_dev != NULL) {
	device_delete_child(device_get_parent(old_dev), old_dev);
	AcpiDetachData(h, acpi_fake_objhandler);
    }

    /* Recreate the handle association if the user created a device. */
    if (dev != NULL)
	AcpiAttachData(h, acpi_fake_objhandler, dev);

    return (AE_OK);
}
+
+static ACPI_STATUS
+acpi_device_scan_children(device_t bus, device_t dev, int max_depth,
+ acpi_scan_cb_t user_fn, void *arg)
+{
+ ACPI_HANDLE h;
+ struct acpi_device_scan_ctx ctx;
+
+ if (acpi_disabled("children"))
+ return (AE_OK);
+
+ if (dev == NULL)
+ h = ACPI_ROOT_OBJECT;
+ else if ((h = acpi_get_handle(dev)) == NULL)
+ return (AE_BAD_PARAMETER);
+ ctx.user_fn = user_fn;
+ ctx.arg = arg;
+ ctx.parent = h;
+ return (AcpiWalkNamespace(ACPI_TYPE_ANY, h, max_depth,
+ acpi_device_scan_cb, NULL, &ctx, NULL));
+}
+
+/*
+ * Even though ACPI devices are not PCI, we use the PCI approach for setting
+ * device power states since it's close enough to ACPI.
+ */
+static int
+acpi_set_powerstate(device_t child, int state)
+{
+ ACPI_HANDLE h;
+ ACPI_STATUS status;
+
+ h = acpi_get_handle(child);
+ if (state < ACPI_STATE_D0 || state > ACPI_D_STATES_MAX)
+ return (EINVAL);
+ if (h == NULL)
+ return (0);
+
+ /* Ignore errors if the power methods aren't present. */
+ status = acpi_pwr_switch_consumer(h, state);
+ if (ACPI_SUCCESS(status)) {
+ if (bootverbose)
+ device_printf(child, "set ACPI power state D%d on %s\n",
+ state, acpi_name(h));
+ } else if (status != AE_NOT_FOUND)
+ device_printf(child,
+ "failed to set ACPI power state D%d on %s: %s\n", state,
+ acpi_name(h), AcpiFormatException(status));
+
+ return (0);
+}
+
+static int
+acpi_isa_pnp_probe(device_t bus, device_t child, struct isa_pnp_id *ids)
+{
+ int result, cid_count, i;
+ uint32_t lid, cids[8];
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ /*
+ * ISA-style drivers attached to ACPI may persist and
+ * probe manually if we return ENOENT. We never want
+ * that to happen, so don't ever return it.
+ */
+ result = ENXIO;
+
+ /* Scan the supplied IDs for a match */
+ lid = acpi_isa_get_logicalid(child);
+ cid_count = acpi_isa_get_compatid(child, cids, 8);
+ while (ids && ids->ip_id) {
+ if (lid == ids->ip_id) {
+ result = 0;
+ goto out;
+ }
+ for (i = 0; i < cid_count; i++) {
+ if (cids[i] == ids->ip_id) {
+ result = 0;
+ goto out;
+ }
+ }
+ ids++;
+ }
+
+ out:
+ if (result == 0 && ids->ip_desc)
+ device_set_desc(child, ids->ip_desc);
+
+ return_VALUE (result);
+}
+
+#if defined(__i386__) || defined(__amd64__)
+/*
+ * Look for a MCFG table. If it is present, use the settings for
+ * domain (segment) 0 to setup PCI config space access via the memory
+ * map.
+ */
+static void
+acpi_enable_pcie(void)
+{
+ ACPI_TABLE_HEADER *hdr;
+ ACPI_MCFG_ALLOCATION *alloc, *end;
+ ACPI_STATUS status;
+
+ status = AcpiGetTable(ACPI_SIG_MCFG, 1, &hdr);
+ if (ACPI_FAILURE(status))
+ return;
+
+ end = (ACPI_MCFG_ALLOCATION *)((char *)hdr + hdr->Length);
+ alloc = (ACPI_MCFG_ALLOCATION *)((ACPI_TABLE_MCFG *)hdr + 1);
+ while (alloc < end) {
+ if (alloc->PciSegment == 0) {
+ pcie_cfgregopen(alloc->Address, alloc->StartBusNumber,
+ alloc->EndBusNumber);
+ return;
+ }
+ alloc++;
+ }
+}
+#endif
+
/*
 * Scan all of the ACPI namespace and attach child devices.
 *
 * We should only expect to find devices in the \_PR, \_TZ, \_SI, and
 * \_SB scopes, and \_PR and \_TZ became obsolete in the ACPI 2.0 spec.
 * However, in violation of the spec, some systems place their PCI link
 * devices in \, so we have to walk the whole namespace.  We check the
 * type of namespace nodes, so this should be ok.
 */
static void
acpi_probe_children(device_t bus)
{

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /*
     * Scan the namespace and insert placeholders for all the devices that
     * we find.  We also probe/attach any early devices.
     *
     * Note that we use AcpiWalkNamespace rather than AcpiGetDevices because
     * we want to create nodes for all devices, not just those that are
     * currently present. (This assumes that we don't want to create/remove
     * devices as they appear, which might be smarter.)
     */
    ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "namespace scan\n"));
    /* Depth 100 is effectively unlimited; real trees are far shallower. */
    AcpiWalkNamespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, 100, acpi_probe_child,
	NULL, bus, NULL);

    /* Pre-allocate resources for our rman from any sysresource devices. */
    acpi_sysres_alloc(bus);

    /* Reserve resources already allocated to children. */
    acpi_reserve_resources(bus);

    /* Create any static children by calling device identify methods. */
    ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "device identify routines\n"));
    bus_generic_probe(bus);

    /* Probe/attach all children, created statically and from the namespace. */
    ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "acpi bus_generic_attach\n"));
    bus_generic_attach(bus);

    /* Attach wake sysctls. */
    acpi_wake_sysctl_walk(bus);

    ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "done attaching children\n"));
    return_VOID;
}
+
+/*
+ * Determine the probe order for a given device.
+ */
+static void
+acpi_probe_order(ACPI_HANDLE handle, int *order)
+{
+ ACPI_OBJECT_TYPE type;
+
+ /*
+ * 0. CPUs
+ * 1. I/O port and memory system resource holders
+ * 2. Clocks and timers (to handle early accesses)
+ * 3. Embedded controllers (to handle early accesses)
+ * 4. PCI Link Devices
+ */
+ AcpiGetType(handle, &type);
+ if (type == ACPI_TYPE_PROCESSOR)
+ *order = 0;
+ else if (acpi_MatchHid(handle, "PNP0C01") ||
+ acpi_MatchHid(handle, "PNP0C02"))
+ *order = 1;
+ else if (acpi_MatchHid(handle, "PNP0100") ||
+ acpi_MatchHid(handle, "PNP0103") ||
+ acpi_MatchHid(handle, "PNP0B00"))
+ *order = 2;
+ else if (acpi_MatchHid(handle, "PNP0C09"))
+ *order = 3;
+ else if (acpi_MatchHid(handle, "PNP0C0F"))
+ *order = 4;
+}
+
/*
 * Evaluate a child device and determine whether we might attach a device to
 * it.  Called once per namespace node by AcpiWalkNamespace(); always
 * returns AE_OK so the walk continues even when a node is skipped.
 */
static ACPI_STATUS
acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status)
{
    struct acpi_prw_data prw;
    ACPI_OBJECT_TYPE type;
    ACPI_HANDLE h;
    device_t bus, child;
    char *handle_str;
    int order;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (acpi_disabled("children"))
	return_ACPI_STATUS (AE_OK);

    /* Skip this device if we think we'll have trouble with it. */
    if (acpi_avoid(handle))
	return_ACPI_STATUS (AE_OK);

    bus = (device_t)context;
    if (ACPI_SUCCESS(AcpiGetType(handle, &type))) {
	handle_str = acpi_name(handle);
	switch (type) {
	case ACPI_TYPE_DEVICE:
	    /*
	     * Since we scan from \, be sure to skip system scope objects.
	     * \_SB_ and \_TZ_ are defined in ACPICA as devices to work around
	     * BIOS bugs.  For example, \_SB_ is to allow \_SB_._INI to be run
	     * during the intialization and \_TZ_ is to support Notify() on it.
	     */
	    if (strcmp(handle_str, "\\_SB_") == 0 ||
		strcmp(handle_str, "\\_TZ_") == 0)
		break;
	    /* Register wake GPEs for devices that declare _PRW. */
	    if (acpi_parse_prw(handle, &prw) == 0)
		AcpiSetupGpeForWake(handle, prw.gpe_handle, prw.gpe_bit);

	    /*
	     * Ignore devices that do not have a _HID or _CID.  They should
	     * be discovered by other buses (e.g. the PCI bus driver).
	     */
	    if (!acpi_has_hid(handle))
		break;
	    /* FALLTHROUGH */
	case ACPI_TYPE_PROCESSOR:
	case ACPI_TYPE_THERMAL:
	case ACPI_TYPE_POWER:
	    /*
	     * Create a placeholder device for this node.  Sort the
	     * placeholder so that the probe/attach passes will run
	     * breadth-first.  Orders less than ACPI_DEV_BASE_ORDER
	     * are reserved for special objects (i.e., system
	     * resources).
	     */
	    ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "scanning '%s'\n", handle_str));
	    order = level * 10 + ACPI_DEV_BASE_ORDER;
	    acpi_probe_order(handle, &order);
	    child = BUS_ADD_CHILD(bus, order, NULL, -1);
	    if (child == NULL)
		break;

	    /* Associate the handle with the device_t and vice versa. */
	    acpi_set_handle(child, handle);
	    AcpiAttachData(handle, acpi_fake_objhandler, child);

	    /*
	     * Check that the device is present.  If it's not present,
	     * leave it disabled (so that we have a device_t attached to
	     * the handle, but we don't probe it).
	     *
	     * XXX PCI link devices sometimes report "present" but not
	     * "functional" (i.e. if disabled).  Go ahead and probe them
	     * anyway since we may enable them later.
	     */
	    if (type == ACPI_TYPE_DEVICE && !acpi_DeviceIsPresent(child)) {
		/* Never disable PCI link devices. */
		if (acpi_MatchHid(handle, "PNP0C0F"))
		    break;
		/*
		 * Docking stations should remain enabled since the system
		 * may be undocked at boot.
		 */
		if (ACPI_SUCCESS(AcpiGetHandle(handle, "_DCK", &h)))
		    break;

		device_disable(child);
		break;
	    }

	    /*
	     * Get the device's resource settings and attach them.
	     * Note that if the device has _PRS but no _CRS, we need
	     * to decide when it's appropriate to try to configure the
	     * device.  Ignore the return value here; it's OK for the
	     * device not to have any resources.
	     */
	    acpi_parse_resources(child, handle, &acpi_res_parse_set, NULL);
	    break;
	}
    }

    return_ACPI_STATUS (AE_OK);
}
+
/*
 * AcpiAttachData() requires an object handler but never uses it.  This is a
 * placeholder object handler so we can store a device_t in an ACPI_HANDLE.
 */
void
acpi_fake_objhandler(ACPI_HANDLE h, void *data)
{
    /* Intentionally empty: only its address is used as a key. */
}
+
/*
 * Final shutdown event handler: power off via ACPI S5, reboot via the
 * ACPI reset register, or disable the ACPI subsystem, depending on the
 * 'howto' flags and softc configuration.
 */
static void
acpi_shutdown_final(void *arg, int howto)
{
    struct acpi_softc *sc = (struct acpi_softc *)arg;
    register_t intr;
    ACPI_STATUS status;

    /*
     * XXX Shutdown code should only run on the BSP (cpuid 0).
     * Some chipsets do not power off the system correctly if called from
     * an AP.
     */
    if ((howto & RB_POWEROFF) != 0) {
	status = AcpiEnterSleepStatePrep(ACPI_STATE_S5);
	if (ACPI_FAILURE(status)) {
	    device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n",
		AcpiFormatException(status));
	    return;
	}
	device_printf(sc->acpi_dev, "Powering system off\n");
	/* Interrupts must stay off across the actual sleep transition. */
	intr = intr_disable();
	status = AcpiEnterSleepState(ACPI_STATE_S5);
	if (ACPI_FAILURE(status)) {
	    intr_restore(intr);
	    device_printf(sc->acpi_dev, "power-off failed - %s\n",
		AcpiFormatException(status));
	} else {
	    /* Give the hardware up to 1 second to act before reporting. */
	    DELAY(1000000);
	    intr_restore(intr);
	    device_printf(sc->acpi_dev, "power-off failed - timeout\n");
	}
    } else if ((howto & RB_HALT) == 0 && sc->acpi_handle_reboot) {
	/* Reboot using the reset register. */
	status = AcpiReset();
	if (ACPI_SUCCESS(status)) {
	    DELAY(1000000);
	    device_printf(sc->acpi_dev, "reset failed - timeout\n");
	} else if (status != AE_NOT_EXIST)
	    device_printf(sc->acpi_dev, "reset failed - %s\n",
		AcpiFormatException(status));
    } else if (sc->acpi_do_disable && panicstr == NULL) {
	/*
	 * Only disable ACPI if the user requested.  On some systems, writing
	 * the disable value to SMI_CMD hangs the system.
	 */
	device_printf(sc->acpi_dev, "Shutting down\n");
	AcpiTerminate();
    }
}
+
/*
 * Enable and clear the fixed-feature power and sleep button events and
 * install our handlers for them.  A clear FADT flag indicates the
 * fixed-feature button is supported (a set flag means the button is a
 * control-method device instead) -- see the ACPI spec FADT definition.
 */
static void
acpi_enable_fixed_events(struct acpi_softc *sc)
{
    static int first_time = 1;	/* only announce the buttons on first call */

    /* Enable and clear fixed events and install handlers. */
    if ((AcpiGbl_FADT.Flags & ACPI_FADT_POWER_BUTTON) == 0) {
	AcpiClearEvent(ACPI_EVENT_POWER_BUTTON);
	AcpiInstallFixedEventHandler(ACPI_EVENT_POWER_BUTTON,
	    acpi_event_power_button_sleep, sc);
	if (first_time)
	    device_printf(sc->acpi_dev, "Power Button (fixed)\n");
    }
    if ((AcpiGbl_FADT.Flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
	AcpiClearEvent(ACPI_EVENT_SLEEP_BUTTON);
	AcpiInstallFixedEventHandler(ACPI_EVENT_SLEEP_BUTTON,
	    acpi_event_sleep_button_sleep, sc);
	if (first_time)
	    device_printf(sc->acpi_dev, "Sleep Button (fixed)\n");
    }

    first_time = 0;
}
+
+/*
+ * Returns true if the device is actually present and should
+ * be attached to. This requires the present, enabled, UI-visible
+ * and diagnostics-passed bits to be set.
+ */
+BOOLEAN
+acpi_DeviceIsPresent(device_t dev)
+{
+ ACPI_DEVICE_INFO *devinfo;
+ ACPI_HANDLE h;
+ BOOLEAN present;
+
+ if ((h = acpi_get_handle(dev)) == NULL ||
+ ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
+ return (FALSE);
+
+ /* If no _STA method, must be present */
+ present = (devinfo->Valid & ACPI_VALID_STA) == 0 ||
+ ACPI_DEVICE_PRESENT(devinfo->CurrentStatus) ? TRUE : FALSE;
+
+ AcpiOsFree(devinfo);
+ return (present);
+}
+
+/*
+ * Returns true if the battery is actually present and inserted.
+ */
+BOOLEAN
+acpi_BatteryIsPresent(device_t dev)
+{
+ ACPI_DEVICE_INFO *devinfo;
+ ACPI_HANDLE h;
+ BOOLEAN present;
+
+ if ((h = acpi_get_handle(dev)) == NULL ||
+ ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
+ return (FALSE);
+
+ /* If no _STA method, must be present */
+ present = (devinfo->Valid & ACPI_VALID_STA) == 0 ||
+ ACPI_BATTERY_PRESENT(devinfo->CurrentStatus) ? TRUE : FALSE;
+
+ AcpiOsFree(devinfo);
+ return (present);
+}
+
+/*
+ * Returns true if a device has at least one valid device ID.
+ */
+static BOOLEAN
+acpi_has_hid(ACPI_HANDLE h)
+{
+ ACPI_DEVICE_INFO *devinfo;
+ BOOLEAN ret;
+
+ if (h == NULL ||
+ ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
+ return (FALSE);
+
+ ret = FALSE;
+ if ((devinfo->Valid & ACPI_VALID_HID) != 0)
+ ret = TRUE;
+ else if ((devinfo->Valid & ACPI_VALID_CID) != 0)
+ if (devinfo->CompatibleIdList.Count > 0)
+ ret = TRUE;
+
+ AcpiOsFree(devinfo);
+ return (ret);
+}
+
+/*
+ * Match a HID string against a handle
+ */
+BOOLEAN
+acpi_MatchHid(ACPI_HANDLE h, const char *hid)
+{
+ ACPI_DEVICE_INFO *devinfo;
+ BOOLEAN ret;
+ int i;
+
+ if (hid == NULL || h == NULL ||
+ ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
+ return (FALSE);
+
+ ret = FALSE;
+ if ((devinfo->Valid & ACPI_VALID_HID) != 0 &&
+ strcmp(hid, devinfo->HardwareId.String) == 0)
+ ret = TRUE;
+ else if ((devinfo->Valid & ACPI_VALID_CID) != 0)
+ for (i = 0; i < devinfo->CompatibleIdList.Count; i++) {
+ if (strcmp(hid, devinfo->CompatibleIdList.Ids[i].String) == 0) {
+ ret = TRUE;
+ break;
+ }
+ }
+
+ AcpiOsFree(devinfo);
+ return (ret);
+}
+
/*
 * Return the handle of a named object within our scope, ie. that of (parent)
 * or one if its parents.
 */
ACPI_STATUS
acpi_GetHandleInScope(ACPI_HANDLE parent, char *path, ACPI_HANDLE *result)
{
    ACPI_HANDLE r;
    ACPI_STATUS status;

    /* Walk back up the tree to the root */
    for (;;) {
	status = AcpiGetHandle(parent, path, &r);
	if (ACPI_SUCCESS(status)) {
	    *result = r;
	    return (AE_OK);
	}
	/*
	 * XXX Return error here?
	 * NOTE(review): any failure other than AE_NOT_FOUND returns AE_OK
	 * without setting *result, so callers can see success with an
	 * unchanged handle -- audit callers before changing this.
	 */
	if (status != AE_NOT_FOUND)
	    return (AE_OK);
	if (ACPI_FAILURE(AcpiGetParent(parent, &r)))
	    return (AE_NOT_FOUND);
	parent = r;
    }
}
+
+/*
+ * Allocate a buffer with a preset data size.
+ */
+ACPI_BUFFER *
+acpi_AllocBuffer(int size)
+{
+ ACPI_BUFFER *buf;
+
+ if ((buf = malloc(size + sizeof(*buf), M_ACPIDEV, M_NOWAIT)) == NULL)
+ return (NULL);
+ buf->Length = size;
+ buf->Pointer = (void *)(buf + 1);
+ return (buf);
+}
+
+ACPI_STATUS
+acpi_SetInteger(ACPI_HANDLE handle, char *path, UINT32 number)
+{
+ ACPI_OBJECT arg1;
+ ACPI_OBJECT_LIST args;
+
+ arg1.Type = ACPI_TYPE_INTEGER;
+ arg1.Integer.Value = number;
+ args.Count = 1;
+ args.Pointer = &arg1;
+
+ return (AcpiEvaluateObject(handle, path, &args, NULL));
+}
+
/*
 * Evaluate a path that should return an integer.
 */
ACPI_STATUS
acpi_GetInteger(ACPI_HANDLE handle, char *path, UINT32 *number)
{
    ACPI_STATUS status;
    ACPI_BUFFER buf;
    ACPI_OBJECT param;

    if (handle == NULL)
	handle = ACPI_ROOT_OBJECT;

    /*
     * Assume that what we've been pointed at is an Integer object, or
     * a method that will return an Integer.
     */
    buf.Pointer = &param;
    buf.Length = sizeof(param);
    status = AcpiEvaluateObject(handle, path, NULL, &buf);
    if (ACPI_SUCCESS(status)) {
	if (param.Type == ACPI_TYPE_INTEGER)
	    *number = param.Integer.Value;
	else
	    status = AE_TYPE;
    }

    /*
     * In some applications, a method that's expected to return an Integer
     * may instead return a Buffer (probably to simplify some internal
     * arithmetic).  We'll try to fetch whatever it is, and if it's a Buffer,
     * convert it into an Integer as best we can.
     *
     * This is a hack.
     */
    if (status == AE_BUFFER_OVERFLOW) {
	/* buf.Length was updated by ACPICA to the size actually needed. */
	if ((buf.Pointer = AcpiOsAllocate(buf.Length)) == NULL) {
	    status = AE_NO_MEMORY;
	} else {
	    status = AcpiEvaluateObject(handle, path, NULL, &buf);
	    if (ACPI_SUCCESS(status))
		status = acpi_ConvertBufferToInteger(&buf, number);
	    AcpiOsFree(buf.Pointer);
	}
    }
    return (status);
}
+
+ACPI_STATUS
+acpi_ConvertBufferToInteger(ACPI_BUFFER *bufp, UINT32 *number)
+{
+ ACPI_OBJECT *p;
+ UINT8 *val;
+ int i;
+
+ p = (ACPI_OBJECT *)bufp->Pointer;
+ if (p->Type == ACPI_TYPE_INTEGER) {
+ *number = p->Integer.Value;
+ return (AE_OK);
+ }
+ if (p->Type != ACPI_TYPE_BUFFER)
+ return (AE_TYPE);
+ if (p->Buffer.Length > sizeof(int))
+ return (AE_BAD_DATA);
+
+ *number = 0;
+ val = p->Buffer.Pointer;
+ for (i = 0; i < p->Buffer.Length; i++)
+ *number += val[i] << (i * 8);
+ return (AE_OK);
+}
+
+/*
+ * Iterate over the elements of an a package object, calling the supplied
+ * function for each element.
+ *
+ * XXX possible enhancement might be to abort traversal on error.
+ */
+ACPI_STATUS
+acpi_ForeachPackageObject(ACPI_OBJECT *pkg,
+ void (*func)(ACPI_OBJECT *comp, void *arg), void *arg)
+{
+ ACPI_OBJECT *comp;
+ int i;
+
+ if (pkg == NULL || pkg->Type != ACPI_TYPE_PACKAGE)
+ return (AE_BAD_PARAMETER);
+
+ /* Iterate over components */
+ i = 0;
+ comp = pkg->Package.Elements;
+ for (; i < pkg->Package.Count; i++, comp++)
+ func(comp, arg);
+
+ return (AE_OK);
+}
+
+/*
+ * Find the (index)th resource object in a set.
+ */
+ACPI_STATUS
+acpi_FindIndexedResource(ACPI_BUFFER *buf, int index, ACPI_RESOURCE **resp)
+{
+ ACPI_RESOURCE *rp;
+ int i;
+
+ rp = (ACPI_RESOURCE *)buf->Pointer;
+ i = index;
+ while (i-- > 0) {
+ /* Range check */
+ if (rp > (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length))
+ return (AE_BAD_PARAMETER);
+
+ /* Check for terminator */
+ if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0)
+ return (AE_NOT_FOUND);
+ rp = ACPI_NEXT_RESOURCE(rp);
+ }
+ if (resp != NULL)
+ *resp = rp;
+
+ return (AE_OK);
+}
+
/*
 * Append an ACPI_RESOURCE to an ACPI_BUFFER.
 *
 * Given a pointer to an ACPI_RESOURCE structure, expand the ACPI_BUFFER
 * provided to contain it.  If the ACPI_BUFFER is empty, allocate a sensible
 * backing block.  If the ACPI_RESOURCE is NULL, return an empty set of
 * resources.
 */
#define ACPI_INITIAL_RESOURCE_BUFFER_SIZE	512

ACPI_STATUS
acpi_AppendBufferResource(ACPI_BUFFER *buf, ACPI_RESOURCE *res)
{
    ACPI_RESOURCE *rp;
    void *newp;

    /* Initialise the buffer if necessary. */
    if (buf->Pointer == NULL) {
	buf->Length = ACPI_INITIAL_RESOURCE_BUFFER_SIZE;
	if ((buf->Pointer = AcpiOsAllocate(buf->Length)) == NULL)
	    return (AE_NO_MEMORY);
	/* A fresh buffer holds just an END_TAG terminator. */
	rp = (ACPI_RESOURCE *)buf->Pointer;
	rp->Type = ACPI_RESOURCE_TYPE_END_TAG;
	rp->Length = 0;
    }
    if (res == NULL)
	return (AE_OK);

    /*
     * Scan the current buffer looking for the terminator.
     * This will either find the terminator or hit the end
     * of the buffer and return an error.
     */
    rp = (ACPI_RESOURCE *)buf->Pointer;
    for (;;) {
	/* Range check, don't go outside the buffer */
	if (rp >= (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length))
	    return (AE_BAD_PARAMETER);
	if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0)
	    break;
	rp = ACPI_NEXT_RESOURCE(rp);
    }

    /*
     * Check the size of the buffer and expand if required.
     *
     * Required size is:
     *	size of existing resources before terminator +
     *	size of new resource and header +
     *	size of terminator.
     *
     * Note that this loop should really only run once, unless
     * for some reason we are stuffing a *really* huge resource.
     */
    while ((((u_int8_t *)rp - (u_int8_t *)buf->Pointer) +
	    res->Length + ACPI_RS_SIZE_NO_DATA +
	    ACPI_RS_SIZE_MIN) >= buf->Length) {
	if ((newp = AcpiOsAllocate(buf->Length * 2)) == NULL)
	    return (AE_NO_MEMORY);
	bcopy(buf->Pointer, newp, buf->Length);
	/* Rebase 'rp' into the new allocation before freeing the old one. */
	rp = (ACPI_RESOURCE *)((u_int8_t *)newp +
	    ((u_int8_t *)rp - (u_int8_t *)buf->Pointer));
	AcpiOsFree(buf->Pointer);
	buf->Pointer = newp;
	buf->Length += buf->Length;
    }

    /* Insert the new resource. */
    bcopy(res, rp, res->Length + ACPI_RS_SIZE_NO_DATA);

    /* And add the terminator. */
    rp = ACPI_NEXT_RESOURCE(rp);
    rp->Type = ACPI_RESOURCE_TYPE_END_TAG;
    rp->Length = 0;

    return (AE_OK);
}
+
/*
 * Set interrupt model.
 */
ACPI_STATUS
acpi_SetIntrModel(int model)
{

    /* _PIC informs the firmware which interrupt model the OS will use. */
    return (acpi_SetInteger(ACPI_ROOT_OBJECT, "_PIC", model));
}
+
+/*
+ * Walk subtables of a table and call a callback routine for each
+ * subtable. The caller should provide the first subtable and a
+ * pointer to the end of the table. This can be used to walk tables
+ * such as MADT and SRAT that use subtable entries.
+ */
+void
+acpi_walk_subtables(void *first, void *end, acpi_subtable_handler *handler,
+ void *arg)
+{
+ ACPI_SUBTABLE_HEADER *entry;
+
+ for (entry = first; (void *)entry < end; ) {
+ /* Avoid an infinite loop if we hit a bogus entry. */
+ if (entry->Length < sizeof(ACPI_SUBTABLE_HEADER))
+ return;
+
+ handler(entry, arg);
+ entry = ACPI_ADD_PTR(ACPI_SUBTABLE_HEADER, entry, entry->Length);
+ }
+}
+
+/*
+ * DEPRECATED. This interface has serious deficiencies and will be
+ * removed.
+ *
+ * Immediately enter the sleep state. In the old model, acpiconf(8) ran
+ * rc.suspend and rc.resume so we don't have to notify devd(8) to do this.
+ */
+ACPI_STATUS
+acpi_SetSleepState(struct acpi_softc *sc, int state)
+{
+ static int once;
+
+ if (!once) {
+ device_printf(sc->acpi_dev,
+"warning: acpi_SetSleepState() deprecated, need to update your software\n");
+ once = 1;
+ }
+ return (acpi_EnterSleepState(sc, state));
+}
+
#if defined(__amd64__) || defined(__i386__)
/*
 * Task-queue handler that actually enters the pending sleep state after a
 * suspend request timed out waiting for userland acks.
 */
static void
acpi_sleep_force_task(void *context)
{
    struct acpi_softc *sc = (struct acpi_softc *)context;

    if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate)))
	device_printf(sc->acpi_dev, "force sleep state S%d failed\n",
	    sc->acpi_next_sstate);
}

/*
 * Callout handler fired when userland fails to ack a suspend request in
 * time (see acpi_ReqSleepState()).
 */
static void
acpi_sleep_force(void *arg)
{
    struct acpi_softc *sc = (struct acpi_softc *)arg;

    device_printf(sc->acpi_dev,
	"suspend request timed out, forcing sleep now\n");
    /*
     * XXX Suspending from a callout causes a freeze in DEVICE_SUSPEND();
     * defer the suspend to an acpi_task thread instead.
     */
    if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
	acpi_sleep_force_task, sc)))
	device_printf(sc->acpi_dev, "AcpiOsExecute() for sleeping failed\n");
}
#endif
+
/*
 * Request that the system enter the given suspend state.  All /dev/apm
 * devices and devd(8) will be notified.  Userland then has a chance to
 * save state and acknowledge the request.  The system sleeps once all
 * acks are in.
 */
int
acpi_ReqSleepState(struct acpi_softc *sc, int state)
{
#if defined(__amd64__) || defined(__i386__)
    struct apm_clone_data *clone;
    ACPI_STATUS status;

    if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
	return (EINVAL);
    if (!acpi_sleep_states[state])
	return (EOPNOTSUPP);

    /* If a suspend request is already in progress, just return. */
    if (sc->acpi_next_sstate != 0) {
	return (0);
    }

    /* Wait until sleep is enabled. */
    while (sc->acpi_sleep_disabled) {
	AcpiOsSleep(1000);
    }

    ACPI_LOCK(acpi);

    sc->acpi_next_sstate = state;

    /* S5 (soft-off) should be entered directly with no waiting. */
    if (state == ACPI_STATE_S5) {
	ACPI_UNLOCK(acpi);
	status = acpi_EnterSleepState(sc, state);
	return (ACPI_SUCCESS(status) ? 0 : ENXIO);
    }

    /* Record the pending state and notify all apm devices. */
    STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) {
	clone->notify_status = APM_EV_NONE;
	/* Only poll/kqueue listeners are woken here; devd is told below. */
	if ((clone->flags & ACPI_EVF_DEVD) == 0) {
	    selwakeuppri(&clone->sel_read, PZERO);
	    KNOTE_LOCKED(&clone->sel_read.si_note, 0);
	}
    }

    /* If devd(8) is not running, immediately enter the sleep state. */
    if (!devctl_process_running()) {
	ACPI_UNLOCK(acpi);
	status = acpi_EnterSleepState(sc, state);
	return (ACPI_SUCCESS(status) ? 0 : ENXIO);
    }

    /*
     * Set a timeout to fire if userland doesn't ack the suspend request
     * in time.  This way we still eventually go to sleep if we were
     * overheating or running low on battery, even if userland is hung.
     * We cancel this timeout once all userland acks are in or the
     * suspend request is aborted.
     */
    callout_reset(&sc->susp_force_to, 10 * hz, acpi_sleep_force, sc);
    ACPI_UNLOCK(acpi);

    /* Now notify devd(8) also. */
    acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, state);

    return (0);
#else
    /* This platform does not support acpi suspend/resume. */
    return (EOPNOTSUPP);
#endif
}
+
/*
 * Acknowledge (or reject) a pending sleep state.  The caller has
 * prepared for suspend and is now ready for it to proceed.  If the
 * error argument is non-zero, it indicates suspend should be cancelled
 * and gives an errno value describing why.  Once all votes are in,
 * we suspend the system.
 */
int
acpi_AckSleepState(struct apm_clone_data *clone, int error)
{
#if defined(__amd64__) || defined(__i386__)
    struct acpi_softc *sc;
    int ret, sleeping;

    /* If no pending sleep state, return an error. */
    ACPI_LOCK(acpi);
    sc = clone->acpi_sc;
    if (sc->acpi_next_sstate == 0) {
	ACPI_UNLOCK(acpi);
	return (ENXIO);
    }

    /* Caller wants to abort suspend process. */
    if (error) {
	sc->acpi_next_sstate = 0;
	callout_stop(&sc->susp_force_to);
	device_printf(sc->acpi_dev,
	    "listener on %s cancelled the pending suspend\n",
	    devtoname(clone->cdev));
	ACPI_UNLOCK(acpi);
	return (0);
    }

    /*
     * Mark this device as acking the suspend request.  Then, walk through
     * all devices, seeing if they agree yet.  We only count devices that
     * are writable since read-only devices couldn't ack the request.
     */
    sleeping = TRUE;
    clone->notify_status = APM_EV_ACKED;
    STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) {
	if ((clone->flags & ACPI_EVF_WRITE) != 0 &&
	    clone->notify_status != APM_EV_ACKED) {
	    sleeping = FALSE;
	    break;
	}
    }

    /* If all devices have voted "yes", we will suspend now. */
    if (sleeping)
	callout_stop(&sc->susp_force_to);
    ACPI_UNLOCK(acpi);
    ret = 0;
    /* Enter the sleep state outside the ACPI lock. */
    if (sleeping) {
	if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate)))
	    ret = ENODEV;
    }
    return (ret);
#else
    /* This platform does not support acpi suspend/resume. */
    return (EOPNOTSUPP);
#endif
}
+
+static void
+acpi_sleep_enable(void *arg)
+{
+ struct acpi_softc *sc = (struct acpi_softc *)arg;
+
+ /* Reschedule if the system is not fully up and running. */
+ if (!AcpiGbl_SystemAwakeAndRunning) {
+ timeout(acpi_sleep_enable, sc, hz * ACPI_MINIMUM_AWAKETIME);
+ return;
+ }
+
+ ACPI_LOCK(acpi);
+ sc->acpi_sleep_disabled = FALSE;
+ ACPI_UNLOCK(acpi);
+}
+
+static ACPI_STATUS
+acpi_sleep_disable(struct acpi_softc *sc)
+{
+ ACPI_STATUS status;
+
+ /* Fail if the system is not fully up and running. */
+ if (!AcpiGbl_SystemAwakeAndRunning)
+ return (AE_ERROR);
+
+ ACPI_LOCK(acpi);
+ status = sc->acpi_sleep_disabled ? AE_ERROR : AE_OK;
+ sc->acpi_sleep_disabled = TRUE;
+ ACPI_UNLOCK(acpi);
+
+ return (status);
+}
+
/* How far along the suspend path acpi_EnterSleepState() has progressed. */
enum acpi_sleep_state {
    ACPI_SS_NONE,		/* nothing done yet */
    ACPI_SS_GPE_SET,		/* wake GPEs prepared */
    ACPI_SS_DEV_SUSPEND,	/* device tree suspended */
    ACPI_SS_SLP_PREP,		/* AcpiEnterSleepStatePrep() succeeded */
    ACPI_SS_SLEPT,		/* sleep state entered and returned */
};
+
+/*
+ * Enter the desired system sleep state.
+ *
+ * Currently we support S1-S5 but S4 is only S4BIOS
+ */
static ACPI_STATUS
acpi_EnterSleepState(struct acpi_softc *sc, int state)
{
    register_t intr;
    ACPI_STATUS status;
    enum acpi_sleep_state slp_state;
    int sleep_result;

    ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);

    /* Only S1-S5 are valid targets and the BIOS must advertise support. */
    if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
	return_ACPI_STATUS (AE_BAD_PARAMETER);
    if (!acpi_sleep_states[state]) {
	device_printf(sc->acpi_dev, "Sleep state S%d not supported by BIOS\n",
	    state);
	return (AE_SUPPORT);
    }

    /* Re-entry once we're suspending is not allowed. */
    status = acpi_sleep_disable(sc);
    if (ACPI_FAILURE(status)) {
	device_printf(sc->acpi_dev,
	    "suspend request ignored (not ready yet)\n");
	return (status);
    }

    if (state == ACPI_STATE_S5) {
	/*
	 * Shut down cleanly and power off. This will call us back through the
	 * shutdown handlers.
	 */
	shutdown_nice(RB_POWEROFF);
	return_ACPI_STATUS (AE_OK);
    }

    EVENTHANDLER_INVOKE(power_suspend);

    /* Bind to CPU 0 while suspending/resuming. */
    if (smp_started) {
	thread_lock(curthread);
	sched_bind(curthread, 0);
	thread_unlock(curthread);
    }

    /*
     * Be sure to hold Giant across DEVICE_SUSPEND/RESUME since non-MPSAFE
     * drivers need this.
     */
    mtx_lock(&Giant);

    slp_state = ACPI_SS_NONE;

    sc->acpi_sstate = state;

    /* Enable any GPEs as appropriate and requested by the user. */
    acpi_wake_prep_walk(state);
    slp_state = ACPI_SS_GPE_SET;

    /*
     * Inform all devices that we are going to sleep. If at least one
     * device fails, DEVICE_SUSPEND() automatically resumes the tree.
     *
     * XXX Note that a better two-pass approach with a 'veto' pass
     * followed by a "real thing" pass would be better, but the current
     * bus interface does not provide for this.
     */
    if (DEVICE_SUSPEND(root_bus) != 0) {
	device_printf(sc->acpi_dev, "device_suspend failed\n");
	goto backout;
    }
    slp_state = ACPI_SS_DEV_SUSPEND;

    /* If testing device suspend only, back out of everything here. */
    if (acpi_susp_bounce)
	goto backout;

    status = AcpiEnterSleepStatePrep(state);
    if (ACPI_FAILURE(status)) {
	device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n",
	    AcpiFormatException(status));
	goto backout;
    }
    slp_state = ACPI_SS_SLP_PREP;

    /* Optional user-configured delay (seconds) before actually sleeping. */
    if (sc->acpi_sleep_delay > 0)
	DELAY(sc->acpi_sleep_delay * 1000000);

    /* Interrupts stay off from here until we are back from the hardware. */
    intr = intr_disable();
    if (state != ACPI_STATE_S1) {
	sleep_result = acpi_sleep_machdep(sc, state);
	acpi_wakeup_machdep(sc, state, sleep_result, 0);
	AcpiLeaveSleepStatePrep(state);
	intr_restore(intr);

	/* call acpi_wakeup_machdep() again with interrupt enabled */
	acpi_wakeup_machdep(sc, state, sleep_result, 1);

	if (sleep_result == -1)
	    goto backout;

	/* Re-enable ACPI hardware on wakeup from sleep state 4. */
	if (state == ACPI_STATE_S4)
	    AcpiEnable();
    } else {
	/* S1 needs no machine-dependent context save/restore. */
	status = AcpiEnterSleepState(state);
	AcpiLeaveSleepStatePrep(state);
	intr_restore(intr);
	if (ACPI_FAILURE(status)) {
	    device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n",
		AcpiFormatException(status));
	    goto backout;
	}
    }
    slp_state = ACPI_SS_SLEPT;

    /*
     * Back out state according to how far along we got in the suspend
     * process. This handles both the error and success cases.
     */
backout:
    if (slp_state >= ACPI_SS_GPE_SET) {
	acpi_wake_prep_walk(state);
	sc->acpi_sstate = ACPI_STATE_S0;
    }
    if (slp_state >= ACPI_SS_DEV_SUSPEND)
	DEVICE_RESUME(root_bus);
    if (slp_state >= ACPI_SS_SLP_PREP)
	AcpiLeaveSleepState(state);
    if (slp_state >= ACPI_SS_SLEPT) {
	acpi_resync_clock(sc);
	acpi_enable_fixed_events(sc);
    }
    sc->acpi_next_sstate = 0;

    mtx_unlock(&Giant);

    if (smp_started) {
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
    }

    EVENTHANDLER_INVOKE(power_resume);

    /* Allow another sleep request after a while. */
    timeout(acpi_sleep_enable, sc, hz * ACPI_MINIMUM_AWAKETIME);

    /* Run /etc/rc.resume after we are back. */
    if (devctl_process_running())
	acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, state);

    return_ACPI_STATUS (status);
}
+
+static void
+acpi_resync_clock(struct acpi_softc *sc)
+{
+#ifdef __amd64__
+ if (!acpi_reset_clock)
+ return;
+
+ /*
+ * Warm up timecounter again and reset system clock.
+ */
+ (void)timecounter->tc_get_timecount(timecounter);
+ (void)timecounter->tc_get_timecount(timecounter);
+ inittodr(time_second + sc->acpi_sleep_delay);
+#endif
+}
+
+/* Enable or disable the device's wake GPE. */
+int
+acpi_wake_set_enable(device_t dev, int enable)
+{
+ struct acpi_prw_data prw;
+ ACPI_STATUS status;
+ int flags;
+
+ /* Make sure the device supports waking the system and get the GPE. */
+ if (acpi_parse_prw(acpi_get_handle(dev), &prw) != 0)
+ return (ENXIO);
+
+ flags = acpi_get_flags(dev);
+ if (enable) {
+ status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit,
+ ACPI_GPE_ENABLE);
+ if (ACPI_FAILURE(status)) {
+ device_printf(dev, "enable wake failed\n");
+ return (ENXIO);
+ }
+ acpi_set_flags(dev, flags | ACPI_FLAG_WAKE_ENABLED);
+ } else {
+ status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit,
+ ACPI_GPE_DISABLE);
+ if (ACPI_FAILURE(status)) {
+ device_printf(dev, "disable wake failed\n");
+ return (ENXIO);
+ }
+ acpi_set_flags(dev, flags & ~ACPI_FLAG_WAKE_ENABLED);
+ }
+
+ return (0);
+}
+
+static int
+acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate)
+{
+ struct acpi_prw_data prw;
+ device_t dev;
+
+ /* Check that this is a wake-capable device and get its GPE. */
+ if (acpi_parse_prw(handle, &prw) != 0)
+ return (ENXIO);
+ dev = acpi_get_device(handle);
+
+ /*
+ * The destination sleep state must be less than (i.e., higher power)
+ * or equal to the value specified by _PRW. If this GPE cannot be
+ * enabled for the next sleep state, then disable it. If it can and
+ * the user requested it be enabled, turn on any required power resources
+ * and set _PSW.
+ */
+ if (sstate > prw.lowest_wake) {
+ AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE);
+ if (bootverbose)
+ device_printf(dev, "wake_prep disabled wake for %s (S%d)\n",
+ acpi_name(handle), sstate);
+ } else if (dev && (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) != 0) {
+ acpi_pwr_wake_enable(handle, 1);
+ acpi_SetInteger(handle, "_PSW", 1);
+ if (bootverbose)
+ device_printf(dev, "wake_prep enabled for %s (S%d)\n",
+ acpi_name(handle), sstate);
+ }
+
+ return (0);
+}
+
+static int
+acpi_wake_run_prep(ACPI_HANDLE handle, int sstate)
+{
+ struct acpi_prw_data prw;
+ device_t dev;
+
+ /*
+ * Check that this is a wake-capable device and get its GPE. Return
+ * now if the user didn't enable this device for wake.
+ */
+ if (acpi_parse_prw(handle, &prw) != 0)
+ return (ENXIO);
+ dev = acpi_get_device(handle);
+ if (dev == NULL || (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) == 0)
+ return (0);
+
+ /*
+ * If this GPE couldn't be enabled for the previous sleep state, it was
+ * disabled before going to sleep so re-enable it. If it was enabled,
+ * clear _PSW and turn off any power resources it used.
+ */
+ if (sstate > prw.lowest_wake) {
+ AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_ENABLE);
+ if (bootverbose)
+ device_printf(dev, "run_prep re-enabled %s\n", acpi_name(handle));
+ } else {
+ acpi_SetInteger(handle, "_PSW", 0);
+ acpi_pwr_wake_enable(handle, 0);
+ if (bootverbose)
+ device_printf(dev, "run_prep cleaned up for %s\n",
+ acpi_name(handle));
+ }
+
+ return (0);
+}
+
+static ACPI_STATUS
+acpi_wake_prep(ACPI_HANDLE handle, UINT32 level, void *context, void **status)
+{
+ int sstate;
+
+ /* If suspending, run the sleep prep function, otherwise wake. */
+ sstate = *(int *)context;
+ if (AcpiGbl_SystemAwakeAndRunning)
+ acpi_wake_sleep_prep(handle, sstate);
+ else
+ acpi_wake_run_prep(handle, sstate);
+ return (AE_OK);
+}
+
+/* Walk the tree rooted at acpi0 to prep devices for suspend/resume. */
+static int
+acpi_wake_prep_walk(int sstate)
+{
+ ACPI_HANDLE sb_handle;
+
+ if (ACPI_SUCCESS(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle)))
+ AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle, 100,
+ acpi_wake_prep, NULL, &sstate, NULL);
+ return (0);
+}
+
+/* Walk the tree rooted at acpi0 to attach per-device wake sysctls. */
+static int
+acpi_wake_sysctl_walk(device_t dev)
+{
+ int error, i, numdevs;
+ device_t *devlist;
+ device_t child;
+ ACPI_STATUS status;
+
+ error = device_get_children(dev, &devlist, &numdevs);
+ if (error != 0 || numdevs == 0) {
+ if (numdevs == 0)
+ free(devlist, M_TEMP);
+ return (error);
+ }
+ for (i = 0; i < numdevs; i++) {
+ child = devlist[i];
+ acpi_wake_sysctl_walk(child);
+ if (!device_is_attached(child))
+ continue;
+ status = AcpiEvaluateObject(acpi_get_handle(child), "_PRW", NULL, NULL);
+ if (ACPI_SUCCESS(status)) {
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(child),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(child)), OID_AUTO,
+ "wake", CTLTYPE_INT | CTLFLAG_RW, child, 0,
+ acpi_wake_set_sysctl, "I", "Device set to wake the system");
+ }
+ }
+ free(devlist, M_TEMP);
+
+ return (0);
+}
+
+/* Enable or disable wake from userland. */
+static int
+acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ int enable, error;
+ device_t dev;
+
+ dev = (device_t)arg1;
+ enable = (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) ? 1 : 0;
+
+ error = sysctl_handle_int(oidp, &enable, 0, req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+ if (enable != 0 && enable != 1)
+ return (EINVAL);
+
+ return (acpi_wake_set_enable(dev, enable));
+}
+
+/* Parse a device's _PRW into a structure. */
/*
 * Parse a device's _PRW (power resources for wake) object into a
 * struct acpi_prw_data: the GPE handle/bit, the lowest sleep state that
 * can still wake the system, and any trailing power resources.
 * Returns 0 on success, ENOENT if there is no _PRW, or EINVAL if the
 * package is malformed.
 */
int
acpi_parse_prw(ACPI_HANDLE h, struct acpi_prw_data *prw)
{
    ACPI_STATUS status;
    ACPI_BUFFER prw_buffer;
    ACPI_OBJECT *res, *res2;
    int error, i, power_count;

    if (h == NULL || prw == NULL)
	return (EINVAL);

    /*
     * The _PRW object (7.2.9) is only required for devices that have the
     * ability to wake the system from a sleeping state.
     */
    error = EINVAL;
    prw_buffer.Pointer = NULL;
    prw_buffer.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(h, "_PRW", NULL, &prw_buffer);
    if (ACPI_FAILURE(status))
	return (ENOENT);
    res = (ACPI_OBJECT *)prw_buffer.Pointer;
    if (res == NULL)
	return (ENOENT);
    /* A valid _PRW is a package with at least two elements. */
    if (!ACPI_PKG_VALID(res, 2))
	goto out;

    /*
     * Element 1 of the _PRW object:
     * The lowest power system sleeping state that can be entered while still
     * providing wake functionality. The sleeping state being entered must
     * be less than (i.e., higher power) or equal to this value.
     */
    if (acpi_PkgInt32(res, 1, &prw->lowest_wake) != 0)
	goto out;

    /*
     * Element 0 of the _PRW object:
     */
    switch (res->Package.Elements[0].Type) {
    case ACPI_TYPE_INTEGER:
	/*
	 * If the data type of this package element is numeric, then this
	 * _PRW package element is the bit index in the GPEx_EN, in the
	 * GPE blocks described in the FADT, of the enable bit that is
	 * enabled for the wake event.
	 */
	prw->gpe_handle = NULL;
	prw->gpe_bit = res->Package.Elements[0].Integer.Value;
	error = 0;
	break;
    case ACPI_TYPE_PACKAGE:
	/*
	 * If the data type of this package element is a package, then this
	 * _PRW package element is itself a package containing two
	 * elements. The first is an object reference to the GPE Block
	 * device that contains the GPE that will be triggered by the wake
	 * event. The second element is numeric and it contains the bit
	 * index in the GPEx_EN, in the GPE Block referenced by the
	 * first element in the package, of the enable bit that is enabled for
	 * the wake event.
	 *
	 * For example, if this field is a package then it is of the form:
	 * Package() {\_SB.PCI0.ISA.GPE, 2}
	 */
	res2 = &res->Package.Elements[0];
	if (!ACPI_PKG_VALID(res2, 2))
	    goto out;
	prw->gpe_handle = acpi_GetReference(NULL, &res2->Package.Elements[0]);
	if (prw->gpe_handle == NULL)
	    goto out;
	if (acpi_PkgInt32(res2, 1, &prw->gpe_bit) != 0)
	    goto out;
	error = 0;
	break;
    default:
	goto out;
    }

    /* Elements 2 to N of the _PRW object are power resources. */
    power_count = res->Package.Count - 2;
    if (power_count > ACPI_PRW_MAX_POWERRES) {
	printf("ACPI device %s has too many power resources\n", acpi_name(h));
	/* Too many to store; ignore them rather than overflowing. */
	power_count = 0;
    }
    prw->power_res_count = power_count;
    for (i = 0; i < power_count; i++)
	prw->power_res[i] = res->Package.Elements[i];

out:
    /* The evaluate call allocated the buffer; always release it. */
    if (prw_buffer.Pointer != NULL)
	AcpiOsFree(prw_buffer.Pointer);
    return (error);
}
+
+/*
+ * ACPI Event Handlers
+ */
+
+/* System Event Handlers (registered by EVENTHANDLER_REGISTER) */
+
+static void
+acpi_system_eventhandler_sleep(void *arg, int state)
+{
+ struct acpi_softc *sc = (struct acpi_softc *)arg;
+ int ret;
+
+ ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
+
+ /* Check if button action is disabled or unknown. */
+ if (state == ACPI_STATE_UNKNOWN)
+ return;
+
+ /* Request that the system prepare to enter the given suspend state. */
+ ret = acpi_ReqSleepState(sc, state);
+ if (ret != 0)
+ device_printf(sc->acpi_dev,
+ "request to enter state S%d failed (err %d)\n", state, ret);
+
+ return_VOID;
+}
+
+static void
+acpi_system_eventhandler_wakeup(void *arg, int state)
+{
+
+ ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
+
+ /* Currently, nothing to do for wakeup. */
+
+ return_VOID;
+}
+
+/*
+ * ACPICA Event Handlers (FixedEvent, also called from button notify handler)
+ */
+static void
+acpi_invoke_sleep_eventhandler(void *context)
+{
+
+ EVENTHANDLER_INVOKE(acpi_sleep_event, *(int *)context);
+}
+
+static void
+acpi_invoke_wake_eventhandler(void *context)
+{
+
+ EVENTHANDLER_INVOKE(acpi_wakeup_event, *(int *)context);
+}
+
+UINT32
+acpi_event_power_button_sleep(void *context)
+{
+ struct acpi_softc *sc = (struct acpi_softc *)context;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
+ acpi_invoke_sleep_eventhandler, &sc->acpi_power_button_sx)))
+ return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
+ return_VALUE (ACPI_INTERRUPT_HANDLED);
+}
+
+UINT32
+acpi_event_power_button_wake(void *context)
+{
+ struct acpi_softc *sc = (struct acpi_softc *)context;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
+ acpi_invoke_wake_eventhandler, &sc->acpi_power_button_sx)))
+ return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
+ return_VALUE (ACPI_INTERRUPT_HANDLED);
+}
+
+UINT32
+acpi_event_sleep_button_sleep(void *context)
+{
+ struct acpi_softc *sc = (struct acpi_softc *)context;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
+ acpi_invoke_sleep_eventhandler, &sc->acpi_sleep_button_sx)))
+ return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
+ return_VALUE (ACPI_INTERRUPT_HANDLED);
+}
+
+UINT32
+acpi_event_sleep_button_wake(void *context)
+{
+ struct acpi_softc *sc = (struct acpi_softc *)context;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
+ acpi_invoke_wake_eventhandler, &sc->acpi_sleep_button_sx)))
+ return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
+ return_VALUE (ACPI_INTERRUPT_HANDLED);
+}
+
+/*
+ * XXX This static buffer is suboptimal. There is no locking so only
+ * use this for single-threaded callers.
+ */
+char *
+acpi_name(ACPI_HANDLE handle)
+{
+ ACPI_BUFFER buf;
+ static char data[256];
+
+ buf.Length = sizeof(data);
+ buf.Pointer = data;
+
+ if (handle && ACPI_SUCCESS(AcpiGetName(handle, ACPI_FULL_PATHNAME, &buf)))
+ return (data);
+ return ("(unknown)");
+}
+
+/*
+ * Debugging/bug-avoidance. Avoid trying to fetch info on various
+ * parts of the namespace.
+ */
+int
+acpi_avoid(ACPI_HANDLE handle)
+{
+ char *cp, *env, *np;
+ int len;
+
+ np = acpi_name(handle);
+ if (*np == '\\')
+ np++;
+ if ((env = getenv("debug.acpi.avoid")) == NULL)
+ return (0);
+
+ /* Scan the avoid list checking for a match */
+ cp = env;
+ for (;;) {
+ while (*cp != 0 && isspace(*cp))
+ cp++;
+ if (*cp == 0)
+ break;
+ len = 0;
+ while (cp[len] != 0 && !isspace(cp[len]))
+ len++;
+ if (!strncmp(cp, np, len)) {
+ freeenv(env);
+ return(1);
+ }
+ cp += len;
+ }
+ freeenv(env);
+
+ return (0);
+}
+
+/*
+ * Debugging/bug-avoidance. Disable ACPI subsystem components.
+ */
/*
 * Debugging/bug-avoidance: return 1 if the named subsystem appears in
 * the debug.acpi.disabled environment list (or the list is "all").
 */
int
acpi_disabled(char *subsys)
{
    char *cp, *env;
    int len;

    env = getenv("debug.acpi.disabled");
    if (env == NULL)
	return (0);
    /* "all" disables every ACPI subsystem. */
    if (strcmp(env, "all") == 0) {
	freeenv(env);
	return (1);
    }

    /* Walk the whitespace-separated disable list, checking for a match. */
    for (cp = env; *cp != '\0'; cp += len) {
	while (*cp != '\0' && isspace(*cp))
	    cp++;
	if (*cp == '\0')
	    break;
	for (len = 0; cp[len] != '\0' && !isspace(cp[len]); len++)
	    ;
	if (strncmp(cp, subsys, len) == 0) {
	    freeenv(env);
	    return (1);
	}
    }
    freeenv(env);

    return (0);
}
+
+/*
+ * Control interface.
+ *
+ * We multiplex ioctls for all participating ACPI devices here. Individual
+ * drivers wanting to be accessible via /dev/acpi should use the
+ * register/deregister interface to make their handlers visible.
+ */
struct acpi_ioctl_hook
{
    TAILQ_ENTRY(acpi_ioctl_hook) link;	/* list linkage */
    u_long cmd;				/* ioctl command this hook handles */
    acpi_ioctl_fn fn;			/* handler function */
    void *arg;				/* opaque argument passed to fn */
};

/* Registered hooks; insert/remove/search are done under ACPI_LOCK(acpi). */
static TAILQ_HEAD(,acpi_ioctl_hook) acpi_ioctl_hooks;
/* Nonzero once the hook list has been initialized (lazy init). */
static int acpi_ioctl_hooks_initted;
+
+int
+acpi_register_ioctl(u_long cmd, acpi_ioctl_fn fn, void *arg)
+{
+ struct acpi_ioctl_hook *hp;
+
+ if ((hp = malloc(sizeof(*hp), M_ACPIDEV, M_NOWAIT)) == NULL)
+ return (ENOMEM);
+ hp->cmd = cmd;
+ hp->fn = fn;
+ hp->arg = arg;
+
+ ACPI_LOCK(acpi);
+ if (acpi_ioctl_hooks_initted == 0) {
+ TAILQ_INIT(&acpi_ioctl_hooks);
+ acpi_ioctl_hooks_initted = 1;
+ }
+ TAILQ_INSERT_TAIL(&acpi_ioctl_hooks, hp, link);
+ ACPI_UNLOCK(acpi);
+
+ return (0);
+}
+
+void
+acpi_deregister_ioctl(u_long cmd, acpi_ioctl_fn fn)
+{
+ struct acpi_ioctl_hook *hp;
+
+ ACPI_LOCK(acpi);
+ TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link)
+ if (hp->cmd == cmd && hp->fn == fn)
+ break;
+
+ if (hp != NULL) {
+ TAILQ_REMOVE(&acpi_ioctl_hooks, hp, link);
+ free(hp, M_ACPIDEV);
+ }
+ ACPI_UNLOCK(acpi);
+}
+
/* /dev/acpi open: no per-open state is kept, so always succeed. */
static int
acpiopen(struct cdev *dev, int flag, int fmt, struct thread *td)
{
    return (0);
}

/* /dev/acpi close: nothing to tear down. */
static int
acpiclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{
    return (0);
}
+
/*
 * /dev/acpi ioctl entry point: dispatch to a registered hook for the
 * command if one exists, otherwise handle the core sleep-state ioctls.
 */
static int
acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
    struct acpi_softc *sc;
    struct acpi_ioctl_hook *hp;
    int error, state;

    error = 0;
    hp = NULL;
    sc = dev->si_drv1;

    /*
     * Scan the list of registered ioctls, looking for handlers.
     */
    ACPI_LOCK(acpi);
    if (acpi_ioctl_hooks_initted)
	TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) {
	    if (hp->cmd == cmd)
		break;
	}
    ACPI_UNLOCK(acpi);
    if (hp)
	return (hp->fn(cmd, addr, hp->arg));

    /*
     * Core ioctls are not permitted for non-writable user.
     * Currently, other ioctls just fetch information.
     * Not changing system behavior.
     */
    if ((flag & FWRITE) == 0)
	return (EPERM);

    /* Core system ioctls. */
    switch (cmd) {
    case ACPIIO_REQSLPSTATE:
	/* Request a sleep state; S5 (power off) is refused here. */
	state = *(int *)addr;
	if (state != ACPI_STATE_S5)
	    return (acpi_ReqSleepState(sc, state));
	device_printf(sc->acpi_dev, "power off via acpi ioctl not supported\n");
	error = EOPNOTSUPP;
	break;
    case ACPIIO_ACKSLPSTATE:
	/* The int passed in is the caller's ack/veto value for the request. */
	error = *(int *)addr;
	error = acpi_AckSleepState(sc->acpi_clone, error);
	break;
    case ACPIIO_SETSLPSTATE:	/* DEPRECATED */
	/* Enter the state directly; must be valid and BIOS-supported. */
	state = *(int *)addr;
	if (state < ACPI_STATE_S0 || state > ACPI_S_STATES_MAX)
	    return (EINVAL);
	if (!acpi_sleep_states[state])
	    return (EOPNOTSUPP);
	if (ACPI_FAILURE(acpi_SetSleepState(sc, state)))
	    error = ENXIO;
	break;
    default:
	error = ENXIO;
	break;
    }

    return (error);
}
+
+static int
+acpi_sname2sstate(const char *sname)
+{
+ int sstate;
+
+ if (toupper(sname[0]) == 'S') {
+ sstate = sname[1] - '0';
+ if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5 &&
+ sname[2] == '\0')
+ return (sstate);
+ } else if (strcasecmp(sname, "NONE") == 0)
+ return (ACPI_STATE_UNKNOWN);
+ return (-1);
+}
+
+static const char *
+acpi_sstate2sname(int sstate)
+{
+ static const char *snames[] = { "S0", "S1", "S2", "S3", "S4", "S5" };
+
+ if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5)
+ return (snames[sstate]);
+ else if (sstate == ACPI_STATE_UNKNOWN)
+ return ("NONE");
+ return (NULL);
+}
+
+static int
+acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ struct sbuf sb;
+ UINT8 state;
+
+ sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
+ for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
+ if (acpi_sleep_states[state])
+ sbuf_printf(&sb, "%s ", acpi_sstate2sname(state));
+ sbuf_trim(&sb);
+ sbuf_finish(&sb);
+ error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
+ sbuf_delete(&sb);
+ return (error);
+}
+
+static int
+acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ char sleep_state[10];
+ int error, new_state, old_state;
+
+ old_state = *(int *)oidp->oid_arg1;
+ strlcpy(sleep_state, acpi_sstate2sname(old_state), sizeof(sleep_state));
+ error = sysctl_handle_string(oidp, sleep_state, sizeof(sleep_state), req);
+ if (error == 0 && req->newptr != NULL) {
+ new_state = acpi_sname2sstate(sleep_state);
+ if (new_state < ACPI_STATE_S1)
+ return (EINVAL);
+ if (new_state < ACPI_S_STATE_COUNT && !acpi_sleep_states[new_state])
+ return (EOPNOTSUPP);
+ if (new_state != old_state)
+ *(int *)oidp->oid_arg1 = new_state;
+ }
+ return (error);
+}
+
+/* Inform devctl(4) when we receive a Notify. */
+void
+acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify)
+{
+ char notify_buf[16];
+ ACPI_BUFFER handle_buf;
+ ACPI_STATUS status;
+
+ if (subsystem == NULL)
+ return;
+
+ handle_buf.Pointer = NULL;
+ handle_buf.Length = ACPI_ALLOCATE_BUFFER;
+ status = AcpiNsHandleToPathname(h, &handle_buf);
+ if (ACPI_FAILURE(status))
+ return;
+ snprintf(notify_buf, sizeof(notify_buf), "notify=0x%02x", notify);
+ devctl_notify("ACPI", subsystem, handle_buf.Pointer, notify_buf);
+ AcpiOsFree(handle_buf.Pointer);
+}
+
+#ifdef ACPI_DEBUG
+/*
+ * Support for parsing debug options from the kernel environment.
+ *
+ * Bits may be set in the AcpiDbgLayer and AcpiDbgLevel debug registers
+ * by specifying the names of the bits in the debug.acpi.layer and
+ * debug.acpi.level environment variables. Bits may be unset by
+ * prefixing the bit name with !.
+ */
/* Maps a user-visible bit name to its AcpiDbgLayer/AcpiDbgLevel bit value. */
struct debugtag
{
    char *name;
    UINT32 value;
};

/* Names accepted in debug.acpi.layer (bits of AcpiDbgLayer). */
static struct debugtag dbg_layer[] = {
    /* ACPICA core components */
    {"ACPI_UTILITIES", ACPI_UTILITIES},
    {"ACPI_HARDWARE", ACPI_HARDWARE},
    {"ACPI_EVENTS", ACPI_EVENTS},
    {"ACPI_TABLES", ACPI_TABLES},
    {"ACPI_NAMESPACE", ACPI_NAMESPACE},
    {"ACPI_PARSER", ACPI_PARSER},
    {"ACPI_DISPATCHER", ACPI_DISPATCHER},
    {"ACPI_EXECUTER", ACPI_EXECUTER},
    {"ACPI_RESOURCES", ACPI_RESOURCES},
    {"ACPI_CA_DEBUGGER", ACPI_CA_DEBUGGER},
    {"ACPI_OS_SERVICES", ACPI_OS_SERVICES},
    {"ACPI_CA_DISASSEMBLER", ACPI_CA_DISASSEMBLER},
    {"ACPI_ALL_COMPONENTS", ACPI_ALL_COMPONENTS},

    /* FreeBSD ACPI driver components */
    {"ACPI_AC_ADAPTER", ACPI_AC_ADAPTER},
    {"ACPI_BATTERY", ACPI_BATTERY},
    {"ACPI_BUS", ACPI_BUS},
    {"ACPI_BUTTON", ACPI_BUTTON},
    {"ACPI_EC", ACPI_EC},
    {"ACPI_FAN", ACPI_FAN},
    {"ACPI_POWERRES", ACPI_POWERRES},
    {"ACPI_PROCESSOR", ACPI_PROCESSOR},
    {"ACPI_THERMAL", ACPI_THERMAL},
    {"ACPI_TIMER", ACPI_TIMER},
    {"ACPI_ALL_DRIVERS", ACPI_ALL_DRIVERS},
    {NULL, 0}	/* terminator */
};

/* Names accepted in debug.acpi.level (bits of AcpiDbgLevel). */
static struct debugtag dbg_level[] = {
    {"ACPI_LV_INIT", ACPI_LV_INIT},
    {"ACPI_LV_DEBUG_OBJECT", ACPI_LV_DEBUG_OBJECT},
    {"ACPI_LV_INFO", ACPI_LV_INFO},
    {"ACPI_LV_REPAIR", ACPI_LV_REPAIR},
    {"ACPI_LV_ALL_EXCEPTIONS", ACPI_LV_ALL_EXCEPTIONS},

    /* Trace verbosity level 1 [Standard Trace Level] */
    {"ACPI_LV_INIT_NAMES", ACPI_LV_INIT_NAMES},
    {"ACPI_LV_PARSE", ACPI_LV_PARSE},
    {"ACPI_LV_LOAD", ACPI_LV_LOAD},
    {"ACPI_LV_DISPATCH", ACPI_LV_DISPATCH},
    {"ACPI_LV_EXEC", ACPI_LV_EXEC},
    {"ACPI_LV_NAMES", ACPI_LV_NAMES},
    {"ACPI_LV_OPREGION", ACPI_LV_OPREGION},
    {"ACPI_LV_BFIELD", ACPI_LV_BFIELD},
    {"ACPI_LV_TABLES", ACPI_LV_TABLES},
    {"ACPI_LV_VALUES", ACPI_LV_VALUES},
    {"ACPI_LV_OBJECTS", ACPI_LV_OBJECTS},
    {"ACPI_LV_RESOURCES", ACPI_LV_RESOURCES},
    {"ACPI_LV_USER_REQUESTS", ACPI_LV_USER_REQUESTS},
    {"ACPI_LV_PACKAGE", ACPI_LV_PACKAGE},
    {"ACPI_LV_VERBOSITY1", ACPI_LV_VERBOSITY1},

    /* Trace verbosity level 2 [Function tracing and memory allocation] */
    {"ACPI_LV_ALLOCATIONS", ACPI_LV_ALLOCATIONS},
    {"ACPI_LV_FUNCTIONS", ACPI_LV_FUNCTIONS},
    {"ACPI_LV_OPTIMIZATIONS", ACPI_LV_OPTIMIZATIONS},
    {"ACPI_LV_VERBOSITY2", ACPI_LV_VERBOSITY2},
    {"ACPI_LV_ALL", ACPI_LV_ALL},

    /* Trace verbosity level 3 [Threading, I/O, and Interrupts] */
    {"ACPI_LV_MUTEX", ACPI_LV_MUTEX},
    {"ACPI_LV_THREADS", ACPI_LV_THREADS},
    {"ACPI_LV_IO", ACPI_LV_IO},
    {"ACPI_LV_INTERRUPTS", ACPI_LV_INTERRUPTS},
    {"ACPI_LV_VERBOSITY3", ACPI_LV_VERBOSITY3},

    /* Exceptionally verbose output -- also used in the global "DebugLevel" */
    {"ACPI_LV_AML_DISASSEMBLE", ACPI_LV_AML_DISASSEMBLE},
    {"ACPI_LV_VERBOSE_INFO", ACPI_LV_VERBOSE_INFO},
    {"ACPI_LV_FULL_TABLES", ACPI_LV_FULL_TABLES},
    {"ACPI_LV_EVENTS", ACPI_LV_EVENTS},
    {"ACPI_LV_VERBOSE", ACPI_LV_VERBOSE},
    {NULL, 0}	/* terminator */
};
+
+static void
+acpi_parse_debug(char *cp, struct debugtag *tag, UINT32 *flag)
+{
+ char *ep;
+ int i, l;
+ int set;
+
+ while (*cp) {
+ if (isspace(*cp)) {
+ cp++;
+ continue;
+ }
+ ep = cp;
+ while (*ep && !isspace(*ep))
+ ep++;
+ if (*cp == '!') {
+ set = 0;
+ cp++;
+ if (cp == ep)
+ continue;
+ } else {
+ set = 1;
+ }
+ l = ep - cp;
+ for (i = 0; tag[i].name != NULL; i++) {
+ if (!strncmp(cp, tag[i].name, l)) {
+ if (set)
+ *flag |= tag[i].value;
+ else
+ *flag &= ~tag[i].value;
+ }
+ }
+ cp = ep;
+ }
+}
+
+static void
+acpi_set_debugging(void *junk)
+{
+ char *layer, *level;
+
+ if (cold) {
+ AcpiDbgLayer = 0;
+ AcpiDbgLevel = 0;
+ }
+
+ layer = getenv("debug.acpi.layer");
+ level = getenv("debug.acpi.level");
+ if (layer == NULL && level == NULL)
+ return;
+
+ printf("ACPI set debug");
+ if (layer != NULL) {
+ if (strcmp("NONE", layer) != 0)
+ printf(" layer '%s'", layer);
+ acpi_parse_debug(layer, &dbg_layer[0], &AcpiDbgLayer);
+ freeenv(layer);
+ }
+ if (level != NULL) {
+ if (strcmp("NONE", level) != 0)
+ printf(" level '%s'", level);
+ acpi_parse_debug(level, &dbg_level[0], &AcpiDbgLevel);
+ freeenv(level);
+ }
+ printf("\n");
+}
+
/* Apply the debug tunables once the kernel environment is available. */
SYSINIT(acpi_debugging, SI_SUB_TUNABLES, SI_ORDER_ANY, acpi_set_debugging,
    NULL);
+
/*
 * Sysctl handler shared by debug.acpi.layer and debug.acpi.level: report
 * the currently set bits by name, and on write, re-parse the new string
 * into the corresponding ACPICA debug register.
 */
static int
acpi_debug_sysctl(SYSCTL_HANDLER_ARGS)
{
    int error, *dbg;
    struct debugtag *tag;
    struct sbuf sb;

    if (sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND) == NULL)
	return (ENOMEM);
    /* oid_arg1 is the tunable name; it selects which register we touch. */
    if (strcmp(oidp->oid_arg1, "debug.acpi.layer") == 0) {
	tag = &dbg_layer[0];
	dbg = &AcpiDbgLayer;
    } else {
	tag = &dbg_level[0];
	dbg = &AcpiDbgLevel;
    }

    /* Get old values if this is a get request. */
    ACPI_SERIAL_BEGIN(acpi);
    if (*dbg == 0) {
	sbuf_cpy(&sb, "NONE");
    } else if (req->newptr == NULL) {
	/* List every tag whose bits are all set in the register. */
	for (; tag->name != NULL; tag++) {
	    if ((*dbg & tag->value) == tag->value)
		sbuf_printf(&sb, "%s ", tag->name);
	}
    }
    sbuf_trim(&sb);
    sbuf_finish(&sb);

    /* Copy out the old values to the user. */
    error = SYSCTL_OUT(req, sbuf_data(&sb), sbuf_len(&sb));
    sbuf_delete(&sb);

    /* If the user is setting a string, parse it. */
    if (error == 0 && req->newptr != NULL) {
	*dbg = 0;
	/* Stash it in the environment so acpi_set_debugging() can parse it. */
	setenv((char *)oidp->oid_arg1, (char *)req->newptr);
	acpi_set_debugging(NULL);
    }
    ACPI_SERIAL_END(acpi);

    return (error);
}
+
/* Expose the layer/level debug masks as writable string sysctls. */
SYSCTL_PROC(_debug_acpi, OID_AUTO, layer, CTLFLAG_RW | CTLTYPE_STRING,
    "debug.acpi.layer", 0, acpi_debug_sysctl, "A", "");
SYSCTL_PROC(_debug_acpi, OID_AUTO, level, CTLFLAG_RW | CTLTYPE_STRING,
    "debug.acpi.level", 0, acpi_debug_sysctl, "A", "");
+#endif /* ACPI_DEBUG */
+
+static int
+acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ int old;
+
+ old = acpi_debug_objects;
+ error = sysctl_handle_int(oidp, &acpi_debug_objects, 0, req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+ if (old == acpi_debug_objects || (old && acpi_debug_objects))
+ return (0);
+
+ ACPI_SERIAL_BEGIN(acpi);
+ AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE;
+ ACPI_SERIAL_END(acpi);
+
+ return (0);
+}
+
/*
 * Split a comma/whitespace-separated interface list into iface->data, an
 * array of pointers into a single duplicated buffer.  Returns the number
 * of entries found (0 if the string is empty or all separators).
 * The caller releases the result with acpi_free_interfaces().
 */
static int
acpi_parse_interfaces(char *str, struct acpi_interface *iface)
{
    char *p;
    size_t len;
    int i, j;

    /* Skip leading whitespace and commas. */
    p = str;
    while (isspace(*p) || *p == ',')
	p++;
    len = strlen(p);
    if (len == 0)
	return (0);
    /* Work on a private copy; commas become string terminators. */
    p = strdup(p, M_TEMP);
    for (i = 0; i < len; i++)
	if (p[i] == ',')
	    p[i] = '\0';
    /* First pass: count the non-empty tokens. */
    i = j = 0;
    while (i < len)
	if (isspace(p[i]) || p[i] == '\0')
	    i++;
	else {
	    i += strlen(p + i) + 1;
	    j++;
	}
    if (j == 0) {
	free(p, M_TEMP);
	return (0);
    }
    iface->data = malloc(sizeof(*iface->data) * j, M_TEMP, M_WAITOK);
    iface->num = j;
    /* Second pass: record a pointer to the start of each token. */
    i = j = 0;
    while (i < len)
	if (isspace(p[i]) || p[i] == '\0')
	    i++;
	else {
	    iface->data[j] = p + i;
	    i += strlen(p + i) + 1;
	    j++;
	}

    return (j);
}
+
/*
 * Release a list built by acpi_parse_interfaces().  All entries point into
 * one strdup'd buffer whose first token is data[0], so freeing data[0]
 * releases the strings and freeing data releases the pointer array.
 */
static void
acpi_free_interfaces(struct acpi_interface *iface)
{

    free(iface->data[0], M_TEMP);
    free(iface->data, M_TEMP);
}
+
+static void
+acpi_reset_interfaces(device_t dev)
+{
+ struct acpi_interface list;
+ ACPI_STATUS status;
+ int i;
+
+ if (acpi_parse_interfaces(acpi_install_interface, &list) > 0) {
+ for (i = 0; i < list.num; i++) {
+ status = AcpiInstallInterface(list.data[i]);
+ if (ACPI_FAILURE(status))
+ device_printf(dev,
+ "failed to install _OSI(\"%s\"): %s\n",
+ list.data[i], AcpiFormatException(status));
+ else if (bootverbose)
+ device_printf(dev, "installed _OSI(\"%s\")\n",
+ list.data[i]);
+ }
+ acpi_free_interfaces(&list);
+ }
+ if (acpi_parse_interfaces(acpi_remove_interface, &list) > 0) {
+ for (i = 0; i < list.num; i++) {
+ status = AcpiRemoveInterface(list.data[i]);
+ if (ACPI_FAILURE(status))
+ device_printf(dev,
+ "failed to remove _OSI(\"%s\"): %s\n",
+ list.data[i], AcpiFormatException(status));
+ else if (bootverbose)
+ device_printf(dev, "removed _OSI(\"%s\")\n",
+ list.data[i]);
+ }
+ acpi_free_interfaces(&list);
+ }
+}
+
/*
 * power(9) backend entry point: handle POWER_CMD_SUSPEND by mapping the
 * generic sleep level to an ACPI S-state and entering it.  Returns 0 on
 * success or an errno value.
 */
static int
acpi_pm_func(u_long cmd, void *arg, ...)
{
    int state, acpi_state;
    int error;
    struct acpi_softc *sc;
    va_list ap;

    error = 0;
    switch (cmd) {
    case POWER_CMD_SUSPEND:
	sc = (struct acpi_softc *)arg;
	if (sc == NULL) {
	    error = EINVAL;
	    goto out;
	}

	/* The requested sleep level follows as a variadic int. */
	va_start(ap, arg);
	state = va_arg(ap, int);
	va_end(ap);

	/* Map the generic level to the configured/fixed ACPI state. */
	switch (state) {
	case POWER_SLEEP_STATE_STANDBY:
	    acpi_state = sc->acpi_standby_sx;
	    break;
	case POWER_SLEEP_STATE_SUSPEND:
	    acpi_state = sc->acpi_suspend_sx;
	    break;
	case POWER_SLEEP_STATE_HIBERNATE:
	    acpi_state = ACPI_STATE_S4;
	    break;
	default:
	    error = EINVAL;
	    goto out;
	}

	if (ACPI_FAILURE(acpi_EnterSleepState(sc, acpi_state)))
	    error = ENXIO;
	break;
    default:
	error = EINVAL;
	goto out;
    }

out:
    return (error);
}
+
+static void
+acpi_pm_register(void *arg)
+{
+ if (!cold || resource_disabled("acpi", 0))
+ return;
+
+ power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL);
+}
+
/* Hook acpi_pm_register() into boot so power(9) learns about ACPI suspend. */
SYSINIT(power, SI_SUB_KLD, SI_ORDER_ANY, acpi_pm_register, 0);
diff --git a/sys/dev/acpica/acpi_acad.c b/sys/dev/acpica/acpi_acad.c
new file mode 100644
index 0000000..8153abc
--- /dev/null
+++ b/sys/dev/acpica/acpi_acad.c
@@ -0,0 +1,283 @@
+/*-
+ * Copyright (c) 2000 Takanori Watanabe
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_acpi.h"
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <sys/ioccom.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/conf.h>
+#include <sys/power.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+
+#include <dev/acpica/acpivar.h>
+#include <dev/acpica/acpiio.h>
+#include <isa/isavar.h>
+#include <isa/pnpvar.h>
+
+/* Hooks for the ACPI CA debugging infrastructure */
+#define _COMPONENT ACPI_AC_ADAPTER
+ACPI_MODULE_NAME("AC_ADAPTER")
+
+/* Number of times to retry initialization before giving up. */
+#define ACPI_ACAD_RETRY_MAX 6
+
+/* Notify code sent by the AC adapter object when its state changes. */
+#define ACPI_POWERSOURCE_STAT_CHANGE 0x80
+
+/* Per-device state: cached _PSR result (nonzero = on line, 0 = off line, -1 = unknown). */
+struct acpi_acad_softc {
+ int status;
+};
+
+static void acpi_acad_get_status(void *);
+static void acpi_acad_notify_handler(ACPI_HANDLE, UINT32, void *);
+static int acpi_acad_probe(device_t);
+static int acpi_acad_attach(device_t);
+static int acpi_acad_ioctl(u_long, caddr_t, void *);
+static int acpi_acad_sysctl(SYSCTL_HANDLER_ARGS);
+static void acpi_acad_init_acline(void *arg);
+static void acpi_acad_ac_only(void *arg);
+
+static device_method_t acpi_acad_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, acpi_acad_probe),
+ DEVMETHOD(device_attach, acpi_acad_attach),
+
+ {0, 0}
+};
+
+static driver_t acpi_acad_driver = {
+ "acpi_acad",
+ acpi_acad_methods,
+ sizeof(struct acpi_acad_softc),
+};
+
+static devclass_t acpi_acad_devclass;
+DRIVER_MODULE(acpi_acad, acpi, acpi_acad_driver, acpi_acad_devclass, 0, 0);
+MODULE_DEPEND(acpi_acad, acpi, 1, 1, 1);
+
+/* Serializes updates of the cached status in acpi_acad_get_status(). */
+ACPI_SERIAL_DECL(acad, "ACPI AC adapter");
+
+/* After boot, emit a synthetic "online" event if no adapter devices exist. */
+SYSINIT(acad, SI_SUB_KTHREAD_IDLE, SI_ORDER_FIRST, acpi_acad_ac_only, NULL);
+
+/*
+ * Re-evaluate _PSR and, if the cached state changed, propagate the new
+ * AC line state to the power-profile code and to userland via devd.
+ * Called from the notify handler and attach via AcpiOsExecute() as well
+ * as directly from the ioctl and acpi_acad_get_acline() paths.
+ */
+static void
+acpi_acad_get_status(void *context)
+{
+ struct acpi_acad_softc *sc;
+ device_t dev;
+ ACPI_HANDLE h;
+ int newstatus;
+
+ dev = context;
+ sc = device_get_softc(dev);
+ h = acpi_get_handle(dev);
+ /* -1 means "unknown"; it is left intact if the _PSR evaluation fails. */
+ newstatus = -1;
+ acpi_GetInteger(h, "_PSR", &newstatus);
+
+ /* If status is valid and has changed, notify the system. */
+ ACPI_SERIAL_BEGIN(acad);
+ if (newstatus != -1 && sc->status != newstatus) {
+ sc->status = newstatus;
+ /*
+ * Drop the serial lock before the notification calls below; the
+ * cached value has already been updated at this point.
+ */
+ ACPI_SERIAL_END(acad);
+ power_profile_set_state(newstatus ? POWER_PROFILE_PERFORMANCE :
+ POWER_PROFILE_ECONOMY);
+ ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
+ "%s Line\n", newstatus ? "On" : "Off");
+ acpi_UserNotify("ACAD", h, newstatus);
+ } else
+ ACPI_SERIAL_END(acad);
+}
+
+/*
+ * Notify callback: any bus/device check or power-source status change
+ * queues a deferred status refresh; other codes are merely logged.
+ */
+static void
+acpi_acad_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context)
+{
+    device_t dev = (device_t)context;
+
+    if (notify == ACPI_NOTIFY_BUS_CHECK ||
+	notify == ACPI_NOTIFY_DEVICE_CHECK ||
+	notify == ACPI_POWERSOURCE_STAT_CHANGE) {
+	/* Temporary approach; notifying a policy manager would be better. */
+	AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_acad_get_status, context);
+	return;
+    }
+    device_printf(dev, "unknown notify %#x\n", notify);
+}
+
+/* Match ACPI0003 (AC adapter) objects unless disabled by hints. */
+static int
+acpi_acad_probe(device_t dev)
+{
+    static char *acad_ids[] = { "ACPI0003", NULL };
+
+    if (acpi_disabled("acad"))
+	return (ENXIO);
+    if (ACPI_ID_PROBE(device_get_parent(dev), dev, acad_ids) == NULL)
+	return (ENXIO);
+
+    device_set_desc(dev, "AC Adapter");
+    return (0);
+}
+
+/*
+ * Attach: register the status ioctl, create the "acline" sysctl on the
+ * first unit, install notify handlers, and queue the initial poll.
+ */
+static int
+acpi_acad_attach(device_t dev)
+{
+ struct acpi_acad_softc *sc;
+ struct acpi_softc *acpi_sc;
+ ACPI_HANDLE handle;
+ int error;
+
+ sc = device_get_softc(dev);
+ handle = acpi_get_handle(dev);
+
+ error = acpi_register_ioctl(ACPIIO_ACAD_GET_STATUS, acpi_acad_ioctl, dev);
+ if (error != 0)
+ return (error);
+
+ /* Only unit 0 exports the sysctl; it reports through its own softc. */
+ if (device_get_unit(dev) == 0) {
+ acpi_sc = acpi_device_get_parent_softc(dev);
+ SYSCTL_ADD_PROC(&acpi_sc->acpi_sysctl_ctx,
+ SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree),
+ OID_AUTO, "acline", CTLTYPE_INT | CTLFLAG_RD,
+ &sc->status, 0, acpi_acad_sysctl, "I", "");
+ }
+
+ /* Get initial status after whole system is up. */
+ sc->status = -1;
+
+ /*
+ * Install both system and device notify handlers since the Casio
+ * FIVA needs them.
+ */
+ AcpiInstallNotifyHandler(handle, ACPI_ALL_NOTIFY,
+ acpi_acad_notify_handler, dev);
+ AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_acad_init_acline, dev);
+
+ return (0);
+}
+
+/*
+ * ioctl backend: ACPIIO_ACAD_GET_STATUS refreshes and returns the AC
+ * line state through addr; every other command is silently accepted.
+ */
+static int
+acpi_acad_ioctl(u_long cmd, caddr_t addr, void *arg)
+{
+    device_t dev = (device_t)arg;
+    struct acpi_acad_softc *sc = device_get_softc(dev);
+
+    /*
+     * No security check required: information retrieval only. If
+     * new functions are added here, a check might be required.
+     */
+    if (cmd == ACPIIO_ACAD_GET_STATUS) {
+	acpi_acad_get_status(dev);
+	*(int *)addr = sc->status;
+    }
+
+    return (0);
+}
+
+/*
+ * Sysctl handler for the "acline" node: refresh the adapter state and
+ * report it.  Returns ENXIO when no AC adapter device has attached.
+ */
+static int
+acpi_acad_sysctl(SYSCTL_HANDLER_ARGS)
+{
+    int val, error;
+
+    /*
+     * acpi_acad_get_acline() both refreshes the cached status and
+     * stores it in val.  The previous code then discarded that result
+     * by re-reading the identical value through oidp->oid_arg1 (with a
+     * spurious u_int cast); report the fetched value directly instead.
+     */
+    if (acpi_acad_get_acline(&val) != 0)
+	return (ENXIO);
+
+    error = sysctl_handle_int(oidp, &val, 0, req);
+    return (error);
+}
+
+/*
+ * Deferred attach-time task: poll _PSR until it yields a valid state,
+ * sleeping 10ms between attempts, up to ACPI_ACAD_RETRY_MAX tries.
+ */
+static void
+acpi_acad_init_acline(void *arg)
+{
+    struct acpi_acad_softc *sc;
+    device_t dev;
+    int tries;
+
+    dev = (device_t)arg;
+    sc = device_get_softc(dev);
+    ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
+	"acline initialization start\n");
+
+    tries = 0;
+    while (tries < ACPI_ACAD_RETRY_MAX) {
+	acpi_acad_get_status(dev);
+	if (sc->status != -1)
+	    break;
+	AcpiOsSleep(10000);
+	tries++;
+    }
+
+    ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
+	"acline initialization done, tried %d times\n", tries + 1);
+}
+
+/*
+ * If no AC line devices were detected by boot time, synthesize an
+ * "online" event so userland can adjust power settings.  The default
+ * power profile is already "performance", so no profile change is made.
+ */
+static void
+acpi_acad_ac_only(void __unused *arg)
+{
+
+    if (devclass_get_count(acpi_acad_devclass) != 0)
+	return;
+    acpi_UserNotify("ACAD", ACPI_ROOT_OBJECT, 1);
+}
+
+/*
+ * Public interfaces.
+ */
+
+/*
+ * Refresh and report the AC line state via the first AC adapter unit.
+ * Returns ENXIO when no adapter device has attached.
+ */
+int
+acpi_acad_get_acline(int *status)
+{
+    struct acpi_acad_softc *sc;
+    device_t dev;
+
+    dev = devclass_get_device(acpi_acad_devclass, 0);
+    if (dev == NULL)
+	return (ENXIO);
+
+    acpi_acad_get_status(dev);
+    sc = device_get_softc(dev);
+    *status = sc->status;
+    return (0);
+}
diff --git a/sys/dev/acpica/acpi_battery.c b/sys/dev/acpica/acpi_battery.c
new file mode 100644
index 0000000..3d7d94a
--- /dev/null
+++ b/sys/dev/acpica/acpi_battery.c
@@ -0,0 +1,513 @@
+/*-
+ * Copyright (c) 2005 Nate Lawson
+ * Copyright (c) 2000 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_acpi.h"
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/bus.h>
+#include <sys/ioccom.h>
+#include <sys/sysctl.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+
+#include <dev/acpica/acpivar.h>
+#include <dev/acpica/acpiio.h>
+
+/* Default seconds before re-sampling the battery state. */
+#define ACPI_BATTERY_INFO_EXPIRE 5
+
+/* Non-zero once acpi_battery_init() has completed successfully. */
+static int acpi_batteries_initted;
+/* Tunable cache lifetime, exported via the "info_expire" sysctl. */
+static int acpi_battery_info_expire = ACPI_BATTERY_INFO_EXPIRE;
+/* Aggregate state backing the sysctls created in acpi_battery_init(). */
+static struct acpi_battinfo acpi_battery_battinfo;
+static struct sysctl_ctx_list acpi_battery_sysctl_ctx;
+static struct sysctl_oid *acpi_battery_sysctl_tree;
+
+/* Serializes the one-time initialization in acpi_battery_register(). */
+ACPI_SERIAL_DECL(battery, "ACPI generic battery");
+
+static void acpi_reset_battinfo(struct acpi_battinfo *info);
+static void acpi_battery_clean_str(char *str, int len);
+static device_t acpi_battery_find_dev(u_int logical_unit);
+static int acpi_battery_ioctl(u_long cmd, caddr_t addr, void *arg);
+static int acpi_battery_sysctl(SYSCTL_HANDLER_ARGS);
+static int acpi_battery_units_sysctl(SYSCTL_HANDLER_ARGS);
+static int acpi_battery_init(void);
+
+/*
+ * Called by individual battery drivers at attach time; performs the
+ * one-time setup of the shared ioctl and sysctl interface.
+ */
+int
+acpi_battery_register(device_t dev)
+{
+    int error = 0;
+
+    ACPI_SERIAL_BEGIN(battery);
+    if (acpi_batteries_initted == 0)
+	error = acpi_battery_init();
+    ACPI_SERIAL_END(battery);
+    return (error);
+}
+
+/*
+ * Battery-driver detach hook.  Nothing to undo here: the interface set
+ * up by acpi_battery_init() is shared by all units and stays in place.
+ */
+int
+acpi_battery_remove(device_t dev)
+{
+
+ return (0);
+}
+
+/*
+ * Return the number of attached battery devices, or 0 when the
+ * "battery" devclass does not exist yet.
+ */
+int
+acpi_battery_get_units(void)
+{
+    devclass_t dc;
+
+    dc = devclass_find("battery");
+    return (dc != NULL ? devclass_get_count(dc) : 0);
+}
+
+/* Return the battery-state cache lifetime, in seconds. */
+int
+acpi_battery_get_info_expire(void)
+{
+
+ return (acpi_battery_info_expire);
+}
+
+/*
+ * Check _BST results for validity: the pack must be present and both
+ * capacity and voltage must be known values.
+ */
+int
+acpi_battery_bst_valid(struct acpi_bst *bst)
+{
+
+    if (bst->state == ACPI_BATT_STAT_NOT_PRESENT)
+	return (0);
+    return (bst->cap != ACPI_BATT_UNKNOWN && bst->volt != ACPI_BATT_UNKNOWN);
+}
+
+/*
+ * Check _BIF results for validity: a zero last-full capacity would
+ * cause a division by zero in the percentage calculation.
+ */
+int
+acpi_battery_bif_valid(struct acpi_bif *bif)
+{
+
+    if (bif->lfcap == 0)
+	return (0);
+    return (1);
+}
+
+/*
+ * Get info about one or all batteries.
+ *
+ * dev: a specific battery device, or NULL to aggregate across all
+ * attached batteries.  battinfo: out parameter receiving percent
+ * capacity, minutes remaining, state flags and discharge rate.
+ * Returns 0 on success, or ENXIO when no batteries (or the requested
+ * one) are available.
+ */
+int
+acpi_battery_get_battinfo(device_t dev, struct acpi_battinfo *battinfo)
+{
+ int batt_stat, devcount, dev_idx, error, i;
+ int total_cap, total_min, valid_rate, valid_units;
+ devclass_t batt_dc;
+ device_t batt_dev;
+ struct acpi_bst *bst;
+ struct acpi_bif *bif;
+ struct acpi_battinfo *bi;
+
+ /*
+ * Get the battery devclass and max unit for battery devices. If there
+ * are none or error, return immediately.
+ */
+ batt_dc = devclass_find("battery");
+ if (batt_dc == NULL)
+ return (ENXIO);
+ devcount = devclass_get_maxunit(batt_dc);
+ if (devcount == 0)
+ return (ENXIO);
+
+ /*
+ * Allocate storage for all _BST data, their derived battinfo data,
+ * and the current battery's _BIF data.
+ */
+ bst = malloc(devcount * sizeof(*bst), M_TEMP, M_WAITOK | M_ZERO);
+ bi = malloc(devcount * sizeof(*bi), M_TEMP, M_WAITOK | M_ZERO);
+ bif = malloc(sizeof(*bif), M_TEMP, M_WAITOK | M_ZERO);
+
+ /*
+ * Pass 1: for each battery that is present and valid, get its status,
+ * calculate percent capacity remaining, and sum all the current
+ * discharge rates.
+ */
+ dev_idx = -1;
+ batt_stat = valid_rate = valid_units = 0;
+ for (i = 0; i < devcount; i++) {
+ /* Default info for every battery is "not present". */
+ acpi_reset_battinfo(&bi[i]);
+
+ /*
+ * Find the device. Since devcount is in terms of max units, this
+ * may be a sparse array so skip devices that aren't present.
+ */
+ batt_dev = devclass_get_device(batt_dc, i);
+ if (batt_dev == NULL)
+ continue;
+
+ /* If examining a specific battery and this is it, record its index. */
+ if (dev != NULL && dev == batt_dev)
+ dev_idx = i;
+
+ /*
+ * Be sure we can get various info from the battery. Note that
+ * acpi_BatteryIsPresent() is not enough because smart batteries only
+ * return that the device is present.
+ */
+ if (!acpi_BatteryIsPresent(batt_dev) ||
+ ACPI_BATT_GET_STATUS(batt_dev, &bst[i]) != 0 ||
+ ACPI_BATT_GET_INFO(batt_dev, bif) != 0)
+ continue;
+
+ /* If a battery is not installed, we sometimes get strange values. */
+ if (!acpi_battery_bst_valid(&bst[i]) ||
+ !acpi_battery_bif_valid(bif))
+ continue;
+
+ /*
+ * Record current state. If both charging and discharging are set,
+ * ignore the charging flag.
+ */
+ valid_units++;
+ if ((bst[i].state & ACPI_BATT_STAT_DISCHARG) != 0)
+ bst[i].state &= ~ACPI_BATT_STAT_CHARGING;
+ batt_stat |= bst[i].state;
+ bi[i].state = bst[i].state;
+
+ /*
+ * If the battery info is in terms of mA, convert to mW by
+ * multiplying by the design voltage. If the design voltage
+ * is 0 (due to some error reading the battery), skip this
+ * conversion.
+ */
+ /*
+ * NOTE(review): the dev == NULL test restricts the conversion to
+ * the aggregate query; per-device queries keep native units --
+ * confirm this is intentional.
+ */
+ if (bif->units == ACPI_BIF_UNITS_MA && bif->dvol != 0 && dev == NULL) {
+ bst[i].rate = (bst[i].rate * bif->dvol) / 1000;
+ bst[i].cap = (bst[i].cap * bif->dvol) / 1000;
+ bif->lfcap = (bif->lfcap * bif->dvol) / 1000;
+ }
+
+ /*
+ * The calculation above may set bif->lfcap to zero. This was
+ * seen on a laptop with a broken battery. The result of the
+ * division was rounded to zero.
+ */
+ if (!acpi_battery_bif_valid(bif))
+ continue;
+
+ /* Calculate percent capacity remaining. */
+ bi[i].cap = (100 * bst[i].cap) / bif->lfcap;
+
+ /*
+ * Some laptops report the "design-capacity" instead of the
+ * "real-capacity" when the battery is fully charged. That breaks
+ * the above arithmetic as it needs to be 100% maximum.
+ */
+ if (bi[i].cap > 100)
+ bi[i].cap = 100;
+
+ /*
+ * On systems with more than one battery, they may get used
+ * sequentially, thus bst.rate may only signify the one currently
+ * in use. For the remaining batteries, bst.rate will be zero,
+ * which makes it impossible to calculate the total remaining time.
+ * Therefore, we sum the bst.rate for batteries in the discharging
+ * state and use the sum to calculate the total remaining time.
+ */
+ if (bst[i].rate != ACPI_BATT_UNKNOWN &&
+ (bst[i].state & ACPI_BATT_STAT_DISCHARG) != 0)
+ valid_rate += bst[i].rate;
+ }
+
+ /* If the caller asked for a device but we didn't find it, error. */
+ if (dev != NULL && dev_idx == -1) {
+ error = ENXIO;
+ goto out;
+ }
+
+ /* Pass 2: calculate capacity and remaining time for all batteries. */
+ total_cap = total_min = 0;
+ for (i = 0; i < devcount; i++) {
+ /*
+ * If any batteries are discharging, use the sum of the bst.rate
+ * values. Otherwise, we are on AC power, and there is infinite
+ * time remaining for this battery until we go offline.
+ */
+ if (valid_rate > 0)
+ bi[i].min = (60 * bst[i].cap) / valid_rate;
+ else
+ bi[i].min = 0;
+ total_min += bi[i].min;
+
+ /* If this battery is not present, don't use its capacity. */
+ if (bi[i].cap != -1)
+ total_cap += bi[i].cap;
+ }
+
+ /*
+ * Return total battery percent and time remaining. If there are
+ * no valid batteries, report values as unknown.
+ */
+ if (valid_units > 0) {
+ if (dev == NULL) {
+ battinfo->cap = total_cap / valid_units;
+ battinfo->min = total_min;
+ battinfo->state = batt_stat;
+ battinfo->rate = valid_rate;
+ } else {
+ battinfo->cap = bi[dev_idx].cap;
+ battinfo->min = bi[dev_idx].min;
+ battinfo->state = bi[dev_idx].state;
+ battinfo->rate = bst[dev_idx].rate;
+ }
+
+ /*
+ * If the queried battery has no discharge rate or is charging,
+ * report that we don't know the remaining time.
+ */
+ if (valid_rate == 0 || (battinfo->state & ACPI_BATT_STAT_CHARGING))
+ battinfo->min = -1;
+ } else
+ acpi_reset_battinfo(battinfo);
+
+ error = 0;
+
+out:
+ /* malloc(M_WAITOK) cannot return NULL, so these guards are redundant. */
+ if (bi)
+ free(bi, M_TEMP);
+ if (bif)
+ free(bif, M_TEMP);
+ if (bst)
+ free(bst, M_TEMP);
+ return (error);
+}
+
+/* Mark a battinfo record as "battery not present". */
+static void
+acpi_reset_battinfo(struct acpi_battinfo *info)
+{
+
+    info->cap = info->min = info->rate = -1;
+    info->state = ACPI_BATT_STAT_NOT_PRESENT;
+}
+
+/*
+ * Make string printable in place, replacing non-printable bytes with
+ * '?'.  If the first len bytes contain no NUL, the last byte of the
+ * buffer is overwritten with one so callers always see a terminated
+ * string.
+ */
+static void
+acpi_battery_clean_str(char *str, int len)
+{
+    int i;
+
+    for (i = 0; i < len && *str != '\0'; i++, str++) {
+	/* Cast: the ctype functions require an unsigned char value. */
+	if (!isprint((unsigned char)*str))
+	    *str = '?';
+    }
+
+    /*
+     * NUL-terminate the string if we reached the end.  Write inside the
+     * buffer; the previous code stored to str[len], one byte past it.
+     */
+    if (i == len && len > 0)
+	*(str - 1) = '\0';
+}
+
+/*
+ * The battery interface deals with devices and methods but userland
+ * expects a logical unit number.  Map a logical unit to a device_t by
+ * counting only the devclass slots that are actually populated.
+ * Returns NULL when the unit does not exist.
+ */
+static device_t
+acpi_battery_find_dev(u_int logical_unit)
+{
+    devclass_t batt_dc;
+    device_t batt;
+    int idx, maxunit, seen;
+
+    batt_dc = devclass_find("battery");
+    maxunit = devclass_get_maxunit(batt_dc);
+    seen = 0;
+    for (idx = 0; idx < maxunit; idx++) {
+	batt = devclass_get_device(batt_dc, idx);
+	if (batt == NULL)
+	    continue;
+	if (logical_unit == seen)
+	    return (batt);
+	seen++;
+    }
+
+    return (NULL);
+}
+
+/*
+ * Common ioctl backend for all battery drivers.  For GET_UNITS, addr
+ * receives an int; for the other commands it points at a union
+ * acpi_battery_ioctl_arg whose unit field selects a logical battery
+ * (or ACPI_BATTERY_ALL_UNITS).  error defaults to ENXIO so a request
+ * for a unit that cannot be resolved falls through as an error.
+ */
+static int
+acpi_battery_ioctl(u_long cmd, caddr_t addr, void *arg)
+{
+ union acpi_battery_ioctl_arg *ioctl_arg;
+ int error, unit;
+ device_t dev;
+
+ /* For commands that use the ioctl_arg struct, validate it first. */
+ error = ENXIO;
+ unit = 0;
+ dev = NULL;
+ ioctl_arg = NULL;
+ if (IOCPARM_LEN(cmd) == sizeof(*ioctl_arg)) {
+ ioctl_arg = (union acpi_battery_ioctl_arg *)addr;
+ unit = ioctl_arg->unit;
+ if (unit != ACPI_BATTERY_ALL_UNITS)
+ dev = acpi_battery_find_dev(unit);
+ }
+
+ /*
+ * No security check required: information retrieval only. If
+ * new functions are added here, a check might be required.
+ */
+ switch (cmd) {
+ case ACPIIO_BATT_GET_UNITS:
+ *(int *)addr = acpi_battery_get_units();
+ error = 0;
+ break;
+ case ACPIIO_BATT_GET_BATTINFO:
+ if (dev != NULL || unit == ACPI_BATTERY_ALL_UNITS) {
+ bzero(&ioctl_arg->battinfo, sizeof(ioctl_arg->battinfo));
+ error = acpi_battery_get_battinfo(dev, &ioctl_arg->battinfo);
+ }
+ break;
+ case ACPIIO_BATT_GET_BIF:
+ if (dev != NULL) {
+ bzero(&ioctl_arg->bif, sizeof(ioctl_arg->bif));
+ error = ACPI_BATT_GET_INFO(dev, &ioctl_arg->bif);
+
+ /*
+ * Remove invalid characters. Perhaps this should be done
+ * within a convenience function so all callers get the
+ * benefit.
+ */
+ acpi_battery_clean_str(ioctl_arg->bif.model,
+ sizeof(ioctl_arg->bif.model));
+ acpi_battery_clean_str(ioctl_arg->bif.serial,
+ sizeof(ioctl_arg->bif.serial));
+ acpi_battery_clean_str(ioctl_arg->bif.type,
+ sizeof(ioctl_arg->bif.type));
+ acpi_battery_clean_str(ioctl_arg->bif.oeminfo,
+ sizeof(ioctl_arg->bif.oeminfo));
+ }
+ break;
+ case ACPIIO_BATT_GET_BST:
+ if (dev != NULL) {
+ bzero(&ioctl_arg->bst, sizeof(ioctl_arg->bst));
+ error = ACPI_BATT_GET_STATUS(dev, &ioctl_arg->bst);
+ }
+ break;
+ default:
+ error = EINVAL;
+ }
+
+ return (error);
+}
+
+/*
+ * Shared handler for the life/time/state sysctls: refresh the global
+ * battinfo, then export whichever of its fields oid_arg1 points at.
+ */
+static int
+acpi_battery_sysctl(SYSCTL_HANDLER_ARGS)
+{
+    int snapshot;
+
+    acpi_battery_get_battinfo(NULL, &acpi_battery_battinfo);
+    snapshot = *(u_int *)oidp->oid_arg1;
+    return (sysctl_handle_int(oidp, &snapshot, 0, req));
+}
+
+/* Sysctl handler reporting the number of attached battery units. */
+static int
+acpi_battery_units_sysctl(SYSCTL_HANDLER_ARGS)
+{
+    int units;
+
+    units = acpi_battery_get_units();
+    return (sysctl_handle_int(oidp, &units, 0, req));
+}
+
+/*
+ * One-time setup: register the battery ioctls and create the "battery"
+ * sysctl subtree under the acpi device's tree.  Called with the battery
+ * serial lock held from acpi_battery_register().  On failure, any
+ * ioctls registered so far are deregistered again.
+ */
+static int
+acpi_battery_init(void)
+{
+ struct acpi_softc *sc;
+ device_t dev;
+ int error;
+
+ ACPI_SERIAL_ASSERT(battery);
+
+ error = ENXIO;
+ dev = devclass_get_device(devclass_find("acpi"), 0);
+ if (dev == NULL)
+ goto out;
+ sc = device_get_softc(dev);
+
+ error = acpi_register_ioctl(ACPIIO_BATT_GET_UNITS, acpi_battery_ioctl,
+ NULL);
+ if (error != 0)
+ goto out;
+ error = acpi_register_ioctl(ACPIIO_BATT_GET_BATTINFO, acpi_battery_ioctl,
+ NULL);
+ if (error != 0)
+ goto out;
+ error = acpi_register_ioctl(ACPIIO_BATT_GET_BIF, acpi_battery_ioctl, NULL);
+ if (error != 0)
+ goto out;
+ error = acpi_register_ioctl(ACPIIO_BATT_GET_BST, acpi_battery_ioctl, NULL);
+ if (error != 0)
+ goto out;
+
+ sysctl_ctx_init(&acpi_battery_sysctl_ctx);
+ acpi_battery_sysctl_tree = SYSCTL_ADD_NODE(&acpi_battery_sysctl_ctx,
+ SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "battery", CTLFLAG_RD,
+ 0, "battery status and info");
+ SYSCTL_ADD_PROC(&acpi_battery_sysctl_ctx,
+ SYSCTL_CHILDREN(acpi_battery_sysctl_tree),
+ OID_AUTO, "life", CTLTYPE_INT | CTLFLAG_RD,
+ &acpi_battery_battinfo.cap, 0, acpi_battery_sysctl, "I",
+ "percent capacity remaining");
+ SYSCTL_ADD_PROC(&acpi_battery_sysctl_ctx,
+ SYSCTL_CHILDREN(acpi_battery_sysctl_tree),
+ OID_AUTO, "time", CTLTYPE_INT | CTLFLAG_RD,
+ &acpi_battery_battinfo.min, 0, acpi_battery_sysctl, "I",
+ "remaining time in minutes");
+ SYSCTL_ADD_PROC(&acpi_battery_sysctl_ctx,
+ SYSCTL_CHILDREN(acpi_battery_sysctl_tree),
+ OID_AUTO, "state", CTLTYPE_INT | CTLFLAG_RD,
+ &acpi_battery_battinfo.state, 0, acpi_battery_sysctl, "I",
+ "current status flags");
+ SYSCTL_ADD_PROC(&acpi_battery_sysctl_ctx,
+ SYSCTL_CHILDREN(acpi_battery_sysctl_tree),
+ OID_AUTO, "units", CTLTYPE_INT | CTLFLAG_RD,
+ NULL, 0, acpi_battery_units_sysctl, "I", "number of batteries");
+ SYSCTL_ADD_INT(&acpi_battery_sysctl_ctx,
+ SYSCTL_CHILDREN(acpi_battery_sysctl_tree),
+ OID_AUTO, "info_expire", CTLFLAG_RW,
+ &acpi_battery_info_expire, 0,
+ "time in seconds until info is refreshed");
+
+ acpi_batteries_initted = TRUE;
+
+out:
+ /* NOTE(review): assumes deregistering a never-registered ioctl is safe. */
+ if (error != 0) {
+ acpi_deregister_ioctl(ACPIIO_BATT_GET_UNITS, acpi_battery_ioctl);
+ acpi_deregister_ioctl(ACPIIO_BATT_GET_BATTINFO, acpi_battery_ioctl);
+ acpi_deregister_ioctl(ACPIIO_BATT_GET_BIF, acpi_battery_ioctl);
+ acpi_deregister_ioctl(ACPIIO_BATT_GET_BST, acpi_battery_ioctl);
+ }
+ return (error);
+}
diff --git a/sys/dev/acpica/acpi_button.c b/sys/dev/acpica/acpi_button.c
new file mode 100644
index 0000000..d1f774a
--- /dev/null
+++ b/sys/dev/acpica/acpi_button.c
@@ -0,0 +1,279 @@
+/*-
+ * Copyright (c) 2000 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
+ * Copyright (c) 2000 Michael Smith <msmith@freebsd.org>
+ * Copyright (c) 2000 BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_acpi.h"
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+
+#include <dev/acpica/acpivar.h>
+
+/* Hooks for the ACPI CA debugging infrastructure */
+#define _COMPONENT ACPI_BUTTON
+ACPI_MODULE_NAME("BUTTON")
+
+/* Per-button driver state. */
+struct acpi_button_softc {
+ device_t button_dev;
+ ACPI_HANDLE button_handle;
+ boolean_t button_type; /* which kind of button this is */
+#define ACPI_POWER_BUTTON 0
+#define ACPI_SLEEP_BUTTON 1
+ boolean_t fixed; /* uses a FADT fixed event, not AML notifies */
+};
+
+/* Notify codes delivered by button objects. */
+#define ACPI_NOTIFY_BUTTON_PRESSED_FOR_SLEEP 0x80
+#define ACPI_NOTIFY_BUTTON_PRESSED_FOR_WAKEUP 0x02
+
+static int acpi_button_probe(device_t dev);
+static int acpi_button_attach(device_t dev);
+static int acpi_button_suspend(device_t dev);
+static int acpi_button_resume(device_t dev);
+static void acpi_button_notify_handler(ACPI_HANDLE h, UINT32 notify,
+ void *context);
+static ACPI_STATUS
+ acpi_button_fixed_handler(void *context);
+static void acpi_button_notify_sleep(void *arg);
+static void acpi_button_notify_wakeup(void *arg);
+
+/* Namespace PNP ids plus the pseudo-ids used for FADT fixed buttons. */
+static char *btn_ids[] = {
+ "PNP0C0C", "ACPI_FPB", "PNP0C0E", "ACPI_FSB",
+ NULL
+};
+
+static device_method_t acpi_button_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, acpi_button_probe),
+ DEVMETHOD(device_attach, acpi_button_attach),
+ DEVMETHOD(device_suspend, acpi_button_suspend),
+ DEVMETHOD(device_shutdown, acpi_button_suspend),
+ DEVMETHOD(device_resume, acpi_button_resume),
+
+ {0, 0}
+};
+
+static driver_t acpi_button_driver = {
+ "acpi_button",
+ acpi_button_methods,
+ sizeof(struct acpi_button_softc),
+};
+
+static devclass_t acpi_button_devclass;
+DRIVER_MODULE(acpi_button, acpi, acpi_button_driver, acpi_button_devclass,
+ 0, 0);
+MODULE_DEPEND(acpi_button, acpi, 1, 1, 1);
+
+/*
+ * Match power/sleep button objects, covering both the namespace PNP
+ * ids and the pseudo-ids synthesized for FADT fixed-feature buttons.
+ */
+static int
+acpi_button_probe(device_t dev)
+{
+    struct acpi_button_softc *sc;
+    char *match;
+
+    if (acpi_disabled("button"))
+	return (ENXIO);
+    match = ACPI_ID_PROBE(device_get_parent(dev), dev, btn_ids);
+    if (match == NULL)
+	return (ENXIO);
+
+    sc = device_get_softc(dev);
+    if (strcmp(match, "PNP0C0C") == 0) {
+	device_set_desc(dev, "Power Button");
+	sc->button_type = ACPI_POWER_BUTTON;
+    } else if (strcmp(match, "ACPI_FPB") == 0) {
+	device_set_desc(dev, "Power Button (fixed)");
+	sc->button_type = ACPI_POWER_BUTTON;
+	sc->fixed = 1;
+    } else if (strcmp(match, "PNP0C0E") == 0) {
+	device_set_desc(dev, "Sleep Button");
+	sc->button_type = ACPI_SLEEP_BUTTON;
+    } else if (strcmp(match, "ACPI_FSB") == 0) {
+	device_set_desc(dev, "Sleep Button (fixed)");
+	sc->button_type = ACPI_SLEEP_BUTTON;
+	sc->fixed = 1;
+    }
+
+    return (0);
+}
+
+/*
+ * Attach: install either a fixed-event handler or an AML notify
+ * handler depending on the button flavor, then arm its wake GPE.
+ */
+static int
+acpi_button_attach(device_t dev)
+{
+ struct acpi_prw_data prw;
+ struct acpi_button_softc *sc;
+ ACPI_STATUS status;
+ int event;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ sc = device_get_softc(dev);
+ sc->button_dev = dev;
+ sc->button_handle = acpi_get_handle(dev);
+ event = (sc->button_type == ACPI_SLEEP_BUTTON) ?
+ ACPI_EVENT_SLEEP_BUTTON : ACPI_EVENT_POWER_BUTTON;
+
+ /*
+ * Install the new handler. We could remove any fixed handlers added
+ * from the FADT once we have a duplicate from the AML but some systems
+ * only return events on one or the other so we have to keep both.
+ */
+ if (sc->fixed) {
+ AcpiClearEvent(event);
+ status = AcpiInstallFixedEventHandler(event,
+ acpi_button_fixed_handler, sc);
+ } else {
+ /*
+ * If a system does not get lid events, it may make sense to change
+ * the type to ACPI_ALL_NOTIFY. Some systems generate both a wake
+ * and runtime notify in that case though.
+ */
+ status = AcpiInstallNotifyHandler(sc->button_handle,
+ ACPI_DEVICE_NOTIFY, acpi_button_notify_handler, sc);
+ }
+ if (ACPI_FAILURE(status)) {
+ device_printf(sc->button_dev, "couldn't install notify handler - %s\n",
+ AcpiFormatException(status));
+ return_VALUE (ENXIO);
+ }
+
+ /* Enable the GPE for wake/runtime. */
+ acpi_wake_set_enable(dev, 1);
+ if (acpi_parse_prw(sc->button_handle, &prw) == 0)
+ AcpiEnableGpe(prw.gpe_handle, prw.gpe_bit);
+
+ return_VALUE (0);
+}
+
+/* Nothing to do on suspend (also installed as the shutdown method). */
+static int
+acpi_button_suspend(device_t dev)
+{
+ return (0);
+}
+
+/* Nothing to do on resume. */
+static int
+acpi_button_resume(device_t dev)
+{
+ return (0);
+}
+
+/*
+ * Deferred task: a press while the system is awake.  Forwards the
+ * event to userland and to the matching acpi sleep-request hook.
+ */
+static void
+acpi_button_notify_sleep(void *arg)
+{
+    struct acpi_button_softc *sc = (struct acpi_button_softc *)arg;
+    struct acpi_softc *acpi_sc;
+
+    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+    acpi_sc = acpi_device_get_parent_softc(sc->button_dev);
+    if (acpi_sc == NULL)
+	return_VOID;
+
+    acpi_UserNotify("Button", sc->button_handle, sc->button_type);
+
+    if (sc->button_type == ACPI_POWER_BUTTON) {
+	ACPI_VPRINT(sc->button_dev, acpi_sc, "power button pressed\n");
+	acpi_event_power_button_sleep(acpi_sc);
+    } else if (sc->button_type == ACPI_SLEEP_BUTTON) {
+	ACPI_VPRINT(sc->button_dev, acpi_sc, "sleep button pressed\n");
+	acpi_event_sleep_button_sleep(acpi_sc);
+    }
+    /* Any other button type is silently ignored. */
+}
+
+/*
+ * Deferred task: a press that woke the system.  Forwards the event to
+ * userland and to the matching acpi wake-event hook.
+ */
+static void
+acpi_button_notify_wakeup(void *arg)
+{
+    struct acpi_button_softc *sc = (struct acpi_button_softc *)arg;
+    struct acpi_softc *acpi_sc;
+
+    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+    acpi_sc = acpi_device_get_parent_softc(sc->button_dev);
+    if (acpi_sc == NULL)
+	return_VOID;
+
+    acpi_UserNotify("Button", sc->button_handle, sc->button_type);
+
+    if (sc->button_type == ACPI_POWER_BUTTON) {
+	ACPI_VPRINT(sc->button_dev, acpi_sc, "wakeup by power button\n");
+	acpi_event_power_button_wake(acpi_sc);
+    } else if (sc->button_type == ACPI_SLEEP_BUTTON) {
+	ACPI_VPRINT(sc->button_dev, acpi_sc, "wakeup by sleep button\n");
+	acpi_event_sleep_button_wake(acpi_sc);
+    }
+    /* Any other button type is silently ignored. */
+}
+
+/*
+ * AML notify callback: queue the sleep or wake task depending on the
+ * notify code; log anything unexpected.
+ */
+static void
+acpi_button_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context)
+{
+    struct acpi_button_softc *sc = (struct acpi_button_softc *)context;
+
+    ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, notify);
+
+    if (notify == ACPI_NOTIFY_BUTTON_PRESSED_FOR_SLEEP)
+	AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_button_notify_sleep, sc);
+    else if (notify == ACPI_NOTIFY_BUTTON_PRESSED_FOR_WAKEUP)
+	AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_button_notify_wakeup, sc);
+    else
+	device_printf(sc->button_dev, "unknown notify %#x\n", notify);
+}
+
+/*
+ * Fixed-event callback for FADT buttons.  Fixed events carry no notify
+ * code, so funnel them through the notify handler as a sleep press.
+ */
+static ACPI_STATUS
+acpi_button_fixed_handler(void *context)
+{
+    struct acpi_button_softc *sc = (struct acpi_button_softc *)context;
+
+    ACPI_FUNCTION_TRACE_PTR((char *)(uintptr_t)__func__, context);
+
+    if (sc == NULL)
+	return_ACPI_STATUS (AE_BAD_PARAMETER);
+
+    acpi_button_notify_handler(sc->button_handle,
+	ACPI_NOTIFY_BUTTON_PRESSED_FOR_SLEEP, sc);
+    return_ACPI_STATUS (AE_OK);
+}
diff --git a/sys/dev/acpica/acpi_cmbat.c b/sys/dev/acpica/acpi_cmbat.c
new file mode 100644
index 0000000..ba44da8
--- /dev/null
+++ b/sys/dev/acpica/acpi_cmbat.c
@@ -0,0 +1,483 @@
+/*-
+ * Copyright (c) 2005 Nate Lawson
+ * Copyright (c) 2000 Munehiro Matsuda
+ * Copyright (c) 2000 Takanori Watanabe
+ * Copyright (c) 2000 Mitsuru IWASAKI <iwasaki@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_acpi.h"
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/ioccom.h>
+
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <sys/malloc.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+
+#include <dev/acpica/acpivar.h>
+#include <dev/acpica/acpiio.h>
+
+static MALLOC_DEFINE(M_ACPICMBAT, "acpicmbat",
+ "ACPI control method battery data");
+
+/* Number of times to retry initialization before giving up. */
+#define ACPI_CMBAT_RETRY_MAX 6
+
+/* Check the battery once a minute. */
+#define CMBAT_POLLRATE (60 * hz)
+
+/* Hooks for the ACPI CA debugging infrastructure */
+#define _COMPONENT ACPI_BATTERY
+ACPI_MODULE_NAME("BATTERY")
+
+#define ACPI_BATTERY_BST_CHANGE 0x80
+#define ACPI_BATTERY_BIF_CHANGE 0x81
+
+/* Per-device state for one control-method battery. */
+struct acpi_cmbat_softc {
+ device_t dev; /* our newbus device */
+ int flags; /* latches ACPI_BATT_STAT_CRITICAL */
+
+ struct acpi_bif bif; /* cached battery info (_BIF) */
+ struct acpi_bst bst; /* cached battery status (_BST) */
+ struct timespec bst_lastupdated; /* last time bst was refreshed */
+};
+
+ACPI_SERIAL_DECL(cmbat, "ACPI cmbat");
+
+static int acpi_cmbat_probe(device_t dev);
+static int acpi_cmbat_attach(device_t dev);
+static int acpi_cmbat_detach(device_t dev);
+static int acpi_cmbat_resume(device_t dev);
+static void acpi_cmbat_notify_handler(ACPI_HANDLE h, UINT32 notify,
+ void *context);
+static int acpi_cmbat_info_expired(struct timespec *lastupdated);
+static void acpi_cmbat_info_updated(struct timespec *lastupdated);
+static void acpi_cmbat_get_bst(void *arg);
+static void acpi_cmbat_get_bif_task(void *arg);
+static void acpi_cmbat_get_bif(void *arg);
+static int acpi_cmbat_bst(device_t dev, struct acpi_bst *bstp);
+static int acpi_cmbat_bif(device_t dev, struct acpi_bif *bifp);
+static void acpi_cmbat_init_battery(void *arg);
+
+static device_method_t acpi_cmbat_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, acpi_cmbat_probe),
+ DEVMETHOD(device_attach, acpi_cmbat_attach),
+ DEVMETHOD(device_detach, acpi_cmbat_detach),
+ DEVMETHOD(device_resume, acpi_cmbat_resume),
+
+ /* ACPI battery interface */
+ DEVMETHOD(acpi_batt_get_info, acpi_cmbat_bif),
+ DEVMETHOD(acpi_batt_get_status, acpi_cmbat_bst),
+
+ {0, 0}
+};
+
+static driver_t acpi_cmbat_driver = {
+ "battery",
+ acpi_cmbat_methods,
+ sizeof(struct acpi_cmbat_softc),
+};
+
+static devclass_t acpi_cmbat_devclass;
+DRIVER_MODULE(acpi_cmbat, acpi, acpi_cmbat_driver, acpi_cmbat_devclass, 0, 0);
+MODULE_DEPEND(acpi_cmbat, acpi, 1, 1, 1);
+
+/*
+ * Probe: match the standard control-method battery PNP ID unless the
+ * user disabled this driver via the debug.acpi.disabled tunable.
+ */
+static int
+acpi_cmbat_probe(device_t dev)
+{
+ static char *cmbat_ids[] = { "PNP0C0A", NULL };
+
+ if (acpi_disabled("cmbat") ||
+ ACPI_ID_PROBE(device_get_parent(dev), dev, cmbat_ids) == NULL)
+ return (ENXIO);
+
+ device_set_desc(dev, "ACPI Control Method Battery");
+ return (0);
+}
+
+/*
+ * Attach: register with the generic battery layer, hook device notifies,
+ * and kick off battery initialization from a task since the embedded
+ * controller may not be ready yet at attach time.
+ */
+static int
+acpi_cmbat_attach(device_t dev)
+{
+ int error;
+ ACPI_HANDLE handle;
+ struct acpi_cmbat_softc *sc;
+
+ sc = device_get_softc(dev);
+ handle = acpi_get_handle(dev);
+ sc->dev = dev;
+
+ /* Force the first _BST evaluation to treat the cache as stale. */
+ timespecclear(&sc->bst_lastupdated);
+
+ error = acpi_battery_register(dev);
+ if (error != 0) {
+ device_printf(dev, "registering battery failed\n");
+ return (error);
+ }
+
+ /*
+ * Install a system notify handler in addition to the device notify.
+ * Toshiba notebook uses this alternate notify for its battery.
+ */
+ AcpiInstallNotifyHandler(handle, ACPI_ALL_NOTIFY,
+ acpi_cmbat_notify_handler, dev);
+
+ AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cmbat_init_battery, dev);
+
+ return (0);
+}
+
+/*
+ * Detach: undo attach in reverse order — remove the notify handler first
+ * so no new events arrive, then deregister from the battery layer.
+ */
+static int
+acpi_cmbat_detach(device_t dev)
+{
+ ACPI_HANDLE handle;
+
+ handle = acpi_get_handle(dev);
+ AcpiRemoveNotifyHandler(handle, ACPI_ALL_NOTIFY, acpi_cmbat_notify_handler);
+ acpi_battery_remove(dev);
+ return (0);
+}
+
+/*
+ * Resume: re-run battery initialization from a task; cached _BIF/_BST
+ * data may be stale after a suspend/resume cycle (battery swapped, etc.).
+ */
+static int
+acpi_cmbat_resume(device_t dev)
+{
+
+ AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cmbat_init_battery, dev);
+ return (0);
+}
+
+/*
+ * Notify callback: invalidate or refresh the relevant cache depending on
+ * whether the event indicates a status (_BST) or info (_BIF) change, then
+ * forward the raw event to userland.
+ */
+static void
+acpi_cmbat_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context)
+{
+ struct acpi_cmbat_softc *sc;
+ device_t dev;
+
+ dev = (device_t)context;
+ sc = device_get_softc(dev);
+
+ switch (notify) {
+ case ACPI_NOTIFY_DEVICE_CHECK:
+ case ACPI_BATTERY_BST_CHANGE:
+ /*
+ * Clear the last updated time. The next call to retrieve the
+ * battery status will get the new value for us.
+ */
+ timespecclear(&sc->bst_lastupdated);
+ break;
+ case ACPI_NOTIFY_BUS_CHECK:
+ case ACPI_BATTERY_BIF_CHANGE:
+ /*
+ * Queue a callback to get the current battery info from thread
+ * context. It's not safe to block in a notify handler.
+ */
+ AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cmbat_get_bif_task, dev);
+ break;
+ }
+
+ acpi_UserNotify("CMBAT", h, notify);
+}
+
+/*
+ * Return TRUE when the cached status is stale: never set, older than the
+ * battery layer's expiry interval, or apparently from the future (the
+ * tv_sec < 0 check guards against the clock stepping backwards).
+ * NOTE(review): relies on the two-argument in-place timespecsub() —
+ * confirm against the tree's sys/time.h before reuse.
+ */
+static int
+acpi_cmbat_info_expired(struct timespec *lastupdated)
+{
+ struct timespec curtime;
+
+ ACPI_SERIAL_ASSERT(cmbat);
+
+ if (lastupdated == NULL)
+ return (TRUE);
+ if (!timespecisset(lastupdated))
+ return (TRUE);
+
+ getnanotime(&curtime);
+ timespecsub(&curtime, lastupdated);
+ return (curtime.tv_sec < 0 ||
+ curtime.tv_sec > acpi_battery_get_info_expire());
+}
+
+/* Stamp *lastupdated with the current time; pairs with _info_expired(). */
+static void
+acpi_cmbat_info_updated(struct timespec *lastupdated)
+{
+
+ ACPI_SERIAL_ASSERT(cmbat);
+
+ if (lastupdated != NULL)
+ getnanotime(lastupdated);
+}
+
+/*
+ * Refresh the cached _BST (battery status) if the cache has expired.
+ * Must be called with the cmbat serialization lock held.  On any parse
+ * or evaluation failure the stale cache is simply kept.
+ */
+static void
+acpi_cmbat_get_bst(void *arg)
+{
+ struct acpi_cmbat_softc *sc;
+ ACPI_STATUS as;
+ ACPI_OBJECT *res;
+ ACPI_HANDLE h;
+ ACPI_BUFFER bst_buffer;
+ device_t dev;
+
+ ACPI_SERIAL_ASSERT(cmbat);
+
+ dev = arg;
+ sc = device_get_softc(dev);
+ h = acpi_get_handle(dev);
+ bst_buffer.Pointer = NULL;
+ bst_buffer.Length = ACPI_ALLOCATE_BUFFER;
+
+ /* Cache still fresh: nothing to do. */
+ if (!acpi_cmbat_info_expired(&sc->bst_lastupdated))
+ goto end;
+
+ as = AcpiEvaluateObject(h, "_BST", NULL, &bst_buffer);
+ if (ACPI_FAILURE(as)) {
+ ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
+ "error fetching current battery status -- %s\n",
+ AcpiFormatException(as));
+ goto end;
+ }
+
+ /* _BST is a package of exactly 4 integers: state, rate, cap, volt. */
+ res = (ACPI_OBJECT *)bst_buffer.Pointer;
+ if (!ACPI_PKG_VALID(res, 4)) {
+ ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
+ "battery status corrupted\n");
+ goto end;
+ }
+
+ if (acpi_PkgInt32(res, 0, &sc->bst.state) != 0)
+ goto end;
+ if (acpi_PkgInt32(res, 1, &sc->bst.rate) != 0)
+ goto end;
+ if (acpi_PkgInt32(res, 2, &sc->bst.cap) != 0)
+ goto end;
+ if (acpi_PkgInt32(res, 3, &sc->bst.volt) != 0)
+ goto end;
+ acpi_cmbat_info_updated(&sc->bst_lastupdated);
+
+ /* Clear out undefined/extended bits that might be set by hardware. */
+ sc->bst.state &= ACPI_BATT_STAT_BST_MASK;
+ if ((sc->bst.state & ACPI_BATT_STAT_INVALID) == ACPI_BATT_STAT_INVALID)
+ ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
+ "battery reports simultaneous charging and discharging\n");
+
+ /* XXX If all batteries are critical, perhaps we should suspend. */
+ if (sc->bst.state & ACPI_BATT_STAT_CRITICAL) {
+ /* Latch the flag so the warning prints only once per episode. */
+ if ((sc->flags & ACPI_BATT_STAT_CRITICAL) == 0) {
+ sc->flags |= ACPI_BATT_STAT_CRITICAL;
+ device_printf(dev, "critically low charge!\n");
+ }
+ } else
+ sc->flags &= ~ACPI_BATT_STAT_CRITICAL;
+
+end:
+ if (bst_buffer.Pointer != NULL)
+ AcpiOsFree(bst_buffer.Pointer);
+}
+
+/* XXX There should be a cleaner way to do this locking. */
+/*
+ * Task wrapper: acquire the cmbat lock around acpi_cmbat_get_bif() so it
+ * can be queued from the (non-blocking) notify handler.
+ */
+static void
+acpi_cmbat_get_bif_task(void *arg)
+{
+
+ ACPI_SERIAL_BEGIN(cmbat);
+ acpi_cmbat_get_bif(arg);
+ ACPI_SERIAL_END(cmbat);
+}
+
+/*
+ * Refresh the cached _BIF (static battery info).  Must be called with the
+ * cmbat serialization lock held.  _BIF is a 13-element package: nine
+ * integers followed by four strings; a parse failure at any element
+ * leaves the remaining cached fields untouched.
+ */
+static void
+acpi_cmbat_get_bif(void *arg)
+{
+ struct acpi_cmbat_softc *sc;
+ ACPI_STATUS as;
+ ACPI_OBJECT *res;
+ ACPI_HANDLE h;
+ ACPI_BUFFER bif_buffer;
+ device_t dev;
+
+ ACPI_SERIAL_ASSERT(cmbat);
+
+ dev = arg;
+ sc = device_get_softc(dev);
+ h = acpi_get_handle(dev);
+ bif_buffer.Pointer = NULL;
+ bif_buffer.Length = ACPI_ALLOCATE_BUFFER;
+
+ as = AcpiEvaluateObject(h, "_BIF", NULL, &bif_buffer);
+ if (ACPI_FAILURE(as)) {
+ ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
+ "error fetching current battery info -- %s\n",
+ AcpiFormatException(as));
+ goto end;
+ }
+
+ res = (ACPI_OBJECT *)bif_buffer.Pointer;
+ if (!ACPI_PKG_VALID(res, 13)) {
+ ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
+ "battery info corrupted\n");
+ goto end;
+ }
+
+ if (acpi_PkgInt32(res, 0, &sc->bif.units) != 0)
+ goto end;
+ if (acpi_PkgInt32(res, 1, &sc->bif.dcap) != 0)
+ goto end;
+ if (acpi_PkgInt32(res, 2, &sc->bif.lfcap) != 0)
+ goto end;
+ if (acpi_PkgInt32(res, 3, &sc->bif.btech) != 0)
+ goto end;
+ if (acpi_PkgInt32(res, 4, &sc->bif.dvol) != 0)
+ goto end;
+ if (acpi_PkgInt32(res, 5, &sc->bif.wcap) != 0)
+ goto end;
+ if (acpi_PkgInt32(res, 6, &sc->bif.lcap) != 0)
+ goto end;
+ if (acpi_PkgInt32(res, 7, &sc->bif.gra1) != 0)
+ goto end;
+ if (acpi_PkgInt32(res, 8, &sc->bif.gra2) != 0)
+ goto end;
+ if (acpi_PkgStr(res, 9, sc->bif.model, ACPI_CMBAT_MAXSTRLEN) != 0)
+ goto end;
+ if (acpi_PkgStr(res, 10, sc->bif.serial, ACPI_CMBAT_MAXSTRLEN) != 0)
+ goto end;
+ if (acpi_PkgStr(res, 11, sc->bif.type, ACPI_CMBAT_MAXSTRLEN) != 0)
+ goto end;
+ if (acpi_PkgStr(res, 12, sc->bif.oeminfo, ACPI_CMBAT_MAXSTRLEN) != 0)
+ goto end;
+
+end:
+ if (bif_buffer.Pointer != NULL)
+ AcpiOsFree(bif_buffer.Pointer);
+}
+
+/*
+ * ACPI_BATT_GET_INFO method: copy the cached battery info into the
+ * caller's buffer under the cmbat serialization lock.
+ *
+ * Just copy the data. The only value that should change is the
+ * last-full capacity, so we only update when we get a notify that says
+ * the info has changed. Many systems apparently take a long time to
+ * process a _BIF call so we avoid it if possible.
+ */
+static int
+acpi_cmbat_bif(device_t dev, struct acpi_bif *bifp)
+{
+ struct acpi_cmbat_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ ACPI_SERIAL_BEGIN(cmbat);
+ bifp->units = sc->bif.units;
+ bifp->dcap = sc->bif.dcap;
+ bifp->lfcap = sc->bif.lfcap;
+ bifp->btech = sc->bif.btech;
+ bifp->dvol = sc->bif.dvol;
+ bifp->wcap = sc->bif.wcap;
+ bifp->lcap = sc->bif.lcap;
+ bifp->gra1 = sc->bif.gra1;
+ bifp->gra2 = sc->bif.gra2;
+ /*
+ * strncpy() does not NUL-terminate the destination when the source
+ * fills the whole buffer, so terminate explicitly to guarantee the
+ * caller always receives valid C strings.
+ */
+ strncpy(bifp->model, sc->bif.model, sizeof(sc->bif.model));
+ bifp->model[sizeof(sc->bif.model) - 1] = '\0';
+ strncpy(bifp->serial, sc->bif.serial, sizeof(sc->bif.serial));
+ bifp->serial[sizeof(sc->bif.serial) - 1] = '\0';
+ strncpy(bifp->type, sc->bif.type, sizeof(sc->bif.type));
+ bifp->type[sizeof(sc->bif.type) - 1] = '\0';
+ strncpy(bifp->oeminfo, sc->bif.oeminfo, sizeof(sc->bif.oeminfo));
+ bifp->oeminfo[sizeof(sc->bif.oeminfo) - 1] = '\0';
+ ACPI_SERIAL_END(cmbat);
+
+ return (0);
+}
+
+/*
+ * ACPI_BATT_GET_STATUS method: refresh (if stale) and copy the cached
+ * _BST data, or report the battery as absent.  Serialized with all other
+ * cmbat cache accesses.
+ */
+static int
+acpi_cmbat_bst(device_t dev, struct acpi_bst *bstp)
+{
+ struct acpi_cmbat_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ ACPI_SERIAL_BEGIN(cmbat);
+ if (acpi_BatteryIsPresent(dev)) {
+ acpi_cmbat_get_bst(dev);
+ bstp->state = sc->bst.state;
+ bstp->rate = sc->bst.rate;
+ bstp->cap = sc->bst.cap;
+ bstp->volt = sc->bst.volt;
+ } else
+ bstp->state = ACPI_BATT_STAT_NOT_PRESENT;
+ ACPI_SERIAL_END(cmbat);
+
+ return (0);
+}
+
+/*
+ * Task run at attach/resume time: poll the battery until both _BST and
+ * _BIF return valid data, retrying up to ACPI_CMBAT_RETRY_MAX times with
+ * a 10 ms AcpiOsSleep() between attempts.
+ */
+static void
+acpi_cmbat_init_battery(void *arg)
+{
+ struct acpi_cmbat_softc *sc;
+ int retry, valid;
+ device_t dev;
+
+ dev = (device_t)arg;
+ sc = device_get_softc(dev);
+ ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
+ "battery initialization start\n");
+
+ /*
+ * Try repeatedly to get valid data from the battery. Since the
+ * embedded controller isn't always ready just after boot, we may have
+ * to wait a while.
+ */
+ for (retry = 0; retry < ACPI_CMBAT_RETRY_MAX; retry++, AcpiOsSleep(10000)) {
+ /* batteries on DOCK can be ejected w/ DOCK during retrying */
+ if (!device_is_attached(dev))
+ return;
+
+ if (!acpi_BatteryIsPresent(dev))
+ continue;
+
+ /*
+ * Only query the battery if this is the first try or the specific
+ * type of info is still invalid.
+ */
+ ACPI_SERIAL_BEGIN(cmbat);
+ if (retry == 0 || !acpi_battery_bst_valid(&sc->bst)) {
+ timespecclear(&sc->bst_lastupdated);
+ acpi_cmbat_get_bst(dev);
+ }
+ if (retry == 0 || !acpi_battery_bif_valid(&sc->bif))
+ acpi_cmbat_get_bif(dev);
+
+ valid = acpi_battery_bst_valid(&sc->bst) &&
+ acpi_battery_bif_valid(&sc->bif);
+ ACPI_SERIAL_END(cmbat);
+
+ if (valid)
+ break;
+ }
+
+ if (retry == ACPI_CMBAT_RETRY_MAX) {
+ ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
+ "battery initialization failed, giving up\n");
+ } else {
+ ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
+ "battery initialization done, tried %d times\n", retry + 1);
+ }
+}
diff --git a/sys/dev/acpica/acpi_cpu.c b/sys/dev/acpica/acpi_cpu.c
new file mode 100644
index 0000000..d35a526
--- /dev/null
+++ b/sys/dev/acpica/acpi_cpu.c
@@ -0,0 +1,1275 @@
+/*-
+ * Copyright (c) 2003-2005 Nate Lawson (SDG)
+ * Copyright (c) 2001 Michael Smith
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_acpi.h"
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/cpu.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/pcpu.h>
+#include <sys/power.h>
+#include <sys/proc.h>
+#include <sys/sbuf.h>
+#include <sys/smp.h>
+
+#include <dev/pci/pcivar.h>
+#include <machine/atomic.h>
+#include <machine/bus.h>
+#if defined(__amd64__) || defined(__i386__)
+#include <machine/clock.h>
+#endif
+#include <sys/rman.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+
+#include <dev/acpica/acpivar.h>
+
+/*
+ * Support for ACPI Processor devices, including C[1-3] sleep states.
+ */
+
+/* Hooks for the ACPI CA debugging infrastructure */
+#define _COMPONENT ACPI_PROCESSOR
+ACPI_MODULE_NAME("PROCESSOR")
+
+struct acpi_cx {
+ struct resource *p_lvlx; /* Register to read to enter state. */
+ uint32_t type; /* C1-3 (C4 and up treated as C3). */
+ uint32_t trans_lat; /* Transition latency (usec). */
+ uint32_t power; /* Power consumed (mW). */
+ int res_type; /* Resource type for p_lvlx. */
+};
+#define MAX_CX_STATES 8
+
+struct acpi_cpu_softc {
+ device_t cpu_dev;
+ ACPI_HANDLE cpu_handle;
+ struct pcpu *cpu_pcpu;
+ uint32_t cpu_acpi_id; /* ACPI processor id */
+ uint32_t cpu_p_blk; /* ACPI P_BLK location */
+ uint32_t cpu_p_blk_len; /* P_BLK length (must be 6). */
+ struct acpi_cx cpu_cx_states[MAX_CX_STATES];
+ int cpu_cx_count; /* Number of valid Cx states. */
+ int cpu_prev_sleep;/* Last idle sleep duration. */
+ int cpu_features; /* Child driver supported features. */
+ /* Runtime state. */
+ int cpu_non_c3; /* Index of lowest non-C3 state. */
+ u_int cpu_cx_stats[MAX_CX_STATES];/* Cx usage history. */
+ /* Values for sysctl. */
+ struct sysctl_ctx_list cpu_sysctl_ctx;
+ struct sysctl_oid *cpu_sysctl_tree;
+ int cpu_cx_lowest;
+ int cpu_cx_lowest_lim;
+ char cpu_cx_supported[64];
+ int cpu_rid;
+};
+
+struct acpi_cpu_device {
+ struct resource_list ad_rl;
+};
+
+#define CPU_GET_REG(reg, width) \
+ (bus_space_read_ ## width(rman_get_bustag((reg)), \
+ rman_get_bushandle((reg)), 0))
+#define CPU_SET_REG(reg, width, val) \
+ (bus_space_write_ ## width(rman_get_bustag((reg)), \
+ rman_get_bushandle((reg)), 0, (val)))
+
+#define PM_USEC(x) ((x) >> 2) /* ~4 clocks per usec (3.57955 Mhz) */
+
+#define ACPI_NOTIFY_CX_STATES 0x81 /* _CST changed. */
+
+#define CPU_QUIRK_NO_C3 (1<<0) /* C3-type states are not usable. */
+#define CPU_QUIRK_NO_BM_CTRL (1<<2) /* No bus mastering control. */
+
+#define PCI_VENDOR_INTEL 0x8086
+#define PCI_DEVICE_82371AB_3 0x7113 /* PIIX4 chipset for quirks. */
+#define PCI_REVISION_A_STEP 0
+#define PCI_REVISION_B_STEP 1
+#define PCI_REVISION_4E 2
+#define PCI_REVISION_4M 3
+#define PIIX4_DEVACTB_REG 0x58
+#define PIIX4_BRLD_EN_IRQ0 (1<<0)
+#define PIIX4_BRLD_EN_IRQ (1<<1)
+#define PIIX4_BRLD_EN_IRQ8 (1<<5)
+#define PIIX4_STOP_BREAK_MASK (PIIX4_BRLD_EN_IRQ0 | PIIX4_BRLD_EN_IRQ | PIIX4_BRLD_EN_IRQ8)
+#define PIIX4_PCNTRL_BST_EN (1<<10)
+
+/* Allow users to ignore processor orders in MADT. */
+static int cpu_unordered;
+TUNABLE_INT("debug.acpi.cpu_unordered", &cpu_unordered);
+SYSCTL_INT(_debug_acpi, OID_AUTO, cpu_unordered, CTLFLAG_RDTUN,
+ &cpu_unordered, 0,
+ "Do not use the MADT to match ACPI Processor objects to CPUs.");
+
+/* Platform hardware resource information. */
+static uint32_t cpu_smi_cmd; /* Value to write to SMI_CMD. */
+static uint8_t cpu_cst_cnt; /* Indicate we are _CST aware. */
+static int cpu_quirks; /* Indicate any hardware bugs. */
+
+/* Runtime state. */
+static int cpu_disable_idle; /* Disable entry to idle function */
+
+/* Values for sysctl. */
+static struct sysctl_ctx_list cpu_sysctl_ctx;
+static struct sysctl_oid *cpu_sysctl_tree;
+static int cpu_cx_generic;
+static int cpu_cx_lowest_lim;
+
+static device_t *cpu_devices;
+static int cpu_ndevices;
+static struct acpi_cpu_softc **cpu_softc;
+ACPI_SERIAL_DECL(cpu, "ACPI CPU");
+
+static int acpi_cpu_probe(device_t dev);
+static int acpi_cpu_attach(device_t dev);
+static int acpi_cpu_suspend(device_t dev);
+static int acpi_cpu_resume(device_t dev);
+static int acpi_pcpu_get_id(device_t dev, uint32_t *acpi_id,
+ uint32_t *cpu_id);
+static struct resource_list *acpi_cpu_get_rlist(device_t dev, device_t child);
+static device_t acpi_cpu_add_child(device_t dev, u_int order, const char *name,
+ int unit);
+static int acpi_cpu_read_ivar(device_t dev, device_t child, int index,
+ uintptr_t *result);
+static int acpi_cpu_shutdown(device_t dev);
+static void acpi_cpu_cx_probe(struct acpi_cpu_softc *sc);
+static void acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc);
+static int acpi_cpu_cx_cst(struct acpi_cpu_softc *sc);
+static void acpi_cpu_startup(void *arg);
+static void acpi_cpu_startup_cx(struct acpi_cpu_softc *sc);
+static void acpi_cpu_cx_list(struct acpi_cpu_softc *sc);
+static void acpi_cpu_idle(void);
+static void acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context);
+static int acpi_cpu_quirks(void);
+static int acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS);
+static int acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc);
+static int acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
+static int acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
+
+static device_method_t acpi_cpu_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, acpi_cpu_probe),
+ DEVMETHOD(device_attach, acpi_cpu_attach),
+ DEVMETHOD(device_detach, bus_generic_detach),
+ DEVMETHOD(device_shutdown, acpi_cpu_shutdown),
+ DEVMETHOD(device_suspend, acpi_cpu_suspend),
+ DEVMETHOD(device_resume, acpi_cpu_resume),
+
+ /* Bus interface */
+ DEVMETHOD(bus_add_child, acpi_cpu_add_child),
+ DEVMETHOD(bus_read_ivar, acpi_cpu_read_ivar),
+ DEVMETHOD(bus_get_resource_list, acpi_cpu_get_rlist),
+ DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
+ DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
+ DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource),
+ DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
+ DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
+ DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
+ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+
+ DEVMETHOD_END
+};
+
+static driver_t acpi_cpu_driver = {
+ "cpu",
+ acpi_cpu_methods,
+ sizeof(struct acpi_cpu_softc),
+};
+
+static devclass_t acpi_cpu_devclass;
+DRIVER_MODULE(cpu, acpi, acpi_cpu_driver, acpi_cpu_devclass, 0, 0);
+MODULE_DEPEND(cpu, acpi, 1, 1, 1);
+
+/*
+ * Probe: match ACPI Processor objects, map the ACPI processor ID to a
+ * kernel CPU ID, and claim the slot in cpu_softc[] so a second bus scan
+ * does not probe the same processor twice.
+ */
+static int
+acpi_cpu_probe(device_t dev)
+{
+ int acpi_id, cpu_id;
+ ACPI_BUFFER buf;
+ ACPI_HANDLE handle;
+ ACPI_OBJECT *obj;
+ ACPI_STATUS status;
+
+ if (acpi_disabled("cpu") || acpi_get_type(dev) != ACPI_TYPE_PROCESSOR)
+ return (ENXIO);
+
+ handle = acpi_get_handle(dev);
+ /* Lazily allocate the softc pointer table on the first probe. */
+ if (cpu_softc == NULL)
+ cpu_softc = malloc(sizeof(struct acpi_cpu_softc *) *
+ (mp_maxid + 1), M_TEMP /* XXX */, M_WAITOK | M_ZERO);
+
+ /* Get our Processor object. */
+ buf.Pointer = NULL;
+ buf.Length = ACPI_ALLOCATE_BUFFER;
+ status = AcpiEvaluateObject(handle, NULL, NULL, &buf);
+ if (ACPI_FAILURE(status)) {
+ device_printf(dev, "probe failed to get Processor obj - %s\n",
+ AcpiFormatException(status));
+ return (ENXIO);
+ }
+ obj = (ACPI_OBJECT *)buf.Pointer;
+ if (obj->Type != ACPI_TYPE_PROCESSOR) {
+ device_printf(dev, "Processor object has bad type %d\n", obj->Type);
+ AcpiOsFree(obj);
+ return (ENXIO);
+ }
+
+ /*
+ * Find the processor associated with our unit. We could use the
+ * ProcId as a key, however, some boxes do not have the same values
+ * in their Processor object as the ProcId values in the MADT.
+ */
+ acpi_id = obj->Processor.ProcId;
+ AcpiOsFree(obj);
+ if (acpi_pcpu_get_id(dev, &acpi_id, &cpu_id) != 0)
+ return (ENXIO);
+
+ /*
+ * Check if we already probed this processor. We scan the bus twice
+ * so it's possible we've already seen this one.
+ */
+ if (cpu_softc[cpu_id] != NULL)
+ return (ENXIO);
+
+ /* Mark this processor as in-use and save our derived id for attach. */
+ cpu_softc[cpu_id] = (void *)1;
+ acpi_set_private(dev, (void*)(intptr_t)cpu_id);
+ device_set_desc(dev, "ACPI CPU");
+
+ return (0);
+}
+
+/*
+ * Attach: bind the softc to its pcpu, read P_BLK data from the Processor
+ * object, set up the shared hw.acpi.cpu sysctl tree on the first unit,
+ * advertise driver capabilities to the firmware via _OSC (falling back
+ * to the legacy _PDC), and probe Cx sleep-state support.
+ */
+static int
+acpi_cpu_attach(device_t dev)
+{
+ ACPI_BUFFER buf;
+ ACPI_OBJECT arg[4], *obj;
+ ACPI_OBJECT_LIST arglist;
+ struct pcpu *pcpu_data;
+ struct acpi_cpu_softc *sc;
+ struct acpi_softc *acpi_sc;
+ ACPI_STATUS status;
+ u_int features;
+ int cpu_id, drv_count, i;
+ driver_t **drivers;
+ uint32_t cap_set[3];
+
+ /* UUID needed by _OSC evaluation */
+ static uint8_t cpu_oscuuid[16] = { 0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29,
+ 0xBE, 0x47, 0x9E, 0xBD, 0xD8, 0x70,
+ 0x58, 0x71, 0x39, 0x53 };
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ sc = device_get_softc(dev);
+ sc->cpu_dev = dev;
+ sc->cpu_handle = acpi_get_handle(dev);
+ /* cpu_id was derived and stashed by acpi_cpu_probe(). */
+ cpu_id = (int)(intptr_t)acpi_get_private(dev);
+ cpu_softc[cpu_id] = sc;
+ pcpu_data = pcpu_find(cpu_id);
+ pcpu_data->pc_device = dev;
+ sc->cpu_pcpu = pcpu_data;
+ cpu_smi_cmd = AcpiGbl_FADT.SmiCommand;
+ cpu_cst_cnt = AcpiGbl_FADT.CstControl;
+
+ buf.Pointer = NULL;
+ buf.Length = ACPI_ALLOCATE_BUFFER;
+ status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
+ if (ACPI_FAILURE(status)) {
+ device_printf(dev, "attach failed to get Processor obj - %s\n",
+ AcpiFormatException(status));
+ return (ENXIO);
+ }
+ obj = (ACPI_OBJECT *)buf.Pointer;
+ sc->cpu_p_blk = obj->Processor.PblkAddress;
+ sc->cpu_p_blk_len = obj->Processor.PblkLength;
+ sc->cpu_acpi_id = obj->Processor.ProcId;
+ AcpiOsFree(obj);
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n",
+ device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len));
+
+ /*
+ * If this is the first cpu we attach, create and initialize the generic
+ * resources that will be used by all acpi cpu devices.
+ */
+ if (device_get_unit(dev) == 0) {
+ /* Assume we won't be using generic Cx mode by default */
+ cpu_cx_generic = FALSE;
+
+ /* Install hw.acpi.cpu sysctl tree */
+ acpi_sc = acpi_device_get_parent_softc(dev);
+ sysctl_ctx_init(&cpu_sysctl_ctx);
+ cpu_sysctl_tree = SYSCTL_ADD_NODE(&cpu_sysctl_ctx,
+ SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree), OID_AUTO, "cpu",
+ CTLFLAG_RD, 0, "node for CPU children");
+
+ /* Queue post cpu-probing task handler */
+ AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cpu_startup, NULL);
+ }
+
+ /*
+ * Before calling any CPU methods, collect child driver feature hints
+ * and notify ACPI of them. We support unified SMP power control
+ * so advertise this ourselves. Note this is not the same as independent
+ * SMP control where each CPU can have different settings.
+ */
+ sc->cpu_features = ACPI_CAP_SMP_SAME | ACPI_CAP_SMP_SAME_C3;
+ if (devclass_get_drivers(acpi_cpu_devclass, &drivers, &drv_count) == 0) {
+ for (i = 0; i < drv_count; i++) {
+ if (ACPI_GET_FEATURES(drivers[i], &features) == 0)
+ sc->cpu_features |= features;
+ }
+ free(drivers, M_TEMP);
+ }
+
+ /*
+ * CPU capabilities are specified in
+ * Intel Processor Vendor-Specific ACPI Interface Specification.
+ */
+ if (sc->cpu_features) {
+ arglist.Pointer = arg;
+ arglist.Count = 4;
+ arg[0].Type = ACPI_TYPE_BUFFER;
+ arg[0].Buffer.Length = sizeof(cpu_oscuuid);
+ arg[0].Buffer.Pointer = cpu_oscuuid; /* UUID */
+ arg[1].Type = ACPI_TYPE_INTEGER;
+ arg[1].Integer.Value = 1; /* revision */
+ arg[2].Type = ACPI_TYPE_INTEGER;
+ arg[2].Integer.Value = 1; /* count */
+ arg[3].Type = ACPI_TYPE_BUFFER;
+ arg[3].Buffer.Length = sizeof(cap_set); /* Capabilities buffer */
+ arg[3].Buffer.Pointer = (uint8_t *)cap_set;
+ cap_set[0] = 0; /* status */
+ cap_set[1] = sc->cpu_features;
+ status = AcpiEvaluateObject(sc->cpu_handle, "_OSC", &arglist, NULL);
+ if (ACPI_SUCCESS(status)) {
+ if (cap_set[0] != 0)
+ device_printf(dev, "_OSC returned status %#x\n", cap_set[0]);
+ }
+ else {
+ /* _OSC is absent or failed: fall back to the older _PDC. */
+ arglist.Pointer = arg;
+ arglist.Count = 1;
+ arg[0].Type = ACPI_TYPE_BUFFER;
+ arg[0].Buffer.Length = sizeof(cap_set);
+ arg[0].Buffer.Pointer = (uint8_t *)cap_set;
+ cap_set[0] = 1; /* revision */
+ cap_set[1] = 1; /* number of capabilities integers */
+ cap_set[2] = sc->cpu_features;
+ AcpiEvaluateObject(sc->cpu_handle, "_PDC", &arglist, NULL);
+ }
+ }
+
+ /* Probe for Cx state support. */
+ acpi_cpu_cx_probe(sc);
+
+ return (0);
+}
+
+/*
+ * SYSINIT hook: after all acpi_cpu devices exist, probe and attach any
+ * child drivers (e.g. P-state/throttling drivers) hanging off each cpu
+ * device.  Probing all before attaching any keeps ordering uniform.
+ */
+static void
+acpi_cpu_postattach(void *unused __unused)
+{
+ device_t *devices;
+ int err;
+ int i, n;
+
+ err = devclass_get_devices(acpi_cpu_devclass, &devices, &n);
+ if (err != 0) {
+ printf("devclass_get_devices(acpi_cpu_devclass) failed\n");
+ return;
+ }
+ for (i = 0; i < n; i++)
+ bus_generic_probe(devices[i]);
+ for (i = 0; i < n; i++)
+ bus_generic_attach(devices[i]);
+ free(devices, M_TEMP);
+}
+
+SYSINIT(acpi_cpu, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE,
+ acpi_cpu_postattach, NULL);
+
+/*
+ * Disable any entry to the idle function during suspend and re-enable it
+ * during resume.
+ */
+/*
+ * Suspend: let children suspend first, then block further entry into the
+ * Cx idle routine for the duration of the sleep transition.
+ */
+static int
+acpi_cpu_suspend(device_t dev)
+{
+ int error;
+
+ error = bus_generic_suspend(dev);
+ if (error)
+ return (error);
+ cpu_disable_idle = TRUE;
+ return (0);
+}
+
+/* Resume: re-enable the Cx idle routine, then resume children. */
+static int
+acpi_cpu_resume(device_t dev)
+{
+
+ cpu_disable_idle = FALSE;
+ return (bus_generic_resume(dev));
+}
+
+/*
+ * Find the processor associated with a given ACPI ID. By default,
+ * use the MADT to map ACPI IDs to APIC IDs and use that to locate a
+ * processor. Some systems have inconsistent ASL and MADT however.
+ * For these systems the cpu_unordered tunable can be set in which
+ * case we assume that Processor objects are listed in the same order
+ * in both the MADT and ASL.
+ */
+/*
+ * Find the processor associated with a given ACPI ID. By default,
+ * use the MADT to map ACPI IDs to APIC IDs and use that to locate a
+ * processor. Some systems have inconsistent ASL and MADT however.
+ * For these systems the cpu_unordered tunable can be set in which
+ * case we assume that Processor objects are listed in the same order
+ * in both the MADT and ASL.
+ *
+ * On success *acpi_id may be rewritten to the MADT-derived value and
+ * *cpu_id receives the kernel CPU number; returns ESRCH if no match.
+ */
+static int
+acpi_pcpu_get_id(device_t dev, uint32_t *acpi_id, uint32_t *cpu_id)
+{
+ struct pcpu *pc;
+ uint32_t i, idx;
+
+ KASSERT(acpi_id != NULL, ("Null acpi_id"));
+ KASSERT(cpu_id != NULL, ("Null cpu_id"));
+ idx = device_get_unit(dev);
+
+ /*
+ * If pc_acpi_id for CPU 0 is not initialized (e.g. a non-APIC
+ * UP box) use the ACPI ID from the first processor we find.
+ */
+ if (idx == 0 && mp_ncpus == 1) {
+ pc = pcpu_find(0);
+ if (pc->pc_acpi_id == 0xffffffff)
+ pc->pc_acpi_id = *acpi_id;
+ *cpu_id = 0;
+ return (0);
+ }
+
+ CPU_FOREACH(i) {
+ pc = pcpu_find(i);
+ KASSERT(pc != NULL, ("no pcpu data for %d", i));
+ if (cpu_unordered) {
+ /* Positional match: the idx'th Processor object wins. */
+ if (idx-- == 0) {
+ /*
+ * If pc_acpi_id doesn't match the ACPI ID from the
+ * ASL, prefer the MADT-derived value.
+ */
+ if (pc->pc_acpi_id != *acpi_id)
+ *acpi_id = pc->pc_acpi_id;
+ *cpu_id = pc->pc_cpuid;
+ return (0);
+ }
+ } else {
+ if (pc->pc_acpi_id == *acpi_id) {
+ if (bootverbose)
+ device_printf(dev,
+ "Processor %s (ACPI ID %u) -> APIC ID %d\n",
+ acpi_name(acpi_get_handle(dev)), *acpi_id,
+ pc->pc_cpuid);
+ *cpu_id = pc->pc_cpuid;
+ return (0);
+ }
+ }
+ }
+
+ if (bootverbose)
+ printf("ACPI: Processor %s (ACPI ID %u) ignored\n",
+ acpi_name(acpi_get_handle(dev)), *acpi_id);
+
+ return (ESRCH);
+}
+
+/* Bus method: return the per-child resource list stored in its ivars. */
+static struct resource_list *
+acpi_cpu_get_rlist(device_t dev, device_t child)
+{
+ struct acpi_cpu_device *ad;
+
+ ad = device_get_ivars(child);
+ if (ad == NULL)
+ return (NULL);
+ return (&ad->ad_rl);
+}
+
+/*
+ * Bus method: add a child device with a fresh resource list as its ivars.
+ * M_NOWAIT because this can be called from contexts that must not sleep;
+ * the ivars allocation is freed again if the child cannot be created.
+ */
+static device_t
+acpi_cpu_add_child(device_t dev, u_int order, const char *name, int unit)
+{
+ struct acpi_cpu_device *ad;
+ device_t child;
+
+ if ((ad = malloc(sizeof(*ad), M_TEMP, M_NOWAIT | M_ZERO)) == NULL)
+ return (NULL);
+
+ resource_list_init(&ad->ad_rl);
+
+ child = device_add_child_ordered(dev, order, name, unit);
+ if (child != NULL)
+ device_set_ivars(child, ad);
+ else
+ free(ad, M_TEMP);
+ return (child);
+}
+
+/*
+ * Bus method: export instance variables to children — the ACPI handle,
+ * the pcpu pointer, and (on x86, when the TSC is invariant) the nominal
+ * CPU frequency in MHz derived from tsc_freq.
+ */
+static int
+acpi_cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
+{
+ struct acpi_cpu_softc *sc;
+
+ sc = device_get_softc(dev);
+ switch (index) {
+ case ACPI_IVAR_HANDLE:
+ *result = (uintptr_t)sc->cpu_handle;
+ break;
+ case CPU_IVAR_PCPU:
+ *result = (uintptr_t)sc->cpu_pcpu;
+ break;
+#if defined(__amd64__) || defined(__i386__)
+ case CPU_IVAR_NOMINAL_MHZ:
+ if (tsc_is_invariant) {
+ *result = (uintptr_t)(atomic_load_acq_64(&tsc_freq) / 1000000);
+ break;
+ }
+ /* FALLTHROUGH */
+#endif
+ default:
+ return (ENOENT);
+ }
+ return (0);
+}
+
+/*
+ * Shutdown: stop children, then keep further idle entries out of the
+ * ACPI Cx path for the remainder of the shutdown sequence.
+ */
+static int
+acpi_cpu_shutdown(device_t dev)
+{
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ /* Allow children to shutdown first. */
+ bus_generic_shutdown(dev);
+
+ /*
+ * Disable any entry to the idle function. There is a small race where
+ * an idle thread have passed this check but not gone to sleep. This
+ * is ok since device_shutdown() does not free the softc, otherwise
+ * we'd have to be sure all threads were evicted before returning.
+ */
+ cpu_disable_idle = TRUE;
+
+ return_VALUE (0);
+}
+
+/*
+ * Probe Cx sleep-state support for one cpu.  Prefers the ACPI 2.0 _CST
+ * package; once any cpu lacks a usable _CST the whole system falls back
+ * to generic FADT/P_BLK mode (handled later by acpi_cpu_startup).
+ */
+static void
+acpi_cpu_cx_probe(struct acpi_cpu_softc *sc)
+{
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ /* Use initial sleep value of 1 sec. to start with lowest idle state. */
+ sc->cpu_prev_sleep = 1000000;
+ sc->cpu_cx_lowest = 0;
+ sc->cpu_cx_lowest_lim = 0;
+
+ /*
+ * Check for the ACPI 2.0 _CST sleep states object. If we can't find
+ * any, we'll revert to generic FADT/P_BLK Cx control method which will
+ * be handled by acpi_cpu_startup. We need to defer to after having
+ * probed all the cpus in the system before probing for generic Cx
+ * states as we may already have found cpus with valid _CST packages
+ */
+ if (!cpu_cx_generic && acpi_cpu_cx_cst(sc) != 0) {
+ /*
+ * We were unable to find a _CST package for this cpu or there
+ * was an error parsing it. Switch back to generic mode.
+ */
+ cpu_cx_generic = TRUE;
+ if (bootverbose)
+ device_printf(sc->cpu_dev, "switching to generic Cx mode\n");
+ }
+
+ /*
+ * TODO: _CSD Package should be checked here.
+ */
+}
+
+static void
+acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc)
+{
+ ACPI_GENERIC_ADDRESS gas;
+ struct acpi_cx *cx_ptr;
+
+ sc->cpu_cx_count = 0;
+ cx_ptr = sc->cpu_cx_states;
+
+ /* Use initial sleep value of 1 sec. to start with lowest idle state. */
+ sc->cpu_prev_sleep = 1000000;
+
+ /* C1 has been required since just after ACPI 1.0 */
+ cx_ptr->type = ACPI_STATE_C1;
+ cx_ptr->trans_lat = 0;
+ cx_ptr++;
+ sc->cpu_non_c3 = sc->cpu_cx_count;
+ sc->cpu_cx_count++;
+
+ /*
+ * The spec says P_BLK must be 6 bytes long. However, some systems
+ * use it to indicate a fractional set of features present so we
+ * take 5 as C2. Some may also have a value of 7 to indicate
+ * another C3 but most use _CST for this (as required) and having
+ * "only" C1-C3 is not a hardship.
+ */
+ if (sc->cpu_p_blk_len < 5)
+ return;
+
+ /* Validate and allocate resources for C2 (P_LVL2). */
+ gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
+ gas.BitWidth = 8;
+ if (AcpiGbl_FADT.C2Latency <= 100) {
+ gas.Address = sc->cpu_p_blk + 4;
+ acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &sc->cpu_rid,
+ &gas, &cx_ptr->p_lvlx, RF_SHAREABLE);
+ if (cx_ptr->p_lvlx != NULL) {
+ sc->cpu_rid++;
+ cx_ptr->type = ACPI_STATE_C2;
+ cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency;
+ cx_ptr++;
+ sc->cpu_non_c3 = sc->cpu_cx_count;
+ sc->cpu_cx_count++;
+ }
+ }
+ if (sc->cpu_p_blk_len < 6)
+ return;
+
+ /* Validate and allocate resources for C3 (P_LVL3). */
+ if (AcpiGbl_FADT.C3Latency <= 1000 && !(cpu_quirks & CPU_QUIRK_NO_C3)) {
+ gas.Address = sc->cpu_p_blk + 5;
+ acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &sc->cpu_rid, &gas,
+ &cx_ptr->p_lvlx, RF_SHAREABLE);
+ if (cx_ptr->p_lvlx != NULL) {
+ sc->cpu_rid++;
+ cx_ptr->type = ACPI_STATE_C3;
+ cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency;
+ cx_ptr++;
+ sc->cpu_cx_count++;
+ cpu_can_deep_sleep = 1;
+ }
+ }
+}
+
+/*
+ * Parse a _CST package and set up its Cx states. Since the _CST object
+ * can change dynamically, our notify handler may call this function
+ * to clean up and probe the new _CST package.
+ */
+static int
+acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
+{
+ struct acpi_cx *cx_ptr;
+ ACPI_STATUS status;
+ ACPI_BUFFER buf;
+ ACPI_OBJECT *top;
+ ACPI_OBJECT *pkg;
+ uint32_t count;
+ int i;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ buf.Pointer = NULL;
+ buf.Length = ACPI_ALLOCATE_BUFFER;
+ status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf);
+ if (ACPI_FAILURE(status))
+ return (ENXIO);
+
+ /* _CST is a package with a count and at least one Cx package. */
+ top = (ACPI_OBJECT *)buf.Pointer;
+ if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
+ device_printf(sc->cpu_dev, "invalid _CST package\n");
+ AcpiOsFree(buf.Pointer);
+ return (ENXIO);
+ }
+ if (count != top->Package.Count - 1) {
+ device_printf(sc->cpu_dev, "invalid _CST state count (%d != %d)\n",
+ count, top->Package.Count - 1);
+ count = top->Package.Count - 1;
+ }
+ if (count > MAX_CX_STATES) {
+ device_printf(sc->cpu_dev, "_CST has too many states (%d)\n", count);
+ count = MAX_CX_STATES;
+ }
+
+ sc->cpu_non_c3 = 0;
+ sc->cpu_cx_count = 0;
+ cx_ptr = sc->cpu_cx_states;
+
+ /*
+ * C1 has been required since just after ACPI 1.0.
+ * Reserve the first slot for it.
+ */
+ cx_ptr->type = ACPI_STATE_C0;
+ cx_ptr++;
+ sc->cpu_cx_count++;
+
+ /* Set up all valid states. */
+ for (i = 0; i < count; i++) {
+ pkg = &top->Package.Elements[i + 1];
+ if (!ACPI_PKG_VALID(pkg, 4) ||
+ acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
+ acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
+ acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {
+
+ device_printf(sc->cpu_dev, "skipping invalid Cx state package\n");
+ continue;
+ }
+
+ /* Validate the state to see if we should use it. */
+ switch (cx_ptr->type) {
+ case ACPI_STATE_C1:
+ if (sc->cpu_cx_states[0].type == ACPI_STATE_C0) {
+ /* This is the first C1 state. Use the reserved slot. */
+ sc->cpu_cx_states[0] = *cx_ptr;
+ } else {
+ sc->cpu_non_c3 = sc->cpu_cx_count;
+ cx_ptr++;
+ sc->cpu_cx_count++;
+ }
+ continue;
+ case ACPI_STATE_C2:
+ sc->cpu_non_c3 = sc->cpu_cx_count;
+ break;
+ case ACPI_STATE_C3:
+ default:
+ if ((cpu_quirks & CPU_QUIRK_NO_C3) != 0) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "acpi_cpu%d: C3[%d] not available.\n",
+ device_get_unit(sc->cpu_dev), i));
+ continue;
+ } else
+ cpu_can_deep_sleep = 1;
+ break;
+ }
+
+#ifdef notyet
+ /* Free up any previous register. */
+ if (cx_ptr->p_lvlx != NULL) {
+ bus_release_resource(sc->cpu_dev, 0, 0, cx_ptr->p_lvlx);
+ cx_ptr->p_lvlx = NULL;
+ }
+#endif
+
+ /* Allocate the control register for C2 or C3. */
+ acpi_PkgGas(sc->cpu_dev, pkg, 0, &cx_ptr->res_type, &sc->cpu_rid,
+ &cx_ptr->p_lvlx, RF_SHAREABLE);
+ if (cx_ptr->p_lvlx) {
+ sc->cpu_rid++;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "acpi_cpu%d: Got C%d - %d latency\n",
+ device_get_unit(sc->cpu_dev), cx_ptr->type,
+ cx_ptr->trans_lat));
+ cx_ptr++;
+ sc->cpu_cx_count++;
+ }
+ }
+ AcpiOsFree(buf.Pointer);
+
+ /* If C1 state was not found, we need one now. */
+ cx_ptr = sc->cpu_cx_states;
+ if (cx_ptr->type == ACPI_STATE_C0) {
+ cx_ptr->type = ACPI_STATE_C1;
+ cx_ptr->trans_lat = 0;
+ }
+
+ return (0);
+}
+
+/*
+ * Call this *after* all CPUs have been attached.
+ */
+static void
+acpi_cpu_startup(void *arg)
+{
+ struct acpi_cpu_softc *sc;
+ int i;
+
+ /* Get set of CPU devices */
+ devclass_get_devices(acpi_cpu_devclass, &cpu_devices, &cpu_ndevices);
+
+ /*
+ * Setup any quirks that might necessary now that we have probed
+ * all the CPUs
+ */
+ acpi_cpu_quirks();
+
+ if (cpu_cx_generic) {
+ /*
+ * We are using generic Cx mode, probe for available Cx states
+ * for all processors.
+ */
+ for (i = 0; i < cpu_ndevices; i++) {
+ sc = device_get_softc(cpu_devices[i]);
+ acpi_cpu_generic_cx_probe(sc);
+ }
+ } else {
+ /*
+ * We are using _CST mode, remove C3 state if necessary.
+ * As we now know for sure that we will be using _CST mode
+ * install our notify handler.
+ */
+ for (i = 0; i < cpu_ndevices; i++) {
+ sc = device_get_softc(cpu_devices[i]);
+ if (cpu_quirks & CPU_QUIRK_NO_C3) {
+ sc->cpu_cx_count = sc->cpu_non_c3 + 1;
+ }
+ AcpiInstallNotifyHandler(sc->cpu_handle, ACPI_DEVICE_NOTIFY,
+ acpi_cpu_notify, sc);
+ }
+ }
+
+ /* Perform Cx final initialization. */
+ for (i = 0; i < cpu_ndevices; i++) {
+ sc = device_get_softc(cpu_devices[i]);
+ acpi_cpu_startup_cx(sc);
+ }
+
+ /* Add a sysctl handler to handle global Cx lowest setting */
+ SYSCTL_ADD_PROC(&cpu_sysctl_ctx, SYSCTL_CHILDREN(cpu_sysctl_tree),
+ OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
+ NULL, 0, acpi_cpu_global_cx_lowest_sysctl, "A",
+ "Global lowest Cx sleep state to use");
+
+ /* Take over idling from cpu_idle_default(). */
+ cpu_cx_lowest_lim = 0;
+ cpu_disable_idle = FALSE;
+ cpu_idle_hook = acpi_cpu_idle;
+}
+
+static void
+acpi_cpu_cx_list(struct acpi_cpu_softc *sc)
+{
+ struct sbuf sb;
+ int i;
+
+ /*
+ * Set up the list of Cx states
+ */
+ sbuf_new(&sb, sc->cpu_cx_supported, sizeof(sc->cpu_cx_supported),
+ SBUF_FIXEDLEN);
+ for (i = 0; i < sc->cpu_cx_count; i++)
+ sbuf_printf(&sb, "C%d/%d/%d ", i + 1, sc->cpu_cx_states[i].type,
+ sc->cpu_cx_states[i].trans_lat);
+ sbuf_trim(&sb);
+ sbuf_finish(&sb);
+}
+
/*
 * Per-CPU Cx finalization: build the supported-states string, publish the
 * per-device sysctls, and tell the platform (via the FADT SMI command
 * port) that the OS will handle _CST change notifications.
 */
static void
acpi_cpu_startup_cx(struct acpi_cpu_softc *sc)
{
    acpi_cpu_cx_list(sc);

    SYSCTL_ADD_STRING(&sc->cpu_sysctl_ctx,
		      SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
		      OID_AUTO, "cx_supported", CTLFLAG_RD,
		      sc->cpu_cx_supported, 0,
		      "Cx/microsecond values for supported Cx states");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
		    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
		    OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
		    (void *)sc, 0, acpi_cpu_cx_lowest_sysctl, "A",
		    "lowest Cx sleep state to use");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
		    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
		    OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD,
		    (void *)sc, 0, acpi_cpu_usage_sysctl, "A",
		    "percent usage for each Cx state");

    /* Signal platform that we can handle _CST notification. */
    if (!cpu_cx_generic && cpu_cst_cnt != 0) {
	ACPI_LOCK(acpi);
	AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8);
	ACPI_UNLOCK(acpi);
    }
}
+
+/*
+ * Idle the CPU in the lowest state possible. This function is called with
+ * interrupts disabled. Note that once it re-enables interrupts, a task
+ * switch can occur so do not access shared data (i.e. the softc) after
+ * interrupts are re-enabled.
+ */
+static void
+acpi_cpu_idle()
+{
+ struct acpi_cpu_softc *sc;
+ struct acpi_cx *cx_next;
+ uint64_t cputicks;
+ uint32_t start_time, end_time;
+ int bm_active, cx_next_idx, i;
+
+ /* If disabled, return immediately. */
+ if (cpu_disable_idle) {
+ ACPI_ENABLE_IRQS();
+ return;
+ }
+
+ /*
+ * Look up our CPU id to get our softc. If it's NULL, we'll use C1
+ * since there is no ACPI processor object for this CPU. This occurs
+ * for logical CPUs in the HTT case.
+ */
+ sc = cpu_softc[PCPU_GET(cpuid)];
+ if (sc == NULL) {
+ acpi_cpu_c1();
+ return;
+ }
+
+ /* Find the lowest state that has small enough latency. */
+ cx_next_idx = 0;
+ if (cpu_disable_deep_sleep)
+ i = min(sc->cpu_cx_lowest, sc->cpu_non_c3);
+ else
+ i = sc->cpu_cx_lowest;
+ for (; i >= 0; i--) {
+ if (sc->cpu_cx_states[i].trans_lat * 3 <= sc->cpu_prev_sleep) {
+ cx_next_idx = i;
+ break;
+ }
+ }
+
+ /*
+ * Check for bus master activity. If there was activity, clear
+ * the bit and use the lowest non-C3 state. Note that the USB
+ * driver polling for new devices keeps this bit set all the
+ * time if USB is loaded.
+ */
+ if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0 &&
+ cx_next_idx > sc->cpu_non_c3) {
+ AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active);
+ if (bm_active != 0) {
+ AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
+ cx_next_idx = sc->cpu_non_c3;
+ }
+ }
+
+ /* Select the next state and update statistics. */
+ cx_next = &sc->cpu_cx_states[cx_next_idx];
+ sc->cpu_cx_stats[cx_next_idx]++;
+ KASSERT(cx_next->type != ACPI_STATE_C0, ("acpi_cpu_idle: C0 sleep"));
+
+ /*
+ * Execute HLT (or equivalent) and wait for an interrupt. We can't
+ * precisely calculate the time spent in C1 since the place we wake up
+ * is an ISR. Assume we slept no more then half of quantum, unless
+ * we are called inside critical section, delaying context switch.
+ */
+ if (cx_next->type == ACPI_STATE_C1) {
+ cputicks = cpu_ticks();
+ acpi_cpu_c1();
+ end_time = ((cpu_ticks() - cputicks) << 20) / cpu_tickrate();
+ if (curthread->td_critnest == 0)
+ end_time = min(end_time, 500000 / hz);
+ sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + end_time) / 4;
+ return;
+ }
+
+ /*
+ * For C3, disable bus master arbitration and enable bus master wake
+ * if BM control is available, otherwise flush the CPU cache.
+ */
+ if (cx_next->type == ACPI_STATE_C3) {
+ if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
+ AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
+ AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
+ } else
+ ACPI_FLUSH_CPU_CACHE();
+ }
+
+ /*
+ * Read from P_LVLx to enter C2(+), checking time spent asleep.
+ * Use the ACPI timer for measuring sleep time. Since we need to
+ * get the time very close to the CPU start/stop clock logic, this
+ * is the only reliable time source.
+ */
+ if (cx_next->type == ACPI_STATE_C3) {
+ AcpiHwRead(&start_time, &AcpiGbl_FADT.XPmTimerBlock);
+ cputicks = 0;
+ } else {
+ start_time = 0;
+ cputicks = cpu_ticks();
+ }
+ CPU_GET_REG(cx_next->p_lvlx, 1);
+
+ /*
+ * Read the end time twice. Since it may take an arbitrary time
+ * to enter the idle state, the first read may be executed before
+ * the processor has stopped. Doing it again provides enough
+ * margin that we are certain to have a correct value.
+ */
+ AcpiHwRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);
+ if (cx_next->type == ACPI_STATE_C3) {
+ AcpiHwRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);
+ end_time = acpi_TimerDelta(end_time, start_time);
+ } else
+ end_time = ((cpu_ticks() - cputicks) << 20) / cpu_tickrate();
+
+ /* Enable bus master arbitration and disable bus master wakeup. */
+ if (cx_next->type == ACPI_STATE_C3 &&
+ (cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
+ AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
+ AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
+ }
+ ACPI_ENABLE_IRQS();
+
+ sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + PM_USEC(end_time)) / 4;
+}
+
+/*
+ * Re-evaluate the _CST object when we are notified that it changed.
+ *
+ * XXX Re-evaluation disabled until locking is done.
+ */
+static void
+acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context)
+{
+ struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)context;
+
+ if (notify != ACPI_NOTIFY_CX_STATES)
+ return;
+
+ /* Update the list of Cx states. */
+ acpi_cpu_cx_cst(sc);
+ acpi_cpu_cx_list(sc);
+
+ ACPI_SERIAL_BEGIN(cpu);
+ acpi_cpu_set_cx_lowest(sc);
+ ACPI_SERIAL_END(cpu);
+}
+
/*
 * Detect and record system-wide Cx quirks in the global cpu_quirks mask.
 * Must run after all CPUs are probed (cpu_cx_generic is then final).
 * Always returns 0.
 */
static int
acpi_cpu_quirks(void)
{
    device_t acpi_dev;
    uint32_t val;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /*
     * Bus mastering arbitration control is needed to keep caches coherent
     * while sleeping in C3.  If it's not present but a working flush cache
     * instruction is present, flush the caches before entering C3 instead.
     * Otherwise, just disable C3 completely.
     */
    if (AcpiGbl_FADT.Pm2ControlBlock == 0 ||
	AcpiGbl_FADT.Pm2ControlLength == 0) {
	if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) &&
	    (AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0) {
	    cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu: no BM control, using flush cache method\n"));
	} else {
	    cpu_quirks |= CPU_QUIRK_NO_C3;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu: no BM control, C3 not available\n"));
	}
    }

    /*
     * If we are using generic Cx mode, C3 on multiple CPUs requires using
     * the expensive flush cache instruction.
     */
    if (cpu_cx_generic && mp_ncpus > 1) {
	cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
	    "acpi_cpu: SMP, using flush cache mode for C3\n"));
    }

    /* Look for various quirks of the PIIX4 part. */
    acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
    if (acpi_dev != NULL) {
	switch (pci_get_revid(acpi_dev)) {
	/*
	 * Disable C3 support for all PIIX4 chipsets.  Some of these parts
	 * do not report the BMIDE status to the BM status register and
	 * others have a livelock bug if Type-F DMA is enabled.  Linux
	 * works around the BMIDE bug by reading the BM status directly
	 * but we take the simpler approach of disabling C3 for these
	 * parts.
	 *
	 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
	 * Livelock") from the January 2002 PIIX4 specification update.
	 * Applies to all PIIX4 models.
	 *
	 * Also, make sure that all interrupts cause a "Stop Break"
	 * event to exit from C2 state.
	 * Also, BRLD_EN_BM (ACPI_BITREG_BUS_MASTER_RLD in ACPI-speak)
	 * should be set to zero, otherwise it causes C2 to short-sleep.
	 * PIIX4 doesn't properly support C3 and bus master activity
	 * need not break out of C2.
	 */
	case PCI_REVISION_A_STEP:
	case PCI_REVISION_B_STEP:
	case PCI_REVISION_4E:
	case PCI_REVISION_4M:
	    cpu_quirks |= CPU_QUIRK_NO_C3;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu: working around PIIX4 bug, disabling C3\n"));

	    val = pci_read_config(acpi_dev, PIIX4_DEVACTB_REG, 4);
	    if ((val & PIIX4_STOP_BREAK_MASK) != PIIX4_STOP_BREAK_MASK) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "acpi_cpu: PIIX4: enabling IRQs to generate Stop Break\n"));
	    	val |= PIIX4_STOP_BREAK_MASK;
		pci_write_config(acpi_dev, PIIX4_DEVACTB_REG, val, 4);
	    }
	    AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_RLD, &val);
	    if (val) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "acpi_cpu: PIIX4: reset BRLD_EN_BM\n"));
		AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
	    }
	    break;
	default:
	    break;
	}
    }

    return (0);
}
+
+static int
+acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ struct acpi_cpu_softc *sc;
+ struct sbuf sb;
+ char buf[128];
+ int i;
+ uintmax_t fract, sum, whole;
+
+ sc = (struct acpi_cpu_softc *) arg1;
+ sum = 0;
+ for (i = 0; i < sc->cpu_cx_count; i++)
+ sum += sc->cpu_cx_stats[i];
+ sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
+ for (i = 0; i < sc->cpu_cx_count; i++) {
+ if (sum > 0) {
+ whole = (uintmax_t)sc->cpu_cx_stats[i] * 100;
+ fract = (whole % sum) * 100;
+ sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum),
+ (u_int)(fract / sum));
+ } else
+ sbuf_printf(&sb, "0.00%% ");
+ }
+ sbuf_printf(&sb, "last %dus", sc->cpu_prev_sleep);
+ sbuf_trim(&sb);
+ sbuf_finish(&sb);
+ sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
+ sbuf_delete(&sb);
+
+ return (0);
+}
+
+static int
+acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc)
+{
+ int i;
+
+ ACPI_SERIAL_ASSERT(cpu);
+ sc->cpu_cx_lowest = min(sc->cpu_cx_lowest_lim, sc->cpu_cx_count - 1);
+
+ /* If not disabling, cache the new lowest non-C3 state. */
+ sc->cpu_non_c3 = 0;
+ for (i = sc->cpu_cx_lowest; i >= 0; i--) {
+ if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) {
+ sc->cpu_non_c3 = i;
+ break;
+ }
+ }
+
+ /* Reset the statistics counters. */
+ bzero(sc->cpu_cx_stats, sizeof(sc->cpu_cx_stats));
+ return (0);
+}
+
+static int
+acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ struct acpi_cpu_softc *sc;
+ char state[8];
+ int val, error;
+
+ sc = (struct acpi_cpu_softc *) arg1;
+ snprintf(state, sizeof(state), "C%d", sc->cpu_cx_lowest_lim + 1);
+ error = sysctl_handle_string(oidp, state, sizeof(state), req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+ if (strlen(state) < 2 || toupper(state[0]) != 'C')
+ return (EINVAL);
+ if (strcasecmp(state, "Cmax") == 0)
+ val = MAX_CX_STATES;
+ else {
+ val = (int) strtol(state + 1, NULL, 10);
+ if (val < 1 || val > MAX_CX_STATES)
+ return (EINVAL);
+ }
+
+ ACPI_SERIAL_BEGIN(cpu);
+ sc->cpu_cx_lowest_lim = val - 1;
+ acpi_cpu_set_cx_lowest(sc);
+ ACPI_SERIAL_END(cpu);
+
+ return (0);
+}
+
/*
 * Machine-wide "cx_lowest" sysctl handler.  Accepts the same "C<n>" /
 * "Cmax" syntax as the per-CPU handler and propagates the new limit to
 * every CPU under the cpu serialization.
 */
static int
acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    char state[8];
    int val, error, i;

    snprintf(state, sizeof(state), "C%d", cpu_cx_lowest_lim + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
	return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
	return (EINVAL);
    if (strcasecmp(state, "Cmax") == 0)
	val = MAX_CX_STATES;
    else {
	val = (int) strtol(state + 1, NULL, 10);
	if (val < 1 || val > MAX_CX_STATES)
	    return (EINVAL);
    }

    /* Update the new lowest useable Cx state for all CPUs. */
    ACPI_SERIAL_BEGIN(cpu);
    cpu_cx_lowest_lim = val - 1;
    for (i = 0; i < cpu_ndevices; i++) {
	sc = device_get_softc(cpu_devices[i]);
	sc->cpu_cx_lowest_lim = cpu_cx_lowest_lim;
	acpi_cpu_set_cx_lowest(sc);
    }
    ACPI_SERIAL_END(cpu);

    return (0);
}
diff --git a/sys/dev/acpica/acpi_dock.c b/sys/dev/acpica/acpi_dock.c
new file mode 100644
index 0000000..b184053
--- /dev/null
+++ b/sys/dev/acpica/acpi_dock.c
@@ -0,0 +1,537 @@
+/*-
+ * Copyright (c) 2005-2006 Mitsuru IWASAKI <iwasaki@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include "opt_acpi.h"
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+
+#include <dev/acpica/acpivar.h>
+#include <dev/acpica/acpiio.h>
+
+/* Hooks for the ACPI CA debugging infrastructure */
+#define _COMPONENT ACPI_DOCK
+ACPI_MODULE_NAME("DOCK")
+
+/* For Docking status */
+#define ACPI_DOCK_STATUS_UNKNOWN -1
+#define ACPI_DOCK_STATUS_UNDOCKED 0
+#define ACPI_DOCK_STATUS_DOCKED 1
+
+#define ACPI_DOCK_UNLOCK 0 /* Allow device to be ejected */
+#define ACPI_DOCK_LOCK 1 /* Prevent dev from being removed */
+
+#define ACPI_DOCK_ISOLATE 0 /* Isolate from dock connector */
+#define ACPI_DOCK_CONNECT 1 /* Connect to dock */
+
struct acpi_dock_softc {
    int _sta;		/* last _STA result, or -1 if unavailable */
    int _bdn;		/* last _BDN (dock board number), or -1 */
    int _uid;		/* last _UID result, or -1 */
    int status;		/* ACPI_DOCK_STATUS_* docking state */
    struct sysctl_ctx_list *sysctl_ctx;	/* device sysctl context */
    struct sysctl_oid *sysctl_tree;	/* device sysctl subtree */
};
+
+ACPI_SERIAL_DECL(dock, "ACPI Docking Station");
+
+/*
+ * Utility functions
+ */
+
/*
 * Refresh the cached _STA, _BDN and _UID values in the softc.  A failed
 * evaluation stores -1 (ACPI_DOCK_STATUS_UNKNOWN is reused here purely as
 * a "value unavailable" sentinel, not as a docking status).
 */
static void
acpi_dock_get_info(device_t dev)
{
    struct acpi_dock_softc *sc;
    ACPI_HANDLE	h;

    sc = device_get_softc(dev);
    h = acpi_get_handle(dev);

    if (ACPI_FAILURE(acpi_GetInteger(h, "_STA", &sc->_sta)))
	sc->_sta = ACPI_DOCK_STATUS_UNKNOWN;
    if (ACPI_FAILURE(acpi_GetInteger(h, "_BDN", &sc->_bdn)))
	sc->_bdn = ACPI_DOCK_STATUS_UNKNOWN;
    if (ACPI_FAILURE(acpi_GetInteger(h, "_UID", &sc->_uid)))
	sc->_uid = ACPI_DOCK_STATUS_UNKNOWN;
    ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
		"_STA: %04x, _BDN: %04x, _UID: %04x\n", sc->_sta,
		sc->_bdn, sc->_uid);
}
+
/*
 * Run the _DCK control method to dock (ACPI_DOCK_CONNECT) or isolate
 * (ACPI_DOCK_ISOLATE) the system.  Returns 0 on success, -1 on failure.
 */
static int
acpi_dock_execute_dck(device_t dev, int dock)
{
    ACPI_HANDLE	h;
    ACPI_OBJECT	argobj;
    ACPI_OBJECT_LIST args;
    ACPI_BUFFER	buf;
    ACPI_OBJECT	retobj;
    ACPI_STATUS	status;

    h = acpi_get_handle(dev);

    argobj.Type = ACPI_TYPE_INTEGER;
    argobj.Integer.Value = dock;
    args.Count = 1;
    args.Pointer = &argobj;
    buf.Pointer = &retobj;
    buf.Length = sizeof(retobj);
    status = AcpiEvaluateObject(h, "_DCK", &args, &buf);

    /*
     * When _DCK is called with 0, OSPM will ignore the return value
     * (per the ACPI spec), so report success unconditionally.
     */
    if (dock == ACPI_DOCK_ISOLATE)
	return (0);

    /* If _DCK returned 1, the request succeeded. */
    if (ACPI_SUCCESS(status) && retobj.Type == ACPI_TYPE_INTEGER &&
	retobj.Integer.Value == 1)
	return (0);

    return (-1);
}
+
+/* Lock devices while docked to prevent surprise removal. */
+static void
+acpi_dock_execute_lck(device_t dev, int lock)
+{
+ ACPI_HANDLE h;
+
+ h = acpi_get_handle(dev);
+ acpi_SetInteger(h, "_LCK", lock);
+}
+
+/* Eject a device (i.e., motorized). */
+static int
+acpi_dock_execute_ejx(device_t dev, int eject, int state)
+{
+ ACPI_HANDLE h;
+ ACPI_STATUS status;
+ char ejx[5];
+
+ h = acpi_get_handle(dev);
+ snprintf(ejx, sizeof(ejx), "_EJ%d", state);
+ status = acpi_SetInteger(h, ejx, eject);
+ if (ACPI_SUCCESS(status))
+ return (0);
+
+ return (-1);
+}
+
+/* Find dependent devices. When their parent is removed, so are they. */
+static int
+acpi_dock_is_ejd_device(ACPI_HANDLE dock_handle, ACPI_HANDLE handle)
+{
+ int ret;
+ ACPI_STATUS ret_status;
+ ACPI_BUFFER ejd_buffer;
+ ACPI_OBJECT *obj;
+
+ ret = 0;
+
+ ejd_buffer.Pointer = NULL;
+ ejd_buffer.Length = ACPI_ALLOCATE_BUFFER;
+ ret_status = AcpiEvaluateObject(handle, "_EJD", NULL, &ejd_buffer);
+ if (ACPI_FAILURE(ret_status))
+ goto out;
+
+ obj = (ACPI_OBJECT *)ejd_buffer.Pointer;
+ if (dock_handle == acpi_GetReference(NULL, obj))
+ ret = 1;
+
+out:
+ if (ejd_buffer.Pointer != NULL)
+ AcpiOsFree(ejd_buffer.Pointer);
+
+ return (ret);
+}
+
+/*
+ * Docking functions
+ */
+
/*
 * Deferred-attach callback (runs from the ACPI task queue): enable the
 * newly inserted device if needed and probe/attach it under Giant.
 */
static void
acpi_dock_attach_later(void *context)
{
    device_t dev;

    dev = (device_t)context;

    if (!device_is_enabled(dev))
	device_enable(dev);

    mtx_lock(&Giant);
    device_probe_and_attach(dev);
    mtx_unlock(&Giant);
}
+
/*
 * Namespace-walk callback: for each device whose _EJD references this
 * dock, schedule its newbus device for deferred enable/attach.  Always
 * returns AE_OK so the walk continues.
 */
static ACPI_STATUS
acpi_dock_insert_child(ACPI_HANDLE handle, UINT32 level, void *context,
    void **status)
{
    device_t dock_dev, dev;
    ACPI_HANDLE dock_handle;

    dock_dev = (device_t)context;
    dock_handle = acpi_get_handle(dock_dev);

    if (!acpi_dock_is_ejd_device(dock_handle, handle))
	goto out;

    ACPI_VPRINT(dock_dev, acpi_device_get_parent_softc(dock_dev),
		"inserting device for %s\n", acpi_name(handle));

#if 0
    /*
     * If the system booted without docking, the devices under the dock
     * are still uninitialized, and control methods such as _INI and _STA
     * have not been executed.
     * Normal devices are initialized at boot by calling
     * AcpiInitializeObjects(); however, the devices under the dock
     * need to be initialized here using the ACPICA scheme.
     */
    ACPI_INIT_WALK_INFO	Info;

    AcpiNsWalkNamespace(ACPI_TYPE_ANY, handle,
			100, TRUE, AcpiNsInitOneDevice, NULL, &Info, NULL);
#endif

    dev = acpi_get_device(handle);
    if (dev == NULL) {
	device_printf(dock_dev, "error: %s has no associated device\n",
	    acpi_name(handle));
	goto out;
    }

    AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_dock_attach_later, dev);

out:
    return (AE_OK);
}
+
+static void
+acpi_dock_insert_children(device_t dev)
+{
+ ACPI_STATUS status;
+ ACPI_HANDLE sb_handle;
+
+ status = AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle);
+ if (ACPI_SUCCESS(status)) {
+ AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle,
+ 100, acpi_dock_insert_child, NULL, dev, NULL);
+ }
+}
+
/*
 * Perform a dock operation: lock the devices, run _DCK to connect, and
 * (unless we are still booting) attach the dependent child devices.
 * Caller must hold the dock serialization.
 */
static void
acpi_dock_insert(device_t dev)
{
    struct acpi_dock_softc *sc;

    ACPI_SERIAL_ASSERT(dock);

    sc = device_get_softc(dev);

    if (sc->status == ACPI_DOCK_STATUS_UNDOCKED ||
	sc->status == ACPI_DOCK_STATUS_UNKNOWN) {
	acpi_dock_execute_lck(dev, ACPI_DOCK_LOCK);
	if (acpi_dock_execute_dck(dev, ACPI_DOCK_CONNECT) != 0) {
	    device_printf(dev, "_DCK failed\n");
	    return;
	}

	/* At boot time the children attach via the normal bus scan. */
	if (!cold)
	    acpi_dock_insert_children(dev);
	sc->status = ACPI_DOCK_STATUS_DOCKED;
    }
}
+
+/*
+ * Undock
+ */
+
/*
 * Namespace-walk callback for undocking: detach the newbus device of each
 * dock-dependent child, then run its _EJ0.  Note that 'context' is a
 * pointer to the dock device_t here (unlike acpi_dock_insert_child),
 * matching how acpi_dock_eject_children() passes it.
 */
static ACPI_STATUS
acpi_dock_eject_child(ACPI_HANDLE handle, UINT32 level, void *context,
    void **status)
{
    device_t dock_dev, dev;
    ACPI_HANDLE dock_handle;

    dock_dev = *(device_t *)context;
    dock_handle = acpi_get_handle(dock_dev);

    if (!acpi_dock_is_ejd_device(dock_handle, handle))
	goto out;

    ACPI_VPRINT(dock_dev, acpi_device_get_parent_softc(dock_dev),
	"ejecting device for %s\n", acpi_name(handle));

    dev = acpi_get_device(handle);
    if (dev != NULL && device_is_attached(dev)) {
	mtx_lock(&Giant);
	device_detach(dev);
	mtx_unlock(&Giant);
    }

    acpi_SetInteger(handle, "_EJ0", 0);
out:
    return (AE_OK);
}
+
+static void
+acpi_dock_eject_children(device_t dev)
+{
+ ACPI_HANDLE sb_handle;
+ ACPI_STATUS status;
+
+ status = AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle);
+ if (ACPI_SUCCESS(status)) {
+ AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle,
+ 100, acpi_dock_eject_child, NULL, &dev, NULL);
+ }
+}
+
/*
 * Perform an undock: eject dependent children, isolate via _DCK(0),
 * unlock, and eject the dock itself with _EJ0.  Afterwards re-read _STA
 * and warn if the hardware still reports presence (mechanical failure).
 * Caller must hold the dock serialization.
 */
static void
acpi_dock_removal(device_t dev)
{
    struct acpi_dock_softc *sc;

    ACPI_SERIAL_ASSERT(dock);

    sc = device_get_softc(dev);
    if (sc->status == ACPI_DOCK_STATUS_DOCKED ||
	sc->status == ACPI_DOCK_STATUS_UNKNOWN) {
	acpi_dock_eject_children(dev);
	if (acpi_dock_execute_dck(dev, ACPI_DOCK_ISOLATE) != 0)
	    return;

	acpi_dock_execute_lck(dev, ACPI_DOCK_UNLOCK);

	if (acpi_dock_execute_ejx(dev, 1, 0) != 0) {
	    device_printf(dev, "_EJ0 failed\n");
	    return;
	}

	sc->status = ACPI_DOCK_STATUS_UNDOCKED;
    }

    acpi_dock_get_info(dev);
    if (sc->_sta != 0)
	device_printf(dev, "mechanical failure (%#x).\n", sc->_sta);
}
+
+/*
+ * Device/Bus check
+ */
+
/*
 * Re-read the dock's _STA and dock or undock accordingly.  Caller must
 * hold the dock serialization.
 */
static void
acpi_dock_device_check(device_t dev)
{
    struct acpi_dock_softc *sc;

    ACPI_SERIAL_ASSERT(dock);

    sc = device_get_softc(dev);
    acpi_dock_get_info(dev);

    /*
     * If the _STA method indicates 'present' and 'functioning', the
     * system is docked.  If _STA does not exist for this device, it
     * is always present.
     */
    if (sc->_sta == ACPI_DOCK_STATUS_UNKNOWN ||
	ACPI_DEVICE_PRESENT(sc->_sta))
	acpi_dock_insert(dev);
    else if (sc->_sta == 0)
	acpi_dock_removal(dev);
}
+
+/*
+ * Notify Handler
+ */
+
/*
 * ACPI notify handler for the dock: bus/device checks trigger a docking
 * re-evaluation, an eject request triggers an undock.  All work is done
 * under the dock serialization.
 */
static void
acpi_dock_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context)
{
    device_t dev;

    dev = (device_t) context;
    ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
		"got notification %#x\n", notify);

    ACPI_SERIAL_BEGIN(dock);
    switch (notify) {
    case ACPI_NOTIFY_BUS_CHECK:
    case ACPI_NOTIFY_DEVICE_CHECK:
	acpi_dock_device_check(dev);
	break;
    case ACPI_NOTIFY_EJECT_REQUEST:
	acpi_dock_removal(dev);
	break;
    default:
	device_printf(dev, "unknown notify %#x\n", notify);
	break;
    }
    ACPI_SERIAL_END(dock);
}
+
+static int
+acpi_dock_status_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ struct acpi_dock_softc *sc;
+ device_t dev;
+ int status, err;
+
+ dev = (device_t)arg1;
+
+ sc = device_get_softc(dev);
+ status = sc->status;
+
+ ACPI_SERIAL_BEGIN(dock);
+ err = sysctl_handle_int(oidp, &status, 0, req);
+ if (err != 0 || req->newptr == NULL)
+ goto out;
+
+ if (status != ACPI_DOCK_STATUS_UNDOCKED &&
+ status != ACPI_DOCK_STATUS_DOCKED) {
+ err = EINVAL;
+ goto out;
+ }
+
+ if (status == sc->status)
+ goto out;
+
+ switch (status) {
+ case ACPI_DOCK_STATUS_UNDOCKED:
+ acpi_dock_removal(dev);
+ break;
+ case ACPI_DOCK_STATUS_DOCKED:
+ acpi_dock_device_check(dev);
+ break;
+ default:
+ err = EINVAL;
+ break;
+ }
+out:
+ ACPI_SERIAL_END(dock);
+ return (err);
+}
+
+static int
+acpi_dock_probe(device_t dev)
+{
+ ACPI_HANDLE h, tmp;
+
+ h = acpi_get_handle(dev);
+ if (acpi_disabled("dock") ||
+ ACPI_FAILURE(AcpiGetHandle(h, "_DCK", &tmp)))
+ return (ENXIO);
+
+ device_set_desc(dev, "ACPI Docking Station");
+
+ /*
+ * XXX Somewhere else in the kernel panics on "sysctl kern" if we
+ * return a negative value here (reprobe ok).
+ */
+ return (0);
+}
+
/*
 * Attach: run _INI on the dock, evaluate the initial docking state,
 * publish the _STA/_BDN/_UID and status sysctls, and register for all
 * ACPI notifications on the dock handle.
 */
static int
acpi_dock_attach(device_t dev)
{
    struct acpi_dock_softc *sc;
    ACPI_HANDLE	h;

    sc = device_get_softc(dev);
    h = acpi_get_handle(dev);
    if (sc == NULL || h == NULL)
	return (ENXIO);

    sc->status = ACPI_DOCK_STATUS_UNKNOWN;

    /* Run the dock's initializer method before touching its state. */
    AcpiEvaluateObject(h, "_INI", NULL, NULL);

    ACPI_SERIAL_BEGIN(dock);

    acpi_dock_device_check(dev);

    /* Get the sysctl tree */
    sc->sysctl_ctx = device_get_sysctl_ctx(dev);
    sc->sysctl_tree = device_get_sysctl_tree(dev);

    SYSCTL_ADD_INT(sc->sysctl_ctx,
	SYSCTL_CHILDREN(sc->sysctl_tree),
	OID_AUTO, "_sta", CTLFLAG_RD,
	&sc->_sta, 0, "Dock _STA");
    SYSCTL_ADD_INT(sc->sysctl_ctx,
	SYSCTL_CHILDREN(sc->sysctl_tree),
	OID_AUTO, "_bdn", CTLFLAG_RD,
	&sc->_bdn, 0, "Dock _BDN");
    SYSCTL_ADD_INT(sc->sysctl_ctx,
	SYSCTL_CHILDREN(sc->sysctl_tree),
	OID_AUTO, "_uid", CTLFLAG_RD,
	&sc->_uid, 0, "Dock _UID");
    SYSCTL_ADD_PROC(sc->sysctl_ctx,
	SYSCTL_CHILDREN(sc->sysctl_tree),
	OID_AUTO, "status",
	CTLTYPE_INT|CTLFLAG_RW, dev, 0,
	acpi_dock_status_sysctl, "I",
	"Dock/Undock operation");

    ACPI_SERIAL_END(dock);

    AcpiInstallNotifyHandler(h, ACPI_ALL_NOTIFY,
			     acpi_dock_notify_handler, dev);

    return (0);
}
+
/* Newbus glue: probe/attach only; the dock has no detach support. */
static device_method_t acpi_dock_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, acpi_dock_probe),
    DEVMETHOD(device_attach, acpi_dock_attach),

    {0, 0}
};

static driver_t	acpi_dock_driver = {
    "acpi_dock",
    acpi_dock_methods,
    sizeof(struct acpi_dock_softc),
};

static devclass_t acpi_dock_devclass;

/* Attach under the acpi bus; depends on the acpi module being loaded. */
DRIVER_MODULE(acpi_dock, acpi, acpi_dock_driver, acpi_dock_devclass, 0, 0);
MODULE_DEPEND(acpi_dock, acpi, 1, 1, 1);
+
diff --git a/sys/dev/acpica/acpi_ec.c b/sys/dev/acpica/acpi_ec.c
new file mode 100644
index 0000000..00a3073
--- /dev/null
+++ b/sys/dev/acpica/acpi_ec.c
@@ -0,0 +1,1023 @@
+/*-
+ * Copyright (c) 2003-2007 Nate Lawson
+ * Copyright (c) 2000 Michael Smith
+ * Copyright (c) 2000 BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_acpi.h"
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/sx.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/rman.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+
+#include <dev/acpica/acpivar.h>
+
+/* Hooks for the ACPI CA debugging infrastructure */
+#define _COMPONENT ACPI_EC
+ACPI_MODULE_NAME("EC")
+
+/*
+ * EC_COMMAND:
+ * -----------
+ */
+typedef UINT8 EC_COMMAND;
+
+#define EC_COMMAND_UNKNOWN ((EC_COMMAND) 0x00)
+#define EC_COMMAND_READ ((EC_COMMAND) 0x80)
+#define EC_COMMAND_WRITE ((EC_COMMAND) 0x81)
+#define EC_COMMAND_BURST_ENABLE ((EC_COMMAND) 0x82)
+#define EC_COMMAND_BURST_DISABLE ((EC_COMMAND) 0x83)
+#define EC_COMMAND_QUERY ((EC_COMMAND) 0x84)
+
+/*
+ * EC_STATUS:
+ * ----------
+ * The encoding of the EC status register is illustrated below.
+ * Note that a set bit (1) indicates the property is TRUE
+ * (e.g. if bit 0 is set then the output buffer is full).
+ * +-+-+-+-+-+-+-+-+
+ * |7|6|5|4|3|2|1|0|
+ * +-+-+-+-+-+-+-+-+
+ * | | | | | | | |
+ * | | | | | | | +- Output Buffer Full?
+ * | | | | | | +--- Input Buffer Full?
+ * | | | | | +----- <reserved>
+ * | | | | +------- Data Register is Command Byte?
+ * | | | +--------- Burst Mode Enabled?
+ * | | +----------- SCI Event?
+ * | +------------- SMI Event?
+ * +--------------- <reserved>
+ *
+ */
+typedef UINT8 EC_STATUS;
+
+#define EC_FLAG_OUTPUT_BUFFER ((EC_STATUS) 0x01)
+#define EC_FLAG_INPUT_BUFFER ((EC_STATUS) 0x02)
+#define EC_FLAG_DATA_IS_CMD ((EC_STATUS) 0x08)
+#define EC_FLAG_BURST_MODE ((EC_STATUS) 0x10)
+
+/*
+ * EC_EVENT:
+ * ---------
+ */
+typedef UINT8 EC_EVENT;
+
+#define EC_EVENT_UNKNOWN ((EC_EVENT) 0x00)
+#define EC_EVENT_OUTPUT_BUFFER_FULL ((EC_EVENT) 0x01)
+#define EC_EVENT_INPUT_BUFFER_EMPTY ((EC_EVENT) 0x02)
+#define EC_EVENT_SCI ((EC_EVENT) 0x20)
+#define EC_EVENT_SMI ((EC_EVENT) 0x40)
+
+/* Data byte returned after burst enable indicating it was successful. */
+#define EC_BURST_ACK 0x90
+
+/*
+ * Register access primitives
+ */
+#define EC_GET_DATA(sc) \
+ bus_space_read_1((sc)->ec_data_tag, (sc)->ec_data_handle, 0)
+
+#define EC_SET_DATA(sc, v) \
+ bus_space_write_1((sc)->ec_data_tag, (sc)->ec_data_handle, 0, (v))
+
+#define EC_GET_CSR(sc) \
+ bus_space_read_1((sc)->ec_csr_tag, (sc)->ec_csr_handle, 0)
+
+#define EC_SET_CSR(sc, v) \
+ bus_space_write_1((sc)->ec_csr_tag, (sc)->ec_csr_handle, 0, (v))
+
+/* Additional params to pass from the probe routine */
+struct acpi_ec_params {
+ int glk; /* nonzero: take the ACPI global lock around EC access */
+ int gpe_bit; /* GPE number the EC uses to signal events */
+ ACPI_HANDLE gpe_handle; /* GPE block device; NULL selects the FADT GPE block */
+ int uid; /* _UID / ECDT unit id, used to detect duplicate probes */
+};
+
+/*
+ * Driver softc.
+ */
+struct acpi_ec_softc {
+ device_t ec_dev;
+ ACPI_HANDLE ec_handle;
+ int ec_uid;
+ ACPI_HANDLE ec_gpehandle;
+ UINT8 ec_gpebit;
+
+ /* Data register I/O port resource (rid 0). */
+ int ec_data_rid;
+ struct resource *ec_data_res;
+ bus_space_tag_t ec_data_tag;
+ bus_space_handle_t ec_data_handle;
+
+ /* Command/status register I/O port resource (rid 1). */
+ int ec_csr_rid;
+ struct resource *ec_csr_res;
+ bus_space_tag_t ec_csr_tag;
+ bus_space_handle_t ec_csr_handle;
+
+ int ec_glk; /* copy of probe-time _GLK value */
+ int ec_glkhandle; /* cookie from AcpiAcquireGlobalLock() */
+ int ec_burstactive; /* we believe the EC is in burst mode */
+ int ec_sci_pend; /* a query handler is queued but not yet run */
+ volatile u_int ec_gencount; /* bumped by the GPE handler on each event */
+ int ec_suspending; /* set during suspend; forces polled waits */
+};
+
+/*
+ * XXX njl
+ * I couldn't find it in the spec but other implementations also use a
+ * value of 1 ms for the time to acquire global lock.
+ */
+#define EC_LOCK_TIMEOUT 1000
+
+/* Default delay in microseconds between each run of the status polling loop. */
+#define EC_POLL_DELAY 50
+
+/* Total time in ms spent waiting for a response from EC. */
+#define EC_TIMEOUT 750
+
+#define EVENT_READY(event, status) \
+ (((event) == EC_EVENT_OUTPUT_BUFFER_FULL && \
+ ((status) & EC_FLAG_OUTPUT_BUFFER) != 0) || \
+ ((event) == EC_EVENT_INPUT_BUFFER_EMPTY && \
+ ((status) & EC_FLAG_INPUT_BUFFER) == 0))
+
+ACPI_SERIAL_DECL(ec, "ACPI embedded controller");
+
+static SYSCTL_NODE(_debug_acpi, OID_AUTO, ec, CTLFLAG_RD, NULL, "EC debugging");
+
+static int ec_burst_mode;
+TUNABLE_INT("debug.acpi.ec.burst", &ec_burst_mode);
+SYSCTL_INT(_debug_acpi_ec, OID_AUTO, burst, CTLFLAG_RW, &ec_burst_mode, 0,
+ "Enable use of burst mode (faster for nearly all systems)");
+static int ec_polled_mode;
+TUNABLE_INT("debug.acpi.ec.polled", &ec_polled_mode);
+SYSCTL_INT(_debug_acpi_ec, OID_AUTO, polled, CTLFLAG_RW, &ec_polled_mode, 0,
+ "Force use of polled mode (only if interrupt mode doesn't work)");
+static int ec_timeout = EC_TIMEOUT;
+TUNABLE_INT("debug.acpi.ec.timeout", &ec_timeout);
+SYSCTL_INT(_debug_acpi_ec, OID_AUTO, timeout, CTLFLAG_RW, &ec_timeout,
+ EC_TIMEOUT, "Total time spent waiting for a response (poll+sleep)");
+
+/*
+ * Acquire exclusive access to the EC.  When the EC declares _GLK the
+ * hardware is shared with SMM firmware, so the ACPI global lock is
+ * taken first; only then do we enter the "ec" serialization region.
+ * On global-lock failure the serial region is NOT entered and the
+ * caller must not touch the EC.
+ */
+static ACPI_STATUS
+EcLock(struct acpi_ec_softc *sc)
+{
+ ACPI_STATUS status;
+
+ /* If _GLK is non-zero, acquire the global lock. */
+ status = AE_OK;
+ if (sc->ec_glk) {
+ status = AcpiAcquireGlobalLock(EC_LOCK_TIMEOUT, &sc->ec_glkhandle);
+ if (ACPI_FAILURE(status))
+ return (status);
+ }
+ ACPI_SERIAL_BEGIN(ec);
+ return (status);
+}
+
+/*
+ * Release EC access in the reverse order of EcLock(): leave the serial
+ * region first, then drop the global lock if we hold it.
+ */
+static void
+EcUnlock(struct acpi_ec_softc *sc)
+{
+ ACPI_SERIAL_END(ec);
+ if (sc->ec_glk)
+ AcpiReleaseGlobalLock(sc->ec_glkhandle);
+}
+
+static UINT32 EcGpeHandler(ACPI_HANDLE, UINT32, void *);
+static ACPI_STATUS EcSpaceSetup(ACPI_HANDLE Region, UINT32 Function,
+ void *Context, void **return_Context);
+static ACPI_STATUS EcSpaceHandler(UINT32 Function,
+ ACPI_PHYSICAL_ADDRESS Address,
+ UINT32 Width, UINT64 *Value,
+ void *Context, void *RegionContext);
+static ACPI_STATUS EcWaitEvent(struct acpi_ec_softc *sc, EC_EVENT Event,
+ u_int gen_count);
+static ACPI_STATUS EcCommand(struct acpi_ec_softc *sc, EC_COMMAND cmd);
+static ACPI_STATUS EcRead(struct acpi_ec_softc *sc, UINT8 Address,
+ UINT8 *Data);
+static ACPI_STATUS EcWrite(struct acpi_ec_softc *sc, UINT8 Address,
+ UINT8 Data);
+static int acpi_ec_probe(device_t dev);
+static int acpi_ec_attach(device_t dev);
+static int acpi_ec_suspend(device_t dev);
+static int acpi_ec_resume(device_t dev);
+static int acpi_ec_shutdown(device_t dev);
+static int acpi_ec_read_method(device_t dev, u_int addr,
+ UINT64 *val, int width);
+static int acpi_ec_write_method(device_t dev, u_int addr,
+ UINT64 val, int width);
+
+/*
+ * Newbus glue.  Besides the standard device lifecycle methods, the EC
+ * exports acpi_ec_read/acpi_ec_write so siblings (e.g. smart battery)
+ * can access EC address space directly.
+ */
+static device_method_t acpi_ec_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, acpi_ec_probe),
+ DEVMETHOD(device_attach, acpi_ec_attach),
+ DEVMETHOD(device_suspend, acpi_ec_suspend),
+ DEVMETHOD(device_resume, acpi_ec_resume),
+ DEVMETHOD(device_shutdown, acpi_ec_shutdown),
+
+ /* Embedded controller interface */
+ DEVMETHOD(acpi_ec_read, acpi_ec_read_method),
+ DEVMETHOD(acpi_ec_write, acpi_ec_write_method),
+
+ {0, 0}
+};
+
+static driver_t acpi_ec_driver = {
+ "acpi_ec",
+ acpi_ec_methods,
+ sizeof(struct acpi_ec_softc),
+};
+
+static devclass_t acpi_ec_devclass;
+DRIVER_MODULE(acpi_ec, acpi, acpi_ec_driver, acpi_ec_devclass, 0, 0);
+MODULE_DEPEND(acpi_ec, acpi, 1, 1, 1);
+
+/*
+ * Look for an ECDT and if we find one, set up default GPE and
+ * space handlers to catch attempts to access EC space before
+ * we have a real driver instance in place.
+ *
+ * TODO: Some old Gateway laptops need us to fake up an ECDT or
+ * otherwise attach early so that _REG methods can run.
+ */
+void
+acpi_ec_ecdt_probe(device_t parent)
+{
+ ACPI_TABLE_ECDT *ecdt;
+ ACPI_STATUS status;
+ device_t child;
+ ACPI_HANDLE h;
+ struct acpi_ec_params *params;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ /* Find and validate the ECDT. */
+ /* The EC interface only defines 8-bit data and control registers. */
+ status = AcpiGetTable(ACPI_SIG_ECDT, 1, (ACPI_TABLE_HEADER **)&ecdt);
+ if (ACPI_FAILURE(status) ||
+ ecdt->Control.BitWidth != 8 ||
+ ecdt->Data.BitWidth != 8) {
+ return;
+ }
+
+ /* Create the child device with the given unit number. */
+ child = BUS_ADD_CHILD(parent, 3, "acpi_ec", ecdt->Uid);
+ if (child == NULL) {
+ printf("%s: can't add child\n", __func__);
+ return;
+ }
+
+ /* Find and save the ACPI handle for this device. */
+ status = AcpiGetHandle(NULL, ecdt->Id, &h);
+ if (ACPI_FAILURE(status)) {
+ device_delete_child(parent, child);
+ printf("%s: can't get handle\n", __func__);
+ return;
+ }
+ acpi_set_handle(child, h);
+
+ /* Set the data and CSR register addresses. */
+ bus_set_resource(child, SYS_RES_IOPORT, 0, ecdt->Data.Address,
+ /*count*/1);
+ bus_set_resource(child, SYS_RES_IOPORT, 1, ecdt->Control.Address,
+ /*count*/1);
+
+ /*
+ * Store values for the probe/attach routines to use. Store the
+ * ECDT GPE bit and set the global lock flag according to _GLK.
+ * Note that it is not perfectly correct to be evaluating a method
+ * before initializing devices, but in practice this function
+ * should be safe to call at this point.
+ */
+ /* M_WAITOK | M_ZERO cannot fail, so no NULL check is needed. */
+ params = malloc(sizeof(struct acpi_ec_params), M_TEMP, M_WAITOK | M_ZERO);
+ params->gpe_handle = NULL;
+ params->gpe_bit = ecdt->Gpe;
+ params->uid = ecdt->Uid;
+ /* On failure params->glk stays 0 from M_ZERO (no global lock). */
+ acpi_GetInteger(h, "_GLK", &params->glk);
+ acpi_set_private(child, params);
+
+ /* Finish the attach process. */
+ if (device_probe_and_attach(child) != 0)
+ device_delete_child(parent, child);
+}
+
+/*
+ * Probe routine.  Succeeds either because an ECDT probe already stored
+ * params via acpi_set_private(), or because the device matched PNP0C09
+ * and we could pull _UID/_GLK/_GPE from the namespace.  A namespace hit
+ * whose unit duplicates an already-alive ECDT instance is disabled.
+ * On success, ownership of "params" passes to acpi_ec_attach() through
+ * the device private pointer; on failure it is freed here.
+ */
+static int
+acpi_ec_probe(device_t dev)
+{
+ ACPI_BUFFER buf;
+ ACPI_HANDLE h;
+ ACPI_OBJECT *obj;
+ ACPI_STATUS status;
+ device_t peer;
+ char desc[64];
+ int ecdt;
+ int ret;
+ struct acpi_ec_params *params;
+ static char *ec_ids[] = { "PNP0C09", NULL };
+
+ /* Check that this is a device and that EC is not disabled. */
+ if (acpi_get_type(dev) != ACPI_TYPE_DEVICE || acpi_disabled("ec"))
+ return (ENXIO);
+
+ /*
+ * If probed via ECDT, set description and continue. Otherwise,
+ * we can access the namespace and make sure this is not a
+ * duplicate probe.
+ */
+ ret = ENXIO;
+ ecdt = 0;
+ buf.Pointer = NULL;
+ buf.Length = ACPI_ALLOCATE_BUFFER;
+ params = acpi_get_private(dev);
+ if (params != NULL) {
+ ecdt = 1;
+ ret = 0;
+ } else if (ACPI_ID_PROBE(device_get_parent(dev), dev, ec_ids)) {
+ params = malloc(sizeof(struct acpi_ec_params), M_TEMP,
+ M_WAITOK | M_ZERO);
+ h = acpi_get_handle(dev);
+
+ /*
+ * Read the unit ID to check for duplicate attach and the
+ * global lock value to see if we should acquire it when
+ * accessing the EC.
+ */
+ status = acpi_GetInteger(h, "_UID", &params->uid);
+ if (ACPI_FAILURE(status))
+ params->uid = 0;
+ status = acpi_GetInteger(h, "_GLK", &params->glk);
+ if (ACPI_FAILURE(status))
+ params->glk = 0;
+
+ /*
+ * Evaluate the _GPE method to find the GPE bit used by the EC to
+ * signal status (SCI). If it's a package, it contains a reference
+ * and GPE bit, similar to _PRW.
+ */
+ status = AcpiEvaluateObject(h, "_GPE", NULL, &buf);
+ if (ACPI_FAILURE(status)) {
+ device_printf(dev, "can't evaluate _GPE - %s\n",
+ AcpiFormatException(status));
+ goto out;
+ }
+ obj = (ACPI_OBJECT *)buf.Pointer;
+ if (obj == NULL)
+ goto out;
+
+ switch (obj->Type) {
+ case ACPI_TYPE_INTEGER:
+ /* Plain integer: GPE bit in the default FADT GPE block. */
+ params->gpe_handle = NULL;
+ params->gpe_bit = obj->Integer.Value;
+ break;
+ case ACPI_TYPE_PACKAGE:
+ /* Package form: {reference to GPE block device, bit number}. */
+ if (!ACPI_PKG_VALID(obj, 2))
+ goto out;
+ params->gpe_handle =
+ acpi_GetReference(NULL, &obj->Package.Elements[0]);
+ if (params->gpe_handle == NULL ||
+ acpi_PkgInt32(obj, 1, &params->gpe_bit) != 0)
+ goto out;
+ break;
+ default:
+ device_printf(dev, "_GPE has invalid type %d\n", obj->Type);
+ goto out;
+ }
+
+ /* Store the values we got from the namespace for attach. */
+ acpi_set_private(dev, params);
+
+ /*
+ * Check for a duplicate probe. This can happen when a probe
+ * via ECDT succeeded already. If this is a duplicate, disable
+ * this device.
+ */
+ peer = devclass_get_device(acpi_ec_devclass, params->uid);
+ if (peer == NULL || !device_is_alive(peer))
+ ret = 0;
+ else
+ device_disable(dev);
+ }
+
+out:
+ if (ret == 0) {
+ snprintf(desc, sizeof(desc), "Embedded Controller: GPE %#x%s%s",
+ params->gpe_bit, (params->glk) ? ", GLK" : "",
+ ecdt ? ", ECDT" : "");
+ device_set_desc_copy(dev, desc);
+ }
+
+ /* ret > 0 means a namespace probe failed: params is still ours. */
+ if (ret > 0 && params)
+ free(params, M_TEMP);
+ if (buf.Pointer)
+ AcpiOsFree(buf.Pointer);
+ return (ret);
+}
+
+/*
+ * Attach routine.  Consumes the params left by the probe, maps the data
+ * and command/status I/O ports, installs the GPE handler and the EC
+ * address space handler, and finally enables the runtime GPE.  The
+ * shared error path releases whatever was set up; removing handlers
+ * that were never installed is tolerated by ACPICA (it just fails).
+ */
+static int
+acpi_ec_attach(device_t dev)
+{
+ struct acpi_ec_softc *sc;
+ struct acpi_ec_params *params;
+ ACPI_STATUS Status;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ /* Fetch/initialize softc (assumes softc is pre-zeroed). */
+ sc = device_get_softc(dev);
+ params = acpi_get_private(dev);
+ sc->ec_dev = dev;
+ sc->ec_handle = acpi_get_handle(dev);
+
+ /* Retrieve previously probed values via device ivars. */
+ sc->ec_glk = params->glk;
+ sc->ec_gpebit = params->gpe_bit;
+ sc->ec_gpehandle = params->gpe_handle;
+ sc->ec_uid = params->uid;
+ sc->ec_suspending = FALSE;
+ /* params was only a probe-to-attach hand-off; done with it now. */
+ acpi_set_private(dev, NULL);
+ free(params, M_TEMP);
+
+ /* Attach bus resources for data and command/status ports. */
+ sc->ec_data_rid = 0;
+ sc->ec_data_res = bus_alloc_resource_any(sc->ec_dev, SYS_RES_IOPORT,
+ &sc->ec_data_rid, RF_ACTIVE);
+ if (sc->ec_data_res == NULL) {
+ device_printf(dev, "can't allocate data port\n");
+ goto error;
+ }
+ sc->ec_data_tag = rman_get_bustag(sc->ec_data_res);
+ sc->ec_data_handle = rman_get_bushandle(sc->ec_data_res);
+
+ sc->ec_csr_rid = 1;
+ sc->ec_csr_res = bus_alloc_resource_any(sc->ec_dev, SYS_RES_IOPORT,
+ &sc->ec_csr_rid, RF_ACTIVE);
+ if (sc->ec_csr_res == NULL) {
+ device_printf(dev, "can't allocate command/status port\n");
+ goto error;
+ }
+ sc->ec_csr_tag = rman_get_bustag(sc->ec_csr_res);
+ sc->ec_csr_handle = rman_get_bushandle(sc->ec_csr_res);
+
+ /*
+ * Install a handler for this EC's GPE bit. We want edge-triggered
+ * behavior.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "attaching GPE handler\n"));
+ Status = AcpiInstallGpeHandler(sc->ec_gpehandle, sc->ec_gpebit,
+ ACPI_GPE_EDGE_TRIGGERED, EcGpeHandler, sc);
+ if (ACPI_FAILURE(Status)) {
+ device_printf(dev, "can't install GPE handler for %s - %s\n",
+ acpi_name(sc->ec_handle), AcpiFormatException(Status));
+ goto error;
+ }
+
+ /*
+ * Install address space handler
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "attaching address space handler\n"));
+ Status = AcpiInstallAddressSpaceHandler(sc->ec_handle, ACPI_ADR_SPACE_EC,
+ &EcSpaceHandler, &EcSpaceSetup, sc);
+ if (ACPI_FAILURE(Status)) {
+ device_printf(dev, "can't install address space handler for %s - %s\n",
+ acpi_name(sc->ec_handle), AcpiFormatException(Status));
+ goto error;
+ }
+
+ /* Enable runtime GPEs for the handler. */
+ Status = AcpiEnableGpe(sc->ec_gpehandle, sc->ec_gpebit);
+ if (ACPI_FAILURE(Status)) {
+ device_printf(dev, "AcpiEnableGpe failed: %s\n",
+ AcpiFormatException(Status));
+ goto error;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "acpi_ec_attach complete\n"));
+ return (0);
+
+error:
+ AcpiRemoveGpeHandler(sc->ec_gpehandle, sc->ec_gpebit, EcGpeHandler);
+ AcpiRemoveAddressSpaceHandler(sc->ec_handle, ACPI_ADR_SPACE_EC,
+ EcSpaceHandler);
+ if (sc->ec_csr_res)
+ bus_release_resource(sc->ec_dev, SYS_RES_IOPORT, sc->ec_csr_rid,
+ sc->ec_csr_res);
+ if (sc->ec_data_res)
+ bus_release_resource(sc->ec_dev, SYS_RES_IOPORT, sc->ec_data_rid,
+ sc->ec_data_res);
+ return (ENXIO);
+}
+
+/*
+ * Suspend method: mark the EC as suspending so subsequent waits use
+ * polling instead of sleeping on GPE wakeups.
+ */
+static int
+acpi_ec_suspend(device_t dev)
+{
+ struct acpi_ec_softc *sc = device_get_softc(dev);
+
+ sc->ec_suspending = TRUE;
+ return (0);
+}
+
+/*
+ * Resume method: clear the suspending flag set by acpi_ec_suspend()
+ * so interrupt-driven waits are allowed again.
+ */
+static int
+acpi_ec_resume(device_t dev)
+{
+ struct acpi_ec_softc *sc = device_get_softc(dev);
+
+ sc->ec_suspending = FALSE;
+ return (0);
+}
+
+/* Disable the GPE so we don't get EC events during shutdown. */
+static int
+acpi_ec_shutdown(device_t dev)
+{
+ struct acpi_ec_softc *sc = device_get_softc(dev);
+
+ AcpiDisableGpe(sc->ec_gpehandle, sc->ec_gpebit);
+ return (0);
+}
+
+/* Methods to allow other devices (e.g., smbat) to read/write EC space. */
+/* Methods to allow other devices (e.g., smbat) to read/write EC space. */
+static int
+acpi_ec_read_method(device_t dev, u_int addr, UINT64 *val, int width)
+{
+ struct acpi_ec_softc *sc = device_get_softc(dev);
+ ACPI_STATUS status;
+
+ /* Funnel the request through the regular address space handler. */
+ status = EcSpaceHandler(ACPI_READ, addr, width * 8, val, sc, NULL);
+ return (ACPI_FAILURE(status) ? ENXIO : 0);
+}
+
+/* Write "width" bytes of "val" into EC space on behalf of a sibling device. */
+static int
+acpi_ec_write_method(device_t dev, u_int addr, UINT64 val, int width)
+{
+ struct acpi_ec_softc *sc = device_get_softc(dev);
+ ACPI_STATUS status;
+
+ /* Funnel the request through the regular address space handler. */
+ status = EcSpaceHandler(ACPI_WRITE, addr, width * 8, &val, sc, NULL);
+ return (ACPI_FAILURE(status) ? ENXIO : 0);
+}
+
+/*
+ * Sample the EC status register once and decide whether "event" has
+ * occurred (see EVENT_READY).  As a side effect, notices when the EC
+ * dropped out of burst mode on its own and clears our cached flag.
+ * Returns AE_OK when ready, AE_NO_HARDWARE_RESPONSE otherwise.  "msg"
+ * only tags the KTR trace records.
+ */
+static ACPI_STATUS
+EcCheckStatus(struct acpi_ec_softc *sc, const char *msg, EC_EVENT event)
+{
+ ACPI_STATUS status;
+ EC_STATUS ec_status;
+
+ status = AE_NO_HARDWARE_RESPONSE;
+ ec_status = EC_GET_CSR(sc);
+ if (sc->ec_burstactive && !(ec_status & EC_FLAG_BURST_MODE)) {
+ CTR1(KTR_ACPI, "ec burst disabled in waitevent (%s)", msg);
+ sc->ec_burstactive = FALSE;
+ }
+ if (EVENT_READY(event, ec_status)) {
+ CTR2(KTR_ACPI, "ec %s wait ready, status %#x", msg, ec_status);
+ status = AE_OK;
+ }
+ return (status);
+}
+
+/*
+ * Deferred SCI handler, queued by EcGpeHandler() (or called directly
+ * when cold/rebooting/suspending).  Issues a QUERY command to learn
+ * which _Qxx event the EC is signaling, then evaluates that method
+ * outside the EC lock, since _Qxx may itself access EC space.
+ */
+static void
+EcGpeQueryHandler(void *Context)
+{
+ struct acpi_ec_softc *sc = (struct acpi_ec_softc *)Context;
+ UINT8 Data;
+ ACPI_STATUS Status;
+ int retry, sci_enqueued;
+ char qxx[5];
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+ KASSERT(Context != NULL, ("EcGpeQueryHandler called with NULL"));
+
+ /* Serialize user access with EcSpaceHandler(). */
+ Status = EcLock(sc);
+ if (ACPI_FAILURE(Status)) {
+ device_printf(sc->ec_dev, "GpeQuery lock error: %s\n",
+ AcpiFormatException(Status));
+ return;
+ }
+
+ /*
+ * Send a query command to the EC to find out which _Qxx call it
+ * wants to make. This command clears the SCI bit and also the
+ * interrupt source since we are edge-triggered. To prevent the GPE
+ * that may arise from running the query from causing another query
+ * to be queued, we clear the pending flag only after running it.
+ */
+ sci_enqueued = sc->ec_sci_pend;
+ /* One retry if the failure left the input buffer empty (EC idle). */
+ for (retry = 0; retry < 2; retry++) {
+ Status = EcCommand(sc, EC_COMMAND_QUERY);
+ if (ACPI_SUCCESS(Status))
+ break;
+ if (ACPI_SUCCESS(EcCheckStatus(sc, "retr_check",
+ EC_EVENT_INPUT_BUFFER_EMPTY)))
+ continue;
+ else
+ break;
+ }
+ sc->ec_sci_pend = FALSE;
+ if (ACPI_FAILURE(Status)) {
+ EcUnlock(sc);
+ device_printf(sc->ec_dev, "GPE query failed: %s\n",
+ AcpiFormatException(Status));
+ return;
+ }
+ Data = EC_GET_DATA(sc);
+
+ /*
+ * We have to unlock before running the _Qxx method below since that
+ * method may attempt to read/write from EC address space, causing
+ * recursive acquisition of the lock.
+ */
+ EcUnlock(sc);
+
+ /* Ignore the value for "no outstanding event". (13.3.5) */
+ CTR2(KTR_ACPI, "ec query ok,%s running _Q%02X", Data ? "" : " not", Data);
+ if (Data == 0)
+ return;
+
+ /* Evaluate _Qxx to respond to the controller. */
+ snprintf(qxx, sizeof(qxx), "_Q%02X", Data);
+ AcpiUtStrupr(qxx);
+ Status = AcpiEvaluateObject(sc->ec_handle, qxx, NULL, NULL);
+ /* Missing _Qxx methods are common and not an error. */
+ if (ACPI_FAILURE(Status) && Status != AE_NOT_FOUND) {
+ device_printf(sc->ec_dev, "evaluation of query method %s failed: %s\n",
+ qxx, AcpiFormatException(Status));
+ }
+
+ /* Reenable runtime GPE if its execution was deferred. */
+ if (sci_enqueued) {
+ Status = AcpiFinishGpe(sc->ec_gpehandle, sc->ec_gpebit);
+ if (ACPI_FAILURE(Status))
+ device_printf(sc->ec_dev, "reenabling runtime GPE failed: %s\n",
+ AcpiFormatException(Status));
+ }
+}
+
+/*
+ * The GPE handler is called when IBE/OBF or SCI events occur. We are
+ * called from an unknown lock context.
+ */
+/*
+ * The GPE handler is called when IBE/OBF or SCI events occur. We are
+ * called from an unknown lock context.
+ */
+static UINT32
+EcGpeHandler(ACPI_HANDLE GpeDevice, UINT32 GpeNumber, void *Context)
+{
+ struct acpi_ec_softc *sc = Context;
+ ACPI_STATUS Status;
+ EC_STATUS EcStatus;
+
+ KASSERT(Context != NULL, ("EcGpeHandler called with NULL"));
+ CTR0(KTR_ACPI, "ec gpe handler start");
+
+ /*
+ * Notify EcWaitEvent() that the status register is now fresh. If we
+ * didn't do this, it wouldn't be possible to distinguish an old IBE
+ * from a new one, for example when doing a write transaction (writing
+ * address and then data values.)
+ */
+ atomic_add_int(&sc->ec_gencount, 1);
+ wakeup(sc);
+
+ /*
+ * If the EC_SCI bit of the status register is set, queue a query handler.
+ * It will run the query and _Qxx method later, under the lock.
+ */
+ EcStatus = EC_GET_CSR(sc);
+ if ((EcStatus & EC_EVENT_SCI) && !sc->ec_sci_pend) {
+ CTR0(KTR_ACPI, "ec gpe queueing query handler");
+ Status = AcpiOsExecute(OSL_GPE_HANDLER, EcGpeQueryHandler, Context);
+ if (ACPI_SUCCESS(Status)) {
+ sc->ec_sci_pend = TRUE;
+ /*
+ * Return 0 (not ACPI_REENABLE_GPE): the queued query handler
+ * reenables the GPE via AcpiFinishGpe() once it has run.
+ */
+ return (0);
+ } else
+ printf("EcGpeHandler: queuing GPE query handler failed\n");
+ }
+ return (ACPI_REENABLE_GPE);
+}
+
+/*
+ * Region setup callback for the EC address space handler.  Deactivation
+ * clears the region context; any other request simply passes our softc
+ * (Context) through unchanged.
+ */
+static ACPI_STATUS
+EcSpaceSetup(ACPI_HANDLE Region, UINT32 Function, void *Context,
+ void **RegionContext)
+{
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ *RegionContext = (Function == ACPI_REGION_DEACTIVATE) ? NULL : Context;
+
+ return_ACPI_STATUS (AE_OK);
+}
+
+/*
+ * EC address space handler: performs a read or write of Width bits
+ * (a multiple of 8) at Address, one byte at a time, within the EC's
+ * 256-byte space.  Tries burst mode for speed but proceeds without it
+ * on failure.  Also services a pending SCI directly when the taskq is
+ * unavailable (cold boot, reboot, suspend).
+ */
+static ACPI_STATUS
+EcSpaceHandler(UINT32 Function, ACPI_PHYSICAL_ADDRESS Address, UINT32 Width,
+ UINT64 *Value, void *Context, void *RegionContext)
+{
+ struct acpi_ec_softc *sc = (struct acpi_ec_softc *)Context;
+ ACPI_PHYSICAL_ADDRESS EcAddr;
+ UINT8 *EcData;
+ ACPI_STATUS Status;
+
+ ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, (UINT32)Address);
+
+ if (Function != ACPI_READ && Function != ACPI_WRITE)
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
+ if (Width % 8 != 0 || Value == NULL || Context == NULL)
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
+ if (Address + Width / 8 > 256)
+ return_ACPI_STATUS (AE_BAD_ADDRESS);
+
+ /*
+ * If booting, check if we need to run the query handler. If so, we
+ * we call it directly here since our thread taskq is not active yet.
+ */
+ if (cold || rebooting || sc->ec_suspending) {
+ if ((EC_GET_CSR(sc) & EC_EVENT_SCI)) {
+ CTR0(KTR_ACPI, "ec running gpe handler directly");
+ EcGpeQueryHandler(sc);
+ }
+ }
+
+ /* Serialize with EcGpeQueryHandler() at transaction granularity. */
+ Status = EcLock(sc);
+ if (ACPI_FAILURE(Status))
+ return_ACPI_STATUS (Status);
+
+ /* If we can't start burst mode, continue anyway. */
+ Status = EcCommand(sc, EC_COMMAND_BURST_ENABLE);
+ if (ACPI_SUCCESS(Status)) {
+ if (EC_GET_DATA(sc) == EC_BURST_ACK) {
+ CTR0(KTR_ACPI, "ec burst enabled");
+ sc->ec_burstactive = TRUE;
+ }
+ }
+
+ /* Perform the transaction(s), based on Width. */
+ EcAddr = Address;
+ EcData = (UINT8 *)Value;
+ /* Zero the output first so unread high bytes are well-defined. */
+ if (Function == ACPI_READ)
+ *Value = 0;
+ do {
+ switch (Function) {
+ case ACPI_READ:
+ Status = EcRead(sc, EcAddr, EcData);
+ break;
+ case ACPI_WRITE:
+ Status = EcWrite(sc, EcAddr, *EcData);
+ break;
+ }
+ if (ACPI_FAILURE(Status))
+ break;
+ EcAddr++;
+ EcData++;
+ } while (EcAddr < Address + Width / 8);
+
+ /* Leave burst mode before releasing the EC for other users. */
+ if (sc->ec_burstactive) {
+ sc->ec_burstactive = FALSE;
+ if (ACPI_SUCCESS(EcCommand(sc, EC_COMMAND_BURST_DISABLE)))
+ CTR0(KTR_ACPI, "ec disabled burst ok");
+ }
+
+ EcUnlock(sc);
+ return_ACPI_STATUS (Status);
+}
+
+/*
+ * Wait for the EC status register to report "Event".  In polled mode
+ * (cold boot, reboot, suspend, or forced via the sysctl) we busy-wait
+ * with DELAY; otherwise we sleep and rely on the GPE handler bumping
+ * ec_gencount to tell us the status register changed.  "gen_count" is
+ * the caller's snapshot of ec_gencount; passing a stale value (e.g. 0)
+ * forces an immediate status check.  After ~10 consecutive waits that
+ * succeed without a GPE arriving, we permanently fall back to polled
+ * mode.  The total wait budget is ec_timeout milliseconds.
+ */
+static ACPI_STATUS
+EcWaitEvent(struct acpi_ec_softc *sc, EC_EVENT Event, u_int gen_count)
+{
+ static int no_intr = 0;
+ ACPI_STATUS Status;
+ int count, i, need_poll, slp_ival;
+
+ ACPI_SERIAL_ASSERT(ec);
+ Status = AE_NO_HARDWARE_RESPONSE;
+ need_poll = cold || rebooting || ec_polled_mode || sc->ec_suspending;
+
+ /* Wait for event by polling or GPE (interrupt). */
+ if (need_poll) {
+ count = (ec_timeout * 1000) / EC_POLL_DELAY;
+ if (count == 0)
+ count = 1;
+ DELAY(10);
+ for (i = 0; i < count; i++) {
+ Status = EcCheckStatus(sc, "poll", Event);
+ if (ACPI_SUCCESS(Status))
+ break;
+ DELAY(EC_POLL_DELAY);
+ }
+ } else {
+ slp_ival = hz / 1000;
+ if (slp_ival != 0) {
+ count = ec_timeout;
+ } else {
+ /* hz has less than 1 ms resolution so scale timeout. */
+ slp_ival = 1;
+ count = ec_timeout / (1000 / hz);
+ }
+
+ /*
+ * Wait for the GPE to signal the status changed, checking the
+ * status register each time we get one. It's possible to get a
+ * GPE for an event we're not interested in here (i.e., SCI for
+ * EC query).
+ */
+ for (i = 0; i < count; i++) {
+ if (gen_count == sc->ec_gencount)
+ tsleep(sc, 0, "ecgpe", slp_ival);
+ /*
+ * Record new generation count. It's possible the GPE was
+ * just to notify us that a query is needed and we need to
+ * wait for a second GPE to signal the completion of the
+ * event we are actually waiting for.
+ */
+ Status = EcCheckStatus(sc, "sleep", Event);
+ if (ACPI_SUCCESS(Status)) {
+ if (gen_count == sc->ec_gencount)
+ no_intr++;
+ else
+ no_intr = 0;
+ break;
+ }
+ gen_count = sc->ec_gencount;
+ }
+
+ /*
+ * We finished waiting for the GPE and it never arrived. Try to
+ * read the register once and trust whatever value we got. This is
+ * the best we can do at this point.
+ */
+ if (ACPI_FAILURE(Status))
+ Status = EcCheckStatus(sc, "sleep_end", Event);
+ }
+ if (!need_poll && no_intr > 10) {
+ device_printf(sc->ec_dev,
+ "not getting interrupts, switched to polled mode\n");
+ ec_polled_mode = 1;
+ }
+ if (ACPI_FAILURE(Status))
+ CTR0(KTR_ACPI, "error: ec wait timed out");
+ return (Status);
+}
+
+/*
+ * Issue a command byte to the EC's command/status register and wait for
+ * the appropriate completion event: IBE for commands that will be
+ * followed by more writes, OBF for commands that produce a data byte.
+ * Caller must hold the EC serial region (ACPI_SERIAL_ASSERT below).
+ */
+static ACPI_STATUS
+EcCommand(struct acpi_ec_softc *sc, EC_COMMAND cmd)
+{
+ ACPI_STATUS status;
+ EC_EVENT event;
+ EC_STATUS ec_status;
+ u_int gen_count;
+
+ ACPI_SERIAL_ASSERT(ec);
+
+ /* Don't use burst mode if user disabled it. */
+ if (!ec_burst_mode && cmd == EC_COMMAND_BURST_ENABLE)
+ return (AE_ERROR);
+
+ /* Decide what to wait for based on command type. */
+ switch (cmd) {
+ case EC_COMMAND_READ:
+ case EC_COMMAND_WRITE:
+ case EC_COMMAND_BURST_DISABLE:
+ event = EC_EVENT_INPUT_BUFFER_EMPTY;
+ break;
+ case EC_COMMAND_QUERY:
+ case EC_COMMAND_BURST_ENABLE:
+ event = EC_EVENT_OUTPUT_BUFFER_FULL;
+ break;
+ default:
+ device_printf(sc->ec_dev, "EcCommand: invalid command %#x\n", cmd);
+ return (AE_BAD_PARAMETER);
+ }
+
+ /*
+ * Ensure empty input buffer before issuing command.
+ * Use generation count of zero to force a quick check.
+ */
+ status = EcWaitEvent(sc, EC_EVENT_INPUT_BUFFER_EMPTY, 0);
+ if (ACPI_FAILURE(status))
+ return (status);
+
+ /* Run the command and wait for the chosen event. */
+ CTR1(KTR_ACPI, "ec running command %#x", cmd);
+ /* Snapshot the generation count BEFORE writing, to catch the GPE. */
+ gen_count = sc->ec_gencount;
+ EC_SET_CSR(sc, cmd);
+ status = EcWaitEvent(sc, event, gen_count);
+ if (ACPI_SUCCESS(status)) {
+ /* If we succeeded, burst flag should now be present. */
+ if (cmd == EC_COMMAND_BURST_ENABLE) {
+ ec_status = EC_GET_CSR(sc);
+ if ((ec_status & EC_FLAG_BURST_MODE) == 0)
+ status = AE_ERROR;
+ }
+ } else
+ device_printf(sc->ec_dev, "EcCommand: no response to %#x\n", cmd);
+ return (status);
+}
+
+/*
+ * Read one byte of EC space at Address into *Data.  Issues READ, writes
+ * the address, then waits for OBF before reading the data register.
+ * Retries once if the failed attempt left the input buffer empty
+ * (i.e. the EC looks idle again).  Caller holds the EC serial region.
+ */
+static ACPI_STATUS
+EcRead(struct acpi_ec_softc *sc, UINT8 Address, UINT8 *Data)
+{
+ ACPI_STATUS status;
+ u_int gen_count;
+ int retry;
+
+ ACPI_SERIAL_ASSERT(ec);
+ CTR1(KTR_ACPI, "ec read from %#x", Address);
+
+ for (retry = 0; retry < 2; retry++) {
+ status = EcCommand(sc, EC_COMMAND_READ);
+ if (ACPI_FAILURE(status))
+ return (status);
+
+ gen_count = sc->ec_gencount;
+ EC_SET_DATA(sc, Address);
+ status = EcWaitEvent(sc, EC_EVENT_OUTPUT_BUFFER_FULL, gen_count);
+ if (ACPI_FAILURE(status)) {
+ if (ACPI_SUCCESS(EcCheckStatus(sc, "retr_check",
+ EC_EVENT_INPUT_BUFFER_EMPTY)))
+ continue;
+ else
+ break;
+ }
+ *Data = EC_GET_DATA(sc);
+ return (AE_OK);
+ }
+ device_printf(sc->ec_dev, "EcRead: failed waiting to get data\n");
+ return (status);
+}
+
+/*
+ * Write one byte of EC space: issue WRITE, send the address, then send
+ * the data byte, waiting for the input buffer to empty after each step.
+ * Caller holds the EC serial region.
+ */
+static ACPI_STATUS
+EcWrite(struct acpi_ec_softc *sc, UINT8 Address, UINT8 Data)
+{
+ ACPI_STATUS status;
+ u_int gen_count;
+
+ ACPI_SERIAL_ASSERT(ec);
+ CTR2(KTR_ACPI, "ec write to %#x, data %#x", Address, Data);
+
+ status = EcCommand(sc, EC_COMMAND_WRITE);
+ if (ACPI_FAILURE(status))
+ return (status);
+
+ gen_count = sc->ec_gencount;
+ EC_SET_DATA(sc, Address);
+ status = EcWaitEvent(sc, EC_EVENT_INPUT_BUFFER_EMPTY, gen_count);
+ if (ACPI_FAILURE(status)) {
+ device_printf(sc->ec_dev, "EcWrite: failed waiting for sent address\n");
+ return (status);
+ }
+
+ gen_count = sc->ec_gencount;
+ EC_SET_DATA(sc, Data);
+ status = EcWaitEvent(sc, EC_EVENT_INPUT_BUFFER_EMPTY, gen_count);
+ if (ACPI_FAILURE(status)) {
+ device_printf(sc->ec_dev, "EcWrite: failed waiting for sent data\n");
+ return (status);
+ }
+
+ return (AE_OK);
+}
diff --git a/sys/dev/acpica/acpi_hpet.c b/sys/dev/acpica/acpi_hpet.c
new file mode 100644
index 0000000..5991c15
--- /dev/null
+++ b/sys/dev/acpica/acpi_hpet.c
@@ -0,0 +1,861 @@
+/*-
+ * Copyright (c) 2005 Poul-Henning Kamp
+ * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_acpi.h"
+#if defined(__amd64__) || defined(__ia64__)
+#define DEV_APIC
+#else
+#include "opt_apic.h"
+#endif
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/proc.h>
+#include <sys/rman.h>
+#include <sys/time.h>
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+#include <sys/timeet.h>
+#include <sys/timetc.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+
+#include <dev/acpica/acpivar.h>
+#include <dev/acpica/acpi_hpet.h>
+
+#ifdef DEV_APIC
+#include "pcib_if.h"
+#endif
+
+#define HPET_VENDID_AMD 0x4353
+#define HPET_VENDID_INTEL 0x8086
+#define HPET_VENDID_NVIDIA 0x10de
+#define HPET_VENDID_SW 0x1166
+
+ACPI_SERIAL_DECL(hpet, "ACPI HPET support");
+
+static devclass_t hpet_devclass;
+
+/* ACPI CA debugging */
+#define _COMPONENT ACPI_TIMER
+ACPI_MODULE_NAME("HPET")
+
+/* Per-device state for one HPET block plus its comparators ("timers"). */
+struct hpet_softc {
+ device_t dev;
+ int mem_rid; /* rid of the memory-mapped register window */
+ int intr_rid; /* rid of the shared legacy IRQ (if used) */
+ int irq; /* shared legacy IRQ number, -1 if none */
+ int useirq; /* bitmask of timers serviced via shared IRQ */
+ int legacy_route; /* non-zero: use legacy replacement routing */
+ int per_cpu; /* requested per-CPU event timers per CPU */
+ uint32_t allowed_irqs; /* bitmask of IRQs we may route timers to */
+ struct resource *mem_res;
+ struct resource *intr_res;
+ void *intr_handle;
+ ACPI_HANDLE handle;
+ uint64_t freq; /* main counter frequency, Hz */
+ uint32_t caps; /* cached HPET_CAPABILITIES register */
+ struct timecounter tc;
+ /* One entry per hardware comparator. */
+ struct hpet_timer {
+ struct eventtimer et;
+ struct hpet_softc *sc; /* back pointer to owning softc */
+ int num; /* hardware comparator index */
+ int mode; /* 0 = stopped, 1 = periodic, 2 = one-shot */
+ int intr_rid;
+ int irq; /* dedicated IRQ/MSI, <0 if none */
+ int pcpu_cpu; /* CPU this timer is bound to, -1 if global */
+ int pcpu_misrouted; /* count of interrupts seen on wrong CPU */
+ int pcpu_master; /* index of per-CPU group master, -1 if none */
+ int pcpu_slaves[MAXCPU]; /* (master only) timer index per CPU */
+ struct resource *intr_res;
+ void *intr_handle;
+ uint32_t caps; /* cached HPET_TIMER_CAP_CNF register */
+ uint32_t vectors; /* bitmask of routable IRQs for this timer */
+ uint32_t div; /* periodic interval in counter ticks */
+ uint32_t next; /* next programmed comparator value */
+ char name[8]; /* event timer name ("HPETn") */
+ } t[32];
+ int num_timers;
+};
+
+static u_int hpet_get_timecount(struct timecounter *tc);
+static void hpet_test(struct hpet_softc *sc);
+
+static char *hpet_ids[] = { "PNP0103", NULL };
+
+/* Timecounter read method: return the current HPET main counter value. */
+static u_int
+hpet_get_timecount(struct timecounter *tc)
+{
+ struct hpet_softc *sc = tc->tc_priv;
+
+ return (bus_read_4(sc->mem_res, HPET_MAIN_COUNTER));
+}
+
+/*
+ * Start the main counter, setting or clearing legacy replacement
+ * routing according to sc->legacy_route.
+ */
+static void
+hpet_enable(struct hpet_softc *sc)
+{
+ uint32_t cnf;
+
+ cnf = bus_read_4(sc->mem_res, HPET_CONFIG);
+ if (sc->legacy_route)
+ cnf |= HPET_CNF_LEG_RT;
+ else
+ cnf &= ~HPET_CNF_LEG_RT;
+ bus_write_4(sc->mem_res, HPET_CONFIG, cnf | HPET_CNF_ENABLE);
+}
+
+/* Stop the main counter (read-modify-write of the ENABLE bit). */
+static void
+hpet_disable(struct hpet_softc *sc)
+{
+ uint32_t cnf;
+
+ cnf = bus_read_4(sc->mem_res, HPET_CONFIG);
+ bus_write_4(sc->mem_res, HPET_CONFIG, cnf & ~HPET_CNF_ENABLE);
+}
+
+/*
+ * Event timer start method.  Programs the comparator for either
+ * periodic (period != NULL) or one-shot operation, firing first after
+ * "first" (or after one period if first == NULL).
+ *
+ * For per-CPU groups the et_priv timer is the group master; the actual
+ * hardware timer used is the slave bound to the current CPU.
+ */
+static int
+hpet_start(struct eventtimer *et,
+ struct bintime *first, struct bintime *period)
+{
+ struct hpet_timer *mt = (struct hpet_timer *)et->et_priv;
+ struct hpet_timer *t;
+ struct hpet_softc *sc = mt->sc;
+ uint32_t fdiv, now;
+
+ t = (mt->pcpu_master < 0) ? mt : &sc->t[mt->pcpu_slaves[curcpu]];
+ if (period != NULL) {
+ t->mode = 1;
+ /* Convert bintime (sec + 2^-64 frac) to counter ticks. */
+ t->div = (sc->freq * (period->frac >> 32)) >> 32;
+ if (period->sec != 0)
+ t->div += sc->freq * period->sec;
+ } else {
+ t->mode = 2;
+ t->div = 0;
+ }
+ if (first != NULL) {
+ fdiv = (sc->freq * (first->frac >> 32)) >> 32;
+ if (first->sec != 0)
+ fdiv += sc->freq * first->sec;
+ } else
+ fdiv = t->div;
+ /* Level-triggered shared IRQ: clear any stale status first. */
+ if (t->irq < 0)
+ bus_write_4(sc->mem_res, HPET_ISR, 1 << t->num);
+ t->caps |= HPET_TCNF_INT_ENB;
+ now = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
+restart:
+ t->next = now + fdiv;
+ if (t->mode == 1 && (t->caps & HPET_TCAP_PER_INT)) {
+ /*
+ * Hardware periodic mode: VAL_SET lets the second
+ * comparator write set the period rather than the target.
+ */
+ t->caps |= HPET_TCNF_TYPE;
+ bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num),
+ t->caps | HPET_TCNF_VAL_SET);
+ bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
+ t->next);
+ bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
+ t->div);
+ } else {
+ t->caps &= ~HPET_TCNF_TYPE;
+ bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num),
+ t->caps);
+ bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
+ t->next);
+ }
+ /*
+ * If the counter has already passed (or is too close to) the
+ * programmed target, the interrupt would be lost; double the
+ * delay and reprogram.
+ */
+ now = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
+ if ((int32_t)(now - t->next + HPET_MIN_CYCLES) >= 0) {
+ fdiv *= 2;
+ goto restart;
+ }
+ return (0);
+}
+
+/*
+ * Event timer stop method: disable the comparator's interrupt and
+ * leave it in non-periodic mode.  Selects the per-CPU slave timer for
+ * the current CPU when the timer is a per-CPU group master.
+ */
+static int
+hpet_stop(struct eventtimer *et)
+{
+ struct hpet_timer *mt = (struct hpet_timer *)et->et_priv;
+ struct hpet_timer *t;
+ struct hpet_softc *sc = mt->sc;
+
+ t = (mt->pcpu_master < 0) ? mt : &sc->t[mt->pcpu_slaves[curcpu]];
+ t->mode = 0;
+ t->caps &= ~(HPET_TCNF_INT_ENB | HPET_TCNF_TYPE);
+ bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num), t->caps);
+ return (0);
+}
+
+/*
+ * Interrupt filter for one comparator (dedicated IRQ/MSI case, also
+ * called per-timer from the shared-IRQ handler hpet_intr()).
+ *
+ * Handles software-emulated periodic mode, one-shot completion, and
+ * per-CPU interrupts that were delivered to the wrong CPU.
+ */
+static int
+hpet_intr_single(void *arg)
+{
+ struct hpet_timer *t = (struct hpet_timer *)arg;
+ struct hpet_timer *mt;
+ struct hpet_softc *sc = t->sc;
+ uint32_t now;
+
+ if (t->mode == 0)
+ return (FILTER_STRAY);
+ /* Check that per-CPU timer interrupt reached right CPU. */
+ if (t->pcpu_cpu >= 0 && t->pcpu_cpu != curcpu) {
+ /* Rate-limit the complaint to every 32nd occurrence. */
+ if ((++t->pcpu_misrouted) % 32 == 0) {
+ printf("HPET interrupt routed to the wrong CPU"
+ " (timer %d CPU %d -> %d)!\n",
+ t->num, t->pcpu_cpu, curcpu);
+ }
+
+ /*
+ * Reload timer, hoping that next time may be more lucky
+ * (system will manage proper interrupt binding).
+ */
+ if ((t->mode == 1 && (t->caps & HPET_TCAP_PER_INT) == 0) ||
+ t->mode == 2) {
+ t->next = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER) +
+ sc->freq / 8;
+ bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
+ t->next);
+ }
+ return (FILTER_HANDLED);
+ }
+ if (t->mode == 1 &&
+ (t->caps & HPET_TCAP_PER_INT) == 0) {
+ /*
+ * Software periodic mode: advance the comparator by one
+ * period, but never schedule it closer than half a period
+ * behind the counter to avoid losing the next interrupt.
+ */
+ t->next += t->div;
+ now = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
+ if ((int32_t)((now + t->div / 2) - t->next) > 0)
+ t->next = now + t->div / 2;
+ bus_write_4(sc->mem_res,
+ HPET_TIMER_COMPARATOR(t->num), t->next);
+ } else if (t->mode == 2)
+ t->mode = 0;
+ /* Deliver the event via the group master's eventtimer callback. */
+ mt = (t->pcpu_master < 0) ? t : &sc->t[t->pcpu_master];
+ if (mt->et.et_active)
+ mt->et.et_event_cb(&mt->et, mt->et.et_arg);
+ return (FILTER_HANDLED);
+}
+
+/*
+ * Shared legacy-IRQ interrupt filter.  Reads the general interrupt
+ * status register, acknowledges all pending bits, and dispatches each
+ * timer routed through the shared IRQ to hpet_intr_single().
+ */
+static int
+hpet_intr(void *arg)
+{
+ struct hpet_softc *sc = (struct hpet_softc *)arg;
+ int i;
+ uint32_t val;
+
+ val = bus_read_4(sc->mem_res, HPET_ISR);
+ if (val) {
+ /* Ack everything, but only service timers we own here. */
+ bus_write_4(sc->mem_res, HPET_ISR, val);
+ val &= sc->useirq;
+ for (i = 0; i < sc->num_timers; i++) {
+ if ((val & (1 << i)) == 0)
+ continue;
+ hpet_intr_single(&sc->t[i]);
+ }
+ return (FILTER_HANDLED);
+ }
+ return (FILTER_STRAY);
+}
+
+/*
+ * AcpiWalkNamespace() callback used by hpet_identify(): set *status to
+ * 1 if "handle" is an HPET device (PNP0103) whose _UID matches the
+ * sequence number passed in "context" (or has no readable _UID).
+ * Always returns AE_OK so the walk continues.
+ */
+static ACPI_STATUS
+hpet_find(ACPI_HANDLE handle, UINT32 level, void *context,
+ void **status)
+{
+ char **ids;
+ uint32_t id = (uint32_t)(uintptr_t)context;
+ uint32_t uid = 0;
+
+ for (ids = hpet_ids; *ids != NULL; ids++) {
+ if (acpi_MatchHid(handle, *ids))
+ break;
+ }
+ if (*ids == NULL)
+ return (AE_OK);
+ /* A missing/unevaluable _UID is treated as a match. */
+ if (ACPI_FAILURE(acpi_GetInteger(handle, "_UID", &uid)) ||
+ id == uid)
+ *((int *)status) = 1;
+ return (AE_OK);
+}
+
+/*
+ * Find an existing IRQ resource that matches the requested IRQ range
+ * and return its RID. If one is not found, use a new RID.
+ */
+static int
+hpet_find_irq_rid(device_t dev, u_long start, u_long end)
+{
+ u_long irq;
+ int rid;
+
+ /*
+ * Scan existing IRQ resources for one inside [start, end]; the
+ * first rid with no resource set is returned as a fresh rid.
+ */
+ for (rid = 0; ; rid++) {
+ if (bus_get_resource(dev, SYS_RES_IRQ, rid, &irq, NULL) != 0)
+ return (rid);
+ if (start <= irq && irq <= end)
+ return (rid);
+ }
+}
+
+/* Discover the HPET via the ACPI table of the same name. */
+static void
+hpet_identify(driver_t *driver, device_t parent)
+{
+ ACPI_TABLE_HPET *hpet;
+ ACPI_STATUS status;
+ device_t child;
+ int i, found;
+
+ /* Only one HPET device can be added. */
+ if (devclass_get_device(hpet_devclass, 0))
+ return;
+ /* Iterate over every HPET table instance (1-based index). */
+ for (i = 1; ; i++) {
+ /* Search for HPET table. */
+ status = AcpiGetTable(ACPI_SIG_HPET, i, (ACPI_TABLE_HEADER **)&hpet);
+ if (ACPI_FAILURE(status))
+ return;
+ /* Search for HPET device with same ID. */
+ found = 0;
+ AcpiWalkNamespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ 100, hpet_find, NULL, (void *)(uintptr_t)hpet->Sequence, (void *)&found);
+ /* If found - let it be probed in normal way. */
+ if (found)
+ continue;
+ /* If not - create it from table info. */
+ child = BUS_ADD_CHILD(parent, 2, "hpet", 0);
+ if (child == NULL) {
+ printf("%s: can't add child\n", __func__);
+ continue;
+ }
+ /* Seed the child's memory resource from the table's base address. */
+ bus_set_resource(child, SYS_RES_MEMORY, 0, hpet->Address.Address,
+ HPET_MEM_WIDTH);
+ }
+}
+
+/*
+ * Probe method: accept the device unless HPET support is disabled or,
+ * for ACPI-enumerated instances, the _HID/_CID does not match PNP0103.
+ * Devices created by hpet_identify() have no ACPI handle and are
+ * accepted unconditionally.
+ */
+static int
+hpet_probe(device_t dev)
+{
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);
+
+ if (acpi_disabled("hpet"))
+ return (ENXIO);
+ if (acpi_get_handle(dev) != NULL &&
+ ACPI_ID_PROBE(device_get_parent(dev), dev, hpet_ids) == NULL)
+ return (ENXIO);
+
+ device_set_desc(dev, "High Precision Event Timer");
+ return (0);
+}
+
+/*
+ * Attach method: map the register window, validate the hardware,
+ * register the main counter as a timecounter (unit 0 only), then
+ * allocate interrupts (legacy route, MSI, or shared IRQ) and register
+ * each usable comparator as an event timer, grouping MSI-capable
+ * timers per-CPU when possible.
+ *
+ * Fixes vs. previous revision:
+ *  - the MSI allocation failure message printed the PCIB error code in
+ *    place of the timer number; it now reports both.
+ *  - timecounter field initialization used comma operators where
+ *    separate statements were intended.
+ */
+static int
+hpet_attach(device_t dev)
+{
+ struct hpet_softc *sc;
+ struct hpet_timer *t;
+ int i, j, num_msi, num_timers, num_percpu_et, num_percpu_t, cur_cpu;
+ int pcpu_master;
+ static int maxhpetet = 0;
+ uint32_t val, val2, cvectors, dvectors;
+ uint16_t vendor, rev;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ sc->handle = acpi_get_handle(dev);
+
+ sc->mem_rid = 0;
+ sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
+ RF_ACTIVE);
+ if (sc->mem_res == NULL)
+ return (ENOMEM);
+
+ /* Validate that we can access the whole region. */
+ if (rman_get_size(sc->mem_res) < HPET_MEM_WIDTH) {
+ device_printf(dev, "memory region width %ld too small\n",
+ rman_get_size(sc->mem_res));
+ bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
+ return (ENXIO);
+ }
+
+ /* Be sure timer is enabled. */
+ hpet_enable(sc);
+
+ /* Read basic statistics about the timer. */
+ val = bus_read_4(sc->mem_res, HPET_PERIOD);
+ if (val == 0) {
+ device_printf(dev, "invalid period\n");
+ hpet_disable(sc);
+ bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
+ return (ENXIO);
+ }
+
+ /* HPET_PERIOD is in femtoseconds; convert to Hz with rounding. */
+ sc->freq = (1000000000000000LL + val / 2) / val;
+ sc->caps = bus_read_4(sc->mem_res, HPET_CAPABILITIES);
+ vendor = (sc->caps & HPET_CAP_VENDOR_ID) >> 16;
+ rev = sc->caps & HPET_CAP_REV_ID;
+ num_timers = 1 + ((sc->caps & HPET_CAP_NUM_TIM) >> 8);
+ /*
+ * ATI/AMD violates IA-PC HPET (High Precision Event Timers)
+ * Specification and provides an off by one number
+ * of timers/comparators.
+ * Additionally, they use unregistered value in VENDOR_ID field.
+ */
+ if (vendor == HPET_VENDID_AMD && rev < 0x10 && num_timers > 0)
+ num_timers--;
+ sc->num_timers = num_timers;
+ if (bootverbose) {
+ device_printf(dev,
+ "vendor 0x%x, rev 0x%x, %jdHz%s, %d timers,%s\n",
+ vendor, rev, sc->freq,
+ (sc->caps & HPET_CAP_COUNT_SIZE) ? " 64bit" : "",
+ num_timers,
+ (sc->caps & HPET_CAP_LEG_RT) ? " legacy route" : "");
+ }
+ /* Initialize the per-comparator state and cache its capabilities. */
+ for (i = 0; i < num_timers; i++) {
+ t = &sc->t[i];
+ t->sc = sc;
+ t->num = i;
+ t->mode = 0;
+ t->intr_rid = -1;
+ t->irq = -1;
+ t->pcpu_cpu = -1;
+ t->pcpu_misrouted = 0;
+ t->pcpu_master = -1;
+ t->caps = bus_read_4(sc->mem_res, HPET_TIMER_CAP_CNF(i));
+ t->vectors = bus_read_4(sc->mem_res, HPET_TIMER_CAP_CNF(i) + 4);
+ if (bootverbose) {
+ device_printf(dev,
+ " t%d: irqs 0x%08x (%d)%s%s%s\n", i,
+ t->vectors, (t->caps & HPET_TCNF_INT_ROUTE) >> 9,
+ (t->caps & HPET_TCAP_FSB_INT_DEL) ? ", MSI" : "",
+ (t->caps & HPET_TCAP_SIZE) ? ", 64bit" : "",
+ (t->caps & HPET_TCAP_PER_INT) ? ", periodic" : "");
+ }
+ }
+ if (testenv("debug.acpi.hpet_test"))
+ hpet_test(sc);
+ /*
+ * Don't attach if the timer never increments. Since the spec
+ * requires it to be at least 10 MHz, it has to change in 1 us.
+ */
+ val = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
+ DELAY(1);
+ val2 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
+ if (val == val2) {
+ device_printf(dev, "HPET never increments, disabling\n");
+ hpet_disable(sc);
+ bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
+ return (ENXIO);
+ }
+ /* Announce first HPET as timecounter. */
+ if (device_get_unit(dev) == 0) {
+ sc->tc.tc_get_timecount = hpet_get_timecount;
+ sc->tc.tc_counter_mask = ~0u;
+ sc->tc.tc_name = "HPET";
+ sc->tc.tc_quality = 950;
+ sc->tc.tc_frequency = sc->freq;
+ sc->tc.tc_priv = sc;
+ tc_init(&sc->tc);
+ }
+ /* If not disabled - setup and announce event timers. */
+ if (resource_int_value(device_get_name(dev), device_get_unit(dev),
+ "clock", &i) == 0 && i == 0)
+ return (0);
+
+ /* Check whether we can and want legacy routing. */
+ sc->legacy_route = 0;
+ resource_int_value(device_get_name(dev), device_get_unit(dev),
+ "legacy_route", &sc->legacy_route);
+ if ((sc->caps & HPET_CAP_LEG_RT) == 0)
+ sc->legacy_route = 0;
+ if (sc->legacy_route) {
+ sc->t[0].vectors = 0;
+ sc->t[1].vectors = 0;
+ }
+
+ /* Check what IRQs we want use. */
+ /* By default allow any PCI IRQs. */
+ sc->allowed_irqs = 0xffff0000;
+ /*
+ * HPETs in AMD chipsets before SB800 have problems with IRQs >= 16.
+ * Lower ones are also not always working for different reasons.
+ * SB800 fixed it, but does not seem to implement level triggering
+ * properly, which makes it very unreliable - it freezes after any
+ * interrupt loss. Avoid legacy IRQs for AMD.
+ */
+ if (vendor == HPET_VENDID_AMD)
+ sc->allowed_irqs = 0x00000000;
+ /*
+ * NVidia MCP5x chipsets have number of unexplained interrupt
+ * problems. For some reason, using HPET interrupts breaks HDA sound.
+ */
+ if (vendor == HPET_VENDID_NVIDIA && rev <= 0x01)
+ sc->allowed_irqs = 0x00000000;
+ /*
+ * ServerWorks HT1000 reported to have problems with IRQs >= 16.
+ * Lower IRQs are working, but allowed mask is not set correctly.
+ * Legacy_route mode works fine.
+ */
+ if (vendor == HPET_VENDID_SW && rev <= 0x01)
+ sc->allowed_irqs = 0x00000000;
+ /*
+ * Neither QEMU nor VirtualBox report supported IRQs correctly.
+ * The only way to use HPET there is to specify IRQs manually
+ * and/or use legacy_route. Legacy_route mode works on both.
+ */
+ if (vm_guest)
+ sc->allowed_irqs = 0x00000000;
+ /* Let user override. */
+ resource_int_value(device_get_name(dev), device_get_unit(dev),
+ "allowed_irqs", &sc->allowed_irqs);
+
+ /* Get how much per-CPU timers we should try to provide. */
+ sc->per_cpu = 1;
+ resource_int_value(device_get_name(dev), device_get_unit(dev),
+ "per_cpu", &sc->per_cpu);
+
+ num_msi = 0;
+ sc->useirq = 0;
+ /* Find IRQ vectors for all timers. */
+ cvectors = sc->allowed_irqs & 0xffff0000;
+ dvectors = sc->allowed_irqs & 0x0000ffff;
+ if (sc->legacy_route)
+ dvectors &= 0x0000fefe;
+ for (i = 0; i < num_timers; i++) {
+ t = &sc->t[i];
+ if (sc->legacy_route && i < 2)
+ t->irq = (i == 0) ? 0 : 8;
+#ifdef DEV_APIC
+ else if (t->caps & HPET_TCAP_FSB_INT_DEL) {
+ if ((j = PCIB_ALLOC_MSIX(
+ device_get_parent(device_get_parent(dev)), dev,
+ &t->irq))) {
+ device_printf(dev,
+ "Can't allocate interrupt for t%d: %d\n", i, j);
+ }
+ }
+#endif
+ else if (dvectors & t->vectors) {
+ t->irq = ffs(dvectors & t->vectors) - 1;
+ dvectors &= ~(1 << t->irq);
+ }
+ if (t->irq >= 0) {
+ t->intr_rid = hpet_find_irq_rid(dev, t->irq, t->irq);
+ t->intr_res = bus_alloc_resource(dev, SYS_RES_IRQ,
+ &t->intr_rid, t->irq, t->irq, 1, RF_ACTIVE);
+ if (t->intr_res == NULL) {
+ t->irq = -1;
+ device_printf(dev,
+ "Can't map interrupt for t%d.\n", i);
+ } else if (bus_setup_intr(dev, t->intr_res,
+ INTR_TYPE_CLK, hpet_intr_single, NULL, t,
+ &t->intr_handle) != 0) {
+ t->irq = -1;
+ device_printf(dev,
+ "Can't setup interrupt for t%d.\n", i);
+ } else {
+ bus_describe_intr(dev, t->intr_res,
+ t->intr_handle, "t%d", i);
+ num_msi++;
+ }
+ }
+ /* No dedicated IRQ; fall back to the shared legacy IRQ pool. */
+ if (t->irq < 0 && (cvectors & t->vectors) != 0) {
+ cvectors &= t->vectors;
+ sc->useirq |= (1 << i);
+ }
+ }
+ if (sc->legacy_route && sc->t[0].irq < 0 && sc->t[1].irq < 0)
+ sc->legacy_route = 0;
+ if (sc->legacy_route)
+ hpet_enable(sc);
+ /* Group timers for per-CPU operation. */
+ num_percpu_et = min(num_msi / mp_ncpus, sc->per_cpu);
+ num_percpu_t = num_percpu_et * mp_ncpus;
+ pcpu_master = 0;
+ cur_cpu = CPU_FIRST();
+ for (i = 0; i < num_timers; i++) {
+ t = &sc->t[i];
+ if (t->irq >= 0 && num_percpu_t > 0) {
+ if (cur_cpu == CPU_FIRST())
+ pcpu_master = i;
+ t->pcpu_cpu = cur_cpu;
+ t->pcpu_master = pcpu_master;
+ sc->t[pcpu_master].pcpu_slaves[cur_cpu] = i;
+ bus_bind_intr(dev, t->intr_res, cur_cpu);
+ cur_cpu = CPU_NEXT(cur_cpu);
+ num_percpu_t--;
+ } else if (t->irq >= 0)
+ bus_bind_intr(dev, t->intr_res, CPU_FIRST());
+ }
+ bus_write_4(sc->mem_res, HPET_ISR, 0xffffffff);
+ sc->irq = -1;
+ /* If at least one timer needs legacy IRQ - set it up. */
+ if (sc->useirq) {
+ /* Use the highest contiguous run of shared-capable IRQs. */
+ j = i = fls(cvectors) - 1;
+ while (j > 0 && (cvectors & (1 << (j - 1))) != 0)
+ j--;
+ sc->intr_rid = hpet_find_irq_rid(dev, j, i);
+ sc->intr_res = bus_alloc_resource(dev, SYS_RES_IRQ,
+ &sc->intr_rid, j, i, 1, RF_SHAREABLE | RF_ACTIVE);
+ if (sc->intr_res == NULL)
+ device_printf(dev, "Can't map interrupt.\n");
+ else if (bus_setup_intr(dev, sc->intr_res, INTR_TYPE_CLK,
+ hpet_intr, NULL, sc, &sc->intr_handle) != 0) {
+ device_printf(dev, "Can't setup interrupt.\n");
+ } else {
+ sc->irq = rman_get_start(sc->intr_res);
+ /* Bind IRQ to BSP to avoid live migration. */
+ bus_bind_intr(dev, sc->intr_res, CPU_FIRST());
+ }
+ }
+ /* Program and announce event timers. */
+ for (i = 0; i < num_timers; i++) {
+ t = &sc->t[i];
+ t->caps &= ~(HPET_TCNF_FSB_EN | HPET_TCNF_INT_ROUTE);
+ t->caps &= ~(HPET_TCNF_VAL_SET | HPET_TCNF_INT_ENB);
+ t->caps &= ~(HPET_TCNF_INT_TYPE);
+ t->caps |= HPET_TCNF_32MODE;
+ if (t->irq >= 0 && sc->legacy_route && i < 2) {
+ /* Legacy route doesn't need more configuration. */
+ } else
+#ifdef DEV_APIC
+ if ((t->caps & HPET_TCAP_FSB_INT_DEL) && t->irq >= 0) {
+ uint64_t addr;
+ uint32_t data;
+
+ if (PCIB_MAP_MSI(
+ device_get_parent(device_get_parent(dev)), dev,
+ t->irq, &addr, &data) == 0) {
+ bus_write_4(sc->mem_res,
+ HPET_TIMER_FSB_ADDR(i), addr);
+ bus_write_4(sc->mem_res,
+ HPET_TIMER_FSB_VAL(i), data);
+ t->caps |= HPET_TCNF_FSB_EN;
+ } else
+ t->irq = -2;
+ } else
+#endif
+ if (t->irq >= 0)
+ t->caps |= (t->irq << 9);
+ else if (sc->irq >= 0 && (t->vectors & (1 << sc->irq)))
+ t->caps |= (sc->irq << 9) | HPET_TCNF_INT_TYPE;
+ bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(i), t->caps);
+ /* Skip event timers without set up IRQ. */
+ if (t->irq < 0 &&
+ (sc->irq < 0 || (t->vectors & (1 << sc->irq)) == 0))
+ continue;
+ /* Announce the reset. */
+ if (maxhpetet == 0)
+ t->et.et_name = "HPET";
+ else {
+ sprintf(t->name, "HPET%d", maxhpetet);
+ t->et.et_name = t->name;
+ }
+ t->et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT;
+ t->et.et_quality = 450;
+ if (t->pcpu_master >= 0) {
+ t->et.et_flags |= ET_FLAGS_PERCPU;
+ t->et.et_quality += 100;
+ }
+ if ((t->caps & HPET_TCAP_PER_INT) == 0)
+ t->et.et_quality -= 10;
+ t->et.et_frequency = sc->freq;
+ t->et.et_min_period.sec = 0;
+ t->et.et_min_period.frac =
+ (((uint64_t)(HPET_MIN_CYCLES * 2) << 32) / sc->freq) << 32;
+ t->et.et_max_period.sec = 0xfffffffeLLU / sc->freq;
+ t->et.et_max_period.frac =
+ ((0xfffffffeLLU << 32) / sc->freq) << 32;
+ t->et.et_start = hpet_start;
+ t->et.et_stop = hpet_stop;
+ t->et.et_priv = &sc->t[i];
+ if (t->pcpu_master < 0 || t->pcpu_master == i) {
+ et_register(&t->et);
+ maxhpetet++;
+ }
+ }
+ return (0);
+}
+
+/*
+ * Detach method: always refuses.  Once tc_init() has registered the
+ * timecounter there is no way to unregister it, so the driver must
+ * stay loaded.
+ */
+static int
+hpet_detach(device_t dev)
+{
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);
+
+ /* XXX Without a tc_remove() function, we can't detach. */
+ return (EBUSY);
+}
+
+/*
+ * Suspend method: intentionally a no-op.  The disable call is kept
+ * commented out; see the note below for the original rationale.
+ */
+static int
+hpet_suspend(device_t dev)
+{
+// struct hpet_softc *sc;
+
+ /*
+ * Disable the timer during suspend. The timer will not lose
+ * its state in S1 or S2, but we are required to disable
+ * it.
+ */
+// sc = device_get_softc(dev);
+// hpet_disable(sc);
+
+ return (0);
+}
+
+/*
+ * Resume method: re-enable the main counter (firmware may have
+ * disabled it across suspend), reprogram MSI address/data pairs, and
+ * restart any comparators that were running.
+ */
+static int
+hpet_resume(device_t dev)
+{
+ struct hpet_softc *sc;
+ struct hpet_timer *t;
+ int i;
+
+ /* Re-enable the timer after a resume to keep the clock advancing. */
+ sc = device_get_softc(dev);
+ hpet_enable(sc);
+ /* Restart event timers that were running on suspend. */
+ for (i = 0; i < sc->num_timers; i++) {
+ t = &sc->t[i];
+#ifdef DEV_APIC
+ /* Reprogram MSI registers, which are lost across suspend. */
+ if (t->irq >= 0 && (sc->legacy_route == 0 || i >= 2)) {
+ uint64_t addr;
+ uint32_t data;
+
+ if (PCIB_MAP_MSI(
+ device_get_parent(device_get_parent(dev)), dev,
+ t->irq, &addr, &data) == 0) {
+ bus_write_4(sc->mem_res,
+ HPET_TIMER_FSB_ADDR(i), addr);
+ bus_write_4(sc->mem_res,
+ HPET_TIMER_FSB_VAL(i), data);
+ }
+ }
+#endif
+ if (t->mode == 0)
+ continue;
+ t->next = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
+ if (t->mode == 1 && (t->caps & HPET_TCAP_PER_INT)) {
+ /* Hardware periodic: reload target and period. */
+ t->caps |= HPET_TCNF_TYPE;
+ t->next += t->div;
+ bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num),
+ t->caps | HPET_TCNF_VAL_SET);
+ bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
+ t->next);
+ bus_read_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num));
+ bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
+ t->div);
+ } else {
+ /* One-shot/emulated: fire again shortly (~1ms). */
+ t->next += sc->freq / 1024;
+ bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
+ t->next);
+ }
+ /* Clear stale status, then restore the config register. */
+ bus_write_4(sc->mem_res, HPET_ISR, 1 << t->num);
+ bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num), t->caps);
+ }
+ return (0);
+}
+
+/* Print some basic latency/rate information to assist in debugging. */
+static void
+hpet_test(struct hpet_softc *sc)
+{
+ int i;
+ uint32_t u1, u2;
+ struct bintime b0, b1, b2;
+ struct timespec ts;
+
+ /* First binuptime() warms caches; b1/b2 bracket the 1000 reads. */
+ binuptime(&b0);
+ binuptime(&b0);
+ binuptime(&b1);
+ u1 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
+ for (i = 1; i < 1000; i++)
+ u2 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
+ binuptime(&b2);
+ u2 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
+
+ /* Subtract the cost of one binuptime() pair from the measurement. */
+ bintime_sub(&b2, &b1);
+ bintime_sub(&b1, &b0);
+ bintime_sub(&b2, &b1);
+ bintime2timespec(&b2, &ts);
+
+ device_printf(sc->dev, "%ld.%09ld: %u ... %u = %u\n",
+ (long)ts.tv_sec, ts.tv_nsec, u1, u2, u2 - u1);
+
+ /* 1000 reads were timed, so per-call cost is total / 1000. */
+ device_printf(sc->dev, "time per call: %ld ns\n", ts.tv_nsec / 1000);
+}
+
+#ifdef DEV_APIC
+/*
+ * bus_remap_intr method: rewrite the FSB (MSI) address/data registers
+ * for the timer using "irq" after the interrupt has been moved by the
+ * interrupt subsystem.  The whole HPET is briefly disabled around the
+ * two register writes so no interrupt fires half-programmed.
+ * Returns ENOENT if no timer uses the given IRQ.
+ */
+static int
+hpet_remap_intr(device_t dev, device_t child, u_int irq)
+{
+ struct hpet_softc *sc = device_get_softc(dev);
+ struct hpet_timer *t;
+ uint64_t addr;
+ uint32_t data;
+ int error, i;
+
+ for (i = 0; i < sc->num_timers; i++) {
+ t = &sc->t[i];
+ if (t->irq != irq)
+ continue;
+ error = PCIB_MAP_MSI(
+ device_get_parent(device_get_parent(dev)), dev,
+ irq, &addr, &data);
+ if (error)
+ return (error);
+ hpet_disable(sc); /* Stop timer to avoid interrupt loss. */
+ bus_write_4(sc->mem_res, HPET_TIMER_FSB_ADDR(i), addr);
+ bus_write_4(sc->mem_res, HPET_TIMER_FSB_VAL(i), data);
+ hpet_enable(sc);
+ return (0);
+ }
+ return (ENOENT);
+}
+#endif
+
+/* Newbus method table for the hpet driver. */
+static device_method_t hpet_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_identify, hpet_identify),
+ DEVMETHOD(device_probe, hpet_probe),
+ DEVMETHOD(device_attach, hpet_attach),
+ DEVMETHOD(device_detach, hpet_detach),
+ DEVMETHOD(device_suspend, hpet_suspend),
+ DEVMETHOD(device_resume, hpet_resume),
+
+#ifdef DEV_APIC
+ DEVMETHOD(bus_remap_intr, hpet_remap_intr),
+#endif
+
+ {0, 0}
+};
+
+static driver_t hpet_driver = {
+ "hpet",
+ hpet_methods,
+ sizeof(struct hpet_softc),
+};
+
+/* Attach under acpi(4); depend on the ACPI module being present. */
+DRIVER_MODULE(hpet, acpi, hpet_driver, hpet_devclass, 0, 0);
+MODULE_DEPEND(hpet, acpi, 1, 1, 1);
diff --git a/sys/dev/acpica/acpi_hpet.h b/sys/dev/acpica/acpi_hpet.h
new file mode 100644
index 0000000..fd495d5
--- /dev/null
+++ b/sys/dev/acpica/acpi_hpet.h
@@ -0,0 +1,67 @@
+/*-
+ * Copyright (c) 2005 Poul-Henning Kamp
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __ACPI_HPET_H__
+#define __ACPI_HPET_H__
+
+#define HPET_MEM_WIDTH 0x400 /* Expected memory region size */
+
+/* General registers */
+#define HPET_CAPABILITIES 0x0 /* General capabilities and ID */
+#define HPET_CAP_VENDOR_ID 0xffff0000
+#define HPET_CAP_LEG_RT 0x00008000
+#define HPET_CAP_COUNT_SIZE 0x00002000 /* 1 = 64-bit, 0 = 32-bit */
+#define HPET_CAP_NUM_TIM 0x00001f00
+#define HPET_CAP_REV_ID 0x000000ff
+#define HPET_PERIOD 0x4 /* Period (1/hz) of timer, in femtoseconds */
+#define HPET_CONFIG 0x10 /* General configuration register */
+#define HPET_CNF_LEG_RT 0x00000002
+#define HPET_CNF_ENABLE 0x00000001
+#define HPET_ISR 0x20 /* General interrupt status register */
+#define HPET_MAIN_COUNTER 0xf0 /* Main counter register */
+
+/* Timer registers (one 0x20-byte bank per comparator) */
+#define HPET_TIMER_CAP_CNF(x) ((x) * 0x20 + 0x100)
+#define HPET_TCAP_INT_ROUTE 0xffffffff00000000 /* routable-IRQ bitmask (high word) */
+#define HPET_TCAP_FSB_INT_DEL 0x00008000
+#define HPET_TCNF_FSB_EN 0x00004000
+#define HPET_TCNF_INT_ROUTE 0x00003e00
+#define HPET_TCNF_32MODE 0x00000100
+#define HPET_TCNF_VAL_SET 0x00000040 /* next comparator write sets period */
+#define HPET_TCAP_SIZE 0x00000020 /* 1 = 64-bit, 0 = 32-bit */
+#define HPET_TCAP_PER_INT 0x00000010 /* Supports periodic interrupts */
+#define HPET_TCNF_TYPE 0x00000008 /* 1 = periodic, 0 = one-shot */
+#define HPET_TCNF_INT_ENB 0x00000004
+#define HPET_TCNF_INT_TYPE 0x00000002 /* 1 = level triggered, 0 = edge */
+#define HPET_TIMER_COMPARATOR(x) ((x) * 0x20 + 0x108)
+#define HPET_TIMER_FSB_VAL(x) ((x) * 0x20 + 0x110)
+#define HPET_TIMER_FSB_ADDR(x) ((x) * 0x20 + 0x114)
+
+#define HPET_MIN_CYCLES 128 /* Period considered reliable. */
+
+#endif /* !__ACPI_HPET_H__ */
diff --git a/sys/dev/acpica/acpi_if.m b/sys/dev/acpica/acpi_if.m
new file mode 100644
index 0000000..f0a68e3
--- /dev/null
+++ b/sys/dev/acpica/acpi_if.m
@@ -0,0 +1,224 @@
+#-
+# Copyright (c) 2004 Nate Lawson
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $FreeBSD$
+#
+
+#include <sys/bus.h>
+#include <sys/types.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+
+INTERFACE acpi;
+
+#
+# Callback function for each child handle traversed in acpi_scan_children().
+#
+# ACPI_HANDLE h: current child device being considered
+#
+# device_t *dev: pointer to the child's original device_t or NULL if there
+# was none. The callback should store a new device in *dev if it has
+# created one. The method implementation will automatically clean up the
+# previous device and properly associate the current ACPI_HANDLE with it.
+#
+# level: current level being scanned
+#
+# void *arg: argument passed in original call to acpi_scan_children()
+#
+# Returns: AE_OK if the scan should continue, otherwise an error
+#
+HEADER {
+ typedef ACPI_STATUS (*acpi_scan_cb_t)(ACPI_HANDLE h, device_t *dev,
+ int level, void *arg);
+
+ struct acpi_bif;
+ struct acpi_bst;
+};
+
+#
+# Default implementation for acpi_id_probe().
+#
+CODE {
+ static char *
+ acpi_generic_id_probe(device_t bus, device_t dev, char **ids)
+ {
+ return (NULL);
+ }
+};
+
+#
+# Check a device for a match in a list of ID strings. The strings can be
+# EISA PNP IDs or ACPI _HID/_CID values.
+#
+# device_t bus: parent bus for the device
+#
+# device_t dev: device being considered
+#
+# char **ids: array of ID strings to consider
+#
+# Returns: ID string matched or NULL if no match
+#
+METHOD char * id_probe {
+ device_t bus;
+ device_t dev;
+ char **ids;
+} DEFAULT acpi_generic_id_probe;
+
+#
+# Evaluate an ACPI method or object, given its path.
+#
+# device_t bus: parent bus for the device
+#
+# device_t dev: evaluate the object relative to this device's handle.
+# Specify NULL to begin the search at the ACPI root.
+#
+# ACPI_STRING pathname: absolute or relative path to this object
+#
+# ACPI_OBJECT_LIST *parameters: array of arguments to pass to the object.
+# Specify NULL if there are none.
+#
+# ACPI_BUFFER *ret: the result (if any) of the evaluation
+# Specify NULL if there is none.
+#
+# Returns: AE_OK or an error value
+#
+METHOD ACPI_STATUS evaluate_object {
+ device_t bus;
+ device_t dev;
+ ACPI_STRING pathname;
+ ACPI_OBJECT_LIST *parameters;
+ ACPI_BUFFER *ret;
+};
+
+#
+# Get the highest power state (D0-D3) that is usable for a device when
+# suspending/resuming. If a bus calls this when suspending a device, it
+# must also call it when resuming.
+#
+# device_t bus: parent bus for the device
+#
+# device_t dev: check this device's appropriate power state
+#
+# int *dstate: if successful, contains the highest valid sleep state
+#
+# Returns: 0 on success or some other error value.
+#
+METHOD int pwr_for_sleep {
+ device_t bus;
+ device_t dev;
+ int *dstate;
+};
+
+#
+# Rescan a subtree and optionally reattach devices to handles. Users
+# specify a callback that is called for each ACPI_HANDLE of type Device
+# that is a child of "dev".
+#
+# device_t bus: parent bus for the device
+#
+# device_t dev: begin the scan starting with this device's handle.
+# Specify NULL to begin the scan at the ACPI root.
+#
+# int max_depth: number of levels to traverse (i.e., 1 means just the
+# immediate children).
+#
+# acpi_scan_cb_t user_fn: called for each child handle
+#
+# void *arg: argument to pass to the callback function
+#
+# Returns: AE_OK or an error value, based on the callback return value
+#
+METHOD ACPI_STATUS scan_children {
+ device_t bus;
+ device_t dev;
+ int max_depth;
+ acpi_scan_cb_t user_fn;
+ void *arg;
+};
+
+#
+# Query a given driver for its supported feature(s). This should be
+# called by the parent bus before the driver is probed.
+#
+# driver_t *driver: child driver
+#
+# u_int *features: returned bitmask of all supported features
+#
+STATICMETHOD int get_features {
+ driver_t *driver;
+ u_int *features;
+};
+
+#
+# Read embedded controller (EC) address space
+#
+# device_t dev: EC device
+# u_int addr: Address to read from in EC space
+# UINT64 *val: Location to store read value
+# int width: Size of area to read in bytes
+#
+METHOD int ec_read {
+ device_t dev;
+ u_int addr;
+ UINT64 *val;
+ int width;
+};
+
+#
+# Write embedded controller (EC) address space
+#
+# device_t dev: EC device
+# u_int addr: Address to write to in EC space
+# UINT64 val: Value to write
+# int width: Size of value to write in bytes
+#
+METHOD int ec_write {
+ device_t dev;
+ u_int addr;
+ UINT64 val;
+ int width;
+};
+
+#
+# Get battery information (_BIF format)
+#
+# device_t dev: Battery device
+# struct acpi_bif *bif: Pointer to storage for _BIF results
+#
+METHOD int batt_get_info {
+ device_t dev;
+ struct acpi_bif *bif;
+};
+
+#
+# Get battery status (_BST format)
+#
+# device_t dev: Battery device
+# struct acpi_bst *bst: Pointer to storage for _BST results
+#
+METHOD int batt_get_status {
+ device_t dev;
+ struct acpi_bst *bst;
+};
diff --git a/sys/dev/acpica/acpi_isab.c b/sys/dev/acpica/acpi_isab.c
new file mode 100644
index 0000000..3febea7
--- /dev/null
+++ b/sys/dev/acpica/acpi_isab.c
@@ -0,0 +1,130 @@
+/*-
+ * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * ISA Bridge driver for Generic ISA Bus Devices. See section 10.7 of the
+ * ACPI 2.0a specification for details on this device.
+ */
+
+#include "opt_acpi.h"
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+
+#include <dev/acpica/acpivar.h>
+#include <isa/isavar.h>
+
+/* Hooks for the ACPI CA debugging infrastructure. */
+#define _COMPONENT ACPI_BUS
+ACPI_MODULE_NAME("ISA_ACPI")
+
+/* Per-instance state for an ACPI-enumerated generic ISA bridge. */
+struct acpi_isab_softc {
+    device_t	ap_dev;		/* our newbus device */
+    ACPI_HANDLE	ap_handle;	/* ACPI namespace handle for this bridge */
+};
+
+static int acpi_isab_probe(device_t bus);
+static int acpi_isab_attach(device_t bus);
+static int acpi_isab_read_ivar(device_t dev, device_t child, int which,
+ uintptr_t *result);
+
+static device_method_t acpi_isab_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, acpi_isab_probe),
+ DEVMETHOD(device_attach, acpi_isab_attach),
+ DEVMETHOD(device_shutdown, bus_generic_shutdown),
+ DEVMETHOD(device_suspend, bus_generic_suspend),
+ DEVMETHOD(device_resume, bus_generic_resume),
+
+ /* Bus interface */
+ DEVMETHOD(bus_read_ivar, acpi_isab_read_ivar),
+ DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource),
+ DEVMETHOD(bus_release_resource, bus_generic_release_resource),
+ DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
+ DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
+ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+
+ DEVMETHOD_END
+};
+
+static driver_t acpi_isab_driver = {
+ "isab",
+ acpi_isab_methods,
+ sizeof(struct acpi_isab_softc),
+};
+
+DRIVER_MODULE(acpi_isab, acpi, acpi_isab_driver, isab_devclass, 0, 0);
+MODULE_DEPEND(acpi_isab, acpi, 1, 1, 1);
+
+static int
+acpi_isab_probe(device_t dev)
+{
+    static char *isa_ids[] = { "PNP0A05", "PNP0A06", NULL };
+
+    /* Guard clauses: ACPI enabled, PNP ID match, and first bridge only. */
+    if (acpi_disabled("isab"))
+	return (ENXIO);
+    if (ACPI_ID_PROBE(device_get_parent(dev), dev, isa_ids) == NULL)
+	return (ENXIO);
+    if (devclass_get_device(isab_devclass, 0) != dev)
+	return (ENXIO);
+
+    device_set_desc(dev, "ACPI Generic ISA bridge");
+    return (0);
+}
+
+static int
+acpi_isab_attach(device_t dev)
+{
+    struct acpi_isab_softc *sc = device_get_softc(dev);
+
+    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+    /* Record our device and ACPI handle, then do the generic ISA attach. */
+    sc->ap_dev = dev;
+    sc->ap_handle = acpi_get_handle(dev);
+    return (isab_attach(dev));
+}
+
+static int
+acpi_isab_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
+{
+    struct acpi_isab_softc *sc;
+
+    /* The ACPI handle is the only instance variable we export. */
+    sc = device_get_softc(dev);
+    if (which == ACPI_IVAR_HANDLE) {
+	*result = (uintptr_t)sc->ap_handle;
+	return (0);
+    }
+    return (ENOENT);
+}
diff --git a/sys/dev/acpica/acpi_lid.c b/sys/dev/acpica/acpi_lid.c
new file mode 100644
index 0000000..d84e3c9
--- /dev/null
+++ b/sys/dev/acpica/acpi_lid.c
@@ -0,0 +1,200 @@
+/*-
+ * Copyright (c) 2000 Takanori Watanabe <takawata@jp.freebsd.org>
+ * Copyright (c) 2000 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
+ * Copyright (c) 2000 Michael Smith <msmith@freebsd.org>
+ * Copyright (c) 2000 BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_acpi.h"
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/proc.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+
+#include <dev/acpica/acpivar.h>
+
+/* Hooks for the ACPI CA debugging infrastructure */
+#define _COMPONENT ACPI_BUTTON
+ACPI_MODULE_NAME("LID")
+
+/* Per-instance state for a control-method lid switch. */
+struct acpi_lid_softc {
+    device_t	lid_dev;	/* our newbus device */
+    ACPI_HANDLE	lid_handle;	/* ACPI handle for the lid object */
+    int		lid_status;	/* last _LID result: zero closed, non-zero open */
+};
+
+ACPI_SERIAL_DECL(lid, "ACPI lid");
+
+static int acpi_lid_probe(device_t dev);
+static int acpi_lid_attach(device_t dev);
+static int acpi_lid_suspend(device_t dev);
+static int acpi_lid_resume(device_t dev);
+static void acpi_lid_notify_status_changed(void *arg);
+static void acpi_lid_notify_handler(ACPI_HANDLE h, UINT32 notify,
+ void *context);
+
+static device_method_t acpi_lid_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, acpi_lid_probe),
+ DEVMETHOD(device_attach, acpi_lid_attach),
+ DEVMETHOD(device_suspend, acpi_lid_suspend),
+ DEVMETHOD(device_resume, acpi_lid_resume),
+
+ {0, 0}
+};
+
+static driver_t acpi_lid_driver = {
+ "acpi_lid",
+ acpi_lid_methods,
+ sizeof(struct acpi_lid_softc),
+};
+
+static devclass_t acpi_lid_devclass;
+DRIVER_MODULE(acpi_lid, acpi, acpi_lid_driver, acpi_lid_devclass, 0, 0);
+MODULE_DEPEND(acpi_lid, acpi, 1, 1, 1);
+
+static int
+acpi_lid_probe(device_t dev)
+{
+    static char *lid_ids[] = { "PNP0C0D", NULL };
+
+    /* Match the standard control-method lid PNP ID. */
+    if (acpi_disabled("lid"))
+	return (ENXIO);
+    if (ACPI_ID_PROBE(device_get_parent(dev), dev, lid_ids) == NULL)
+	return (ENXIO);
+
+    device_set_desc(dev, "Control Method Lid Switch");
+    return (0);
+}
+
+/*
+ * Attach: register a device-notify handler so we hear lid transitions
+ * and arm the lid's wake GPE (from _PRW).  The return values of the
+ * ACPICA calls are not checked; attach succeeds regardless.
+ */
+static int
+acpi_lid_attach(device_t dev)
+{
+    struct acpi_prw_data prw;
+    struct acpi_lid_softc *sc;
+
+    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+    sc = device_get_softc(dev);
+    sc->lid_dev = dev;
+    sc->lid_handle = acpi_get_handle(dev);
+
+    /*
+     * If a system does not get lid events, it may make sense to change
+     * the type to ACPI_ALL_NOTIFY.  Some systems generate both a wake and
+     * runtime notify in that case though.
+     */
+    AcpiInstallNotifyHandler(sc->lid_handle, ACPI_DEVICE_NOTIFY,
+	acpi_lid_notify_handler, sc);
+
+    /* Enable the GPE for wake/runtime. */
+    acpi_wake_set_enable(dev, 1);
+    if (acpi_parse_prw(sc->lid_handle, &prw) == 0)
+	AcpiEnableGpe(prw.gpe_handle, prw.gpe_bit);
+
+    return (0);
+}
+
+static int
+acpi_lid_suspend(device_t dev)
+{
+    /* No state to save; _LID is re-evaluated on each notify. */
+    return (0);
+}
+
+static int
+acpi_lid_resume(device_t dev)
+{
+    /* Nothing to restore; lid state is queried on demand. */
+    return (0);
+}
+
+/*
+ * Deferred (AcpiOsExecute) handler: re-evaluate _LID, record the new
+ * state, notify userland via acpi_UserNotify, and invoke the sleep or
+ * wakeup eventhandlers when the lid closes or opens.  All of this runs
+ * under the 'lid' serializer.
+ */
+static void
+acpi_lid_notify_status_changed(void *arg)
+{
+    struct acpi_lid_softc *sc;
+    struct acpi_softc	*acpi_sc;
+    ACPI_STATUS		status;
+
+    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+    sc = (struct acpi_lid_softc *)arg;
+    ACPI_SERIAL_BEGIN(lid);
+
+    /*
+     * Evaluate _LID and check the return value, update lid status.
+     *	Zero:		The lid is closed
+     *	Non-zero:	The lid is open
+     */
+    status = acpi_GetInteger(sc->lid_handle, "_LID", &sc->lid_status);
+    if (ACPI_FAILURE(status))
+	goto out;
+
+    /* Need the parent acpi softc to find the configured lid sleep state. */
+    acpi_sc = acpi_device_get_parent_softc(sc->lid_dev);
+    if (acpi_sc == NULL)
+	goto out;
+
+    ACPI_VPRINT(sc->lid_dev, acpi_sc, "Lid %s\n",
+		sc->lid_status ? "opened" : "closed");
+
+    acpi_UserNotify("Lid", sc->lid_handle, sc->lid_status);
+
+    /* Closed lid requests sleep; open lid requests wakeup. */
+    if (sc->lid_status == 0)
+	EVENTHANDLER_INVOKE(acpi_sleep_event, acpi_sc->acpi_lid_switch_sx);
+    else
+	EVENTHANDLER_INVOKE(acpi_wakeup_event, acpi_sc->acpi_lid_switch_sx);
+
+out:
+    ACPI_SERIAL_END(lid);
+    return_VOID;
+}
+
+/* XXX maybe not here */
+#define ACPI_NOTIFY_STATUS_CHANGED 0x80
+
+static void
+acpi_lid_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context)
+{
+    struct acpi_lid_softc *sc = (struct acpi_lid_softc *)context;
+
+    ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, notify);
+
+    /* Defer the _LID evaluation out of interrupt context. */
+    if (notify == ACPI_NOTIFY_STATUS_CHANGED)
+	AcpiOsExecute(OSL_NOTIFY_HANDLER,
+	    acpi_lid_notify_status_changed, sc);
+    else
+	device_printf(sc->lid_dev, "unknown notify %#x\n", notify);
+
+    return_VOID;
+}
diff --git a/sys/dev/acpica/acpi_package.c b/sys/dev/acpica/acpi_package.c
new file mode 100644
index 0000000..e38fea5
--- /dev/null
+++ b/sys/dev/acpica/acpi_package.c
@@ -0,0 +1,152 @@
+/*-
+ * Copyright (c) 2003 Nate Lawson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/sbuf.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/rman.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+
+#include <dev/acpica/acpivar.h>
+
+/*
+ * Package manipulation convenience functions
+ */
+
+/*
+ * Fetch package element 'idx' of 'res' as an integer into *dst.
+ *
+ * Returns 0 on success, or EINVAL when the index is out of range for
+ * the package or the element is not an ACPI integer.  'res' must be an
+ * ACPI_TYPE_PACKAGE object.
+ */
+int
+acpi_PkgInt(ACPI_OBJECT *res, int idx, UINT64 *dst)
+{
+    ACPI_OBJECT		*obj;
+
+    /*
+     * Bounds-check the index.  The previous "obj == NULL" test could
+     * never fire: the address of an array element is never NULL.
+     */
+    if (idx < 0 || (UINT32)idx >= res->Package.Count)
+	return (EINVAL);
+    obj = &res->Package.Elements[idx];
+    if (obj->Type != ACPI_TYPE_INTEGER)
+	return (EINVAL);
+    *dst = obj->Integer.Value;
+
+    return (0);
+}
+
+/* Fetch package element 'idx' as a 32-bit integer (truncating). */
+int
+acpi_PkgInt32(ACPI_OBJECT *res, int idx, uint32_t *dst)
+{
+    UINT64	wide;
+    int		error;
+
+    error = acpi_PkgInt(res, idx, &wide);
+    if (error != 0)
+	return (error);
+    *dst = (uint32_t)wide;
+    return (0);
+}
+
+/*
+ * Copy package element 'idx' (a String or Buffer object) into the
+ * caller's buffer 'dst' of 'size' bytes, always NUL-terminating.
+ *
+ * Returns 0 on success, EINVAL for an ill-typed element, or E2BIG if
+ * the string plus terminating NUL does not fit in 'size' bytes.
+ */
+int
+acpi_PkgStr(ACPI_OBJECT *res, int idx, void *dst, size_t size)
+{
+    ACPI_OBJECT		*obj;
+    void		*ptr;
+    size_t		 length;
+
+    obj = &res->Package.Elements[idx];
+    if (obj == NULL)
+	return (EINVAL);
+
+    /*
+     * Clear the whole destination buffer.  The old code passed
+     * sizeof(dst), which is the size of the pointer itself (4 or 8
+     * bytes), not the size of the buffer it points to, so longer
+     * buffers kept stale bytes past the copied string.
+     */
+    bzero(dst, size);
+
+    switch (obj->Type) {
+    case ACPI_TYPE_STRING:
+	ptr = obj->String.Pointer;
+	length = obj->String.Length;
+	break;
+    case ACPI_TYPE_BUFFER:
+	ptr = obj->Buffer.Pointer;
+	length = obj->Buffer.Length;
+	break;
+    default:
+	return (EINVAL);
+    }
+
+    /* Make sure string will fit, including terminating NUL */
+    if (++length > size)
+	return (E2BIG);
+
+    strlcpy(dst, ptr, length);
+    return (0);
+}
+
+/*
+ * Convert package element 'idx', a Buffer holding a Register() resource
+ * descriptor, into an allocated bus resource via acpi_bus_alloc_gas().
+ *
+ * The "+ 3" skips the resource descriptor header -- descriptor type
+ * byte plus a 16-bit length -- that precedes the ACPI_GENERIC_ADDRESS
+ * payload in the buffer, which is also why the length check requires
+ * sizeof(ACPI_GENERIC_ADDRESS) + 3 bytes.
+ */
+int
+acpi_PkgGas(device_t dev, ACPI_OBJECT *res, int idx, int *type, int *rid,
+    struct resource **dst, u_int flags)
+{
+    ACPI_GENERIC_ADDRESS gas;
+    ACPI_OBJECT *obj;
+
+    obj = &res->Package.Elements[idx];
+    if (obj == NULL || obj->Type != ACPI_TYPE_BUFFER ||
+	obj->Buffer.Length < sizeof(ACPI_GENERIC_ADDRESS) + 3)
+	return (EINVAL);
+
+    memcpy(&gas, obj->Buffer.Pointer + 3, sizeof(gas));
+
+    return (acpi_bus_alloc_gas(dev, type, rid, &gas, dst, flags));
+}
+
+/* Resolve a reference-like ACPI object to a namespace handle, or NULL. */
+ACPI_HANDLE
+acpi_GetReference(ACPI_HANDLE scope, ACPI_OBJECT *obj)
+{
+    ACPI_HANDLE h = NULL;
+
+    if (obj == NULL)
+	return (NULL);
+
+    if (obj->Type == ACPI_TYPE_LOCAL_REFERENCE || obj->Type == ACPI_TYPE_ANY) {
+	h = obj->Reference.Handle;
+    } else if (obj->Type == ACPI_TYPE_STRING) {
+	/*
+	 * The String object usually contains a fully-qualified path, so
+	 * scope can be NULL.
+	 *
+	 * XXX This may not always be the case.
+	 */
+	if (ACPI_FAILURE(AcpiGetHandle(scope, obj->String.Pointer, &h)))
+	    h = NULL;
+    }
+
+    return (h);
+}
diff --git a/sys/dev/acpica/acpi_pci.c b/sys/dev/acpica/acpi_pci.c
new file mode 100644
index 0000000..185731a
--- /dev/null
+++ b/sys/dev/acpica/acpi_pci.c
@@ -0,0 +1,310 @@
+/*-
+ * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
+ * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
+ * Copyright (c) 2000, BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+
+#include <dev/acpica/acpivar.h>
+
+#include <sys/pciio.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pci_private.h>
+
+#include "pcib_if.h"
+#include "pci_if.h"
+
+/* Hooks for the ACPI CA debugging infrastructure. */
+#define _COMPONENT ACPI_BUS
+ACPI_MODULE_NAME("PCI")
+
+/* Per-child ivars: the standard PCI devinfo plus ACPI bookkeeping. */
+struct acpi_pci_devinfo {
+    struct pci_devinfo	ap_dinfo;	/* embedded first so generic PCI code
+					   can use these ivars directly */
+    ACPI_HANDLE		ap_handle;	/* matching ACPI namespace handle */
+    int			ap_flags;	/* ACPI_IVAR_FLAGS value */
+};
+
+ACPI_SERIAL_DECL(pci_powerstate, "ACPI PCI power methods");
+
+/* Be sure that ACPI and PCI power states are equivalent. */
+CTASSERT(ACPI_STATE_D0 == PCI_POWERSTATE_D0);
+CTASSERT(ACPI_STATE_D1 == PCI_POWERSTATE_D1);
+CTASSERT(ACPI_STATE_D2 == PCI_POWERSTATE_D2);
+CTASSERT(ACPI_STATE_D3 == PCI_POWERSTATE_D3);
+
+static int acpi_pci_attach(device_t dev);
+static int acpi_pci_child_location_str_method(device_t cbdev,
+ device_t child, char *buf, size_t buflen);
+static int acpi_pci_probe(device_t dev);
+static int acpi_pci_read_ivar(device_t dev, device_t child, int which,
+ uintptr_t *result);
+static int acpi_pci_write_ivar(device_t dev, device_t child, int which,
+ uintptr_t value);
+static ACPI_STATUS acpi_pci_save_handle(ACPI_HANDLE handle, UINT32 level,
+ void *context, void **status);
+static int acpi_pci_set_powerstate_method(device_t dev, device_t child,
+ int state);
+static void acpi_pci_update_device(ACPI_HANDLE handle, device_t pci_child);
+
+static device_method_t acpi_pci_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, acpi_pci_probe),
+ DEVMETHOD(device_attach, acpi_pci_attach),
+
+ /* Bus interface */
+ DEVMETHOD(bus_read_ivar, acpi_pci_read_ivar),
+ DEVMETHOD(bus_write_ivar, acpi_pci_write_ivar),
+ DEVMETHOD(bus_child_location_str, acpi_pci_child_location_str_method),
+
+ /* PCI interface */
+ DEVMETHOD(pci_set_powerstate, acpi_pci_set_powerstate_method),
+
+ { 0, 0 }
+};
+
+static devclass_t pci_devclass;
+
+DEFINE_CLASS_1(pci, acpi_pci_driver, acpi_pci_methods, sizeof(struct pci_softc),
+ pci_driver);
+DRIVER_MODULE(acpi_pci, pcib, acpi_pci_driver, pci_devclass, 0, 0);
+MODULE_DEPEND(acpi_pci, acpi, 1, 1, 1);
+MODULE_DEPEND(acpi_pci, pci, 1, 1, 1);
+MODULE_VERSION(acpi_pci, 1);
+
+static int
+acpi_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
+{
+    struct acpi_pci_devinfo *dinfo = device_get_ivars(child);
+
+    /* Serve the ACPI-specific ivars here; defer everything else to PCI. */
+    if (which == ACPI_IVAR_HANDLE) {
+	*result = (uintptr_t)dinfo->ap_handle;
+	return (0);
+    }
+    if (which == ACPI_IVAR_FLAGS) {
+	*result = (uintptr_t)dinfo->ap_flags;
+	return (0);
+    }
+    return (pci_read_ivar(dev, child, which, result));
+}
+
+static int
+acpi_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
+{
+    struct acpi_pci_devinfo *dinfo = device_get_ivars(child);
+
+    /* Accept the ACPI-specific ivars here; defer everything else to PCI. */
+    if (which == ACPI_IVAR_HANDLE) {
+	dinfo->ap_handle = (ACPI_HANDLE)value;
+	return (0);
+    }
+    if (which == ACPI_IVAR_FLAGS) {
+	dinfo->ap_flags = (int)value;
+	return (0);
+    }
+    return (pci_write_ivar(dev, child, which, value));
+}
+
+static int
+acpi_pci_child_location_str_method(device_t cbdev, device_t child, char *buf,
+    size_t buflen)
+{
+    struct acpi_pci_devinfo *dinfo;
+
+    /* Start with the standard PCI location string ... */
+    dinfo = device_get_ivars(child);
+    pci_child_location_str_method(cbdev, child, buf, buflen);
+
+    /* ... and append the ACPI path when this child has a handle. */
+    if (dinfo->ap_handle != NULL) {
+	strlcat(buf, " handle=", buflen);
+	strlcat(buf, acpi_name(dinfo->ap_handle), buflen);
+    }
+    return (0);
+}
+
+/*
+ * PCI power management
+ */
+/*
+ * Set the power state of a child PCI device, coordinating the PCI
+ * power-management registers with the device's ACPI power resources.
+ * Serialized by 'pci_powerstate' so concurrent transitions do not
+ * interleave the two-step sequences described below.
+ *
+ * Returns 0 or an errno from the PCI power-state change; ACPI failures
+ * are logged but not returned.
+ */
+static int
+acpi_pci_set_powerstate_method(device_t dev, device_t child, int state)
+{
+    ACPI_HANDLE h;
+    ACPI_STATUS status;
+    int old_state, error;
+
+    error = 0;
+    if (state < ACPI_STATE_D0 || state > ACPI_STATE_D3)
+	return (EINVAL);
+
+    /*
+     * We set the state using PCI Power Management outside of setting
+     * the ACPI state.  This means that when powering down a device, we
+     * first shut it down using PCI, and then using ACPI, which lets ACPI
+     * try to power down any Power Resources that are now no longer used.
+     * When powering up a device, we let ACPI set the state first so that
+     * it can enable any needed Power Resources before changing the PCI
+     * power state.
+     */
+    ACPI_SERIAL_BEGIN(pci_powerstate);
+    old_state = pci_get_powerstate(child);
+    /* Powering down: PCI transition comes first. */
+    if (old_state < state && pci_do_power_suspend) {
+	error = pci_set_powerstate_method(dev, child, state);
+	if (error)
+	    goto out;
+    }
+    h = acpi_get_handle(child);
+    status = acpi_pwr_switch_consumer(h, state);
+    if (ACPI_SUCCESS(status)) {
+	if (bootverbose)
+	    device_printf(dev, "set ACPI power state D%d on %s\n",
+		state, acpi_name(h));
+    } else if (status != AE_NOT_FOUND)
+	device_printf(dev,
+	    "failed to set ACPI power state D%d on %s: %s\n",
+	    state, acpi_name(h), AcpiFormatException(status));
+    /* Powering up: PCI transition comes after the ACPI switch above. */
+    if (old_state > state && pci_do_power_resume)
+	error = pci_set_powerstate_method(dev, child, state);
+
+out:
+    ACPI_SERIAL_END(pci_powerstate);
+    return (error);
+}
+
+/*
+ * Point ACPI-CA's per-handle device data at the PCI-enumerated device_t
+ * for this handle, unless the handle was already enumerated as a direct
+ * child of acpi0 (e.g. a PCI device that also exposes a _HID).
+ */
+static void
+acpi_pci_update_device(ACPI_HANDLE handle, device_t pci_child)
+{
+    ACPI_STATUS status;
+    device_t child;
+
+    /*
+     * Occasionally a PCI device may show up as an ACPI device
+     * with a _HID.  (For example, the TabletPC TC1000 has a
+     * second PCI-ISA bridge that has a _HID for an
+     * acpi_sysresource device.)  In that case, leave ACPI-CA's
+     * device data pointing at the ACPI-enumerated device.
+     */
+    child = acpi_get_device(handle);
+    if (child != NULL) {
+	KASSERT(device_get_parent(child) ==
+	    devclass_get_device(devclass_find("acpi"), 0),
+	    ("%s: child (%s)'s parent is not acpi0", __func__,
+	    acpi_name(handle)));
+	return;
+    }
+
+    /*
+     * Update ACPI-CA to use the PCI enumerated device_t for this handle.
+     */
+    status = AcpiAttachData(handle, acpi_fake_objhandler, pci_child);
+    if (ACPI_FAILURE(status))
+	printf("WARNING: Unable to attach object data to %s - %s\n",
+	    acpi_name(handle), AcpiFormatException(status));
+}
+
+/*
+ * AcpiWalkNamespace callback: match an ACPI child (by the PCI slot and
+ * function encoded in its _ADR) against the PCI bus's children and save
+ * the handle in that child's ivars.  'context' is the acpi_pci bus
+ * device.  Always returns AE_OK so the walk visits every sibling.
+ */
+static ACPI_STATUS
+acpi_pci_save_handle(ACPI_HANDLE handle, UINT32 level, void *context,
+    void **status)
+{
+    struct acpi_pci_devinfo *dinfo;
+    device_t *devlist;
+    int devcount, i, func, slot;
+    UINT32 address;
+
+    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+    /* Objects without _ADR cannot be matched to a PCI slot/function. */
+    if (ACPI_FAILURE(acpi_GetInteger(handle, "_ADR", &address)))
+	return_ACPI_STATUS (AE_OK);
+    slot = ACPI_ADR_PCI_SLOT(address);
+    func = ACPI_ADR_PCI_FUNC(address);
+    if (device_get_children((device_t)context, &devlist, &devcount) != 0)
+	return_ACPI_STATUS (AE_OK);
+    for (i = 0; i < devcount; i++) {
+	dinfo = device_get_ivars(devlist[i]);
+	if (dinfo->ap_dinfo.cfg.func == func &&
+	    dinfo->ap_dinfo.cfg.slot == slot) {
+	    dinfo->ap_handle = handle;
+	    acpi_pci_update_device(handle, devlist[i]);
+	    break;
+	}
+    }
+    free(devlist, M_TEMP);
+    return_ACPI_STATUS (AE_OK);
+}
+
+/*
+ * Probe: claim a PCI bus only when its pcib parent associated an ACPI
+ * handle with it; otherwise let the plain PCI driver take it.
+ */
+static int
+acpi_pci_probe(device_t dev)
+{
+
+    if (acpi_get_handle(dev) == NULL)
+	return (ENXIO);
+    device_set_desc(dev, "ACPI PCI bus");
+    return (0);
+}
+
+/*
+ * Attach: enumerate PCI children normally, then walk the ACPI namespace
+ * below this bridge to attach ACPI handles to the matching children.
+ */
+static int
+acpi_pci_attach(device_t dev)
+{
+    int busno, domain, error;
+
+    error = pci_attach_common(dev);
+    if (error)
+	return (error);
+
+    /*
+     * Since there can be multiple independently numbered PCI
+     * busses on systems with multiple PCI domains, we can't use
+     * the unit number to decide which bus we are probing.  We ask
+     * the parent pcib what our domain and bus numbers are.
+     */
+    domain = pcib_get_domain(dev);
+    busno = pcib_get_bus(dev);
+
+    /*
+     * First, PCI devices are added as in the normal PCI bus driver.
+     * Afterwards, the ACPI namespace under the bridge driver is
+     * walked to save ACPI handles to all the devices that appear in
+     * the ACPI namespace as immediate descendants of the bridge.
+     *
+     * XXX: Sometimes PCI devices show up in the ACPI namespace that
+     * pci_add_children() doesn't find.  We currently just ignore
+     * these devices.
+     */
+    pci_add_children(dev, domain, busno, sizeof(struct acpi_pci_devinfo));
+    AcpiWalkNamespace(ACPI_TYPE_DEVICE, acpi_get_handle(dev), 1,
+	acpi_pci_save_handle, NULL, dev, NULL);
+
+    return (bus_generic_attach(dev));
+}
diff --git a/sys/dev/acpica/acpi_pci_link.c b/sys/dev/acpica/acpi_pci_link.c
new file mode 100644
index 0000000..ba03d72
--- /dev/null
+++ b/sys/dev/acpica/acpi_pci_link.c
@@ -0,0 +1,1113 @@
+/*-
+ * Copyright (c) 2002 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_acpi.h"
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/limits.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+
+#include <dev/acpica/acpivar.h>
+#include <dev/acpica/acpi_pcibvar.h>
+
+#include <machine/pci_cfgreg.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include "pcib_if.h"
+
+/* Hooks for the ACPI CA debugging infrastructure. */
+#define _COMPONENT ACPI_BUS
+ACPI_MODULE_NAME("PCI_LINK")
+
+ACPI_SERIAL_DECL(pci_link, "ACPI PCI link");
+
+#define NUM_ISA_INTERRUPTS 16
+#define NUM_ACPI_INTERRUPTS 256
+
+/*
+ * An ACPI PCI link device may contain multiple links. Each link has its
+ * own ACPI resource. _PRT entries specify which link is being used via
+ * the Source Index.
+ *
+ * XXX: A note about Source Indices and DPFs: Currently we assume that
+ * the DPF start and end tags are not counted towards the index that
+ * Source Index corresponds to. Also, we assume that when DPFs are in use
+ * they various sets overlap in terms of Indices. Here's an example
+ * resource list indicating these assumptions:
+ *
+ * Resource Index
+ * -------- -----
+ * I/O Port 0
+ * Start DPF -
+ * IRQ 1
+ * MemIO 2
+ * Start DPF -
+ * IRQ 1
+ * MemIO 2
+ * End DPF -
+ * DMA Channel 3
+ *
+ * The XXX is because I'm not sure if this is a valid assumption to make.
+ */
+
+/* States during DPF processing. */
+#define DPF_OUTSIDE 0
+#define DPF_FIRST 1
+#define DPF_IGNORE 2
+
+struct link;
+
+/* Per-device state: one softc covers all links within a link device. */
+struct acpi_pci_link_softc {
+    int pl_num_links;		/* number of entries in pl_links */
+    int pl_crs_bad;		/* _CRS unusable; indices come from _PRS */
+    struct link *pl_links;	/* array of per-link state */
+    device_t pl_dev;
+};
+
+/* State for a single link (one IRQ resource within the device). */
+struct link {
+    struct acpi_pci_link_softc *l_sc;	/* back pointer to owning softc */
+    uint8_t	l_bios_irq;	/* IRQ the BIOS routed, if any */
+    uint8_t	l_irq;		/* currently programmed IRQ */
+    uint8_t	l_initial_irq;	/* IRQ found in _CRS at attach */
+    int		l_res_index;	/* index of this link in the resource list */
+    int		l_num_irqs;	/* count of valid IRQs in l_irqs */
+    int		*l_irqs;	/* possible IRQs from _PRS */
+    int		l_references;	/* number of _PRT entries using this link */
+    int		l_routed:1;	/* IRQ has been programmed via _SRS */
+    int		l_isa_irq:1;	/* all possible IRQs are ISA IRQs */
+    ACPI_RESOURCE l_prs_template; /* saved _PRS resource, reused for _SRS */
+};
+
+/* Walk context for counting IRQ resources (see acpi_count_irq_resources). */
+struct link_count_request {
+    int	in_dpf;		/* DPF_* parsing state */
+    int	count;		/* IRQ resources seen so far */
+};
+
+/* Walk context for parsing _CRS/_PRS into the link array. */
+struct link_res_request {
+    struct acpi_pci_link_softc *sc;
+    int	in_dpf;		/* DPF_* parsing state */
+    int	res_index;	/* index of the current resource */
+    int	link_index;	/* index of the next link to fill in */
+};
+
+static MALLOC_DEFINE(M_PCI_LINK, "pci_link", "ACPI PCI Link structures");
+
+static int pci_link_interrupt_weights[NUM_ACPI_INTERRUPTS];
+static int pci_link_bios_isa_irqs;
+
+static char *pci_link_ids[] = { "PNP0C0F", NULL };
+
+/*
+ * Fetch the short name associated with an ACPI handle and save it in the
+ * passed in buffer.
+ */
+static ACPI_STATUS
+acpi_short_name(ACPI_HANDLE handle, char *buffer, size_t buflen)
+{
+    ACPI_BUFFER out;
+
+    /* Let ACPICA write the single-segment name into the caller's buffer. */
+    out.Pointer = buffer;
+    out.Length = buflen;
+    return (AcpiGetName(handle, ACPI_SINGLE_NAME, &out));
+}
+
+static int
+acpi_pci_link_probe(device_t dev)
+{
+    char descr[28], name[12];
+    ACPI_STATUS status;
+
+    /*
+     * We explicitly do not check _STA since not all systems set it to
+     * sensible values.
+     */
+    if (acpi_disabled("pci_link"))
+	return (ENXIO);
+    if (ACPI_ID_PROBE(device_get_parent(dev), dev, pci_link_ids) == NULL)
+	return (ENXIO);
+
+    /* Include the link's short name in the description when we can. */
+    status = acpi_short_name(acpi_get_handle(dev), name, sizeof(name));
+    if (ACPI_SUCCESS(status)) {
+	snprintf(descr, sizeof(descr), "ACPI PCI Link %s", name);
+	device_set_desc_copy(dev, descr);
+    } else
+	device_set_desc(dev, "ACPI PCI Link");
+    device_quiet(dev);
+    return (0);
+}
+
+/*
+ * AcpiWalkResources callback: count the IRQ resources (links) in a link
+ * device's resource list.  Only the first dependent-function set (DPF)
+ * contributes to the count; later sets are assumed to repeat the same
+ * indices (see the Source Index note above).
+ */
+static ACPI_STATUS
+acpi_count_irq_resources(ACPI_RESOURCE *res, void *context)
+{
+    struct link_count_request *req;
+
+    req = (struct link_count_request *)context;
+    switch (res->Type) {
+    case ACPI_RESOURCE_TYPE_START_DEPENDENT:
+	switch (req->in_dpf) {
+	case DPF_OUTSIDE:
+	    /* We've started the first DPF. */
+	    req->in_dpf = DPF_FIRST;
+	    break;
+	case DPF_FIRST:
+	    /* We've started the second DPF. */
+	    req->in_dpf = DPF_IGNORE;
+	    break;
+	}
+	break;
+    case ACPI_RESOURCE_TYPE_END_DEPENDENT:
+	/* We are finished with DPF parsing. */
+	KASSERT(req->in_dpf != DPF_OUTSIDE,
+	    ("%s: end dpf when not parsing a dpf", __func__));
+	req->in_dpf = DPF_OUTSIDE;
+	break;
+    case ACPI_RESOURCE_TYPE_IRQ:
+    case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+	/*
+	 * Don't count resources if we are in a DPF set that we are
+	 * ignoring.
+	 */
+	if (req->in_dpf != DPF_IGNORE)
+	    req->count++;
+    }
+    return (AE_OK);
+}
+
+/*
+ * AcpiWalkResources callback for _CRS: record each link's resource
+ * index and its currently routed IRQ.  Unlike the _PRS walk, finding a
+ * second dependent-function set in _CRS is a hard error (panic), since
+ * _CRS is supposed to describe the single current setting.  Must be
+ * called with the pci_link serializer held.
+ */
+static ACPI_STATUS
+link_add_crs(ACPI_RESOURCE *res, void *context)
+{
+    struct link_res_request *req;
+    struct link *link;
+
+    ACPI_SERIAL_ASSERT(pci_link);
+    req = (struct link_res_request *)context;
+    switch (res->Type) {
+    case ACPI_RESOURCE_TYPE_START_DEPENDENT:
+	switch (req->in_dpf) {
+	case DPF_OUTSIDE:
+	    /* We've started the first DPF. */
+	    req->in_dpf = DPF_FIRST;
+	    break;
+	case DPF_FIRST:
+	    /* We've started the second DPF. */
+	    panic(
+	"%s: Multiple dependent functions within a current resource",
+		__func__);
+	    break;
+	}
+	break;
+    case ACPI_RESOURCE_TYPE_END_DEPENDENT:
+	/* We are finished with DPF parsing. */
+	KASSERT(req->in_dpf != DPF_OUTSIDE,
+	    ("%s: end dpf when not parsing a dpf", __func__));
+	req->in_dpf = DPF_OUTSIDE;
+	break;
+    case ACPI_RESOURCE_TYPE_IRQ:
+    case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+	KASSERT(req->link_index < req->sc->pl_num_links,
+	    ("%s: array boundary violation", __func__));
+	link = &req->sc->pl_links[req->link_index];
+	link->l_res_index = req->res_index;
+	req->link_index++;
+	req->res_index++;
+
+	/*
+	 * Only use the current value if there's one IRQ.  Some
+	 * systems return multiple IRQs (which is nonsense for _CRS)
+	 * when the link hasn't been programmed.
+	 */
+	if (res->Type == ACPI_RESOURCE_TYPE_IRQ) {
+	    if (res->Data.Irq.InterruptCount == 1)
+		link->l_irq = res->Data.Irq.Interrupts[0];
+	} else if (res->Data.ExtendedIrq.InterruptCount == 1)
+	    link->l_irq = res->Data.ExtendedIrq.Interrupts[0];
+
+	/*
+	 * An IRQ of zero means that the link isn't routed.
+	 */
+	if (link->l_irq == 0)
+	    link->l_irq = PCI_INVALID_IRQ;
+	break;
+    default:
+	/* Non-IRQ resources still consume a Source Index slot. */
+	req->res_index++;
+    }
+    return (AE_OK);
+}
+
+/*
+ * Populate the set of possible IRQs for each device.
+ *
+ * AcpiWalkResources() callback for _PRS: for each IRQ/ExtIRQ resource it
+ * records the candidate IRQ list for the matching link, stashes a template
+ * resource for building _SRS later, and marks links whose candidates are
+ * all ISA IRQs.  Only the first dependent-function set is honored; later
+ * sets are skipped via DPF_IGNORE.
+ */
+static ACPI_STATUS
+link_add_prs(ACPI_RESOURCE *res, void *context)
+{
+	ACPI_RESOURCE *tmp;
+	struct link_res_request *req;
+	struct link *link;
+	UINT8 *irqs = NULL;
+	UINT32 *ext_irqs = NULL;
+	int i, is_ext_irq = 1;
+
+	ACPI_SERIAL_ASSERT(pci_link);
+	req = (struct link_res_request *)context;
+	switch (res->Type) {
+	case ACPI_RESOURCE_TYPE_START_DEPENDENT:
+		switch (req->in_dpf) {
+		case DPF_OUTSIDE:
+			/* We've started the first DPF. */
+			req->in_dpf = DPF_FIRST;
+			break;
+		case DPF_FIRST:
+			/* We've started the second DPF. */
+			req->in_dpf = DPF_IGNORE;
+			break;
+		}
+		break;
+	case ACPI_RESOURCE_TYPE_END_DEPENDENT:
+		/* We are finished with DPF parsing. */
+		KASSERT(req->in_dpf != DPF_OUTSIDE,
+		    ("%s: end dpf when not parsing a dpf", __func__));
+		req->in_dpf = DPF_OUTSIDE;
+		break;
+	case ACPI_RESOURCE_TYPE_IRQ:
+		is_ext_irq = 0;
+		/* fall through */
+	case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+		/*
+		 * Don't parse resources if we are in a DPF set that we are
+		 * ignoring.
+		 */
+		if (req->in_dpf == DPF_IGNORE)
+			break;
+
+		KASSERT(req->link_index < req->sc->pl_num_links,
+		    ("%s: array boundary violation", __func__));
+		link = &req->sc->pl_links[req->link_index];
+		/*
+		 * When _CRS was unusable the link has no res_index yet;
+		 * derive it from our position within _PRS instead.
+		 */
+		if (link->l_res_index == -1) {
+			KASSERT(req->sc->pl_crs_bad,
+			    ("res_index should be set"));
+			link->l_res_index = req->res_index;
+		}
+		req->link_index++;
+		req->res_index++;
+
+		/*
+		 * Stash a copy of the resource for later use when doing
+		 * _SRS.
+		 */
+		tmp = &link->l_prs_template;
+		if (is_ext_irq) {
+			bcopy(res, tmp, ACPI_RS_SIZE(tmp->Data.ExtendedIrq));
+
+			/*
+			 * XXX acpi_AppendBufferResource() cannot handle
+			 * optional data.
+			 */
+			bzero(&tmp->Data.ExtendedIrq.ResourceSource,
+			    sizeof(tmp->Data.ExtendedIrq.ResourceSource));
+			tmp->Length = ACPI_RS_SIZE(tmp->Data.ExtendedIrq);
+
+			link->l_num_irqs =
+			    res->Data.ExtendedIrq.InterruptCount;
+			ext_irqs = res->Data.ExtendedIrq.Interrupts;
+		} else {
+			bcopy(res, tmp, ACPI_RS_SIZE(tmp->Data.Irq));
+			link->l_num_irqs = res->Data.Irq.InterruptCount;
+			irqs = res->Data.Irq.Interrupts;
+		}
+		if (link->l_num_irqs == 0)
+			break;
+
+		/*
+		 * Save a list of the valid IRQs. Also, if all of the
+		 * valid IRQs are ISA IRQs, then mark this link as
+		 * routed via an ISA interrupt.
+		 */
+		link->l_isa_irq = TRUE;
+		link->l_irqs = malloc(sizeof(int) * link->l_num_irqs,
+		    M_PCI_LINK, M_WAITOK | M_ZERO);
+		for (i = 0; i < link->l_num_irqs; i++) {
+			if (is_ext_irq) {
+				link->l_irqs[i] = ext_irqs[i];
+				if (ext_irqs[i] >= NUM_ISA_INTERRUPTS)
+					link->l_isa_irq = FALSE;
+			} else {
+				link->l_irqs[i] = irqs[i];
+				if (irqs[i] >= NUM_ISA_INTERRUPTS)
+					link->l_isa_irq = FALSE;
+			}
+		}
+		break;
+	default:
+		if (req->in_dpf == DPF_IGNORE)
+			break;
+		/* Non-IRQ resources cannot be reproduced from templates. */
+		if (req->sc->pl_crs_bad)
+			device_printf(req->sc->pl_dev,
+			    "Warning: possible resource %d will be lost during _SRS\n",
+			    req->res_index);
+		req->res_index++;
+	}
+	return (AE_OK);
+}
+
+/*
+ * Decide whether "irq" is an acceptable routing target for "link".
+ * An IRQ qualifies if it appears in the link's list of possible IRQs,
+ * or if it is the SCI and both the SCI and the link live in the ISA
+ * IRQ range.
+ */
+static int
+link_valid_irq(struct link *link, int irq)
+{
+	int idx;
+
+	ACPI_SERIAL_ASSERT(pci_link);
+
+	/* Reject anything that is not a usable interrupt number. */
+	if (!PCI_INTERRUPT_VALID(irq))
+		return (FALSE);
+
+	/* Accept any IRQ listed as a possible setting for this link. */
+	for (idx = 0; idx < link->l_num_irqs; idx++) {
+		if (link->l_irqs[idx] == irq)
+			return (TRUE);
+	}
+
+	/*
+	 * The SCI is always treated as valid for ISA-routed links, as
+	 * long as the SCI itself is an ISA interrupt.
+	 */
+	if (link->l_isa_irq && irq < NUM_ISA_INTERRUPTS &&
+	    AcpiGbl_FADT.SciInterrupt == irq)
+		return (TRUE);
+
+	/* Not in the list and not the SCI: invalid. */
+	return (FALSE);
+}
+
+/*
+ * Print a one-line-per-link summary of this device's links: index,
+ * current IRQ, routed flag, reference count, and the candidate IRQ
+ * list.  When "header" is set a column header is printed first; "tag"
+ * labels the first row.
+ */
+static void
+acpi_pci_link_dump(struct acpi_pci_link_softc *sc, int header, const char *tag)
+{
+	struct link *lnk;
+	char namebuf[16];
+	int idx, n;
+
+	ACPI_SERIAL_ASSERT(pci_link);
+	if (header) {
+		snprintf(namebuf, sizeof(namebuf), "%s:",
+		    device_get_nameunit(sc->pl_dev));
+		printf("%-16.16s Index IRQ Rtd Ref IRQs\n", namebuf);
+	}
+	for (idx = 0; idx < sc->pl_num_links; idx++) {
+		lnk = &sc->pl_links[idx];
+		printf(" %-14.14s %5d %3d %c %3d ", idx == 0 ? tag : "", idx,
+		    lnk->l_irq, lnk->l_routed ? 'Y' : 'N',
+		    lnk->l_references);
+		if (lnk->l_num_irqs == 0) {
+			printf(" none");
+		} else {
+			for (n = 0; n < lnk->l_num_irqs; n++)
+				printf(" %d", lnk->l_irqs[n]);
+		}
+		printf("\n");
+	}
+}
+
+/*
+ * Attach a link device: size the link array from _CRS (or from _PRS
+ * when _CRS does not parse), read the current and possible IRQ
+ * settings for each link, then disable the device via _DIS so routing
+ * starts from a known state.  Returns 0 on success, ENXIO when
+ * neither resource method can be parsed.
+ */
+static int
+acpi_pci_link_attach(device_t dev)
+{
+	struct acpi_pci_link_softc *sc;
+	struct link_count_request creq;
+	struct link_res_request rreq;
+	ACPI_STATUS status;
+	int i;
+
+	sc = device_get_softc(dev);
+	sc->pl_dev = dev;
+	ACPI_SERIAL_BEGIN(pci_link);
+
+	/*
+	 * Count the number of current resources so we know how big of
+	 * a link array to allocate. On some systems, _CRS is broken,
+	 * so for those systems try to derive the count from _PRS instead.
+	 */
+	creq.in_dpf = DPF_OUTSIDE;
+	creq.count = 0;
+	status = AcpiWalkResources(acpi_get_handle(dev), "_CRS",
+	    acpi_count_irq_resources, &creq);
+	sc->pl_crs_bad = ACPI_FAILURE(status);
+	if (sc->pl_crs_bad) {
+		creq.in_dpf = DPF_OUTSIDE;
+		creq.count = 0;
+		status = AcpiWalkResources(acpi_get_handle(dev), "_PRS",
+		    acpi_count_irq_resources, &creq);
+		if (ACPI_FAILURE(status)) {
+			device_printf(dev,
+			    "Unable to parse _CRS or _PRS: %s\n",
+			    AcpiFormatException(status));
+			ACPI_SERIAL_END(pci_link);
+			return (ENXIO);
+		}
+	}
+	sc->pl_num_links = creq.count;
+	/* A link device with no IRQ resources has nothing to manage. */
+	if (creq.count == 0) {
+		ACPI_SERIAL_END(pci_link);
+		return (0);
+	}
+	sc->pl_links = malloc(sizeof(struct link) * sc->pl_num_links,
+	    M_PCI_LINK, M_WAITOK | M_ZERO);
+
+	/* Initialize the child links. */
+	for (i = 0; i < sc->pl_num_links; i++) {
+		sc->pl_links[i].l_irq = PCI_INVALID_IRQ;
+		sc->pl_links[i].l_bios_irq = PCI_INVALID_IRQ;
+		sc->pl_links[i].l_sc = sc;
+		sc->pl_links[i].l_isa_irq = FALSE;
+		/* -1 == "not yet known"; link_add_crs/_prs fill it in. */
+		sc->pl_links[i].l_res_index = -1;
+	}
+
+	/* Try to read the current settings from _CRS if it is valid. */
+	if (!sc->pl_crs_bad) {
+		rreq.in_dpf = DPF_OUTSIDE;
+		rreq.link_index = 0;
+		rreq.res_index = 0;
+		rreq.sc = sc;
+		status = AcpiWalkResources(acpi_get_handle(dev), "_CRS",
+		    link_add_crs, &rreq);
+		if (ACPI_FAILURE(status)) {
+			device_printf(dev, "Unable to parse _CRS: %s\n",
+			    AcpiFormatException(status));
+			goto fail;
+		}
+	}
+
+	/*
+	 * Try to read the possible settings from _PRS. Note that if the
+	 * _CRS is toast, we depend on having a working _PRS. However, if
+	 * _CRS works, then it is ok for _PRS to be missing.
+	 */
+	rreq.in_dpf = DPF_OUTSIDE;
+	rreq.link_index = 0;
+	rreq.res_index = 0;
+	rreq.sc = sc;
+	status = AcpiWalkResources(acpi_get_handle(dev), "_PRS",
+	    link_add_prs, &rreq);
+	if (ACPI_FAILURE(status) &&
+	    (status != AE_NOT_FOUND || sc->pl_crs_bad)) {
+		device_printf(dev, "Unable to parse _PRS: %s\n",
+		    AcpiFormatException(status));
+		goto fail;
+	}
+	if (bootverbose)
+		acpi_pci_link_dump(sc, 1, "Initial Probe");
+
+	/* Verify initial IRQs if we have _PRS. */
+	if (status != AE_NOT_FOUND)
+		for (i = 0; i < sc->pl_num_links; i++)
+			if (!link_valid_irq(&sc->pl_links[i],
+			    sc->pl_links[i].l_irq))
+				sc->pl_links[i].l_irq = PCI_INVALID_IRQ;
+	if (bootverbose)
+		acpi_pci_link_dump(sc, 0, "Validation");
+
+	/* Save initial IRQs. */
+	for (i = 0; i < sc->pl_num_links; i++)
+		sc->pl_links[i].l_initial_irq = sc->pl_links[i].l_irq;
+
+	/*
+	 * Try to disable this link. If successful, set the current IRQ to
+	 * zero and flags to indicate this link is not routed. If we can't
+	 * run _DIS (i.e., the method doesn't exist), assume the initial
+	 * IRQ was routed by the BIOS.
+	 */
+	if (ACPI_SUCCESS(AcpiEvaluateObject(acpi_get_handle(dev), "_DIS", NULL,
+	    NULL)))
+		for (i = 0; i < sc->pl_num_links; i++)
+			sc->pl_links[i].l_irq = PCI_INVALID_IRQ;
+	else
+		for (i = 0; i < sc->pl_num_links; i++)
+			if (PCI_INTERRUPT_VALID(sc->pl_links[i].l_irq))
+				sc->pl_links[i].l_routed = TRUE;
+	if (bootverbose)
+		acpi_pci_link_dump(sc, 0, "After Disable");
+	ACPI_SERIAL_END(pci_link);
+	return (0);
+fail:
+	/* Release the per-link IRQ lists allocated by link_add_prs(). */
+	ACPI_SERIAL_END(pci_link);
+	for (i = 0; i < sc->pl_num_links; i++)
+		if (sc->pl_links[i].l_irqs != NULL)
+			free(sc->pl_links[i].l_irqs, M_PCI_LINK);
+	free(sc->pl_links, M_PCI_LINK);
+	return (ENXIO);
+}
+
+/* XXX: Note that this is identical to pci_pir_search_irq(). */
+static uint8_t
+acpi_pci_link_search_irq(int bus, int device, int pin)
+{
+	uint32_t val;
+	uint8_t fn, last_fn;
+
+	/* Probe function 0 first; bail if no device is present. */
+	val = pci_cfgregread(bus, device, 0, PCIR_HDRTYPE, 1);
+	if ((val & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
+		return (PCI_INVALID_IRQ);
+	last_fn = (val & PCIM_MFDEV) ? PCI_FUNCMAX : 0;
+
+	/* Walk every function on this device. */
+	for (fn = 0; fn <= last_fn; fn++) {
+		val = pci_cfgregread(bus, device, fn, PCIR_DEVVENDOR, 4);
+		if (val == 0xffffffff)
+			continue;
+		val = pci_cfgregread(bus, device, fn, PCIR_INTPIN, 1);
+
+		/*
+		 * Our caller's pin is 0-based (0 = INTA, .. 3 = INTD);
+		 * the intpin register is 1-based with 0 meaning "no
+		 * interrupt", so match against pin + 1.
+		 */
+		if (val != pin + 1)
+			continue;
+		val = pci_cfgregread(bus, device, fn, PCIR_INTLINE, 1);
+		if (bootverbose)
+			printf(
+	"ACPI: Found matching pin for %d.%d.INT%c at func %d: %d\n",
+			    bus, device, pin + 'A', fn, val);
+		if (val != PCI_INVALID_IRQ)
+			return (val);
+	}
+	return (PCI_INVALID_IRQ);
+}
+
+/*
+ * Find the link structure that corresponds to the resource index passed in
+ * via 'source_index'.  Returns NULL when no link claims that index.
+ */
+static struct link *
+acpi_pci_link_lookup(device_t dev, int source_index)
+{
+	struct acpi_pci_link_softc *sc;
+	struct link *lnk;
+	int idx;
+
+	ACPI_SERIAL_ASSERT(pci_link);
+	sc = device_get_softc(dev);
+	for (idx = 0; idx < sc->pl_num_links; idx++) {
+		lnk = &sc->pl_links[idx];
+		if (lnk->l_res_index == source_index)
+			return (lnk);
+	}
+	return (NULL);
+}
+
+/*
+ * Record that (pcib, slot, pin) consumes resource 'index' of link
+ * device 'dev': bump the link's reference count and, for ISA-routed
+ * links, try to recover the IRQ the BIOS programmed in a matching
+ * device's interrupt-line register so it can be preferred over a
+ * possibly-bogus _CRS value.
+ */
+void
+acpi_pci_link_add_reference(device_t dev, int index, device_t pcib, int slot,
+    int pin)
+{
+	struct link *link;
+	uint8_t bios_irq;
+	uintptr_t bus;
+
+	/*
+	 * Look up the PCI bus for the specified PCI bridge device. Note
+	 * that the PCI bridge device might not have any children yet.
+	 * However, looking up its bus number doesn't require a valid child
+	 * device, so we just pass NULL.
+	 */
+	if (BUS_READ_IVAR(pcib, NULL, PCIB_IVAR_BUS, &bus) != 0) {
+		device_printf(pcib, "Unable to read PCI bus number");
+		panic("PCI bridge without a bus number");
+	}
+
+	/* Bump the reference count. */
+	ACPI_SERIAL_BEGIN(pci_link);
+	link = acpi_pci_link_lookup(dev, index);
+	if (link == NULL) {
+		device_printf(dev, "apparently invalid index %d\n", index);
+		ACPI_SERIAL_END(pci_link);
+		return;
+	}
+	link->l_references++;
+	/* A routed link contributes its IRQ to the global weight table. */
+	if (link->l_routed)
+		pci_link_interrupt_weights[link->l_irq]++;
+
+	/*
+	 * The BIOS only routes interrupts via ISA IRQs using the ATPICs
+	 * (8259As). Thus, if this link is routed via an ISA IRQ, go
+	 * look to see if the BIOS routed an IRQ for this link at the
+	 * indicated (bus, slot, pin). If so, we prefer that IRQ for
+	 * this link and add that IRQ to our list of known-good IRQs.
+	 * This provides a good work-around for link devices whose _CRS
+	 * method is either broken or bogus. We only use the value
+	 * returned by _CRS if we can't find a valid IRQ via this method
+	 * in fact.
+	 *
+	 * If this link is not routed via an ISA IRQ (because we are using
+	 * APIC for example), then don't bother looking up the BIOS IRQ
+	 * as if we find one it won't be valid anyway.
+	 */
+	if (!link->l_isa_irq) {
+		ACPI_SERIAL_END(pci_link);
+		return;
+	}
+
+	/* Try to find a BIOS IRQ setting from any matching devices. */
+	bios_irq = acpi_pci_link_search_irq(bus, slot, pin);
+	if (!PCI_INTERRUPT_VALID(bios_irq)) {
+		ACPI_SERIAL_END(pci_link);
+		return;
+	}
+
+	/* Validate the BIOS IRQ. */
+	if (!link_valid_irq(link, bios_irq)) {
+		device_printf(dev, "BIOS IRQ %u for %d.%d.INT%c is invalid\n",
+		    bios_irq, (int)bus, slot, pin + 'A');
+	} else if (!PCI_INTERRUPT_VALID(link->l_bios_irq)) {
+		/* First BIOS IRQ seen for this link: remember it. */
+		link->l_bios_irq = bios_irq;
+		if (bios_irq < NUM_ISA_INTERRUPTS)
+			pci_link_bios_isa_irqs |= (1 << bios_irq);
+		if (bios_irq != link->l_initial_irq &&
+		    PCI_INTERRUPT_VALID(link->l_initial_irq))
+			device_printf(dev,
+			    "BIOS IRQ %u does not match initial IRQ %u\n",
+			    bios_irq, link->l_initial_irq);
+	} else if (bios_irq != link->l_bios_irq)
+		device_printf(dev,
+	    "BIOS IRQ %u for %d.%d.INT%c does not match previous BIOS IRQ %u\n",
+		    bios_irq, (int)bus, slot, pin + 'A',
+		    link->l_bios_irq);
+	ACPI_SERIAL_END(pci_link);
+}
+
+/*
+ * Build an _SRS buffer by fetching the device's current _CRS and
+ * rewriting each IRQ/ExtIRQ resource in place to select the single IRQ
+ * chosen for the matching link (0 when the link is unrouted).  On
+ * success the caller owns srsbuf->Pointer and must AcpiOsFree() it.
+ */
+static ACPI_STATUS
+acpi_pci_link_srs_from_crs(struct acpi_pci_link_softc *sc, ACPI_BUFFER *srsbuf)
+{
+	ACPI_RESOURCE *end, *res;
+	ACPI_STATUS status;
+	struct link *link;
+	int i, in_dpf;
+
+	/* Fetch the _CRS. */
+	ACPI_SERIAL_ASSERT(pci_link);
+	srsbuf->Pointer = NULL;
+	srsbuf->Length = ACPI_ALLOCATE_BUFFER;
+	status = AcpiGetCurrentResources(acpi_get_handle(sc->pl_dev), srsbuf);
+	if (ACPI_SUCCESS(status) && srsbuf->Pointer == NULL)
+		status = AE_NO_MEMORY;
+	if (ACPI_FAILURE(status)) {
+		if (bootverbose)
+			device_printf(sc->pl_dev,
+			    "Unable to fetch current resources: %s\n",
+			    AcpiFormatException(status));
+		return (status);
+	}
+
+	/* Fill in IRQ resources via link structures. */
+	link = sc->pl_links;
+	i = 0;
+	in_dpf = DPF_OUTSIDE;
+	res = (ACPI_RESOURCE *)srsbuf->Pointer;
+	end = (ACPI_RESOURCE *)((char *)srsbuf->Pointer + srsbuf->Length);
+	for (;;) {
+		switch (res->Type) {
+		case ACPI_RESOURCE_TYPE_START_DEPENDENT:
+			switch (in_dpf) {
+			case DPF_OUTSIDE:
+				/* We've started the first DPF. */
+				in_dpf = DPF_FIRST;
+				break;
+			case DPF_FIRST:
+				/* We've started the second DPF. */
+				panic(
+		"%s: Multiple dependent functions within a current resource",
+				    __func__);
+				break;
+			}
+			break;
+		case ACPI_RESOURCE_TYPE_END_DEPENDENT:
+			/* We are finished with DPF parsing. */
+			KASSERT(in_dpf != DPF_OUTSIDE,
+			    ("%s: end dpf when not parsing a dpf", __func__));
+			in_dpf = DPF_OUTSIDE;
+			break;
+		case ACPI_RESOURCE_TYPE_IRQ:
+			MPASS(i < sc->pl_num_links);
+			res->Data.Irq.InterruptCount = 1;
+			if (PCI_INTERRUPT_VALID(link->l_irq)) {
+				/* Legacy IRQ descriptors hold ISA IRQs only. */
+				KASSERT(link->l_irq < NUM_ISA_INTERRUPTS,
+		("%s: can't put non-ISA IRQ %d in legacy IRQ resource type",
+				    __func__, link->l_irq));
+				res->Data.Irq.Interrupts[0] = link->l_irq;
+			} else
+				res->Data.Irq.Interrupts[0] = 0;
+			link++;
+			i++;
+			break;
+		case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+			MPASS(i < sc->pl_num_links);
+			res->Data.ExtendedIrq.InterruptCount = 1;
+			if (PCI_INTERRUPT_VALID(link->l_irq))
+				res->Data.ExtendedIrq.Interrupts[0] =
+				    link->l_irq;
+			else
+				res->Data.ExtendedIrq.Interrupts[0] = 0;
+			link++;
+			i++;
+			break;
+		}
+		if (res->Type == ACPI_RESOURCE_TYPE_END_TAG)
+			break;
+		res = ACPI_NEXT_RESOURCE(res);
+		if (res >= end)
+			break;
+	}
+	return (AE_OK);
+}
+
+/*
+ * Build an _SRS buffer purely from the _PRS template resources stashed
+ * at attach time; used when _CRS is broken and cannot serve as the
+ * basis for _SRS.  On success the caller owns srsbuf->Pointer.
+ */
+static ACPI_STATUS
+acpi_pci_link_srs_from_links(struct acpi_pci_link_softc *sc,
+    ACPI_BUFFER *srsbuf)
+{
+	ACPI_RESOURCE newres;
+	ACPI_STATUS status;
+	struct link *link;
+	int i;
+
+	/* Start off with an empty buffer. */
+	srsbuf->Pointer = NULL;
+	link = sc->pl_links;
+	for (i = 0; i < sc->pl_num_links; i++) {
+
+		/* Add a new IRQ resource from each link. */
+		link = &sc->pl_links[i];
+		if (link->l_prs_template.Type == ACPI_RESOURCE_TYPE_IRQ) {
+
+			/* Build an IRQ resource. */
+			bcopy(&link->l_prs_template, &newres,
+			    ACPI_RS_SIZE(newres.Data.Irq));
+			newres.Data.Irq.InterruptCount = 1;
+			if (PCI_INTERRUPT_VALID(link->l_irq)) {
+				/* Legacy IRQ descriptors hold ISA IRQs only. */
+				KASSERT(link->l_irq < NUM_ISA_INTERRUPTS,
+		("%s: can't put non-ISA IRQ %d in legacy IRQ resource type",
+				    __func__, link->l_irq));
+				newres.Data.Irq.Interrupts[0] = link->l_irq;
+			} else
+				newres.Data.Irq.Interrupts[0] = 0;
+		} else {
+
+			/* Build an ExtIRQ resource. */
+			bcopy(&link->l_prs_template, &newres,
+			    ACPI_RS_SIZE(newres.Data.ExtendedIrq));
+			newres.Data.ExtendedIrq.InterruptCount = 1;
+			if (PCI_INTERRUPT_VALID(link->l_irq))
+				newres.Data.ExtendedIrq.Interrupts[0] =
+				    link->l_irq;
+			else
+				newres.Data.ExtendedIrq.Interrupts[0] = 0;
+		}
+
+		/* Add the new resource to the end of the _SRS buffer. */
+		status = acpi_AppendBufferResource(srsbuf, &newres);
+		if (ACPI_FAILURE(status)) {
+			device_printf(sc->pl_dev,
+			    "Unable to build resources: %s\n",
+			    AcpiFormatException(status));
+			if (srsbuf->Pointer != NULL)
+				AcpiOsFree(srsbuf->Pointer);
+			return (status);
+		}
+	}
+	return (AE_OK);
+}
+
+/*
+ * Program the link device's chosen IRQs via _SRS.
+ *
+ * Builds the _SRS buffer either from the stashed _PRS templates (when
+ * _CRS is unusable) or by editing a freshly-read _CRS, writes it with
+ * AcpiSetCurrentResources(), then runs acpi_config_intr() on each link
+ * that has just become routed and charges its references to the IRQ
+ * weight table.
+ */
+static ACPI_STATUS
+acpi_pci_link_route_irqs(device_t dev)
+{
+	struct acpi_pci_link_softc *sc;
+	ACPI_RESOURCE *resource, *end;
+	ACPI_BUFFER srsbuf;
+	ACPI_STATUS status;
+	struct link *link;
+	int i;
+
+	ACPI_SERIAL_ASSERT(pci_link);
+	sc = device_get_softc(dev);
+	if (sc->pl_crs_bad)
+		status = acpi_pci_link_srs_from_links(sc, &srsbuf);
+	else
+		status = acpi_pci_link_srs_from_crs(sc, &srsbuf);
+
+	/*
+	 * Bail out if building the buffer failed.  Previously the status
+	 * was overwritten without being checked, so a failed build handed
+	 * an unset or already-freed srsbuf to AcpiSetCurrentResources()
+	 * and AcpiOsFree() below.
+	 */
+	if (ACPI_FAILURE(status))
+		return (status);
+
+	/* Write out new resources via _SRS. */
+	status = AcpiSetCurrentResources(acpi_get_handle(dev), &srsbuf);
+	if (ACPI_FAILURE(status)) {
+		device_printf(dev, "Unable to route IRQs: %s\n",
+		    AcpiFormatException(status));
+		AcpiOsFree(srsbuf.Pointer);
+		return (status);
+	}
+
+	/*
+	 * Perform acpi_config_intr() on each IRQ resource if it was just
+	 * routed for the first time.
+	 */
+	link = sc->pl_links;
+	i = 0;
+	resource = (ACPI_RESOURCE *)srsbuf.Pointer;
+	end = (ACPI_RESOURCE *)((char *)srsbuf.Pointer + srsbuf.Length);
+	for (;;) {
+		if (resource->Type == ACPI_RESOURCE_TYPE_END_TAG)
+			break;
+		switch (resource->Type) {
+		case ACPI_RESOURCE_TYPE_IRQ:
+		case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+			MPASS(i < sc->pl_num_links);
+
+			/*
+			 * Only configure the interrupt and update the
+			 * weights if this link has a valid IRQ and was
+			 * previously unrouted.
+			 */
+			if (!link->l_routed &&
+			    PCI_INTERRUPT_VALID(link->l_irq)) {
+				link->l_routed = TRUE;
+				acpi_config_intr(dev, resource);
+				pci_link_interrupt_weights[link->l_irq] +=
+				    link->l_references;
+			}
+			link++;
+			i++;
+			break;
+		}
+		resource = ACPI_NEXT_RESOURCE(resource);
+		if (resource >= end)
+			break;
+	}
+	AcpiOsFree(srsbuf.Pointer);
+	return (AE_OK);
+}
+
+/*
+ * Resume handler: when every link on this device was routed before
+ * suspend, re-program the routing with _SRS; otherwise park the device
+ * with _DIS.  Returns 0 on success, ENXIO when _SRS fails.
+ */
+static int
+acpi_pci_link_resume(device_t dev)
+{
+	struct acpi_pci_link_softc *sc;
+	ACPI_STATUS status;
+	int idx, nrouted;
+
+	ACPI_SERIAL_BEGIN(pci_link);
+	sc = device_get_softc(dev);
+
+	/* Count how many of our links are currently routed. */
+	nrouted = 0;
+	for (idx = 0; idx < sc->pl_num_links; idx++) {
+		if (sc->pl_links[idx].l_routed)
+			nrouted++;
+	}
+
+	if (nrouted == sc->pl_num_links)
+		status = acpi_pci_link_route_irqs(dev);
+	else {
+		AcpiEvaluateObject(acpi_get_handle(dev), "_DIS", NULL, NULL);
+		status = AE_OK;
+	}
+	ACPI_SERIAL_END(pci_link);
+
+	return (ACPI_FAILURE(status) ? ENXIO : 0);
+}
+
+/*
+ * Pick an IRQ to use for this unrouted link.
+ *
+ * Preference order: "hw.pci.link.<name>[.<index>].irq" tunables, then
+ * the IRQ found in the BIOS interrupt-line registers, then the initial
+ * _CRS value, and finally the lowest-weight candidate from _PRS (with
+ * the SCI as an extra candidate for ISA-routed links).  Returns
+ * PCI_INVALID_IRQ when no candidate qualifies.
+ */
+static uint8_t
+acpi_pci_link_choose_irq(device_t dev, struct link *link)
+{
+	char tunable_buffer[64], link_name[5];
+	u_int8_t best_irq, pos_irq;
+	int best_weight, pos_weight, i;
+
+	KASSERT(!link->l_routed, ("%s: link already routed", __func__));
+	KASSERT(!PCI_INTERRUPT_VALID(link->l_irq),
+	    ("%s: link already has an IRQ", __func__));
+
+	/* Check for a tunable override. */
+	if (ACPI_SUCCESS(acpi_short_name(acpi_get_handle(dev), link_name,
+	    sizeof(link_name)))) {
+		/* Per-resource-index tunable takes precedence. */
+		snprintf(tunable_buffer, sizeof(tunable_buffer),
+		    "hw.pci.link.%s.%d.irq", link_name, link->l_res_index);
+		if (getenv_int(tunable_buffer, &i) && PCI_INTERRUPT_VALID(i)) {
+			if (!link_valid_irq(link, i))
+				device_printf(dev,
+				    "Warning, IRQ %d is not listed as valid\n",
+				    i);
+			return (i);
+		}
+		/* Fall back to the per-link tunable. */
+		snprintf(tunable_buffer, sizeof(tunable_buffer),
+		    "hw.pci.link.%s.irq", link_name);
+		if (getenv_int(tunable_buffer, &i) && PCI_INTERRUPT_VALID(i)) {
+			if (!link_valid_irq(link, i))
+				device_printf(dev,
+				    "Warning, IRQ %d is not listed as valid\n",
+				    i);
+			return (i);
+		}
+	}
+
+	/*
+	 * If we have a valid BIOS IRQ, use that. We trust what the BIOS
+	 * says it routed over what _CRS says the link thinks is routed.
+	 */
+	if (PCI_INTERRUPT_VALID(link->l_bios_irq))
+		return (link->l_bios_irq);
+
+	/*
+	 * If we don't have a BIOS IRQ but do have a valid IRQ from _CRS,
+	 * then use that.
+	 */
+	if (PCI_INTERRUPT_VALID(link->l_initial_irq))
+		return (link->l_initial_irq);
+
+	/*
+	 * Ok, we have no useful hints, so we have to pick from the
+	 * possible IRQs. For ISA IRQs we only use interrupts that
+	 * have already been used by the BIOS.
+	 */
+	best_irq = PCI_INVALID_IRQ;
+	best_weight = INT_MAX;
+	for (i = 0; i < link->l_num_irqs; i++) {
+		pos_irq = link->l_irqs[i];
+		if (pos_irq < NUM_ISA_INTERRUPTS &&
+		    (pci_link_bios_isa_irqs & 1 << pos_irq) == 0)
+			continue;
+		/* Prefer the candidate with the lowest weight. */
+		pos_weight = pci_link_interrupt_weights[pos_irq];
+		if (pos_weight < best_weight) {
+			best_weight = pos_weight;
+			best_irq = pos_irq;
+		}
+	}
+
+	/*
+	 * If this is an ISA IRQ, try using the SCI if it is also an ISA
+	 * interrupt as a fallback.
+	 */
+	if (link->l_isa_irq) {
+		pos_irq = AcpiGbl_FADT.SciInterrupt;
+		pos_weight = pci_link_interrupt_weights[pos_irq];
+		if (pos_weight < best_weight) {
+			best_weight = pos_weight;
+			best_irq = pos_irq;
+		}
+	}
+
+	if (PCI_INTERRUPT_VALID(best_irq)) {
+		if (bootverbose)
+			device_printf(dev, "Picked IRQ %u with weight %d\n",
+			    best_irq, best_weight);
+	} else
+		device_printf(dev, "Unable to choose an IRQ\n");
+	return (best_irq);
+}
+
+/*
+ * Route (if necessary) and return the IRQ for resource 'index' of link
+ * device 'dev'.  Returns PCI_INVALID_IRQ when pci_link support is
+ * disabled or no IRQ could be routed.
+ */
+int
+acpi_pci_link_route_interrupt(device_t dev, int index)
+{
+	struct link *link;
+
+	if (acpi_disabled("pci_link"))
+		return (PCI_INVALID_IRQ);
+
+	ACPI_SERIAL_BEGIN(pci_link);
+	link = acpi_pci_link_lookup(dev, index);
+	if (link == NULL)
+		panic("%s: apparently invalid index %d", __func__, index);
+
+	/*
+	 * If this link device is already routed to an interrupt, just return
+	 * the interrupt it is routed to.
+	 */
+	if (link->l_routed) {
+		KASSERT(PCI_INTERRUPT_VALID(link->l_irq),
+		    ("%s: link is routed but has an invalid IRQ", __func__));
+		ACPI_SERIAL_END(pci_link);
+		return (link->l_irq);
+	}
+
+	/* Choose an IRQ if we need one. */
+	if (!PCI_INTERRUPT_VALID(link->l_irq)) {
+		link->l_irq = acpi_pci_link_choose_irq(dev, link);
+
+		/*
+		 * Try to route the interrupt we picked. If it fails, then
+		 * assume the interrupt is not routed.
+		 */
+		if (PCI_INTERRUPT_VALID(link->l_irq)) {
+			acpi_pci_link_route_irqs(dev);
+			if (!link->l_routed)
+				link->l_irq = PCI_INVALID_IRQ;
+		}
+	}
+	ACPI_SERIAL_END(pci_link);
+
+	/* NOTE(review): l_irq is re-read after the serial section ends. */
+	return (link->l_irq);
+}
+
+/*
+ * This is gross, but we abuse the identify routine to perform one-time
+ * SYSINIT() style initialization for the driver.
+ */
+static void
+acpi_pci_link_identify(driver_t *driver, device_t parent)
+{
+	UINT32 sci;
+
+	/*
+	 * Seed the bitmask of known-good ISA IRQs with the SCI when the
+	 * SCI itself falls in the ISA range.
+	 *
+	 * XXX: If we are using the APIC, the SCI might have been
+	 * rerouted to an APIC pin in which case this is invalid. However,
+	 * if we are using the APIC, we also shouldn't be having any PCI
+	 * interrupts routed via ISA IRQs, so this is probably ok.
+	 */
+	sci = AcpiGbl_FADT.SciInterrupt;
+	if (sci < NUM_ISA_INTERRUPTS)
+		pci_link_bios_isa_irqs |= (1 << sci);
+}
+
+/*
+ * Newbus glue for the ACPI PCI interrupt link driver; the identify
+ * method doubles as one-time driver initialization (see above).
+ */
+static device_method_t acpi_pci_link_methods[] = {
+	/* Device interface */
+	DEVMETHOD(device_identify, acpi_pci_link_identify),
+	DEVMETHOD(device_probe, acpi_pci_link_probe),
+	DEVMETHOD(device_attach, acpi_pci_link_attach),
+	DEVMETHOD(device_resume, acpi_pci_link_resume),
+
+	{0, 0}
+};
+
+static driver_t acpi_pci_link_driver = {
+	"pci_link",
+	acpi_pci_link_methods,
+	sizeof(struct acpi_pci_link_softc),
+};
+
+static devclass_t pci_link_devclass;
+
+/* Attach under the acpi bus; depends on the acpi module being loaded. */
+DRIVER_MODULE(acpi_pci_link, acpi, acpi_pci_link_driver, pci_link_devclass, 0,
+    0);
+MODULE_DEPEND(acpi_pci_link, acpi, 1, 1, 1);
diff --git a/sys/dev/acpica/acpi_pcib.c b/sys/dev/acpica/acpi_pcib.c
new file mode 100644
index 0000000..1b26b4f
--- /dev/null
+++ b/sys/dev/acpica/acpi_pcib.c
@@ -0,0 +1,288 @@
+/*-
+ * Copyright (c) 2000 Michael Smith
+ * Copyright (c) 2000 BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_acpi.h"
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+
+#include <dev/acpica/acpivar.h>
+#include <dev/acpica/acpi_pcibvar.h>
+
+#include <dev/pci/pcivar.h>
+#include "pcib_if.h"
+
+/* Hooks for the ACPI CA debugging infrastructure. */
+#define _COMPONENT ACPI_BUS
+ACPI_MODULE_NAME("PCI")
+
+ACPI_SERIAL_DECL(pcib, "ACPI PCI bus methods");
+
+/*
+ * For locking, we assume the caller is not concurrent since this is
+ * triggered by newbus methods.
+ */
+
+struct prt_lookup_request {
+ ACPI_PCI_ROUTING_TABLE *pr_entry;
+ u_int pr_pin;
+ u_int pr_slot;
+};
+
+typedef void prt_entry_handler(ACPI_PCI_ROUTING_TABLE *entry, void *arg);
+
+static void prt_attach_devices(ACPI_PCI_ROUTING_TABLE *entry, void *arg);
+static void prt_lookup_device(ACPI_PCI_ROUTING_TABLE *entry, void *arg);
+static void prt_walk_table(ACPI_BUFFER *prt, prt_entry_handler *handler,
+ void *arg);
+
+/*
+ * Invoke "handler(entry, arg)" for every entry of a _PRT buffer.  The
+ * table is a packed sequence of variable-length entries terminated by
+ * an entry whose Length is 0; a NULL buffer is silently ignored.
+ */
+static void
+prt_walk_table(ACPI_BUFFER *prt, prt_entry_handler *handler, void *arg)
+{
+	ACPI_PCI_ROUTING_TABLE *entry;
+	char *pos;
+
+	/* Nothing to do without a table. */
+	if (prt == NULL || prt->Pointer == NULL)
+		return;
+
+	pos = prt->Pointer;
+	for (;;) {
+		entry = (ACPI_PCI_ROUTING_TABLE *)pos;
+		if (entry->Length == 0)
+			break;
+		handler(entry, arg);
+		pos += entry->Length;
+	}
+}
+
+/*
+ * prt_walk_table() handler: for each _PRT entry that routes through a
+ * link device, force that link device to attach and register this
+ * bridge's slot/pin as a consumer of the link.
+ */
+static void
+prt_attach_devices(ACPI_PCI_ROUTING_TABLE *entry, void *arg)
+{
+	ACPI_HANDLE handle;
+	device_t child, pcib;
+	int error;
+
+	/* We only care about entries that reference a link device. */
+	if (entry->Source == NULL || entry->Source[0] == '\0')
+		return;
+
+	/*
+	 * In practice, we only see SourceIndex's of 0 out in the wild.
+	 * When indices != 0 have been found, they've been bugs in the ASL.
+	 */
+	if (entry->SourceIndex != 0)
+		return;
+
+	/* Lookup the associated handle and device. */
+	pcib = (device_t)arg;
+	if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, entry->Source, &handle)))
+		return;
+	child = acpi_get_device(handle);
+	if (child == NULL)
+		return;
+
+	/* If the device hasn't been probed yet, force it to do so. */
+	error = device_probe_and_attach(child);
+	if (error != 0) {
+		device_printf(pcib, "failed to force attach of %s\n",
+		    acpi_name(handle));
+		return;
+	}
+
+	/* Add a reference for a specific bus/device/pin tuple. */
+	acpi_pci_link_add_reference(child, entry->SourceIndex, pcib,
+	    ACPI_ADR_PCI_SLOT(entry->Address), entry->Pin);
+}
+
+/*
+ * Common attach logic for ACPI-enumerated PCI bridges: fetch the
+ * bridge's _PRT into 'prt', add the "pci" child bus at 'busno',
+ * pre-attach any link devices the _PRT references, and scan the bus.
+ */
+int
+acpi_pcib_attach(device_t dev, ACPI_BUFFER *prt, int busno)
+{
+	ACPI_STATUS status;
+
+	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+	/*
+	 * Don't attach if we're not really there.
+	 *
+	 * XXX: This isn't entirely correct since we may be a PCI bus
+	 * on a hot-plug docking station, etc.
+	 */
+	if (!acpi_DeviceIsPresent(dev))
+		return_VALUE(ENXIO);
+
+	/*
+	 * Get the PCI interrupt routing table for this bus. If we can't
+	 * get it, this is not an error but may reduce functionality. There
+	 * are several valid bridges in the field that do not have a _PRT, so
+	 * only warn about missing tables if bootverbose is set.
+	 */
+	prt->Length = ACPI_ALLOCATE_BUFFER;
+	status = AcpiGetIrqRoutingTable(acpi_get_handle(dev), prt);
+	if (ACPI_FAILURE(status) && (bootverbose || status != AE_NOT_FOUND))
+		device_printf(dev,
+		    "could not get PCI interrupt routing table for %s - %s\n",
+		    acpi_name(acpi_get_handle(dev)), AcpiFormatException(status));
+
+	/*
+	 * Attach the PCI bus proper.
+	 */
+	if (device_add_child(dev, "pci", busno) == NULL) {
+		device_printf(device_get_parent(dev), "couldn't attach pci bus\n");
+		return_VALUE(ENXIO);
+	}
+
+	/*
+	 * Now go scan the bus.
+	 */
+	prt_walk_table(prt, prt_attach_devices, dev);
+
+	return_VALUE (bus_generic_attach(dev));
+}
+
+/*
+ * prt_walk_table() handler: remember the first _PRT entry whose slot
+ * and pin match the ones in the lookup request.
+ */
+static void
+prt_lookup_device(ACPI_PCI_ROUTING_TABLE *entry, void *arg)
+{
+	struct prt_lookup_request *req;
+
+	req = (struct prt_lookup_request *)arg;
+
+	/* A match was already recorded; keep it. */
+	if (req->pr_entry != NULL)
+		return;
+
+	/*
+	 * Match on the slot number (high word of Address) and the
+	 * 0-based ACPI pin number (0 = INTA).  The low word of Address
+	 * is the function number, which the spec requires to be 0xffff,
+	 * so it is not checked here.
+	 */
+	if (entry->Pin == req->pr_pin &&
+	    ACPI_ADR_PCI_SLOT(entry->Address) == req->pr_slot)
+		req->pr_entry = entry;
+}
+
+/*
+ * Route an interrupt for a child of the bridge.
+ *
+ * 'pin' arrives 1-based (1 = INTA) and is converted to ACPI's 0-based
+ * numbering below; 'prtbuf' is the _PRT fetched at attach time.
+ * Returns the routed IRQ or PCI_INVALID_IRQ on failure.
+ */
+int
+acpi_pcib_route_interrupt(device_t pcib, device_t dev, int pin,
+    ACPI_BUFFER *prtbuf)
+{
+	ACPI_PCI_ROUTING_TABLE *prt;
+	struct prt_lookup_request pr;
+	ACPI_HANDLE lnkdev;
+	int interrupt;
+
+	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+	interrupt = PCI_INVALID_IRQ;
+
+	/* ACPI numbers pins 0-3, not 1-4 like the BIOS. */
+	pin--;
+
+	ACPI_SERIAL_BEGIN(pcib);
+
+	/* Search for a matching entry in the routing table. */
+	pr.pr_entry = NULL;
+	pr.pr_pin = pin;
+	pr.pr_slot = pci_get_slot(dev);
+	prt_walk_table(prtbuf, prt_lookup_device, &pr);
+	if (pr.pr_entry == NULL) {
+		device_printf(pcib, "no PRT entry for %d.%d.INT%c\n", pci_get_bus(dev),
+		    pci_get_slot(dev), 'A' + pin);
+		goto out;
+	}
+	prt = pr.pr_entry;
+
+	if (bootverbose) {
+		device_printf(pcib, "matched entry for %d.%d.INT%c",
+		    pci_get_bus(dev), pci_get_slot(dev), 'A' + pin);
+		if (prt->Source != NULL && prt->Source[0] != '\0')
+			printf(" (src %s:%u)", prt->Source, prt->SourceIndex);
+		printf("\n");
+	}
+
+	/*
+	 * If source is empty/NULL, the source index is a global IRQ number
+	 * and it's hard-wired so we're done.
+	 *
+	 * XXX: If the source index is non-zero, ignore the source device and
+	 * assume that this is a hard-wired entry.
+	 */
+	if (prt->Source == NULL || prt->Source[0] == '\0' ||
+	    prt->SourceIndex != 0) {
+		if (bootverbose)
+			device_printf(pcib, "slot %d INT%c hardwired to IRQ %d\n",
+			    pci_get_slot(dev), 'A' + pin, prt->SourceIndex);
+		if (prt->SourceIndex) {
+			interrupt = prt->SourceIndex;
+			/* Hard-wired entries are configured level/low here. */
+			BUS_CONFIG_INTR(dev, interrupt, INTR_TRIGGER_LEVEL,
+			    INTR_POLARITY_LOW);
+		} else
+			device_printf(pcib, "error: invalid hard-wired IRQ of 0\n");
+		goto out;
+	}
+
+	/*
+	 * We have to find the source device (PCI interrupt link device).
+	 */
+	if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, prt->Source, &lnkdev))) {
+		device_printf(pcib, "couldn't find PCI interrupt link device %s\n",
+		    prt->Source);
+		goto out;
+	}
+	/* Delegate routing to the link driver for this resource index. */
+	interrupt = acpi_pci_link_route_interrupt(acpi_get_device(lnkdev),
+	    prt->SourceIndex);
+
+	if (bootverbose && PCI_INTERRUPT_VALID(interrupt))
+		device_printf(pcib, "slot %d INT%c routed to irq %d via %s\n",
+		    pci_get_slot(dev), 'A' + pin, interrupt, acpi_name(lnkdev));
+
+out:
+	ACPI_SERIAL_END(pcib);
+
+	return_VALUE (interrupt);
+}
+
+/*
+ * Ask the ACPI subsystem which power state 'dev' should enter for the
+ * pending sleep transition; the answer is written through 'pstate'.
+ * Always reports success.
+ */
+int
+acpi_pcib_power_for_sleep(device_t pcib, device_t dev, int *pstate)
+{
+	/* The acpi0 device owns the sleep-state power policy. */
+	acpi_device_pwr_for_sleep(devclass_get_device(devclass_find("acpi"), 0),
+	    dev, pstate);
+	return (0);
+}
+
diff --git a/sys/dev/acpica/acpi_pcib_acpi.c b/sys/dev/acpica/acpi_pcib_acpi.c
new file mode 100644
index 0000000..8e2cfea
--- /dev/null
+++ b/sys/dev/acpica/acpi_pcib_acpi.c
@@ -0,0 +1,562 @@
+/*-
+ * Copyright (c) 2000 Michael Smith
+ * Copyright (c) 2000 BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_acpi.h"
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/limits.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/sysctl.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+
+#include <dev/acpica/acpivar.h>
+
+#include <machine/pci_cfgreg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcib_private.h>
+#include "pcib_if.h"
+
+#include <dev/acpica/acpi_pcibvar.h>
+
+/* Hooks for the ACPI CA debugging infrastructure. */
+#define _COMPONENT ACPI_BUS
+ACPI_MODULE_NAME("PCI_ACPI")
+
+/* Per-instance state for an ACPI Host-PCI root bridge. */
+struct acpi_hpcib_softc {
+ device_t ap_dev; /* our newbus device */
+ ACPI_HANDLE ap_handle; /* ACPI handle of the bridge object */
+ int ap_flags; /* value exported via ACPI_IVAR_FLAGS */
+
+ int ap_segment; /* PCI domain */
+ int ap_bus; /* bios-assigned bus number */
+ int ap_addr; /* device/func of PCI-Host bridge */
+
+ ACPI_BUFFER ap_prt; /* interrupt routing table */
+#ifdef NEW_PCIB
+ struct pcib_host_resources ap_host_res; /* ranges this bridge decodes */
+#endif
+};
+
+static int acpi_pcib_acpi_probe(device_t bus);
+static int acpi_pcib_acpi_attach(device_t bus);
+static int acpi_pcib_read_ivar(device_t dev, device_t child,
+ int which, uintptr_t *result);
+static int acpi_pcib_write_ivar(device_t dev, device_t child,
+ int which, uintptr_t value);
+static uint32_t acpi_pcib_read_config(device_t dev, u_int bus,
+ u_int slot, u_int func, u_int reg, int bytes);
+static void acpi_pcib_write_config(device_t dev, u_int bus,
+ u_int slot, u_int func, u_int reg, uint32_t data,
+ int bytes);
+static int acpi_pcib_acpi_route_interrupt(device_t pcib,
+ device_t dev, int pin);
+static int acpi_pcib_alloc_msi(device_t pcib, device_t dev,
+ int count, int maxcount, int *irqs);
+static int acpi_pcib_map_msi(device_t pcib, device_t dev,
+ int irq, uint64_t *addr, uint32_t *data);
+static int acpi_pcib_alloc_msix(device_t pcib, device_t dev,
+ int *irq);
+static struct resource *acpi_pcib_acpi_alloc_resource(device_t dev,
+ device_t child, int type, int *rid,
+ u_long start, u_long end, u_long count,
+ u_int flags);
+#ifdef NEW_PCIB
+static int acpi_pcib_acpi_adjust_resource(device_t dev,
+ device_t child, int type, struct resource *r,
+ u_long start, u_long end);
+#endif
+
+/* Newbus/pcib method table for the ACPI host-PCI bridge driver. */
+static device_method_t acpi_pcib_acpi_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, acpi_pcib_acpi_probe),
+ DEVMETHOD(device_attach, acpi_pcib_acpi_attach),
+ DEVMETHOD(device_shutdown, bus_generic_shutdown),
+ DEVMETHOD(device_suspend, bus_generic_suspend),
+ DEVMETHOD(device_resume, bus_generic_resume),
+
+ /* Bus interface */
+ DEVMETHOD(bus_read_ivar, acpi_pcib_read_ivar),
+ DEVMETHOD(bus_write_ivar, acpi_pcib_write_ivar),
+ DEVMETHOD(bus_alloc_resource, acpi_pcib_acpi_alloc_resource),
+#ifdef NEW_PCIB
+ DEVMETHOD(bus_adjust_resource, acpi_pcib_acpi_adjust_resource),
+#else
+ DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
+#endif
+ DEVMETHOD(bus_release_resource, bus_generic_release_resource),
+ DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
+ DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
+ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+
+ /* pcib interface */
+ DEVMETHOD(pcib_maxslots, pcib_maxslots),
+ DEVMETHOD(pcib_read_config, acpi_pcib_read_config),
+ DEVMETHOD(pcib_write_config, acpi_pcib_write_config),
+ DEVMETHOD(pcib_route_interrupt, acpi_pcib_acpi_route_interrupt),
+ DEVMETHOD(pcib_alloc_msi, acpi_pcib_alloc_msi),
+ DEVMETHOD(pcib_release_msi, pcib_release_msi),
+ DEVMETHOD(pcib_alloc_msix, acpi_pcib_alloc_msix),
+ DEVMETHOD(pcib_release_msix, pcib_release_msix),
+ DEVMETHOD(pcib_map_msi, acpi_pcib_map_msi),
+ DEVMETHOD(pcib_power_for_sleep, acpi_pcib_power_for_sleep),
+
+ DEVMETHOD_END
+};
+
+static devclass_t pcib_devclass;
+
+/* Register under class "pcib" so a PCI bus attaches beneath this bridge. */
+DEFINE_CLASS_0(pcib, acpi_pcib_acpi_driver, acpi_pcib_acpi_methods,
+ sizeof(struct acpi_hpcib_softc));
+DRIVER_MODULE(acpi_pcib, acpi, acpi_pcib_acpi_driver, pcib_devclass, 0, 0);
+MODULE_DEPEND(acpi_pcib, acpi, 1, 1, 1);
+
+/*
+ * Probe: match only ACPI objects flagged as PCI root bridges, and only
+ * when PCI config-register access is available.
+ */
+static int
+acpi_pcib_acpi_probe(device_t dev)
+{
+    ACPI_DEVICE_INFO *devinfo;
+    ACPI_HANDLE h;
+    int root;
+
+    /* Need the driver enabled, an ACPI handle, and its object info. */
+    if (acpi_disabled("pcib"))
+        return (ENXIO);
+    h = acpi_get_handle(dev);
+    if (h == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
+        return (ENXIO);
+
+    root = (devinfo->Flags & ACPI_PCI_ROOT_BRIDGE) != 0;
+    AcpiOsFree(devinfo);
+    if (!root || pci_cfgregopen() == 0)
+        return (ENXIO);
+
+    device_set_desc(dev, "ACPI Host-PCI bridge");
+    return (0);
+}
+
+#ifdef NEW_PCIB
+/*
+ * AcpiWalkResources() callback for the bridge's _CRS: record every
+ * address range this host bridge decodes (produces) with the NEW_PCIB
+ * host resource manager so child resource requests can be validated
+ * against them.
+ *
+ * Fix: corrected the "depenedent" typo in the panic message.
+ */
+static ACPI_STATUS
+acpi_pcib_producer_handler(ACPI_RESOURCE *res, void *context)
+{
+    struct acpi_hpcib_softc *sc;
+    UINT64 length, min, max;
+    u_int flags;
+    int error, type;
+
+    sc = context;
+    switch (res->Type) {
+    case ACPI_RESOURCE_TYPE_START_DEPENDENT:
+    case ACPI_RESOURCE_TYPE_END_DEPENDENT:
+        panic("host bridge has dependent resources");
+    case ACPI_RESOURCE_TYPE_ADDRESS16:
+    case ACPI_RESOURCE_TYPE_ADDRESS32:
+    case ACPI_RESOURCE_TYPE_ADDRESS64:
+    case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
+        /* Only ranges the bridge produces (decodes) are of interest. */
+        if (res->Data.Address.ProducerConsumer != ACPI_PRODUCER)
+            break;
+        switch (res->Type) {
+        case ACPI_RESOURCE_TYPE_ADDRESS16:
+            min = res->Data.Address16.Minimum;
+            max = res->Data.Address16.Maximum;
+            length = res->Data.Address16.AddressLength;
+            break;
+        case ACPI_RESOURCE_TYPE_ADDRESS32:
+            min = res->Data.Address32.Minimum;
+            max = res->Data.Address32.Maximum;
+            length = res->Data.Address32.AddressLength;
+            break;
+        case ACPI_RESOURCE_TYPE_ADDRESS64:
+            min = res->Data.Address64.Minimum;
+            max = res->Data.Address64.Maximum;
+            length = res->Data.Address64.AddressLength;
+            break;
+        default:
+            KASSERT(res->Type ==
+                ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64,
+                ("should never happen"));
+            min = res->Data.ExtAddress64.Minimum;
+            max = res->Data.ExtAddress64.Maximum;
+            length = res->Data.ExtAddress64.AddressLength;
+            break;
+        }
+        if (length == 0)
+            break;
+        /* Skip ranges with inconsistent bounds unless both are fixed. */
+        if (min + length - 1 != max &&
+            (res->Data.Address.MinAddressFixed != ACPI_ADDRESS_FIXED ||
+            res->Data.Address.MaxAddressFixed != ACPI_ADDRESS_FIXED))
+            break;
+        flags = 0;
+        switch (res->Data.Address.ResourceType) {
+        case ACPI_MEMORY_RANGE:
+            type = SYS_RES_MEMORY;
+            if (res->Type != ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64) {
+                if (res->Data.Address.Info.Mem.Caching ==
+                    ACPI_PREFETCHABLE_MEMORY)
+                    flags |= RF_PREFETCHABLE;
+            } else {
+                /*
+                 * XXX: Parse prefetch flag out of
+                 * TypeSpecific.
+                 */
+            }
+            break;
+        case ACPI_IO_RANGE:
+            type = SYS_RES_IOPORT;
+            break;
+#ifdef PCI_RES_BUS
+        case ACPI_BUS_NUMBER_RANGE:
+            type = PCI_RES_BUS;
+            break;
+#endif
+        default:
+            return (AE_OK);
+        }
+
+        if (min + length - 1 != max)
+            device_printf(sc->ap_dev,
+                "Length mismatch for %d range: %jx vs %jx\n", type,
+                (uintmax_t)max - min + 1, (uintmax_t)length);
+#ifdef __i386__
+        /* i386 resource addresses are u_long: drop/clip ranges above 4GB. */
+        if (min > ULONG_MAX) {
+            device_printf(sc->ap_dev,
+                "Ignoring %d range above 4GB (%#jx-%#jx)\n",
+                type, (uintmax_t)min, (uintmax_t)max);
+            break;
+        }
+        if (max > ULONG_MAX) {
+            device_printf(sc->ap_dev,
+                "Truncating end of %d range above 4GB (%#jx-%#jx)\n",
+                type, (uintmax_t)min, (uintmax_t)max);
+            max = ULONG_MAX;
+        }
+#endif
+        error = pcib_host_res_decodes(&sc->ap_host_res, type, min, max,
+            flags);
+        if (error)
+            panic("Failed to manage %d range (%#jx-%#jx): %d",
+                type, (uintmax_t)min, (uintmax_t)max, error);
+        break;
+    default:
+        break;
+    }
+    return (AE_OK);
+}
+#endif
+
+/*
+ * Attach an ACPI host-PCI bridge: discover its PCI domain (_SEG),
+ * bridge address (_ADR), decoded resource ranges (_CRS, NEW_PCIB only)
+ * and base bus number (_BBN, with workarounds for BIOSes that report it
+ * wrongly), then hand off to acpi_pcib_attach() to fetch the _PRT and
+ * add the child PCI bus.
+ */
+static int
+acpi_pcib_acpi_attach(device_t dev)
+{
+ struct acpi_hpcib_softc *sc;
+ ACPI_STATUS status;
+ static int bus0_seen = 0;
+ u_int slot, func, busok;
+ uint8_t busno;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ sc = device_get_softc(dev);
+ sc->ap_dev = dev;
+ sc->ap_handle = acpi_get_handle(dev);
+
+ /*
+ * Get our segment number by evaluating _SEG.
+ * It's OK for this to not exist.
+ */
+ status = acpi_GetInteger(sc->ap_handle, "_SEG", &sc->ap_segment);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND) {
+ device_printf(dev, "could not evaluate _SEG - %s\n",
+ AcpiFormatException(status));
+ return_VALUE (ENXIO);
+ }
+ /* If it's not found, assume 0. */
+ sc->ap_segment = 0;
+ }
+
+ /*
+ * Get the address (device and function) of the associated
+ * PCI-Host bridge device from _ADR. Assume we don't have one if
+ * it doesn't exist.
+ */
+ status = acpi_GetInteger(sc->ap_handle, "_ADR", &sc->ap_addr);
+ if (ACPI_FAILURE(status)) {
+ device_printf(dev, "could not evaluate _ADR - %s\n",
+ AcpiFormatException(status));
+ /* -1 marks the address as unknown for later consumers. */
+ sc->ap_addr = -1;
+ }
+
+#ifdef NEW_PCIB
+ /*
+ * Determine which address ranges this bridge decodes and setup
+ * resource managers for those ranges.
+ */
+ if (pcib_host_res_init(sc->ap_dev, &sc->ap_host_res) != 0)
+ panic("failed to init hostb resources");
+ if (!acpi_disabled("hostres")) {
+ status = AcpiWalkResources(sc->ap_handle, "_CRS",
+ acpi_pcib_producer_handler, sc);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
+ device_printf(sc->ap_dev, "failed to parse resources: %s\n",
+ AcpiFormatException(status));
+ }
+#endif
+
+ /*
+ * Get our base bus number by evaluating _BBN.
+ * If this doesn't work, we assume we're bus number 0.
+ *
+ * XXX note that it may also not exist in the case where we are
+ * meant to use a private configuration space mechanism for this bus,
+ * so we should dig out our resources and check to see if we have
+ * anything like that. How do we do this?
+ * XXX If we have the requisite information, and if we don't think the
+ * default PCI configuration space handlers can deal with this bus,
+ * we should attach our own handler.
+ * XXX invoke _REG on this for the PCI config space address space?
+ * XXX It seems many BIOS's with multiple Host-PCI bridges do not set
+ * _BBN correctly. They set _BBN to zero for all bridges. Thus,
+ * if _BBN is zero and PCI bus 0 already exists, we try to read our
+ * bus number from the configuration registers at address _ADR.
+ * We only do this for domain/segment 0 in the hopes that this is
+ * only needed for old single-domain machines.
+ */
+ status = acpi_GetInteger(sc->ap_handle, "_BBN", &sc->ap_bus);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND) {
+ device_printf(dev, "could not evaluate _BBN - %s\n",
+ AcpiFormatException(status));
+ return_VALUE (ENXIO);
+ } else {
+ /* If it's not found, assume 0. */
+ sc->ap_bus = 0;
+ }
+ }
+
+ /*
+ * If this is segment 0, the bus is zero, and PCI bus 0 already
+ * exists, read the bus number via PCI config space.
+ */
+ busok = 1;
+ if (sc->ap_segment == 0 && sc->ap_bus == 0 && bus0_seen) {
+ busok = 0;
+ if (sc->ap_addr != -1) {
+ /* XXX: We assume bus 0. */
+ slot = ACPI_ADR_PCI_SLOT(sc->ap_addr);
+ func = ACPI_ADR_PCI_FUNC(sc->ap_addr);
+ if (bootverbose)
+ device_printf(dev, "reading config registers from 0:%d:%d\n",
+ slot, func);
+ if (host_pcib_get_busno(pci_cfgregread, 0, slot, func, &busno) == 0)
+ device_printf(dev, "couldn't read bus number from cfg space\n");
+ else {
+ sc->ap_bus = busno;
+ busok = 1;
+ }
+ }
+ }
+
+ /*
+ * If nothing else worked, hope that ACPI at least lays out the
+ * host-PCI bridges in order and that as a result our unit number
+ * is actually our bus number. There are several reasons this
+ * might not be true.
+ */
+ if (busok == 0) {
+ sc->ap_bus = device_get_unit(dev);
+ device_printf(dev, "trying bus number %d\n", sc->ap_bus);
+ }
+
+ /* If this is bus 0 on segment 0, note that it has been seen already. */
+ if (sc->ap_segment == 0 && sc->ap_bus == 0)
+ bus0_seen = 1;
+
+ return (acpi_pcib_attach(dev, &sc->ap_prt, sc->ap_bus));
+}
+
+/*
+ * Support for standard PCI bridge ivars.
+ */
+/* Return a standard pcib or ACPI instance variable for a child. */
+static int
+acpi_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
+{
+    struct acpi_hpcib_softc *sc;
+
+    sc = device_get_softc(dev);
+    switch (which) {
+    case PCIB_IVAR_DOMAIN:
+        *result = sc->ap_segment;
+        return (0);
+    case PCIB_IVAR_BUS:
+        *result = sc->ap_bus;
+        return (0);
+    case ACPI_IVAR_HANDLE:
+        *result = (uintptr_t)sc->ap_handle;
+        return (0);
+    case ACPI_IVAR_FLAGS:
+        *result = (uintptr_t)sc->ap_flags;
+        return (0);
+    default:
+        return (ENOENT);
+    }
+}
+
+/* Update a writable instance variable; the PCI domain is immutable. */
+static int
+acpi_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
+{
+    struct acpi_hpcib_softc *sc;
+
+    sc = device_get_softc(dev);
+    switch (which) {
+    case PCIB_IVAR_DOMAIN:
+        return (EINVAL);
+    case PCIB_IVAR_BUS:
+        sc->ap_bus = value;
+        return (0);
+    case ACPI_IVAR_HANDLE:
+        sc->ap_handle = (ACPI_HANDLE)value;
+        return (0);
+    case ACPI_IVAR_FLAGS:
+        sc->ap_flags = (int)value;
+        return (0);
+    default:
+        return (ENOENT);
+    }
+}
+
+/* Read a PCI config register via the platform config mechanism. */
+static uint32_t
+acpi_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func,
+    u_int reg, int bytes)
+{
+    uint32_t val;
+
+    val = pci_cfgregread(bus, slot, func, reg, bytes);
+    return (val);
+}
+
+/* Write a PCI config register via the platform config mechanism. */
+static void
+acpi_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func,
+    u_int reg, uint32_t data, int bytes)
+{
+
+    pci_cfgregwrite(bus, slot, func, reg, data, bytes);
+}
+
+/* Route a child's interrupt pin using this bridge's cached _PRT. */
+static int
+acpi_pcib_acpi_route_interrupt(device_t pcib, device_t dev, int pin)
+{
+    struct acpi_hpcib_softc *sc;
+
+    sc = device_get_softc(pcib);
+    return (acpi_pcib_route_interrupt(pcib, dev, pin, &sc->ap_prt));
+}
+
+/* MSI allocation is delegated two levels up (past the acpi bus). */
+static int
+acpi_pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount,
+    int *irqs)
+{
+    device_t parent;
+
+    parent = device_get_parent(device_get_parent(pcib));
+    return (PCIB_ALLOC_MSI(parent, dev, count, maxcount, irqs));
+}
+
+/* MSI-X allocation is delegated two levels up (past the acpi bus). */
+static int
+acpi_pcib_alloc_msix(device_t pcib, device_t dev, int *irq)
+{
+    device_t parent;
+
+    parent = device_get_parent(device_get_parent(pcib));
+    return (PCIB_ALLOC_MSIX(parent, dev, irq));
+}
+
+/*
+ * Map an MSI for a child: delegate the mapping upward, then apply any
+ * HyperTransport MSI window fixup on the host bridge function itself.
+ *
+ * Fix: the softc must come from 'pcib' (this driver's device), not from
+ * 'dev', which is the child requesting the mapping — reading the child's
+ * softc as a struct acpi_hpcib_softc dereferenced the wrong structure.
+ */
+static int
+acpi_pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr,
+    uint32_t *data)
+{
+    struct acpi_hpcib_softc *sc;
+    device_t bus, hostb;
+    int error;
+
+    bus = device_get_parent(pcib);
+    error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data);
+    if (error)
+        return (error);
+
+    sc = device_get_softc(pcib);
+    if (sc->ap_addr == -1)
+        return (0);
+    /* XXX: Assumes all bridges are on bus 0. */
+    hostb = pci_find_dbsf(sc->ap_segment, 0, ACPI_ADR_PCI_SLOT(sc->ap_addr),
+        ACPI_ADR_PCI_FUNC(sc->ap_addr));
+    if (hostb != NULL)
+        pci_ht_map_msi(hostb, *addr);
+    return (0);
+}
+
+/*
+ * Allocate a resource on behalf of a child device.  With NEW_PCIB the
+ * request is first validated against the ranges this bridge decodes;
+ * requests for an exact range fall back to the parent.  Marked static
+ * to match the forward declaration above.
+ */
+static struct resource *
+acpi_pcib_acpi_alloc_resource(device_t dev, device_t child, int type, int *rid,
+    u_long start, u_long end, u_long count, u_int flags)
+{
+#ifdef NEW_PCIB
+    struct acpi_hpcib_softc *sc;
+    struct resource *res;
+#endif
+
+#if defined(__i386__) || defined(__amd64__)
+    start = hostb_alloc_start(type, start, end, count);
+#endif
+
+#ifdef NEW_PCIB
+    sc = device_get_softc(dev);
+    res = pcib_host_res_alloc(&sc->ap_host_res, child, type, rid, start, end,
+        count, flags);
+
+    /*
+     * XXX: If this is a request for a specific range, assume it is
+     * correct and pass it up to the parent. What we probably want to
+     * do long-term is explicitly trust any firmware-configured
+     * resources during the initial bus scan on boot and then disable
+     * this after that.
+     */
+    if (res == NULL && start + count - 1 == end)
+        res = bus_generic_alloc_resource(dev, child, type, rid, start, end,
+            count, flags);
+    return (res);
+#else
+    return (bus_generic_alloc_resource(dev, child, type, rid, start, end,
+        count, flags));
+#endif
+}
+
+#ifdef NEW_PCIB
+/*
+ * Adjust an existing child resource, constrained to the ranges this
+ * bridge decodes.  Marked static to match the forward declaration above.
+ */
+static int
+acpi_pcib_acpi_adjust_resource(device_t dev, device_t child, int type,
+    struct resource *r, u_long start, u_long end)
+{
+    struct acpi_hpcib_softc *sc;
+
+    sc = device_get_softc(dev);
+    return (pcib_host_res_adjust(&sc->ap_host_res, child, type, r, start,
+        end));
+}
+#endif
diff --git a/sys/dev/acpica/acpi_pcib_pci.c b/sys/dev/acpica/acpi_pcib_pci.c
new file mode 100644
index 0000000..7dbccd6
--- /dev/null
+++ b/sys/dev/acpica/acpi_pcib_pci.c
@@ -0,0 +1,154 @@
+/*-
+ * Copyright (c) 2000 Michael Smith
+ * Copyright (c) 2000 BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_acpi.h"
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+
+#include <dev/acpica/acpivar.h>
+#include <dev/acpica/acpi_pcibvar.h>
+
+#include <machine/pci_cfgreg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcib_private.h>
+#include "pcib_if.h"
+
+/* Hooks for the ACPI CA debugging infrastructure. */
+#define _COMPONENT ACPI_BUS
+ACPI_MODULE_NAME("PCI_PCI")
+
+/*
+ * Per-instance state for an ACPI-aware PCI-PCI bridge.  The generic
+ * pcib softc is embedded first so the base pcib methods (used via
+ * DEFINE_CLASS_1 below) can operate on this softc directly.
+ */
+struct acpi_pcib_softc {
+ struct pcib_softc ap_pcibsc; /* generic pcib state (must stay first) */
+ ACPI_HANDLE ap_handle; /* ACPI handle for this bridge */
+ ACPI_BUFFER ap_prt; /* interrupt routing table */
+};
+
+/*
+ * _ADR-to-handle lookup pair.
+ * NOTE(review): not referenced by any code visible in this file — confirm
+ * whether it is still needed or can be removed.
+ */
+struct acpi_pcib_lookup_info {
+ UINT32 address;
+ ACPI_HANDLE handle;
+};
+
+static int acpi_pcib_pci_probe(device_t bus);
+static int acpi_pcib_pci_attach(device_t bus);
+static int acpi_pcib_read_ivar(device_t dev, device_t child,
+ int which, uintptr_t *result);
+static int acpi_pcib_pci_route_interrupt(device_t pcib,
+ device_t dev, int pin);
+
+/*
+ * Newbus/pcib method table; unlisted methods are inherited from the
+ * base pcib driver via DEFINE_CLASS_1 below.  Terminated with
+ * DEVMETHOD_END for consistency with acpi_pcib_acpi.c.
+ */
+static device_method_t acpi_pcib_pci_methods[] = {
+    /* Device interface */
+    DEVMETHOD(device_probe, acpi_pcib_pci_probe),
+    DEVMETHOD(device_attach, acpi_pcib_pci_attach),
+
+    /* Bus interface */
+    DEVMETHOD(bus_read_ivar, acpi_pcib_read_ivar),
+
+    /* pcib interface */
+    DEVMETHOD(pcib_route_interrupt, acpi_pcib_pci_route_interrupt),
+    DEVMETHOD(pcib_power_for_sleep, acpi_pcib_power_for_sleep),
+
+    DEVMETHOD_END
+};
+
+static devclass_t pcib_devclass;
+
+/* Subclass of the generic pcib driver, attaching on the pci bus. */
+DEFINE_CLASS_1(pcib, acpi_pcib_pci_driver, acpi_pcib_pci_methods,
+ sizeof(struct acpi_pcib_softc), pcib_driver);
+DRIVER_MODULE(acpi_pcib, pci, acpi_pcib_pci_driver, pcib_devclass, 0, 0);
+MODULE_DEPEND(acpi_pcib, acpi, 1, 1, 1);
+
+/*
+ * Probe: match PCI-PCI bridges that have an ACPI handle, when ACPI is
+ * enabled for "pci" and config-register access works.  Returns a low
+ * priority so more specific drivers may override.
+ */
+static int
+acpi_pcib_pci_probe(device_t dev)
+{
+
+    if (pci_get_class(dev) != PCIC_BRIDGE ||
+        pci_get_subclass(dev) != PCIS_BRIDGE_PCI ||
+        acpi_disabled("pci"))
+        return (ENXIO);
+    if (acpi_get_handle(dev) == NULL || pci_cfgregopen() == 0)
+        return (ENXIO);
+
+    device_set_desc(dev, "ACPI PCI-PCI bridge");
+    return (-1000);
+}
+
+/*
+ * Attach: run the generic PCI-PCI bridge attach first, record our ACPI
+ * handle, then fetch the _PRT and attach the child PCI bus at the
+ * bridge's secondary bus number.
+ */
+static int
+acpi_pcib_pci_attach(device_t dev)
+{
+ struct acpi_pcib_softc *sc;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ pcib_attach_common(dev);
+ sc = device_get_softc(dev);
+ sc->ap_handle = acpi_get_handle(dev);
+ return (acpi_pcib_attach(dev, &sc->ap_prt, sc->ap_pcibsc.secbus));
+}
+
+/* Serve ACPI_IVAR_HANDLE locally; defer everything else to pcib. */
+static int
+acpi_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
+{
+    struct acpi_pcib_softc *sc;
+
+    sc = device_get_softc(dev);
+    if (which == ACPI_IVAR_HANDLE) {
+        *result = (uintptr_t)sc->ap_handle;
+        return (0);
+    }
+    return (pcib_read_ivar(dev, child, which, result));
+}
+
+/*
+ * Route a child's interrupt pin.  When no _PRT was cached at attach
+ * time, fall back to the standard PCI swizzle; otherwise use the ACPI
+ * routing table.
+ */
+static int
+acpi_pcib_pci_route_interrupt(device_t pcib, device_t dev, int pin)
+{
+    struct acpi_pcib_softc *sc = device_get_softc(pcib);
+
+    if (sc->ap_prt.Pointer == NULL)
+        return (pcib_route_interrupt(pcib, dev, pin));
+    return (acpi_pcib_route_interrupt(pcib, dev, pin, &sc->ap_prt));
+}
diff --git a/sys/dev/acpica/acpi_pcibvar.h b/sys/dev/acpica/acpi_pcibvar.h
new file mode 100644
index 0000000..9a4be07
--- /dev/null
+++ b/sys/dev/acpica/acpi_pcibvar.h
@@ -0,0 +1,46 @@
+/*-
+ * Copyright (c) 2000 Michael Smith
+ * Copyright (c) 2000 BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ACPI_PCIBVAR_H_
+#define _ACPI_PCIBVAR_H_
+
+#ifdef _KERNEL
+
+/* Shared helpers for the ACPI host-PCI and PCI-PCI bridge drivers. */
+
+/* Record that (pcib, slot, pin) references entry 'index' of link 'dev'. */
+void acpi_pci_link_add_reference(device_t dev, int index, device_t pcib,
+ int slot, int pin);
+/* Program a PCI interrupt link device; returns the IRQ selected. */
+int acpi_pci_link_route_interrupt(device_t dev, int index);
+/* Common bridge attach: fetch the _PRT into *prt and add the PCI bus. */
+int acpi_pcib_attach(device_t bus, ACPI_BUFFER *prt, int busno);
+/* Route a child's pin through the given _PRT buffer. */
+int acpi_pcib_route_interrupt(device_t pcib, device_t dev, int pin,
+ ACPI_BUFFER *prtbuf);
+/* Query the ACPI power state for 'dev' for the pending sleep (via *pstate). */
+int acpi_pcib_power_for_sleep(device_t pcib, device_t dev,
+ int *pstate);
+
+#endif /* _KERNEL */
+
+#endif /* !_ACPI_PCIBVAR_H_ */
diff --git a/sys/dev/acpica/acpi_perf.c b/sys/dev/acpica/acpi_perf.c
new file mode 100644
index 0000000..3f047cc
--- /dev/null
+++ b/sys/dev/acpica/acpi_perf.c
@@ -0,0 +1,596 @@
+/*-
+ * Copyright (c) 2003-2005 Nate Lawson (SDG)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_acpi.h"
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+#include <sys/sched.h>
+#include <sys/bus.h>
+#include <sys/cpu.h>
+#include <sys/power.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/sbuf.h>
+#include <sys/pcpu.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/rman.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+
+#include <dev/acpica/acpivar.h>
+
+#include "cpufreq_if.h"
+
+/*
+ * Support for ACPI processor performance states (Px) according to
+ * section 8.3.3 of the ACPI 2.0c specification.
+ */
+
+/* One _PSS performance state entry; field order matches the _PSS package. */
+struct acpi_px {
+ uint32_t core_freq; /* core frequency reported by _PSS */
+ uint32_t power; /* power draw in this state */
+ uint32_t trans_lat; /* transition latency */
+ uint32_t bm_lat; /* bus-master latency */
+ uint32_t ctrl_val; /* value written to the control register */
+ uint32_t sts_val; /* value expected in the status register */
+};
+
+/* Offsets in struct cf_setting array for storing driver-specific values. */
+#define PX_SPEC_CONTROL 0
+#define PX_SPEC_STATUS 1
+
+#define MAX_PX_STATES 16
+
+/* Per-CPU state for the ACPI performance-state (Px) driver. */
+struct acpi_perf_softc {
+ device_t dev;
+ ACPI_HANDLE handle; /* handle of the parent Processor object */
+ struct resource *perf_ctrl; /* Set new performance state. */
+ int perf_ctrl_type; /* Resource type for perf_ctrl. */
+ struct resource *perf_status; /* Check that transition succeeded. */
+ int perf_sts_type; /* Resource type for perf_status. */
+ struct acpi_px *px_states; /* ACPI perf states. */
+ uint32_t px_count; /* Total number of perf states. */
+ uint32_t px_max_avail; /* Lowest index state available. */
+ int px_curr_state; /* Active state index. */
+ int px_rid;
+ int info_only; /* Can we set new states? */
+};
+
+/* 32-bit read/write of the perf control/status register behind 'reg'. */
+#define PX_GET_REG(reg) \
+ (bus_space_read_4(rman_get_bustag((reg)), \
+ rman_get_bushandle((reg)), 0))
+#define PX_SET_REG(reg, val) \
+ (bus_space_write_4(rman_get_bustag((reg)), \
+ rman_get_bushandle((reg)), 0, (val)))
+
+#define ACPI_NOTIFY_PERF_STATES 0x80 /* _PSS changed. */
+
+static void acpi_perf_identify(driver_t *driver, device_t parent);
+static int acpi_perf_probe(device_t dev);
+static int acpi_perf_attach(device_t dev);
+static int acpi_perf_detach(device_t dev);
+static int acpi_perf_evaluate(device_t dev);
+static int acpi_px_to_set(device_t dev, struct acpi_px *px,
+ struct cf_setting *set);
+static void acpi_px_available(struct acpi_perf_softc *sc);
+static void acpi_px_startup(void *arg);
+static void acpi_px_notify(ACPI_HANDLE h, UINT32 notify, void *context);
+static int acpi_px_settings(device_t dev, struct cf_setting *sets,
+ int *count);
+static int acpi_px_set(device_t dev, const struct cf_setting *set);
+static int acpi_px_get(device_t dev, struct cf_setting *set);
+static int acpi_px_type(device_t dev, int *type);
+
+/* Newbus and cpufreq method table for the acpi_perf driver. */
+static device_method_t acpi_perf_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_identify, acpi_perf_identify),
+ DEVMETHOD(device_probe, acpi_perf_probe),
+ DEVMETHOD(device_attach, acpi_perf_attach),
+ DEVMETHOD(device_detach, acpi_perf_detach),
+
+ /* cpufreq interface */
+ DEVMETHOD(cpufreq_drv_set, acpi_px_set),
+ DEVMETHOD(cpufreq_drv_get, acpi_px_get),
+ DEVMETHOD(cpufreq_drv_type, acpi_px_type),
+ DEVMETHOD(cpufreq_drv_settings, acpi_px_settings),
+ {0, 0}
+};
+
+static driver_t acpi_perf_driver = {
+ "acpi_perf",
+ acpi_perf_methods,
+ sizeof(struct acpi_perf_softc),
+};
+
+/* Attach under each cpu device; requires the acpi module. */
+static devclass_t acpi_perf_devclass;
+DRIVER_MODULE(acpi_perf, cpu, acpi_perf_driver, acpi_perf_devclass, 0, 0);
+MODULE_DEPEND(acpi_perf, acpi, 1, 1, 1);
+
+static MALLOC_DEFINE(M_ACPIPERF, "acpi_perf", "ACPI Performance states");
+
+/*
+ * Identify: add an acpi_perf child under every CPU whose ACPI Processor
+ * object implements _PSS, and probe/attach it immediately so drivers
+ * that depend on it (for info about supported states) will see it.
+ * In future versions of the ACPI spec, CPUs can have different settings.
+ */
+static void
+acpi_perf_identify(driver_t *driver, device_t parent)
+{
+    ACPI_HANDLE h;
+    device_t child;
+
+    /* Bail out if a child already exists (doubly-invoked identify). */
+    if (device_find_child(parent, "acpi_perf", -1) != NULL)
+        return;
+
+    /* The parent Processor object must have a handle and a _PSS method. */
+    h = acpi_get_handle(parent);
+    if (h == NULL ||
+        ACPI_FAILURE(AcpiEvaluateObject(h, "_PSS", NULL, NULL)))
+        return;
+
+    child = BUS_ADD_CHILD(parent, 0, "acpi_perf", -1);
+    if (child == NULL) {
+        device_printf(parent, "add acpi_perf child failed\n");
+        return;
+    }
+    device_probe_and_attach(child);
+}
+
+/*
+ * Probe: verify the CPU exposes a valid _PCT (performance control)
+ * package.  If its registers are "functional fixed hardware"
+ * (EOPNOTSUPP from acpi_PkgGas), attach quietly in info-only mode.
+ */
+static int
+acpi_perf_probe(device_t dev)
+{
+ ACPI_HANDLE handle;
+ ACPI_OBJECT *pkg;
+ struct resource *res;
+ ACPI_BUFFER buf;
+ int error, rid, type;
+
+ if (resource_disabled("acpi_perf", 0))
+ return (ENXIO);
+
+ /*
+ * Check the performance state registers. If they are of type
+ * "functional fixed hardware", we attach quietly since we will
+ * only be providing information on settings to other drivers.
+ */
+ error = ENXIO;
+ handle = acpi_get_handle(dev);
+ buf.Pointer = NULL;
+ buf.Length = ACPI_ALLOCATE_BUFFER;
+ if (ACPI_FAILURE(AcpiEvaluateObject(handle, "_PCT", NULL, &buf)))
+ return (error);
+ pkg = (ACPI_OBJECT *)buf.Pointer;
+ if (ACPI_PKG_VALID(pkg, 2)) {
+ rid = 0;
+ error = acpi_PkgGas(dev, pkg, 0, &type, &rid, &res, 0);
+ switch (error) {
+ case 0:
+ /* Probe only: release the register resource immediately. */
+ bus_release_resource(dev, type, rid, res);
+ bus_delete_resource(dev, type, rid);
+ device_set_desc(dev, "ACPI CPU Frequency Control");
+ break;
+ case EOPNOTSUPP:
+ device_quiet(dev);
+ error = 0;
+ break;
+ }
+ }
+ AcpiOsFree(buf.Pointer);
+
+ return (error);
+}
+
+/*
+ * Attach: evaluate the Px state data (_PSS/_PCT), schedule the startup
+ * hook on the ACPI notify taskqueue, and register with the cpufreq
+ * framework unless the hardware only supports reporting (info_only).
+ */
+static int
+acpi_perf_attach(device_t dev)
+{
+ struct acpi_perf_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ sc->handle = acpi_get_handle(dev);
+ sc->px_max_avail = 0;
+ sc->px_curr_state = CPUFREQ_VAL_UNKNOWN;
+ if (acpi_perf_evaluate(dev) != 0)
+ return (ENXIO);
+ AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_px_startup, NULL);
+ if (!sc->info_only)
+ cpufreq_register(dev);
+
+ return (0);
+}
+
+/*
+ * Detach is not supported yet: resources and the notify handler are
+ * never torn down, so always refuse.
+ */
+static int
+acpi_perf_detach(device_t dev)
+{
+ /* TODO: teardown registers, remove notify handler. */
+ return (ENXIO);
+}
+
/*
 * Probe and setup any valid performance states (Px).
 *
 * Evaluates _PSS to build sc->px_states (skipping bogus and duplicate
 * entries) and _PCT to map the control/status registers.  Returns 0 on
 * success; on any failure everything allocated here is released.
 */
static int
acpi_perf_evaluate(device_t dev)
{
    struct acpi_perf_softc *sc;
    ACPI_BUFFER buf;
    ACPI_OBJECT *pkg, *res;
    ACPI_STATUS status;
    int count, error, i, j;
    static int once = 1;	/* rate-limits the "invalid _PSS" warning */
    uint32_t *p;

    /* Get the control values and parameters for each state. */
    error = ENXIO;
    sc = device_get_softc(dev);
    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->handle, "_PSS", NULL, &buf);
    if (ACPI_FAILURE(status))
        return (ENXIO);

    pkg = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(pkg, 1)) {
        device_printf(dev, "invalid top level _PSS package\n");
        goto out;
    }
    sc->px_count = pkg->Package.Count;

    /* NOTE(review): M_WAITOK never returns NULL, so this check is defensive. */
    sc->px_states = malloc(sc->px_count * sizeof(struct acpi_px),
        M_ACPIPERF, M_WAITOK | M_ZERO);
    if (sc->px_states == NULL)
        goto out;

    /*
     * Each state is a package of {CoreFreq, Power, TransitionLatency,
     * BusMasterLatency, ControlVal, StatusVal}, sorted from highest
     * performance to lowest.
     */
    count = 0;
    for (i = 0; i < sc->px_count; i++) {
        res = &pkg->Package.Elements[i];
        if (!ACPI_PKG_VALID(res, 6)) {
            if (once) {
                once = 0;
                device_printf(dev, "invalid _PSS package\n");
            }
            continue;
        }

        /*
         * Parse the rest of the package into the struct.
         * NOTE(review): walks six consecutive uint32_t fields starting
         * at core_freq, so it relies on struct acpi_px's field order
         * matching the _PSS element order — confirm in the header.
         */
        p = &sc->px_states[count].core_freq;
        for (j = 0; j < 6; j++, p++)
            acpi_PkgInt32(res, j, p);

        /*
         * Check for some impossible frequencies that some systems
         * use to indicate they don't actually support this Px state.
         */
        if (sc->px_states[count].core_freq == 0 ||
            sc->px_states[count].core_freq == 9999 ||
            sc->px_states[count].core_freq == 0x9999 ||
            sc->px_states[count].core_freq >= 0xffff)
            continue;

        /* Check for duplicate entries */
        if (count > 0 &&
            sc->px_states[count - 1].core_freq ==
                sc->px_states[count].core_freq)
            continue;

        count++;
    }
    sc->px_count = count;

    /* No valid Px state found so give up. */
    if (count == 0)
        goto out;
    AcpiOsFree(buf.Pointer);

    /* Get the control and status registers (one of each). */
    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->handle, "_PCT", NULL, &buf);
    if (ACPI_FAILURE(status))
        goto out;

    /* Check the package of two registers, each a Buffer in GAS format. */
    pkg = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(pkg, 2)) {
        device_printf(dev, "invalid perf register package\n");
        goto out;
    }

    error = acpi_PkgGas(sc->dev, pkg, 0, &sc->perf_ctrl_type, &sc->px_rid,
        &sc->perf_ctrl, 0);
    if (error) {
        /*
         * If the register is of type FFixedHW, we can only return
         * info, we can't get or set new settings.
         */
        if (error == EOPNOTSUPP) {
            sc->info_only = TRUE;
            error = 0;
        } else
            device_printf(dev, "failed in PERF_CTL attach\n");
        goto out;
    }
    sc->px_rid++;

    error = acpi_PkgGas(sc->dev, pkg, 1, &sc->perf_sts_type, &sc->px_rid,
        &sc->perf_status, 0);
    if (error) {
        if (error == EOPNOTSUPP) {
            sc->info_only = TRUE;
            error = 0;
        } else
            device_printf(dev, "failed in PERF_STATUS attach\n");
        goto out;
    }
    sc->px_rid++;

    /* Get our current limit and register for notifies. */
    acpi_px_available(sc);
    AcpiInstallNotifyHandler(sc->handle, ACPI_DEVICE_NOTIFY,
        acpi_px_notify, sc);
    error = 0;

out:
    if (error) {
        if (sc->px_states) {
            free(sc->px_states, M_ACPIPERF);
            sc->px_states = NULL;
        }
        /*
         * NOTE(review): rids 0 and 1 are hard-coded here; this assumes
         * sc->px_rid started at 0 above so the control register got
         * rid 0 and the status register rid 1 — confirm the softc is
         * zeroed at attach time.
         */
        if (sc->perf_ctrl) {
            bus_release_resource(sc->dev, sc->perf_ctrl_type, 0,
                sc->perf_ctrl);
            bus_delete_resource(sc->dev, sc->perf_ctrl_type, 0);
            sc->perf_ctrl = NULL;
        }
        if (sc->perf_status) {
            bus_release_resource(sc->dev, sc->perf_sts_type, 1,
                sc->perf_status);
            bus_delete_resource(sc->dev, sc->perf_sts_type, 1);
            sc->perf_status = NULL;
        }
        sc->px_rid = 0;
        sc->px_count = 0;
    }
    if (buf.Pointer)
        AcpiOsFree(buf.Pointer);
    return (error);
}
+
+static void
+acpi_px_startup(void *arg)
+{
+
+ /* Signal to the platform that we are taking over CPU control. */
+ if (AcpiGbl_FADT.PstateControl == 0)
+ return;
+ ACPI_LOCK(acpi);
+ AcpiOsWritePort(AcpiGbl_FADT.SmiCommand, AcpiGbl_FADT.PstateControl, 8);
+ ACPI_UNLOCK(acpi);
+}
+
+static void
+acpi_px_notify(ACPI_HANDLE h, UINT32 notify, void *context)
+{
+ struct acpi_perf_softc *sc;
+
+ sc = context;
+ if (notify != ACPI_NOTIFY_PERF_STATES)
+ return;
+
+ acpi_px_available(sc);
+
+ /* TODO: Implement notification when frequency changes. */
+}
+
+/*
+ * Find the highest currently-supported performance state.
+ * This can be called at runtime (e.g., due to a docking event) at
+ * the request of a Notify on the processor object.
+ */
+static void
+acpi_px_available(struct acpi_perf_softc *sc)
+{
+ ACPI_STATUS status;
+ struct cf_setting set;
+
+ status = acpi_GetInteger(sc->handle, "_PPC", &sc->px_max_avail);
+
+ /* If the old state is too high, set current state to the new max. */
+ if (ACPI_SUCCESS(status)) {
+ if (sc->px_curr_state != CPUFREQ_VAL_UNKNOWN &&
+ sc->px_curr_state > sc->px_max_avail) {
+ acpi_px_to_set(sc->dev,
+ &sc->px_states[sc->px_max_avail], &set);
+ acpi_px_set(sc->dev, &set);
+ }
+ } else
+ sc->px_max_avail = 0;
+}
+
+static int
+acpi_px_to_set(device_t dev, struct acpi_px *px, struct cf_setting *set)
+{
+
+ if (px == NULL || set == NULL)
+ return (EINVAL);
+
+ set->freq = px->core_freq;
+ set->power = px->power;
+ /* XXX Include BM latency too? */
+ set->lat = px->trans_lat;
+ set->volts = CPUFREQ_VAL_UNKNOWN;
+ set->dev = dev;
+ set->spec[PX_SPEC_CONTROL] = px->ctrl_val;
+ set->spec[PX_SPEC_STATUS] = px->sts_val;
+
+ return (0);
+}
+
+static int
+acpi_px_settings(device_t dev, struct cf_setting *sets, int *count)
+{
+ struct acpi_perf_softc *sc;
+ int x, y;
+
+ sc = device_get_softc(dev);
+ if (sets == NULL || count == NULL)
+ return (EINVAL);
+ if (*count < sc->px_count - sc->px_max_avail)
+ return (E2BIG);
+
+ /* Return a list of settings that are currently valid. */
+ y = 0;
+ for (x = sc->px_max_avail; x < sc->px_count; x++, y++)
+ acpi_px_to_set(dev, &sc->px_states[x], &sets[y]);
+ *count = sc->px_count - sc->px_max_avail;
+
+ return (0);
+}
+
/*
 * Switch the CPU to the Px state whose frequency matches set->freq.
 *
 * Writes the state's control value to PERF_CTL and then polls
 * PERF_STATUS for up to 10 ms for confirmation.  Returns 0 on success,
 * EINVAL for an unknown frequency, ENXIO if transitions are
 * unsupported or the transition did not complete in time.
 */
static int
acpi_px_set(device_t dev, const struct cf_setting *set)
{
    struct acpi_perf_softc *sc;
    int i, status, sts_val, tries;

    if (set == NULL)
        return (EINVAL);
    sc = device_get_softc(dev);

    /* If we can't set new states, return immediately. */
    if (sc->info_only)
        return (ENXIO);

    /* Look up appropriate state, based on frequency. */
    for (i = sc->px_max_avail; i < sc->px_count; i++) {
        if (CPUFREQ_CMP(set->freq, sc->px_states[i].core_freq))
            break;
    }
    if (i == sc->px_count)
        return (EINVAL);

    /* Write the appropriate value to the register. */
    PX_SET_REG(sc->perf_ctrl, sc->px_states[i].ctrl_val);

    /*
     * Try for up to 10 ms to verify the desired state was selected.
     * This is longer than the standard says (1 ms) but in some modes,
     * systems may take longer to respond.
     */
    sts_val = sc->px_states[i].sts_val;
    for (tries = 0; tries < 1000; tries++) {
        status = PX_GET_REG(sc->perf_status);

        /*
         * If we match the status or the desired status is 8 bits
         * and matches the relevant bits, assume we succeeded.  It
         * appears some systems (IBM R32) expect byte-wide access
         * even though the standard says the register is 32-bit.
         */
        if (status == sts_val ||
            ((sts_val & ~0xff) == 0 && (status & 0xff) == sts_val))
            break;
        DELAY(10);    /* 1000 polls x 10 us = 10 ms budget */
    }
    if (tries == 1000) {
        device_printf(dev, "Px transition to %d failed\n",
            sc->px_states[i].core_freq);
        return (ENXIO);
    }
    /* Cache the verified state so acpi_px_get() can answer quickly. */
    sc->px_curr_state = i;

    return (0);
}
+
+static int
+acpi_px_get(device_t dev, struct cf_setting *set)
+{
+ struct acpi_perf_softc *sc;
+ uint64_t rate;
+ int i;
+ struct pcpu *pc;
+
+ if (set == NULL)
+ return (EINVAL);
+ sc = device_get_softc(dev);
+
+ /* If we can't get new states, return immediately. */
+ if (sc->info_only)
+ return (ENXIO);
+
+ /* If we've set the rate before, use the cached value. */
+ if (sc->px_curr_state != CPUFREQ_VAL_UNKNOWN) {
+ acpi_px_to_set(dev, &sc->px_states[sc->px_curr_state], set);
+ return (0);
+ }
+
+ /* Otherwise, estimate and try to match against our settings. */
+ pc = cpu_get_pcpu(dev);
+ if (pc == NULL)
+ return (ENXIO);
+ cpu_est_clockrate(pc->pc_cpuid, &rate);
+ rate /= 1000000;
+ for (i = 0; i < sc->px_count; i++) {
+ if (CPUFREQ_CMP(sc->px_states[i].core_freq, rate)) {
+ sc->px_curr_state = i;
+ acpi_px_to_set(dev, &sc->px_states[i], set);
+ break;
+ }
+ }
+
+ /* No match, give up. */
+ if (i == sc->px_count) {
+ sc->px_curr_state = CPUFREQ_VAL_UNKNOWN;
+ set->freq = CPUFREQ_VAL_UNKNOWN;
+ }
+
+ return (0);
+}
+
+static int
+acpi_px_type(device_t dev, int *type)
+{
+ struct acpi_perf_softc *sc;
+
+ if (type == NULL)
+ return (EINVAL);
+ sc = device_get_softc(dev);
+
+ *type = CPUFREQ_TYPE_ABSOLUTE;
+ if (sc->info_only)
+ *type |= CPUFREQ_FLAG_INFO_ONLY;
+ return (0);
+}
diff --git a/sys/dev/acpica/acpi_powerres.c b/sys/dev/acpica/acpi_powerres.c
new file mode 100644
index 0000000..ba08286
--- /dev/null
+++ b/sys/dev/acpica/acpi_powerres.c
@@ -0,0 +1,757 @@
+/*-
+ * Copyright (c) 2001 Michael Smith
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_acpi.h"
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/bus.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+
+#include <dev/acpica/acpivar.h>
+
+/*
+ * ACPI power resource management.
+ *
+ * Power resource behaviour is slightly complicated by the fact that
+ * a single power resource may provide power for more than one device.
+ * Thus, we must track the device(s) being powered by a given power
+ * resource, and only deactivate it when there are no powered devices.
+ *
+ * Note that this only manages resources for known devices. There is an
+ * ugly case where we may turn of power to a device which is in use because
+ * we don't know that it depends on a given resource. We should perhaps
+ * try to be smarter about this, but a more complete solution would involve
+ * scanning all of the ACPI namespace to find devices we're not currently
+ * aware of, and this raises questions about whether they should be left
+ * on, turned off, etc.
+ */
+
static MALLOC_DEFINE(M_ACPIPWR, "acpipwr", "ACPI power resources");

/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT ACPI_POWERRES
ACPI_MODULE_NAME("POWERRES")

/* Return values from _STA on a power resource */
#define ACPI_PWR_OFF 0
#define ACPI_PWR_ON 1
#define ACPI_PWR_UNK (-1)    /* state not yet read from _STA */

/*
 * A relationship between a power resource and a consumer.
 * Each reference is linked onto both the resource's and the
 * consumer's list so either side can enumerate its peers.
 */
struct acpi_powerreference {
    struct acpi_powerconsumer *ar_consumer;
    struct acpi_powerresource *ar_resource;
    TAILQ_ENTRY(acpi_powerreference) ar_rlink; /* link on resource list */
    TAILQ_ENTRY(acpi_powerreference) ar_clink; /* link on consumer */
};

/* A power-managed device. */
struct acpi_powerconsumer {
    /* Device which is powered */
    ACPI_HANDLE ac_consumer;
    /* Current Dx state (ACPI_STATE_Dx), or ACPI_STATE_UNKNOWN. */
    int ac_state;
    TAILQ_ENTRY(acpi_powerconsumer) ac_link;
    TAILQ_HEAD(,acpi_powerreference) ac_references;
};

/* A power resource. */
struct acpi_powerresource {
    TAILQ_ENTRY(acpi_powerresource) ap_link;
    TAILQ_HEAD(,acpi_powerreference) ap_references;
    ACPI_HANDLE ap_resource;
    /* SystemLevel/ResourceOrder from the PowerResource object. */
    UINT64 ap_systemlevel;
    UINT64 ap_order;
    /* ACPI_PWR_ON/OFF/UNK, cached from _STA and our own switches. */
    int ap_state;
};

/* Global lists; acpi_powerresources is kept sorted by ap_order. */
static TAILQ_HEAD(acpi_powerresource_list, acpi_powerresource)
    acpi_powerresources;
static TAILQ_HEAD(acpi_powerconsumer_list, acpi_powerconsumer)
    acpi_powerconsumers;
/* Serializes all access to the lists and structures above. */
ACPI_SERIAL_DECL(powerres, "ACPI power resources");

static ACPI_STATUS acpi_pwr_register_consumer(ACPI_HANDLE consumer);
#ifdef notyet
static ACPI_STATUS acpi_pwr_deregister_consumer(ACPI_HANDLE consumer);
#endif /* notyet */
static ACPI_STATUS acpi_pwr_register_resource(ACPI_HANDLE res);
#ifdef notyet
static ACPI_STATUS acpi_pwr_deregister_resource(ACPI_HANDLE res);
#endif /* notyet */
static void acpi_pwr_reference_resource(ACPI_OBJECT *obj,
                    void *arg);
static int acpi_pwr_dereference_resource(struct acpi_powerconsumer
                    *pc);
static ACPI_STATUS acpi_pwr_switch_power(void);
static struct acpi_powerresource
                *acpi_pwr_find_resource(ACPI_HANDLE res);
static struct acpi_powerconsumer
                *acpi_pwr_find_consumer(ACPI_HANDLE consumer);
+
+/* Initialise our lists. */
+static void
+acpi_pwr_init(void *junk)
+{
+ TAILQ_INIT(&acpi_powerresources);
+ TAILQ_INIT(&acpi_powerconsumers);
+}
+SYSINIT(acpi_powerresource, SI_SUB_TUNABLES, SI_ORDER_ANY, acpi_pwr_init, NULL);
+
+/*
+ * Register a power resource.
+ *
+ * It's OK to call this if we already know about the resource.
+ */
static ACPI_STATUS
acpi_pwr_register_resource(ACPI_HANDLE res)
{
    ACPI_STATUS status;
    ACPI_BUFFER buf;
    ACPI_OBJECT *obj;
    struct acpi_powerresource *rp, *srp;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
    ACPI_SERIAL_ASSERT(powerres);

    rp = NULL;
    buf.Pointer = NULL;

    /* Look to see if we know about this resource */
    if (acpi_pwr_find_resource(res) != NULL)
        return_ACPI_STATUS (AE_OK); /* already know about it */

    /* Allocate a new resource */
    if ((rp = malloc(sizeof(*rp), M_ACPIPWR, M_NOWAIT | M_ZERO)) == NULL) {
        status = AE_NO_MEMORY;
        goto out;
    }
    TAILQ_INIT(&rp->ap_references);
    rp->ap_resource = res;

    /* Get the Power Resource object (evaluate the handle itself). */
    buf.Length = ACPI_ALLOCATE_BUFFER;
    if (ACPI_FAILURE(status = AcpiEvaluateObject(res, NULL, NULL, &buf))) {
        ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "no power resource object\n"));
        goto out;
    }
    obj = buf.Pointer;
    if (obj->Type != ACPI_TYPE_POWER) {
        ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
            "questionable power resource object %s\n",
            acpi_name(res)));
        status = AE_TYPE;
        goto out;
    }
    rp->ap_systemlevel = obj->PowerResource.SystemLevel;
    rp->ap_order = obj->PowerResource.ResourceOrder;
    /* _STA has not been read yet; acpi_pwr_switch_power() fills this in. */
    rp->ap_state = ACPI_PWR_UNK;

    /*
     * Sort the resource into the list, keeping it ordered by ap_order
     * (ResourceOrder) so switch passes apply resources in spec order.
     */
    status = AE_OK;
    srp = TAILQ_FIRST(&acpi_powerresources);
    if (srp == NULL || rp->ap_order < srp->ap_order) {
        TAILQ_INSERT_HEAD(&acpi_powerresources, rp, ap_link);
        goto done;
    }
    TAILQ_FOREACH(srp, &acpi_powerresources, ap_link) {
        if (rp->ap_order < srp->ap_order) {
            TAILQ_INSERT_BEFORE(srp, rp, ap_link);
            goto done;
        }
    }
    TAILQ_INSERT_TAIL(&acpi_powerresources, rp, ap_link);

 done:
    ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
        "registered power resource %s\n", acpi_name(res)));

    /* NB: "done" falls through to "out"; rp is only freed on failure. */
 out:
    if (buf.Pointer != NULL)
        AcpiOsFree(buf.Pointer);
    if (ACPI_FAILURE(status) && rp != NULL)
        free(rp, M_ACPIPWR);
    return_ACPI_STATUS (status);
}
+
+#ifdef notyet
+/*
+ * Deregister a power resource.
+ */
+static ACPI_STATUS
+acpi_pwr_deregister_resource(ACPI_HANDLE res)
+{
+ struct acpi_powerresource *rp;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+ ACPI_SERIAL_ASSERT(powerres);
+
+ rp = NULL;
+
+ /* Find the resource */
+ if ((rp = acpi_pwr_find_resource(res)) == NULL)
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
+
+ /* Check that there are no consumers referencing this resource */
+ if (TAILQ_FIRST(&rp->ap_references) != NULL)
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
+
+ /* Pull it off the list and free it */
+ TAILQ_REMOVE(&acpi_powerresources, rp, ap_link);
+ free(rp, M_ACPIPWR);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "deregistered power resource %s\n",
+ acpi_name(res)));
+
+ return_ACPI_STATUS (AE_OK);
+}
+#endif /* notyet */
+
+/*
+ * Register a power consumer.
+ *
+ * It's OK to call this if we already know about the consumer.
+ */
+static ACPI_STATUS
+acpi_pwr_register_consumer(ACPI_HANDLE consumer)
+{
+ struct acpi_powerconsumer *pc;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+ ACPI_SERIAL_ASSERT(powerres);
+
+ /* Check to see whether we know about this consumer already */
+ if (acpi_pwr_find_consumer(consumer) != NULL)
+ return_ACPI_STATUS (AE_OK);
+
+ /* Allocate a new power consumer */
+ if ((pc = malloc(sizeof(*pc), M_ACPIPWR, M_NOWAIT)) == NULL)
+ return_ACPI_STATUS (AE_NO_MEMORY);
+ TAILQ_INSERT_HEAD(&acpi_powerconsumers, pc, ac_link);
+ TAILQ_INIT(&pc->ac_references);
+ pc->ac_consumer = consumer;
+
+ /* XXX we should try to find its current state */
+ pc->ac_state = ACPI_STATE_UNKNOWN;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "registered power consumer %s\n",
+ acpi_name(consumer)));
+
+ return_ACPI_STATUS (AE_OK);
+}
+
+#ifdef notyet
+/*
+ * Deregister a power consumer.
+ *
+ * This should only be done once the consumer has been powered off.
+ * (XXX is this correct? Check once implemented)
+ */
+static ACPI_STATUS
+acpi_pwr_deregister_consumer(ACPI_HANDLE consumer)
+{
+ struct acpi_powerconsumer *pc;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+ ACPI_SERIAL_ASSERT(powerres);
+
+ /* Find the consumer */
+ if ((pc = acpi_pwr_find_consumer(consumer)) == NULL)
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
+
+ /* Make sure the consumer's not referencing anything right now */
+ if (TAILQ_FIRST(&pc->ac_references) != NULL)
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
+
+ /* Pull the consumer off the list and free it */
+ TAILQ_REMOVE(&acpi_powerconsumers, pc, ac_link);
+ free(pc, M_ACPIPWR);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "deregistered power consumer %s\n",
+ acpi_name(consumer)));
+
+ return_ACPI_STATUS (AE_OK);
+}
+#endif /* notyet */
+
+/*
+ * Set a power consumer to a particular power state.
+ */
ACPI_STATUS
acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state)
{
    struct acpi_powerconsumer *pc;
    ACPI_HANDLE method_handle, reslist_handle, pr0_handle;
    ACPI_BUFFER reslist_buffer;
    ACPI_OBJECT *reslist_object;
    ACPI_STATUS status;
    char *method_name, *reslist_name;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* It's never ok to switch a non-existent consumer. */
    if (consumer == NULL)
        return_ACPI_STATUS (AE_NOT_FOUND);
    reslist_buffer.Pointer = NULL;
    reslist_object = NULL;
    ACPI_SERIAL_BEGIN(powerres);

    /* Find the consumer, registering it on first use. */
    if ((pc = acpi_pwr_find_consumer(consumer)) == NULL) {
        if (ACPI_FAILURE(status = acpi_pwr_register_consumer(consumer)))
            goto out;
        if ((pc = acpi_pwr_find_consumer(consumer)) == NULL)
            panic("acpi added power consumer but can't find it");
    }

    /* Check for valid transitions.  We can only go to D0 from D3. */
    status = AE_BAD_PARAMETER;
    if (pc->ac_state == ACPI_STATE_D3 && state != ACPI_STATE_D0)
        goto out;

    /* Find transition mechanism(s): _PSx method and/or _PRx list. */
    switch (state) {
    case ACPI_STATE_D0:
        method_name = "_PS0";
        reslist_name = "_PR0";
        break;
    case ACPI_STATE_D1:
        method_name = "_PS1";
        reslist_name = "_PR1";
        break;
    case ACPI_STATE_D2:
        method_name = "_PS2";
        reslist_name = "_PR2";
        break;
    case ACPI_STATE_D3:
        method_name = "_PS3";
        reslist_name = "_PR3";
        break;
    default:
        /* Not a Dx state; status is still AE_BAD_PARAMETER here. */
        goto out;
    }
    ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "setup to switch %s D%d -> D%d\n",
        acpi_name(consumer), pc->ac_state, state));

    /*
     * Verify that this state is supported, ie. one of method or
     * reslist must be present.  We need to do this before we go
     * dereferencing resources (since we might be trying to go to
     * a state we don't support).
     *
     * Note that if any states are supported, the device has to
     * support D0 and D3.  It's never an error to try to go to
     * D0.
     */
    if (ACPI_FAILURE(AcpiGetHandle(consumer, method_name, &method_handle)))
        method_handle = NULL;
    if (ACPI_FAILURE(AcpiGetHandle(consumer, reslist_name, &reslist_handle)))
        reslist_handle = NULL;
    if (reslist_handle == NULL && method_handle == NULL) {
        if (state == ACPI_STATE_D0) {
            pc->ac_state = ACPI_STATE_D0;
            status = AE_OK;
            goto out;
        }
        if (state != ACPI_STATE_D3) {
            ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
                "attempt to set unsupported state D%d\n", state));
            goto out;
        }

        /*
         * Turn off the resources listed in _PR0 to go to D3.  If there is
         * no _PR0 method, this object doesn't support ACPI power states.
         */
        if (ACPI_FAILURE(AcpiGetHandle(consumer, "_PR0", &pr0_handle))) {
            status = AE_NOT_FOUND;
            ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
                "device missing _PR0 (desired state was D%d)\n", state));
            goto out;
        }
        reslist_buffer.Length = ACPI_ALLOCATE_BUFFER;
        status = AcpiEvaluateObject(pr0_handle, NULL, NULL, &reslist_buffer);
        if (ACPI_FAILURE(status)) {
            ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
                "can't evaluate _PR0 for device %s, state D%d\n",
                acpi_name(consumer), state));
            goto out;
        }
        reslist_object = (ACPI_OBJECT *)reslist_buffer.Pointer;
        if (!ACPI_PKG_VALID(reslist_object, 1)) {
            ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
                "invalid package object for state D%d\n", state));
            status = AE_TYPE;
            goto out;
        }
        /*
         * _PR0 was only checked for validity; drop the buffer so the
         * dereference pass below removes all references, which turns
         * those resources off in the switch pass.
         */
        AcpiOsFree(reslist_buffer.Pointer);
        reslist_buffer.Pointer = NULL;
        reslist_object = NULL;
    }

    /*
     * Check that we can actually fetch the list of power resources
     */
    if (reslist_handle != NULL) {
        reslist_buffer.Length = ACPI_ALLOCATE_BUFFER;
        status = AcpiEvaluateObject(reslist_handle, NULL, NULL,
            &reslist_buffer);
        if (ACPI_FAILURE(status)) {
            ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
                "can't evaluate resource list %s\n",
                acpi_name(reslist_handle)));
            goto out;
        }
        reslist_object = (ACPI_OBJECT *)reslist_buffer.Pointer;
        if (reslist_object->Type != ACPI_TYPE_PACKAGE) {
            ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
                "resource list is not ACPI_TYPE_PACKAGE (%d)\n",
                reslist_object->Type));
            status = AE_TYPE;
            goto out;
        }
    }

    /*
     * Now we are ready to switch, so kill off any current power
     * resource references.
     */
    acpi_pwr_dereference_resource(pc);

    /*
     * Add new power resource references, if we have any.  Traverse the
     * package that we got from evaluating reslist_handle, and look up each
     * of the resources that are referenced.
     */
    if (reslist_object != NULL) {
        ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "referencing %d new resources\n",
            reslist_object->Package.Count));
        acpi_ForeachPackageObject(reslist_object, acpi_pwr_reference_resource,
            pc);
    }

    /*
     * If we changed anything in the resource list, we need to run a switch
     * pass now.
     */
    if (ACPI_FAILURE(status = acpi_pwr_switch_power())) {
        ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
            "failed to switch resources from %s to D%d\n",
            acpi_name(consumer), state));

        /* XXX is this appropriate?  Should we return to previous state? */
        goto out;
    }

    /* Invoke power state switch method (if present) */
    if (method_handle != NULL) {
        ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
            "invoking state transition method %s\n",
            acpi_name(method_handle)));
        status = AcpiEvaluateObject(method_handle, NULL, NULL, NULL);
        if (ACPI_FAILURE(status)) {
            ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "failed to set state - %s\n",
                AcpiFormatException(status)));
            pc->ac_state = ACPI_STATE_UNKNOWN;

            /* XXX Should we return to previous state? */
            goto out;
        }
    }

    /* Transition was successful */
    pc->ac_state = state;
    status = AE_OK;

out:
    ACPI_SERIAL_END(powerres);
    if (reslist_buffer.Pointer != NULL)
        AcpiOsFree(reslist_buffer.Pointer);
    return_ACPI_STATUS (status);
}
+
+/* Enable or disable a power resource for wake */
ACPI_STATUS
acpi_pwr_wake_enable(ACPI_HANDLE consumer, int enable)
{
    ACPI_STATUS status;
    struct acpi_powerconsumer *pc;
    struct acpi_prw_data prw;
    int i;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (consumer == NULL)
        return (AE_BAD_PARAMETER);

    ACPI_SERIAL_BEGIN(powerres);
    /* Register the consumer on first use, as in switch_consumer(). */
    if ((pc = acpi_pwr_find_consumer(consumer)) == NULL) {
        if (ACPI_FAILURE(status = acpi_pwr_register_consumer(consumer)))
            goto out;
        if ((pc = acpi_pwr_find_consumer(consumer)) == NULL)
            panic("acpi wake added power consumer but can't find it");
    }

    /* Best-effort: a device with no parseable _PRW is not an error. */
    status = AE_OK;
    if (acpi_parse_prw(consumer, &prw) != 0)
        goto out;
    /*
     * NOTE(review): when disabling, acpi_pwr_dereference_resource()
     * drops ALL of this consumer's references (not just wake ones) and
     * is invoked once per _PRW resource even though the first call
     * drains the list — presumably intentional best-effort; confirm.
     */
    for (i = 0; i < prw.power_res_count; i++)
        if (enable)
            acpi_pwr_reference_resource(&prw.power_res[i], pc);
        else
            acpi_pwr_dereference_resource(pc);

    /* Apply the new reference counts to the hardware. */
    if (prw.power_res_count > 0)
        acpi_pwr_switch_power();

out:
    ACPI_SERIAL_END(powerres);
    return (status);
}
+
+/*
+ * Called to create a reference between a power consumer and a power resource
+ * identified in the object.
+ */
static void
acpi_pwr_reference_resource(ACPI_OBJECT *obj, void *arg)
{
    struct acpi_powerconsumer *pc = (struct acpi_powerconsumer *)arg;
    struct acpi_powerreference *pr;
    struct acpi_powerresource *rp;
    ACPI_HANDLE res;
    ACPI_STATUS status;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
    ACPI_SERIAL_ASSERT(powerres);

    /* Resolve the package element (reference or string) to a handle. */
    res = acpi_GetReference(NULL, obj);
    if (res == NULL) {
        ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
            "can't create a power reference for object type %d\n",
            obj->Type));
        return_VOID;
    }

    /* Create/look up the resource */
    if (ACPI_FAILURE(status = acpi_pwr_register_resource(res))) {
        ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
            "couldn't register power resource %s - %s\n",
            obj->String.Pointer, AcpiFormatException(status)));
        return_VOID;
    }
    if ((rp = acpi_pwr_find_resource(res)) == NULL) {
        ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "power resource list corrupted\n"));
        return_VOID;
    }
    ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "found power resource %s\n",
        acpi_name(rp->ap_resource)));

    /*
     * Create a reference between the consumer and resource.
     * NB: failures here are silent by design — the callback returns
     * void, so the caller cannot observe a missing reference.
     */
    if ((pr = malloc(sizeof(*pr), M_ACPIPWR, M_NOWAIT | M_ZERO)) == NULL) {
        ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
            "allocation failed for a power consumer reference\n"));
        return_VOID;
    }
    pr->ar_consumer = pc;
    pr->ar_resource = rp;
    /* Link the reference onto both owners' lists (see struct comment). */
    TAILQ_INSERT_TAIL(&pc->ac_references, pr, ar_clink);
    TAILQ_INSERT_TAIL(&rp->ap_references, pr, ar_rlink);

    return_VOID;
}
+
+static int
+acpi_pwr_dereference_resource(struct acpi_powerconsumer *pc)
+{
+ struct acpi_powerreference *pr;
+ int changed;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+ ACPI_SERIAL_ASSERT(powerres);
+
+ changed = 0;
+ while ((pr = TAILQ_FIRST(&pc->ac_references)) != NULL) {
+ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "removing reference to %s\n",
+ acpi_name(pr->ar_resource->ap_resource)));
+ TAILQ_REMOVE(&pr->ar_resource->ap_references, pr, ar_rlink);
+ TAILQ_REMOVE(&pc->ac_references, pr, ar_clink);
+ free(pr, M_ACPIPWR);
+ changed = 1;
+ }
+
+ return (changed);
+}
+
+/*
+ * Switch power resources to conform to the desired state.
+ *
+ * Consumers may have modified the power resource list in an arbitrary
+ * fashion; we sweep it in sequence order.
+ */
static ACPI_STATUS
acpi_pwr_switch_power(void)
{
    struct acpi_powerresource *rp;
    ACPI_STATUS status;
    int cur;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
    ACPI_SERIAL_ASSERT(powerres);

    /*
     * Sweep the list forwards turning things on.  The list is sorted
     * by ResourceOrder, so power-on happens in ascending order.
     */
    TAILQ_FOREACH(rp, &acpi_powerresources, ap_link) {
        if (TAILQ_FIRST(&rp->ap_references) == NULL) {
            ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
                "%s has no references, not turning on\n",
                acpi_name(rp->ap_resource)));
            continue;
        }

        /* We could cache this if we trusted it not to change under us */
        status = acpi_GetInteger(rp->ap_resource, "_STA", &cur);
        if (ACPI_FAILURE(status)) {
            ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "can't get status of %s - %d\n",
                acpi_name(rp->ap_resource), status));
            /* XXX is this correct?  Always switch if in doubt? */
            continue;
        } else if (rp->ap_state == ACPI_PWR_UNK)
            /* First sighting: seed the cached state from _STA. */
            rp->ap_state = cur;

        /*
         * Switch if required.  Note that we ignore the result of the switch
         * effort; we don't know what to do if it fails, so checking wouldn't
         * help much.
         */
        if (rp->ap_state != ACPI_PWR_ON) {
            status = AcpiEvaluateObject(rp->ap_resource, "_ON", NULL, NULL);
            if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
                    "failed to switch %s on - %s\n",
                    acpi_name(rp->ap_resource),
                    AcpiFormatException(status)));
            } else {
                rp->ap_state = ACPI_PWR_ON;
                ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "switched %s on\n",
                    acpi_name(rp->ap_resource)));
            }
        } else {
            ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "%s is already on\n",
                acpi_name(rp->ap_resource)));
        }
    }

    /*
     * Sweep the list backwards turning things off, i.e. power-off in
     * descending ResourceOrder — the reverse of power-on above.
     */
    TAILQ_FOREACH_REVERSE(rp, &acpi_powerresources, acpi_powerresource_list,
        ap_link) {

        if (TAILQ_FIRST(&rp->ap_references) != NULL) {
            ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
                "%s has references, not turning off\n",
                acpi_name(rp->ap_resource)));
            continue;
        }

        /* We could cache this if we trusted it not to change under us */
        status = acpi_GetInteger(rp->ap_resource, "_STA", &cur);
        if (ACPI_FAILURE(status)) {
            ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "can't get status of %s - %d\n",
                acpi_name(rp->ap_resource), status));
            /* XXX is this correct?  Always switch if in doubt? */
            continue;
        } else if (rp->ap_state == ACPI_PWR_UNK)
            rp->ap_state = cur;

        /*
         * Switch if required.  Note that we ignore the result of the switch
         * effort; we don't know what to do if it fails, so checking wouldn't
         * help much.
         */
        if (rp->ap_state != ACPI_PWR_OFF) {
            status = AcpiEvaluateObject(rp->ap_resource, "_OFF", NULL, NULL);
            if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
                    "failed to switch %s off - %s\n",
                    acpi_name(rp->ap_resource),
                    AcpiFormatException(status)));
            } else {
                rp->ap_state = ACPI_PWR_OFF;
                ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "switched %s off\n",
                    acpi_name(rp->ap_resource)));
            }
        } else {
            ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "%s is already off\n",
                acpi_name(rp->ap_resource)));
        }
    }

    return_ACPI_STATUS (AE_OK);
}
+
+/*
+ * Find a power resource's control structure.
+ */
+static struct acpi_powerresource *
+acpi_pwr_find_resource(ACPI_HANDLE res)
+{
+ struct acpi_powerresource *rp;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+ ACPI_SERIAL_ASSERT(powerres);
+
+ TAILQ_FOREACH(rp, &acpi_powerresources, ap_link) {
+ if (rp->ap_resource == res)
+ break;
+ }
+
+ return_PTR (rp);
+}
+
+/*
+ * Find a power consumer's control structure.
+ */
+static struct acpi_powerconsumer *
+acpi_pwr_find_consumer(ACPI_HANDLE consumer)
+{
+ struct acpi_powerconsumer *pc;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+ ACPI_SERIAL_ASSERT(powerres);
+
+ TAILQ_FOREACH(pc, &acpi_powerconsumers, ac_link) {
+ if (pc->ac_consumer == consumer)
+ break;
+ }
+
+ return_PTR (pc);
+}
diff --git a/sys/dev/acpica/acpi_quirk.c b/sys/dev/acpica/acpi_quirk.c
new file mode 100644
index 0000000..fe0a06f
--- /dev/null
+++ b/sys/dev/acpica/acpi_quirk.c
@@ -0,0 +1,185 @@
+/*-
+ * Copyright (c) 2004 Nate Lawson (SDG)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+
+#include <dev/acpica/acpivar.h>
+
+/* Comparison operators usable in revision-number quirk rules. */
+enum ops_t {
+    OP_NONE,     /* no operator: rule always matches */
+    OP_LEQ,      /* match if revision <= value */
+    OP_GEQ,      /* match if revision >= value */
+    OP_EQL,      /* match if revision == value */
+};
+
+/* Which header field a quirk rule inspects. */
+enum val_t {
+    OEM,         /* OemId / OemTableId strings */
+    OEM_REV,     /* OemRevision number */
+    CREATOR,     /* AslCompilerId string */
+    CREATOR_REV, /* AslCompilerRevision number */
+};
+
+/*
+ * One quirk rule: match a field of the table named by 'sig' against
+ * either a string pair (x.id / y.tid) or an operator and revision
+ * number (x.op / y.rev), depending on 'val'.
+ */
+struct acpi_q_rule {
+    char sig[ACPI_NAME_SIZE]; /* Table signature to match */
+    enum val_t val;
+    union {
+    char *id;
+    enum ops_t op;
+    } x;
+    union {
+    char *tid;
+    int rev;
+    } y;
+};
+
+/*
+ * One quirk-table entry: a list of rules (terminated by an empty 'sig')
+ * that must all match, and the quirk flags applied when they do.
+ */
+struct acpi_q_entry {
+    const struct acpi_q_rule *match;
+    int quirks;
+};
+
+#include "acpi_quirks.h"
+
+static int aq_revcmp(int revision, enum ops_t op, int value);
+static int aq_strcmp(char *actual, char *possible);
+static int aq_match_header(ACPI_TABLE_HEADER *hdr,
+ const struct acpi_q_rule *match);
+
+/*
+ * Compare a table revision against a quirk-rule value using the
+ * operator stored in the rule.  OP_NONE matches unconditionally.
+ */
+static int
+aq_revcmp(int revision, enum ops_t op, int value)
+{
+    switch (op) {
+    case OP_NONE:
+        return (TRUE);
+    case OP_LEQ:
+        return (revision <= value ? TRUE : FALSE);
+    case OP_GEQ:
+        return (revision >= value ? TRUE : FALSE);
+    case OP_EQL:
+        return (revision == value ? TRUE : FALSE);
+    default:
+        panic("aq_revcmp: invalid op %d", op);
+    }
+}
+
+/*
+ * Prefix-compare a header string against a quirk-rule string.  A NULL
+ * on either side is treated as a wildcard and matches anything.
+ */
+static int
+aq_strcmp(char *actual, char *possible)
+{
+    if (actual == NULL || possible == NULL)
+        return (TRUE);
+    if (strncmp(actual, possible, strlen(possible)) == 0)
+        return (TRUE);
+    return (FALSE);
+}
+
+/*
+ * Test one quirk rule against a table header.  String rules (OEM,
+ * CREATOR) use prefix comparison via aq_strcmp(); revision rules
+ * (OEM_REV, CREATOR_REV) use the operator via aq_revcmp().
+ */
+static int
+aq_match_header(ACPI_TABLE_HEADER *hdr, const struct acpi_q_rule *match)
+{
+    switch (match->val) {
+    case OEM:
+        return (aq_strcmp(hdr->OemId, match->x.id) &&
+            aq_strcmp(hdr->OemTableId, match->y.tid) ? TRUE : FALSE);
+    case CREATOR:
+        return (aq_strcmp(hdr->AslCompilerId, match->x.id) ? TRUE : FALSE);
+    case OEM_REV:
+        return (aq_revcmp(hdr->OemRevision, match->x.op, match->y.rev) ?
+            TRUE : FALSE);
+    case CREATOR_REV:
+        return (aq_revcmp(hdr->AslCompilerRevision, match->x.op,
+            match->y.rev) ? TRUE : FALSE);
+    }
+
+    return (FALSE);
+}
+
+/*
+ * Compute the quirk flags for this system's ACPI tables.
+ *
+ * The machdep layer seeds *quirks first; then each entry in the
+ * generated acpi_quirks_table is checked, and the first entry whose
+ * rules all match the FADT/DSDT/XSDT headers overwrites *quirks.
+ * Always returns 0.
+ */
+int
+acpi_table_quirks(int *quirks)
+{
+    const struct acpi_q_entry *entry;
+    const struct acpi_q_rule *match;
+    ACPI_TABLE_HEADER fadt, dsdt, xsdt, *hdr;
+    int done;
+
+    /* First, allow the machdep system to set its idea of quirks. */
+    KASSERT(quirks != NULL, ("acpi quirks ptr is NULL"));
+    acpi_machdep_quirks(quirks);
+
+    /* Fetch the table headers; zero any header we cannot obtain. */
+    if (ACPI_FAILURE(AcpiGetTableHeader(ACPI_SIG_FADT, 0, &fadt)))
+        bzero(&fadt, sizeof(fadt));
+    if (ACPI_FAILURE(AcpiGetTableHeader(ACPI_SIG_DSDT, 0, &dsdt)))
+        bzero(&dsdt, sizeof(dsdt));
+    if (ACPI_FAILURE(AcpiGetTableHeader(ACPI_SIG_XSDT, 0, &xsdt)))
+        bzero(&xsdt, sizeof(xsdt));
+
+    /* Then, override the quirks with any matched from table signatures. */
+    for (entry = acpi_quirks_table; entry->match; entry++) {
+        done = TRUE;
+        for (match = entry->match; match->sig[0] != '\0'; match++) {
+            /*
+             * The quirks file names the FADT "FADT", not its on-disk
+             * signature (ACPI_SIG_FADT), so compare the literal here.
+             */
+            if (!strncmp(match->sig, "FADT", ACPI_NAME_SIZE))
+                hdr = &fadt;
+            else if (!strncmp(match->sig, ACPI_SIG_DSDT, ACPI_NAME_SIZE))
+                hdr = &dsdt;
+            else if (!strncmp(match->sig, ACPI_SIG_XSDT, ACPI_NAME_SIZE))
+                hdr = &xsdt;
+            else
+                panic("invalid quirk header\n");
+
+            /* If we don't match any, skip to the next entry. */
+            if (aq_match_header(hdr, match) == FALSE) {
+                done = FALSE;
+                break;
+            }
+        }
+
+        /* If all entries matched, update the quirks and return. */
+        if (done) {
+            *quirks = entry->quirks;
+            break;
+        }
+    }
+
+    return (0);
+}
diff --git a/sys/dev/acpica/acpi_quirks b/sys/dev/acpica/acpi_quirks
new file mode 100644
index 0000000..4b2099d
--- /dev/null
+++ b/sys/dev/acpica/acpi_quirks
@@ -0,0 +1,497 @@
+# $FreeBSD$
+#
+# Quirks for ACPI tables can be added here.
+#
+# Comments start with '#'. Any number of spaces/tabs are ok within a line.
+# Be sure to include a reference to a PR when adding new quirks. Quirks
+# that do not contain a PR were discovered by reliable sources with no
+# supporting info (i.e., other OS's).
+#
+
+# ABit BP6
+name: ABit_BP6
+oem: FADT "AWARD " "AWRDACPI"
+oem_rev: FADT <= 0x30302e31
+quirks: ACPI_Q_BROKEN
+
+# AMI INT 01/18/00
+name: AMI_INT
+oem: FADT "AMIINT" ""
+oem_rev: FADT <= 10
+quirks: ACPI_Q_BROKEN
+
+# ASUS P2B-D
+name: ASUS_P2B_D
+oem: FADT "ASUS " "P2B-D "
+oem_rev: FADT <= 0x58582e32
+quirks: ACPI_Q_BROKEN
+
+# ASUS P2B-F
+name: ASUS_P2B_F
+oem: FADT "ASUS " "P2B-F "
+oem_rev: FADT <= 0x58582e31
+quirks: ACPI_Q_BROKEN
+
+# ASUS P2B-DS 02/03/99
+name: ASUS_P2B_DS
+oem: FADT "ASUS " "P2B-DS "
+oem_rev: FADT <= 0x58582e31
+creator_rev: FADT <= 0x31303030
+quirks: ACPI_Q_BROKEN
+
+# ASUS P2B-DS 10/21/99
+name: ASUS_P2B_2
+oem: FADT "ASUS " "P2B-DS "
+oem_rev: FADT <= 0x58582e32
+creator_rev: FADT <= 0x31303030
+quirks: ACPI_Q_BROKEN
+
+# ASUS P2L97-DS 02/02/99
+name: ASUS_P2L97_DS
+oem: FADT "ASUS " "P2L97-DS"
+oem_rev: FADT <= 0x58582e31
+creator_rev: FADT <= 0x31303030
+quirks: ACPI_Q_BROKEN
+
+# ASUS P5A and P5A-B 03/12/99
+# PR: i386/72450
+# Testing indicates that the ACPI timer runs twice as fast but otherwise
+# this system works normally.
+name: ASUS_P5A
+oem: FADT "ASUS " "P5A "
+oem_rev: FADT <= 0x58582e31
+quirks: ACPI_Q_TIMER
+
+# Compaq Armada 3500
+name: Compaq_Armada_3500
+oem: FADT "COMPAQ" "CPQB05E "
+oem_rev: FADT <= 1
+quirks: ACPI_Q_BROKEN
+
+# Compaq Armada 6500
+name: Compaq_Armada_6500
+oem: FADT "PTLTD " " FACP "
+oem_rev: FADT = 0
+quirks: ACPI_Q_BROKEN
+
+# Compaq M500
+name: Compaq_M500
+oem: FADT "COMPAQ" "CPQB151 "
+oem_rev: FADT <= 0x19990722
+quirks: ACPI_Q_BROKEN
+
+# Compaq Panther
+name: Compaq_Panther
+oem: FADT "COMPAQ" "PANTHER "
+oem_rev: FADT <= 1
+creator_rev: FADT = 0
+quirks: ACPI_Q_BROKEN
+
+# Compaq Presario 1692
+name: Compaq_Presario_1692
+oem: FADT "COMPAQ" "VIPER II"
+oem_rev: FADT <= 0x06040000
+creator: FADT "PTL "
+creator_rev: FADT <= 0xF4240
+quirks: ACPI_Q_BROKEN
+
+# Compaq Presario 1925
+name: Compaq_Presario_1925
+oem: FADT "PTLTD " " FACP "
+oem_rev: FADT <= 0x06040000
+quirks: ACPI_Q_BROKEN
+
+# Compaq R3000Z
+name: Compaq_R3000Z
+oem: FADT "NVIDIA" "CK8 "
+oem_rev: FADT = 0x6040000
+creator: FADT "PTL_"
+quirks: ACPI_Q_MADT_IRQ0
+
+# Compaq Thunder 05/18/99
+# XXX Note creator rev should be "=", I think
+name: Compaq_Thunder
+oem: FADT "COMPAQ" "THUNDER "
+oem_rev: FADT <= 1
+creator_rev: FADT <= 0
+quirks: ACPI_Q_BROKEN
+
+# Compaq RaceBait 12/06/99
+name: Compaq_RaceBait
+oem: FADT "COMPAQ" "RACEBAIT"
+oem_rev: FADT <= 2
+quirks: ACPI_Q_BROKEN
+
+# Dell CP??? 01/27/99
+name: Dell_CPxxx
+oem: FADT "Dell " "CP??? "
+oem_rev: FADT <= 0x27cf011b
+quirks: ACPI_Q_BROKEN
+
+# Dell CPt
+# XXX Is ">=" correct?
+name: Dell_CPt
+oem: FADT "DELL " "CPt C "
+oem_rev: FADT >= 0x27cf090e
+quirks: ACPI_Q_BROKEN
+
+# Dell CPt A02
+# XXX Is ">=" correct?
+name: Dell_CPt_A02
+oem: FADT "DELL " "CPt C "
+oem_rev: FADT >= 0x27cf090f
+quirks: ACPI_Q_BROKEN
+
+# Dell Latitude LT
+name: Dell_Latitude_LT
+oem: FADT "SHARP " " "
+quirks: ACPI_Q_BROKEN
+
+# Dell Inspiron 3500
+name: Dell_Inspiron_3500
+oem: FADT "Dell " "I 3500 "
+quirks: ACPI_Q_BROKEN
+
+# Dell PE1300
+name: Dell_PE1300
+oem: FADT "Dell " "PE1300 "
+oem_rev: FADT <= 1
+quirks: ACPI_Q_BROKEN
+
+# Dell Personal Workstation 1300
+name: Dell_PE1300_2
+oem: FADT "DELL " "PE1300 "
+oem_rev: FADT <= 1
+quirks: ACPI_Q_BROKEN
+
+# FIC PA2013
+name: FIC_PA2013
+oem: FADT "FIC " "PA2013 "
+oem_rev: FADT <= 0x30302e31
+quirks: ACPI_Q_BROKEN
+
+# FIC VB601
+name: FIC_VB601
+oem: FADT "FIC " "VB601 "
+oem_rev: FADT <= 0x30302e31
+quirks: ACPI_Q_BROKEN
+
+# Fujitsu Capricorn
+name: Fujitsu_Capricorn
+oem: FADT "FUJ " "CAPRICOR"
+quirks: ACPI_Q_BROKEN
+
+# Fujitsu M63E
+name: Fujitsu_M63E
+oem: FADT "FUJ000" "M63E "
+quirks: ACPI_Q_BROKEN
+
+# Fujitsu M65
+name: Fujitsu_M65
+oem: FADT "FUJ " "M65 "
+quirks: ACPI_Q_BROKEN
+
+# Fujitsu Sprint
+name: Fujitsu_Sprint
+oem: XSDT "FUJ " "SPRINT "
+quirks: ACPI_Q_BROKEN
+
+# Fujitsu Tandem
+name: Fujitsu_Tandem
+oem: FADT "FUJ " "TANDEM "
+quirks: ACPI_Q_BROKEN
+
+# Fujitsu Elm
+name: Fujitsu_Elm
+oem: FADT "FUJ " "M19A/XA "
+oem_rev: FADT = 1
+creator: FADT "ACER"
+creator_rev: FADT = 0
+quirks: ACPI_Q_BROKEN
+
+# Fujitsu Maple
+name: Fujitsu_Maple
+oem: FADT "FUJ " "M9D "
+oem_rev: FADT = 1
+creator: FADT "ACER"
+creator_rev: FADT = 0
+quirks: ACPI_Q_BROKEN
+
+# Fujitsu Sycamore
+name: Fujitsu_Sycamore
+oem: FADT "Fuj " "M11EJ "
+oem_rev: FADT = 1
+creator: FADT "ACER"
+creator_rev: FADT = 0
+quirks: ACPI_Q_BROKEN
+
+# Fujitsu V66NAS
+name: Fujitsu_V66NAS
+oem: FADT "FUJ " "V66NAS "
+quirks: ACPI_Q_BROKEN
+
+# Gateway E1200
+name: Gateway_E1200
+oem: FADT "INTEL " "MAUI "
+oem_rev: FADT <= 1
+quirks: ACPI_Q_BROKEN
+
+# Gateway E1400
+name: Gateway_E1400
+oem: FADT "SUMATR" "SU81010A"
+oem_rev: FADT <= 0x1999072
+quirks: ACPI_Q_BROKEN
+
+# Gateway Profile
+name: Gateway_Profile
+oem: FADT "GATEWA" "PROFILE "
+oem_rev: FADT <= 1
+quirks: ACPI_Q_BROKEN
+
+# Gateway Solo
+name: Gateway_Solo
+oem: FADT "GATEWA" "SOLO9300"
+oem_rev: FADT <= 0x06040000
+quirks: ACPI_Q_BROKEN
+
+# Gigabyte
+name: Gigabyte_Award
+oem: FADT "GBT" "AWRDACPI"
+oem_rev: FADT = 0
+quirks: ACPI_Q_BROKEN
+
+# GA-5AX (Rev 4)
+# Testing indicates that the ACPI timer runs twice as fast but otherwise
+# this system works normally.
+name: GBT_AWRDACPI
+oem: FADT "GBT " "AWRDACPI"
+oem_rev: FADT <= 0x42302e31
+quirks: ACPI_Q_TIMER
+
+# Hitachi Flora 220CX
+name: Hitachi_Flora_220CX
+oem: FADT "HTCLTD" "HTC2041 "
+oem_rev: FADT <= 1
+quirks: ACPI_Q_BROKEN
+
+# Hitachi Flora 220MP
+name: Hitachi_Flora_220MP
+oem: FADT "HTCLTD" " "
+quirks: ACPI_Q_BROKEN
+
+# Hitachi Flora 270EX
+name: Hitachi_Flora_270EX
+oem: FADT "HTCLTD" "270-EX "
+oem_rev: FADT <= 1
+quirks: ACPI_Q_BROKEN
+
+# Hitachi Flora 270VX-NH7
+name: Hitachi_Flora_270VX_NH7
+oem: FADT "HTCLTD" "HTC2051 "
+oem_rev: FADT <= 1
+quirks: ACPI_Q_BROKEN
+
+# HP Ashaki 02/24/99
+name: HP_Ashaki
+oem: FADT "HP " "ASHAKI "
+oem_rev: FADT = 0
+quirks: ACPI_Q_BROKEN
+
+# HP HPBDD
+name: HP_HPBDD
+oem: FADT "HP " "HPBDD_IO"
+oem_rev: FADT <= 0x1006
+quirks: ACPI_Q_BROKEN
+
+# HP HPCCD HX
+name: HP_HPCCD_HX
+oem: FADT "HP " "HPCCD HX"
+oem_rev: FADT <= 0x10000
+quirks: ACPI_Q_BROKEN
+
+# HP Kayak XUMP
+name: HP_Kayak_XUMP
+oem: FADT "HP " "COGNAC "
+oem_rev: FADT = 0
+quirks: ACPI_Q_BROKEN
+
+# HP LH 4
+name: HP_LH_4
+oem: FADT "HP " "LH 4 "
+oem_rev: FADT <= 1
+quirks: ACPI_Q_BROKEN
+
+# HP Puma 02/12/99
+name: HP_Puma
+oem: FADT "HP " "Puma "
+oem_rev: FADT = 0
+quirks: ACPI_Q_BROKEN
+
+# HP Scimitar
+name: HP_Scimitar
+oem: FADT "HP " "HWPC203 "
+oem_rev: FADT <= 1
+quirks: ACPI_Q_BROKEN
+
+# IBM Aptiva 145
+name: IBM_Aptiva_145
+oem: FADT "XXXXXX" "AWRDACPI"
+oem_rev: FADT <= 1
+quirks: ACPI_Q_BROKEN
+
+# IBM TP240 SandShark
+name: IBM_TP240_SandShark
+oem: FADT "IBM " "SShark-1"
+oem_rev: FADT <= 1
+quirks: ACPI_Q_BROKEN
+
+# Intel Kauai
+name: Intel_Kauai
+oem: FADT "INTEL " "KAUAI "
+quirks: ACPI_Q_BROKEN
+
+# Intel L440GX
+name: Intel_L440GX
+oem: FADT "Intel " "L440GX "
+oem_rev: FADT <= 1
+quirks: ACPI_Q_BROKEN
+
+# Intel OR840 09/14/99
+name: Intel_OR840
+oem: FADT "Intel " "OR840 "
+oem_rev: FADT <= 1
+quirks: ACPI_Q_BROKEN
+
+# Intel Seattle2
+name: Intel_Seattle2
+oem: FADT "INTEL " "SEATTLE2"
+oem_rev: FADT <= 0x19990216
+quirks: ACPI_Q_BROKEN
+
+# Iwill DBS100
+name: Iwill_DBS100
+oem: FADT "AWARD " "AWRDACPI"
+oem_rev: FADT = 0
+quirks: ACPI_Q_BROKEN
+
+# Micron Atlanta
+name: Micron_Atlanta
+oem: FADT "MICRON" "ATLANTA "
+oem_rev: FADT = 0
+quirks: ACPI_Q_BROKEN
+
+# Micron Trek 2U375
+name: Micron_Trek_2U375
+oem: FADT "MICRON" "Trek2AGP"
+oem_rev: FADT = 1
+quirks: ACPI_Q_BROKEN
+
+# NCR S20GX
+name: NCR_S20GX
+oem: FADT "Intel " "L440GX "
+oem_rev: FADT = 0
+quirks: ACPI_Q_BROKEN
+
+# NEC PowerMate 8100
+name: NEC_PowerMate_8100
+oem: FADT "INTEL " "JN440BX1"
+oem_rev: FADT >= 0x19990203
+oem_rev: FADT <= 0x19990810
+creator_rev: FADT = 0xf4240
+quirks: ACPI_Q_BROKEN
+
+# NEC VersaNote
+name: NEC_VersaNote
+oem: FADT "INTEL " "440BX "
+oem_rev: FADT = 0
+creator: FADT "PTL "
+creator_rev: FADT = 0xf4240
+quirks: ACPI_Q_BROKEN
+
+# NEC VersaNote C2
+name: NEC_VersaNote_C2
+oem: FADT "NEC " "ND000011"
+oem_rev: FADT <= 1
+quirks: ACPI_Q_BROKEN
+
+# NEC Versa LXAGP
+name: NEC_VersaNote_LXAGP
+oem: FADT "NEC " "ND000010"
+oem_rev: FADT <= 0x97
+quirks: ACPI_Q_BROKEN
+
+# NEC Z1
+name: NEC_Z1
+oem: FADT "AMIINT" ""
+oem_rev: FADT = 0
+quirks: ACPI_Q_BROKEN
+
+# QDIGRP 01/05/99
+name: QDIGRP_Award
+oem: FADT "QDIGRP" "AWRDACPI"
+oem_rev: FADT <= 0
+quirks: ACPI_Q_BROKEN
+
+# Siemens Mobile 750
+name: Siemens_Mobile_750
+oem: FADT "TSANYO" " "
+oem_rev: FADT = 0
+quirks: ACPI_Q_BROKEN
+
+# Sony F290
+name: Sony_F290
+oem: FADT "SONY " "K0 "
+oem_rev: FADT <= 0x13108b4
+quirks: ACPI_Q_BROKEN
+
+# Sony N505
+name: Sony_N505
+oem: FADT "SONY " "H0 "
+oem_rev: FADT <= 0x06040000
+quirks: ACPI_Q_BROKEN
+
+# Sony PCG-777
+# Sony PCG-888
+# Sony PCG-F16
+# Sony Z505DX
+name: Sony_PCG_xxx
+oem: FADT "SONY " " "
+quirks: ACPI_Q_BROKEN
+
+# Sony PCV-L300
+name: Sony_PCV_L300
+oem: FADT "ASUS " "SHACHI "
+oem_rev: FADT <= 1
+quirks: ACPI_Q_BROKEN
+
+# Sony PCV-S520
+# Sony PCV-S720
+name: Sony_PCV_Sx20
+oem: FADT "SONY " "440BX CR"
+oem_rev: FADT <= 1
+quirks: ACPI_Q_BROKEN
+
+# SuperMicro
+name: SuperMicro_MTB
+oem: FADT "SUPERM" "SUPERMTB"
+oem_rev: FADT <= 2
+quirks: ACPI_Q_BROKEN
+
+# Toshiba Portege 3300
+# Toshiba Satellite 4030XCDT
+name: Toshiba_750
+oem: FADT "TOSHIB" "750 "
+oem_rev: FADT <= 1
+quirks: ACPI_Q_BROKEN
+
+# Tyan
+name: Tyan_TBLE
+oem: FADT "TYANCP" "TYANTBLE"
+oem_rev: FADT <= 9
+quirks: ACPI_Q_BROKEN
+
+# VIA VP3A
+name: VIA_VP3A
+oem: FADT "VIAVP3" "AWRDACPI"
+oem_rev: FADT = 0
+quirks: ACPI_Q_BROKEN
diff --git a/sys/dev/acpica/acpi_resource.c b/sys/dev/acpica/acpi_resource.c
new file mode 100644
index 0000000..ce6732f
--- /dev/null
+++ b/sys/dev/acpica/acpi_resource.c
@@ -0,0 +1,711 @@
+/*-
+ * Copyright (c) 2000 Michael Smith
+ * Copyright (c) 2000 BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_acpi.h"
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/limits.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/rman.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+
+#include <dev/acpica/acpivar.h>
+
+/* Hooks for the ACPI CA debugging infrastructure */
+#define _COMPONENT ACPI_BUS
+ACPI_MODULE_NAME("RESOURCE")
+
+/* Walk state passed to acpi_lookup_irq_handler() while scanning _CRS. */
+struct lookup_irq_request {
+    ACPI_RESOURCE *acpi_res; /* out: copy of the matching descriptor */
+    struct resource *res;    /* in: allocated resource to sanity-check */
+    int counter;             /* IRQ descriptors seen so far in the walk */
+    int rid;                 /* in: index of the IRQ resource wanted */
+    int found;               /* out: set once the rid'th IRQ is found */
+};
+
+/*
+ * AcpiWalkResources() callback for acpi_lookup_irq_resource().  Counts
+ * single-interrupt IRQ/ExtendedIrq descriptors until the requested rid
+ * is reached, copies that descriptor out, and terminates the walk.
+ */
+static ACPI_STATUS
+acpi_lookup_irq_handler(ACPI_RESOURCE *res, void *context)
+{
+    struct lookup_irq_request *req;
+    size_t len;
+    u_int irqnum, irq;
+
+    switch (res->Type) {
+    case ACPI_RESOURCE_TYPE_IRQ:
+        irqnum = res->Data.Irq.InterruptCount;
+        irq = res->Data.Irq.Interrupts[0];
+        len = ACPI_RS_SIZE(ACPI_RESOURCE_IRQ);
+        break;
+    case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+        irqnum = res->Data.ExtendedIrq.InterruptCount;
+        irq = res->Data.ExtendedIrq.Interrupts[0];
+        len = ACPI_RS_SIZE(ACPI_RESOURCE_EXTENDED_IRQ);
+        break;
+    default:
+        /* Not an interrupt descriptor; keep walking. */
+        return (AE_OK);
+    }
+    /* Descriptors carrying more than one interrupt are not handled. */
+    if (irqnum != 1)
+        return (AE_OK);
+    req = (struct lookup_irq_request *)context;
+    if (req->counter != req->rid) {
+        req->counter++;
+        return (AE_OK);
+    }
+    req->found = 1;
+    /* The allocated resource must start at the IRQ that _CRS reports. */
+    KASSERT(irq == rman_get_start(req->res),
+        ("IRQ resources do not match"));
+    bcopy(res, req->acpi_res, len);
+    return (AE_CTRL_TERMINATE);
+}
+
+/*
+ * Walk a device's _CRS and copy out the ACPI_RESOURCE that describes
+ * the IRQ resource with the given rid.  Returns AE_NOT_FOUND when the
+ * walk completes without locating it.
+ */
+ACPI_STATUS
+acpi_lookup_irq_resource(device_t dev, int rid, struct resource *res,
+    ACPI_RESOURCE *acpi_res)
+{
+    struct lookup_irq_request req = {
+        .acpi_res = acpi_res,
+        .res = res,
+        .counter = 0,
+        .rid = rid,
+        .found = 0,
+    };
+    ACPI_STATUS status;
+
+    status = AcpiWalkResources(acpi_get_handle(dev), "_CRS",
+        acpi_lookup_irq_handler, &req);
+    if (ACPI_SUCCESS(status) && req.found == 0)
+        status = AE_NOT_FOUND;
+    return (status);
+}
+
+/*
+ * Program the interrupt (via BUS_CONFIG_INTR) with the trigger mode
+ * and polarity taken from a device's IRQ or ExtendedIrq resource
+ * descriptor.  The descriptor must carry exactly one interrupt.
+ */
+void
+acpi_config_intr(device_t dev, ACPI_RESOURCE *res)
+{
+    u_int irq;
+    int pol, trig;
+
+    switch (res->Type) {
+    case ACPI_RESOURCE_TYPE_IRQ:
+        KASSERT(res->Data.Irq.InterruptCount == 1,
+            ("%s: multiple interrupts", __func__));
+        irq = res->Data.Irq.Interrupts[0];
+        trig = res->Data.Irq.Triggering;
+        pol = res->Data.Irq.Polarity;
+        break;
+    case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+        KASSERT(res->Data.ExtendedIrq.InterruptCount == 1,
+            ("%s: multiple interrupts", __func__));
+        irq = res->Data.ExtendedIrq.Interrupts[0];
+        trig = res->Data.ExtendedIrq.Triggering;
+        pol = res->Data.ExtendedIrq.Polarity;
+        break;
+    default:
+        panic("%s: bad resource type %u", __func__, res->Type);
+    }
+    /* Translate the ACPI encodings into bus-layer trigger/polarity. */
+    BUS_CONFIG_INTR(dev, irq, (trig == ACPI_EDGE_SENSITIVE) ?
+        INTR_TRIGGER_EDGE : INTR_TRIGGER_LEVEL, (pol == ACPI_ACTIVE_HIGH) ?
+        INTR_POLARITY_HIGH : INTR_POLARITY_LOW);
+}
+
+/* Bundles the parse-set vector, device, and per-device parse context. */
+struct acpi_resource_context {
+    struct acpi_parse_resource_set *set; /* callback vector */
+    device_t dev;                        /* device whose _CRS is parsed */
+    void *context;                       /* state from set->set_init() */
+};
+
+#ifdef ACPI_DEBUG_OUTPUT
+/*
+ * Return a human-readable name for an address-range ResourceType
+ * (debug builds only).  Unknown types are formatted into a static
+ * buffer, so the returned pointer is only valid until the next call.
+ */
+static const char *
+acpi_address_range_name(UINT8 ResourceType)
+{
+    static char buf[16];
+
+    switch (ResourceType) {
+    case ACPI_MEMORY_RANGE:
+        return ("Memory");
+    case ACPI_IO_RANGE:
+        return ("IO");
+    case ACPI_BUS_NUMBER_RANGE:
+        return ("Bus Number");
+    default:
+        snprintf(buf, sizeof(buf), "type %u", ResourceType);
+        return (buf);
+    }
+}
+#endif
+
+/*
+ * AcpiWalkResources() callback: translate one _CRS resource descriptor
+ * into calls on the acpi_parse_resource_set vector carried in
+ * 'context'.  Empty, producer-only, and unsupported descriptors are
+ * skipped.  Always returns AE_OK so the walk continues.
+ */
+static ACPI_STATUS
+acpi_parse_resource(ACPI_RESOURCE *res, void *context)
+{
+    struct acpi_parse_resource_set *set;
+    struct acpi_resource_context *arc;
+    UINT64 min, max, length, gran;
+    const char *name;
+    device_t dev;
+
+    arc = context;
+    dev = arc->dev;
+    set = arc->set;
+
+    switch (res->Type) {
+    case ACPI_RESOURCE_TYPE_END_TAG:
+        ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "EndTag\n"));
+        break;
+    case ACPI_RESOURCE_TYPE_FIXED_IO:
+        /* Skip zero-length windows. */
+        if (res->Data.FixedIo.AddressLength <= 0)
+            break;
+        ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "FixedIo 0x%x/%d\n",
+            res->Data.FixedIo.Address, res->Data.FixedIo.AddressLength));
+        set->set_ioport(dev, arc->context, res->Data.FixedIo.Address,
+            res->Data.FixedIo.AddressLength);
+        break;
+    case ACPI_RESOURCE_TYPE_IO:
+        if (res->Data.Io.AddressLength <= 0)
+            break;
+        /* Minimum == Maximum means a fixed port, else a movable range. */
+        if (res->Data.Io.Minimum == res->Data.Io.Maximum) {
+            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Io 0x%x/%d\n",
+                res->Data.Io.Minimum, res->Data.Io.AddressLength));
+            set->set_ioport(dev, arc->context, res->Data.Io.Minimum,
+                res->Data.Io.AddressLength);
+        } else {
+            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Io 0x%x-0x%x/%d\n",
+                res->Data.Io.Minimum, res->Data.Io.Maximum,
+                res->Data.Io.AddressLength));
+            set->set_iorange(dev, arc->context, res->Data.Io.Minimum,
+                res->Data.Io.Maximum, res->Data.Io.AddressLength,
+                res->Data.Io.Alignment);
+        }
+        break;
+    case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
+        if (res->Data.FixedMemory32.AddressLength <= 0)
+            break;
+        ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "FixedMemory32 0x%x/%d\n",
+            res->Data.FixedMemory32.Address,
+            res->Data.FixedMemory32.AddressLength));
+        set->set_memory(dev, arc->context, res->Data.FixedMemory32.Address,
+            res->Data.FixedMemory32.AddressLength);
+        break;
+    case ACPI_RESOURCE_TYPE_MEMORY32:
+        if (res->Data.Memory32.AddressLength <= 0)
+            break;
+        if (res->Data.Memory32.Minimum == res->Data.Memory32.Maximum) {
+            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Memory32 0x%x/%d\n",
+                res->Data.Memory32.Minimum, res->Data.Memory32.AddressLength));
+            set->set_memory(dev, arc->context, res->Data.Memory32.Minimum,
+                res->Data.Memory32.AddressLength);
+        } else {
+            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Memory32 0x%x-0x%x/%d\n",
+                res->Data.Memory32.Minimum, res->Data.Memory32.Maximum,
+                res->Data.Memory32.AddressLength));
+            set->set_memoryrange(dev, arc->context, res->Data.Memory32.Minimum,
+                res->Data.Memory32.Maximum, res->Data.Memory32.AddressLength,
+                res->Data.Memory32.Alignment);
+        }
+        break;
+    case ACPI_RESOURCE_TYPE_MEMORY24:
+        if (res->Data.Memory24.AddressLength <= 0)
+            break;
+        if (res->Data.Memory24.Minimum == res->Data.Memory24.Maximum) {
+            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Memory24 0x%x/%d\n",
+                res->Data.Memory24.Minimum, res->Data.Memory24.AddressLength));
+            set->set_memory(dev, arc->context, res->Data.Memory24.Minimum,
+                res->Data.Memory24.AddressLength);
+        } else {
+            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Memory24 0x%x-0x%x/%d\n",
+                res->Data.Memory24.Minimum, res->Data.Memory24.Maximum,
+                res->Data.Memory24.AddressLength));
+            set->set_memoryrange(dev, arc->context, res->Data.Memory24.Minimum,
+                res->Data.Memory24.Maximum, res->Data.Memory24.AddressLength,
+                res->Data.Memory24.Alignment);
+        }
+        break;
+    case ACPI_RESOURCE_TYPE_IRQ:
+        /*
+         * from 1.0b 6.4.2
+         * "This structure is repeated for each separate interrupt
+         * required"
+         */
+        set->set_irq(dev, arc->context, res->Data.Irq.Interrupts,
+            res->Data.Irq.InterruptCount, res->Data.Irq.Triggering,
+            res->Data.Irq.Polarity);
+        break;
+    case ACPI_RESOURCE_TYPE_DMA:
+        /*
+         * from 1.0b 6.4.3
+         * "This structure is repeated for each separate DMA channel
+         * required"
+         */
+        set->set_drq(dev, arc->context, res->Data.Dma.Channels,
+            res->Data.Dma.ChannelCount);
+        break;
+    case ACPI_RESOURCE_TYPE_START_DEPENDENT:
+        ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "start dependent functions\n"));
+        set->set_start_dependent(dev, arc->context,
+            res->Data.StartDpf.CompatibilityPriority);
+        break;
+    case ACPI_RESOURCE_TYPE_END_DEPENDENT:
+        ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "end dependent functions\n"));
+        set->set_end_dependent(dev, arc->context);
+        break;
+    case ACPI_RESOURCE_TYPE_ADDRESS16:
+    case ACPI_RESOURCE_TYPE_ADDRESS32:
+    case ACPI_RESOURCE_TYPE_ADDRESS64:
+    case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
+        /*
+         * Pull the width-specific fields into common UINT64 locals so
+         * all four descriptor widths share one code path below.
+         */
+        switch (res->Type) {
+        case ACPI_RESOURCE_TYPE_ADDRESS16:
+            gran = res->Data.Address16.Granularity;
+            min = res->Data.Address16.Minimum;
+            max = res->Data.Address16.Maximum;
+            length = res->Data.Address16.AddressLength;
+            name = "Address16";
+            break;
+        case ACPI_RESOURCE_TYPE_ADDRESS32:
+            gran = res->Data.Address32.Granularity;
+            min = res->Data.Address32.Minimum;
+            max = res->Data.Address32.Maximum;
+            length = res->Data.Address32.AddressLength;
+            name = "Address32";
+            break;
+        case ACPI_RESOURCE_TYPE_ADDRESS64:
+            gran = res->Data.Address64.Granularity;
+            min = res->Data.Address64.Minimum;
+            max = res->Data.Address64.Maximum;
+            length = res->Data.Address64.AddressLength;
+            name = "Address64";
+            break;
+        default:
+            KASSERT(res->Type == ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64,
+                ("should never happen"));
+            gran = res->Data.ExtAddress64.Granularity;
+            min = res->Data.ExtAddress64.Minimum;
+            max = res->Data.ExtAddress64.Maximum;
+            length = res->Data.ExtAddress64.AddressLength;
+            name = "ExtAddress64";
+            break;
+        }
+        if (length <= 0)
+            break;
+        /* Only resources this device consumes are interesting. */
+        if (res->Data.Address.ProducerConsumer != ACPI_CONSUMER) {
+            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
+                "ignored %s %s producer\n", name,
+                acpi_address_range_name(res->Data.Address.ResourceType)));
+            break;
+        }
+        if (res->Data.Address.ResourceType != ACPI_MEMORY_RANGE &&
+            res->Data.Address.ResourceType != ACPI_IO_RANGE) {
+            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
+                "ignored %s for non-memory, non-I/O\n", name));
+            break;
+        }
+
+#ifdef __i386__
+        /* Resource addresses are u_long-sized here; drop anything >4G. */
+        if (min > ULONG_MAX || (res->Data.Address.MaxAddressFixed && max >
+            ULONG_MAX)) {
+            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "ignored %s above 4G\n",
+                name));
+            break;
+        }
+        if (max > ULONG_MAX)
+            max = ULONG_MAX;
+#endif
+        if (res->Data.Address.MinAddressFixed == ACPI_ADDRESS_FIXED &&
+            res->Data.Address.MaxAddressFixed == ACPI_ADDRESS_FIXED) {
+            if (res->Data.Address.ResourceType == ACPI_MEMORY_RANGE) {
+                ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "%s/Memory 0x%jx/%ju\n",
+                    name, (uintmax_t)min, (uintmax_t)length));
+                set->set_memory(dev, arc->context, min, length);
+            } else {
+                ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "%s/IO 0x%jx/%ju\n", name,
+                    (uintmax_t)min, (uintmax_t)length));
+                set->set_ioport(dev, arc->context, min, length);
+            }
+        } else {
+            /*
+             * NOTE(review): this reads Data.Address32.ResourceType while
+             * the rest of this path uses Data.Address; presumably these
+             * overlay the same common header — confirm against ACPICA
+             * and unify.
+             */
+            if (res->Data.Address32.ResourceType == ACPI_MEMORY_RANGE) {
+                ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
+                    "%s/Memory 0x%jx-0x%jx/%ju\n", name, (uintmax_t)min,
+                    (uintmax_t)max, (uintmax_t)length));
+                set->set_memoryrange(dev, arc->context, min, max, length, gran);
+            } else {
+                ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "%s/IO 0x%jx-0x%jx/%ju\n",
+                    name, (uintmax_t)min, (uintmax_t)max, (uintmax_t)length));
+                set->set_iorange(dev, arc->context, min, max, length, gran);
+            }
+        }
+        break;
+    case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+        if (res->Data.ExtendedIrq.ProducerConsumer != ACPI_CONSUMER) {
+            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "ignored ExtIRQ producer\n"));
+            break;
+        }
+        set->set_ext_irq(dev, arc->context, res->Data.ExtendedIrq.Interrupts,
+            res->Data.ExtendedIrq.InterruptCount,
+            res->Data.ExtendedIrq.Triggering, res->Data.ExtendedIrq.Polarity);
+        break;
+    case ACPI_RESOURCE_TYPE_VENDOR:
+        ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
+            "unimplemented VendorSpecific resource\n"));
+        break;
+    default:
+        break;
+    }
+    return (AE_OK);
+}
+
+/*
+ * Fetch a device's resources and associate them with the device.
+ *
+ * Note that it might be nice to also locate ACPI-specific resource items, such
+ * as GPE bits.
+ *
+ * We really need to split the resource-fetching code out from the
+ * resource-parsing code, since we may want to use the parsing
+ * code for _PRS someday.
+ */
+/*
+ * Fetch a device's _CRS resources and feed each descriptor to the
+ * given parse-set callbacks.  A missing _CRS (AE_NOT_FOUND) is not
+ * treated as an error.
+ *
+ * NOTE(review): on a walk failure the context created by set_init()
+ * is not passed to set_done() — possible leak; confirm.
+ */
+ACPI_STATUS
+acpi_parse_resources(device_t dev, ACPI_HANDLE handle,
+    struct acpi_parse_resource_set *set, void *arg)
+{
+    struct acpi_resource_context arc;
+    ACPI_STATUS status;
+
+    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+    arc.set = set;
+    arc.dev = dev;
+    set->set_init(dev, arg, &arc.context);
+    status = AcpiWalkResources(handle, "_CRS", acpi_parse_resource, &arc);
+    if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+        printf("can't fetch resources for %s - %s\n",
+            acpi_name(handle), AcpiFormatException(status));
+        return_ACPI_STATUS (status);
+    }
+    set->set_done(dev, arc.context);
+    return_ACPI_STATUS (AE_OK);
+}
+
+/*
+ * Resource-set vectors used to attach _CRS-derived resources
+ * to an ACPI device.
+ */
+static void acpi_res_set_init(device_t dev, void *arg, void **context);
+static void acpi_res_set_done(device_t dev, void *context);
+static void acpi_res_set_ioport(device_t dev, void *context,
+ uint64_t base, uint64_t length);
+static void acpi_res_set_iorange(device_t dev, void *context,
+ uint64_t low, uint64_t high,
+ uint64_t length, uint64_t align);
+static void acpi_res_set_memory(device_t dev, void *context,
+ uint64_t base, uint64_t length);
+static void acpi_res_set_memoryrange(device_t dev, void *context,
+ uint64_t low, uint64_t high,
+ uint64_t length, uint64_t align);
+static void acpi_res_set_irq(device_t dev, void *context, uint8_t *irq,
+ int count, int trig, int pol);
+static void acpi_res_set_ext_irq(device_t dev, void *context,
+ uint32_t *irq, int count, int trig, int pol);
+static void acpi_res_set_drq(device_t dev, void *context, uint8_t *drq,
+ int count);
+static void acpi_res_set_start_dependent(device_t dev, void *context,
+ int preference);
+static void acpi_res_set_end_dependent(device_t dev, void *context);
+
+/*
+ * Callback vector used to attach _CRS-derived resources to an ACPI
+ * device.  Entry order must match struct acpi_parse_resource_set.
+ */
+struct acpi_parse_resource_set acpi_res_parse_set = {
+    acpi_res_set_init,
+    acpi_res_set_done,
+    acpi_res_set_ioport,
+    acpi_res_set_iorange,
+    acpi_res_set_memory,
+    acpi_res_set_memoryrange,
+    acpi_res_set_irq,
+    acpi_res_set_ext_irq,
+    acpi_res_set_drq,
+    acpi_res_set_start_dependent,
+    acpi_res_set_end_dependent
+};
+
+/* Per-device parse state; counters assign sequential resource IDs. */
+struct acpi_res_context {
+    int ar_nio;       /* next SYS_RES_IOPORT rid */
+    int ar_nmem;      /* next SYS_RES_MEMORY rid */
+    int ar_nirq;      /* next SYS_RES_IRQ rid */
+    int ar_ndrq;      /* next SYS_RES_DRQ rid */
+    void *ar_parent;  /* opaque arg from acpi_parse_resources() */
+};
+
+/* Allocate and zero the per-device parse state for a new walk. */
+static void
+acpi_res_set_init(device_t dev, void *arg, void **context)
+{
+    struct acpi_res_context *cp;
+
+    cp = AcpiOsAllocate(sizeof(*cp));
+    if (cp == NULL)
+        return;
+    bzero(cp, sizeof(*cp));
+    cp->ar_parent = arg;
+    *context = cp;
+}
+
+/* Release the per-device parse state created by acpi_res_set_init(). */
+static void
+acpi_res_set_done(device_t dev, void *context)
+{
+    struct acpi_res_context *cp;
+
+    cp = (struct acpi_res_context *)context;
+    if (cp != NULL)
+        AcpiOsFree(cp);
+}
+
+/* Record a fixed I/O port window under the next I/O rid. */
+static void
+acpi_res_set_ioport(device_t dev, void *context, uint64_t base,
+    uint64_t length)
+{
+    struct acpi_res_context *cp;
+
+    cp = (struct acpi_res_context *)context;
+    if (cp == NULL)
+        return;
+
+    bus_set_resource(dev, SYS_RES_IOPORT, cp->ar_nio++, base, length);
+}
+
+/* Relocatable I/O ranges are not implemented; just report them. */
+static void
+acpi_res_set_iorange(device_t dev, void *context, uint64_t low,
+    uint64_t high, uint64_t length, uint64_t align)
+{
+    struct acpi_res_context *cp;
+
+    cp = (struct acpi_res_context *)context;
+    if (cp == NULL)
+        return;
+
+    device_printf(dev, "I/O range not supported\n");
+}
+
+/* Record a fixed memory window under the next memory rid. */
+static void
+acpi_res_set_memory(device_t dev, void *context, uint64_t base,
+    uint64_t length)
+{
+    struct acpi_res_context *cp;
+
+    cp = (struct acpi_res_context *)context;
+    if (cp == NULL)
+        return;
+
+    bus_set_resource(dev, SYS_RES_MEMORY, cp->ar_nmem++, base, length);
+}
+
+/* Relocatable memory ranges are not implemented; just report them. */
+static void
+acpi_res_set_memoryrange(device_t dev, void *context, uint64_t low,
+    uint64_t high, uint64_t length, uint64_t align)
+{
+    struct acpi_res_context *cp;
+
+    cp = (struct acpi_res_context *)context;
+    if (cp == NULL)
+        return;
+
+    device_printf(dev, "memory range not supported\n");
+}
+
+/*
+ * Record a single fixed IRQ under the next IRQ rid.  Resource
+ * relocation is not implemented, so multi-IRQ descriptors are dropped.
+ */
+static void
+acpi_res_set_irq(device_t dev, void *context, uint8_t *irq, int count,
+    int trig, int pol)
+{
+    struct acpi_res_context *cp;
+
+    cp = (struct acpi_res_context *)context;
+    if (cp == NULL || irq == NULL || count != 1)
+        return;
+
+    bus_set_resource(dev, SYS_RES_IRQ, cp->ar_nirq++, *irq, 1);
+}
+
+/*
+ * Record a single fixed extended IRQ under the next IRQ rid.  As with
+ * acpi_res_set_irq(), no relocation: multi-IRQ descriptors are dropped.
+ */
+static void
+acpi_res_set_ext_irq(device_t dev, void *context, uint32_t *irq, int count,
+    int trig, int pol)
+{
+    struct acpi_res_context *cp;
+
+    cp = (struct acpi_res_context *)context;
+    if (cp == NULL || irq == NULL || count != 1)
+        return;
+
+    bus_set_resource(dev, SYS_RES_IRQ, cp->ar_nirq++, *irq, 1);
+}
+
+/*
+ * Record a single DMA channel as the device's next SYS_RES_DRQ.
+ * Multi-entry lists would need resource relocation and are ignored.
+ */
+static void
+acpi_res_set_drq(device_t dev, void *context, uint8_t *drq, int count)
+{
+	struct acpi_res_context *ctx = (struct acpi_res_context *)context;
+
+	if (ctx == NULL || drq == NULL || count != 1)
+		return;
+	bus_set_resource(dev, SYS_RES_DRQ, ctx->ar_ndrq++, drq[0], 1);
+}
+
+/* Start of a dependent-function group: unsupported, warn and ignore. */
+static void
+acpi_res_set_start_dependent(device_t dev, void *context, int preference)
+{
+	struct acpi_res_context *ctx = (struct acpi_res_context *)context;
+
+	if (ctx != NULL)
+		device_printf(dev, "dependent functions not supported\n");
+}
+
+/* End of a dependent-function group: unsupported, warn and ignore. */
+static void
+acpi_res_set_end_dependent(device_t dev, void *context)
+{
+	struct acpi_res_context *ctx = (struct acpi_res_context *)context;
+
+	if (ctx != NULL)
+		device_printf(dev, "dependent functions not supported\n");
+}
+
+/*
+ * Resource-owning placeholders for IO and memory pseudo-devices.
+ *
+ * This code allocates system resources that will be used by ACPI
+ * child devices. The acpi parent manages these resources through a
+ * private rman.
+ */
+
+/*
+ * Next resource ID to use when promoting a range to the parent bus.
+ * Starts at a high value to stay clear of IDs assigned elsewhere.
+ */
+static int acpi_sysres_rid = 100;
+
+static int acpi_sysres_probe(device_t dev);
+static int acpi_sysres_attach(device_t dev);
+
+static device_method_t acpi_sysres_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, acpi_sysres_probe),
+ DEVMETHOD(device_attach, acpi_sysres_attach),
+
+ {0, 0}
+};
+
+static driver_t acpi_sysres_driver = {
+ "acpi_sysresource",
+ acpi_sysres_methods,
+ 0, /* no softc needed */
+};
+
+static devclass_t acpi_sysres_devclass;
+DRIVER_MODULE(acpi_sysresource, acpi, acpi_sysres_driver, acpi_sysres_devclass,
+ 0, 0);
+MODULE_DEPEND(acpi_sysresource, acpi, 1, 1, 1);
+
+/*
+ * Match the PNP0C01/PNP0C02 "system resource" pseudo-devices, unless
+ * the driver has been administratively disabled.  The device is marked
+ * quiet since it exists only to donate resources to the parent.
+ */
+static int
+acpi_sysres_probe(device_t dev)
+{
+	static char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL };
+
+	if (acpi_disabled("sysresource"))
+		return (ENXIO);
+	if (ACPI_ID_PROBE(device_get_parent(dev), dev, sysres_ids) == NULL)
+		return (ENXIO);
+
+	device_set_desc(dev, "System Resource");
+	device_quiet(dev);
+	return (BUS_PROBE_DEFAULT);
+}
+
+/*
+ * Attach: fold this pseudo-device's I/O port and memory ranges into
+ * the parent bus's resource list, coalescing any overlaps with ranges
+ * the parent already holds.  The device then frees its own list; the
+ * parent owns the resources from here on.
+ */
+static int
+acpi_sysres_attach(device_t dev)
+{
+ device_t bus;
+ struct resource_list_entry *bus_rle, *dev_rle;
+ struct resource_list *bus_rl, *dev_rl;
+ int done, type;
+ u_long start, end, count;
+
+ /*
+ * Loop through all current resources to see if the new one overlaps
+ * any existing ones. If so, grow the old one up and/or down
+ * accordingly. Discard any that are wholly contained in the old. If
+ * the resource is unique, add it to the parent. It will later go into
+ * the rman pool.
+ */
+ bus = device_get_parent(dev);
+ dev_rl = BUS_GET_RESOURCE_LIST(bus, dev);
+ bus_rl = BUS_GET_RESOURCE_LIST(device_get_parent(bus), bus);
+ STAILQ_FOREACH(dev_rle, dev_rl, link) {
+ /* Only I/O port and memory ranges are interesting here. */
+ if (dev_rle->type != SYS_RES_IOPORT && dev_rle->type != SYS_RES_MEMORY)
+ continue;
+
+ start = dev_rle->start;
+ end = dev_rle->end;
+ count = dev_rle->count;
+ type = dev_rle->type;
+ done = FALSE;
+
+ STAILQ_FOREACH(bus_rle, bus_rl, link) {
+ if (bus_rle->type != type)
+ continue;
+
+ /* New resource wholly contained in old, discard. */
+ if (start >= bus_rle->start && end <= bus_rle->end)
+ break;
+
+ /* New tail overlaps old head, grow existing resource downward. */
+ if (start < bus_rle->start && end >= bus_rle->start) {
+ bus_rle->count += bus_rle->start - start;
+ bus_rle->start = start;
+ done = TRUE;
+ }
+
+ /* New head overlaps old tail, grow existing resource upward. */
+ if (start <= bus_rle->end && end > bus_rle->end) {
+ bus_rle->count += end - bus_rle->end;
+ bus_rle->end = end;
+ done = TRUE;
+ }
+
+ /* If we adjusted the old resource, we're finished. */
+ if (done)
+ break;
+ }
+
+ /* If we didn't merge with anything, add this resource. */
+ /* (bus_rle is NULL only when the inner loop ran to completion.) */
+ if (bus_rle == NULL)
+ bus_set_resource(bus, type, acpi_sysres_rid++, start, count);
+ }
+
+ /* After merging/moving resources to the parent, free the list. */
+ resource_list_free(dev_rl);
+
+ return (0);
+}
diff --git a/sys/dev/acpica/acpi_smbat.c b/sys/dev/acpica/acpi_smbat.c
new file mode 100644
index 0000000..f83392f
--- /dev/null
+++ b/sys/dev/acpica/acpi_smbat.c
@@ -0,0 +1,493 @@
+/*-
+ * Copyright (c) 2005 Hans Petter Selasky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_acpi.h"
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+
+#include <dev/acpica/acpivar.h>
+#include <dev/acpica/acpiio.h>
+#include <dev/acpica/acpi_smbus.h>
+
+/* Give up after 500 ms: SMBUS_TIMEOUT polls at 10 ms intervals. */
+#define SMBUS_TIMEOUT 50
+
+/* Per-device state for one EC-attached smart battery. */
+struct acpi_smbat_softc {
+ uint8_t sb_base_addr; /* SMBus register block offset in EC space */
+ device_t ec_dev; /* embedded controller used as transport */
+
+ struct acpi_bif bif; /* cached battery info */
+ struct acpi_bst bst; /* cached battery status */
+ struct timespec bif_lastupdated; /* when bif was last refreshed */
+ struct timespec bst_lastupdated; /* when bst was last refreshed */
+};
+
+static int acpi_smbat_probe(device_t dev);
+static int acpi_smbat_attach(device_t dev);
+static int acpi_smbat_shutdown(device_t dev);
+static int acpi_smbat_info_expired(struct timespec *lastupdated);
+static void acpi_smbat_info_updated(struct timespec *lastupdated);
+static int acpi_smbat_get_bif(device_t dev, struct acpi_bif *bif);
+static int acpi_smbat_get_bst(device_t dev, struct acpi_bst *bst);
+
+/* Serializes all EC/SMBus traffic and cache updates in this driver. */
+ACPI_SERIAL_DECL(smbat, "ACPI Smart Battery");
+
+static SYSCTL_NODE(_debug_acpi, OID_AUTO, batt, CTLFLAG_RD, NULL,
+ "Battery debugging");
+
+/* On some laptops with smart batteries, enabling battery monitoring
+ * software causes keystrokes from atkbd to be lost. This has also been
+ * reported on Linux, and is apparently due to the keyboard and I2C line
+ * for the battery being routed through the same chip. Whether that's
+ * accurate or not, adding extra sleeps to the status checking code
+ * causes the problem to go away.
+ *
+ * If you experience that problem, try a value of 10ms and move up
+ * from there.
+ */
+static int batt_sleep_ms; /* 0 (default) disables the workaround */
+SYSCTL_INT(_debug_acpi_batt, OID_AUTO, batt_sleep_ms, CTLFLAG_RW, &batt_sleep_ms, 0,
+ "Sleep during battery status updates to prevent keystroke loss.");
+
+static device_method_t acpi_smbat_methods[] = {
+ /* device interface */
+ DEVMETHOD(device_probe, acpi_smbat_probe),
+ DEVMETHOD(device_attach, acpi_smbat_attach),
+ DEVMETHOD(device_shutdown, acpi_smbat_shutdown),
+
+ /* ACPI battery interface */
+ DEVMETHOD(acpi_batt_get_status, acpi_smbat_get_bst),
+ DEVMETHOD(acpi_batt_get_info, acpi_smbat_get_bif),
+
+ {0, 0}
+};
+
+static driver_t acpi_smbat_driver = {
+ "battery",
+ acpi_smbat_methods,
+ sizeof(struct acpi_smbat_softc),
+};
+
+static devclass_t acpi_smbat_devclass;
+DRIVER_MODULE(acpi_smbat, acpi, acpi_smbat_driver, acpi_smbat_devclass, 0, 0);
+MODULE_DEPEND(acpi_smbat, acpi, 1, 1, 1);
+
+/*
+ * Probe for an ACPI smart battery (ACPI0001 / ACPI0005 SMBus host
+ * controllers).  The node must also supply an _EC method, since the
+ * battery is reached through the embedded controller.
+ */
+static int
+acpi_smbat_probe(device_t dev)
+{
+	static char *smbat_ids[] = {"ACPI0001", "ACPI0005", NULL};
+
+	if (acpi_disabled("smbat"))
+		return (ENXIO);
+	if (ACPI_ID_PROBE(device_get_parent(dev), dev, smbat_ids) == NULL)
+		return (ENXIO);
+	if (ACPI_FAILURE(AcpiEvaluateObject(acpi_get_handle(dev), "_EC",
+	    NULL, NULL)))
+		return (ENXIO);
+
+	device_set_desc(dev, "ACPI Smart Battery");
+	return (0);
+}
+
+/*
+ * Attach: extract this battery's base address within EC space from the
+ * _EC method, find the EC device that will carry our SMBus traffic,
+ * and register with the common battery interface.
+ */
+static int
+acpi_smbat_attach(device_t dev)
+{
+	struct acpi_smbat_softc *sc = device_get_softc(dev);
+	uint32_t ec_word;
+
+	if (ACPI_FAILURE(acpi_GetInteger(acpi_get_handle(dev), "_EC",
+	    &ec_word))) {
+		device_printf(dev, "cannot get EC base address\n");
+		return (ENXIO);
+	}
+	/* The second-lowest byte of the _EC value is the EC offset. */
+	sc->sb_base_addr = (ec_word >> 8) & 0xff;
+
+	/* XXX Only works with one EC, but nearly all systems only have one. */
+	sc->ec_dev = devclass_get_device(devclass_find("acpi_ec"), 0);
+	if (sc->ec_dev == NULL) {
+		device_printf(dev, "cannot find EC device\n");
+		return (ENXIO);
+	}
+
+	/* Clear the timestamps so the first query refreshes the caches. */
+	timespecclear(&sc->bif_lastupdated);
+	timespecclear(&sc->bst_lastupdated);
+
+	if (acpi_battery_register(dev) != 0) {
+		device_printf(dev, "cannot register battery\n");
+		return (ENXIO);
+	}
+	return (0);
+}
+
+/* Deregister from the battery subsystem at system shutdown. */
+static int
+acpi_smbat_shutdown(device_t dev)
+{
+
+ acpi_battery_remove(dev);
+ return (0);
+}
+
+/*
+ * Return TRUE when the cached data stamped with "lastupdated" is stale
+ * and must be re-read from the battery.
+ */
+static int
+acpi_smbat_info_expired(struct timespec *lastupdated)
+{
+	struct timespec now;
+
+	ACPI_SERIAL_ASSERT(smbat);
+
+	/* A missing or never-set timestamp means the cache is empty. */
+	if (lastupdated == NULL || !timespecisset(lastupdated))
+		return (TRUE);
+
+	getnanotime(&now);
+	timespecsub(&now, lastupdated);
+	if (now.tv_sec < 0)
+		return (TRUE);
+	return (now.tv_sec > acpi_battery_get_info_expire());
+}
+
+/* Stamp "lastupdated" with the current time after a successful refresh. */
+static void
+acpi_smbat_info_updated(struct timespec *lastupdated)
+{
+
+	ACPI_SERIAL_ASSERT(smbat);
+	if (lastupdated != NULL)
+		getnanotime(lastupdated);
+}
+
+/*
+ * Read a 16-bit SMBus register from the battery through the EC.
+ *
+ * Sequence: write the slave address and command code, then start the
+ * transaction by writing the protocol register (0x09; OR 0x80 would
+ * request PEC).  Poll until the EC clears the protocol register, check
+ * the status register for errors, then fetch the two data bytes.
+ * Returns 0 on success, or an errno (EIO, ETIMEDOUT, or an EC error).
+ */
+static int
+acpi_smbus_read_2(struct acpi_smbat_softc *sc, uint8_t addr, uint8_t cmd,
+ uint16_t *ptr)
+{
+ int error, to;
+ UINT64 val;
+
+ ACPI_SERIAL_ASSERT(smbat);
+
+ /* Optional anti-keystroke-loss delay; see batt_sleep_ms above. */
+ if (batt_sleep_ms)
+ AcpiOsSleep(batt_sleep_ms);
+
+ val = addr;
+ error = ACPI_EC_WRITE(sc->ec_dev, sc->sb_base_addr + SMBUS_ADDR,
+ val, 1);
+ if (error)
+ goto out;
+
+ val = cmd;
+ error = ACPI_EC_WRITE(sc->ec_dev, sc->sb_base_addr + SMBUS_CMD,
+ val, 1);
+ if (error)
+ goto out;
+
+ /* Writing the protocol register kicks off the transaction. */
+ val = 0x09; /* | 0x80 if PEC */
+ error = ACPI_EC_WRITE(sc->ec_dev, sc->sb_base_addr + SMBUS_PRTCL,
+ val, 1);
+ if (error)
+ goto out;
+
+ if (batt_sleep_ms)
+ AcpiOsSleep(batt_sleep_ms);
+
+ /* Poll for completion: the EC zeroes PRTCL when it is done. */
+ for (to = SMBUS_TIMEOUT; to != 0; to--) {
+ error = ACPI_EC_READ(sc->ec_dev, sc->sb_base_addr + SMBUS_PRTCL,
+ &val, 1);
+ if (error)
+ goto out;
+ if (val == 0)
+ break;
+ AcpiOsSleep(10);
+ }
+ if (to == 0) {
+ error = ETIMEDOUT;
+ goto out;
+ }
+
+ error = ACPI_EC_READ(sc->ec_dev, sc->sb_base_addr + SMBUS_STS, &val, 1);
+ if (error)
+ goto out;
+ if (val & SMBUS_STS_MASK) {
+ printf("%s: AE_ERROR 0x%x\n",
+ __FUNCTION__, (int)(val & SMBUS_STS_MASK));
+ error = EIO;
+ goto out;
+ }
+
+ error = ACPI_EC_READ(sc->ec_dev, sc->sb_base_addr + SMBUS_DATA,
+ &val, 2);
+ if (error)
+ goto out;
+
+ *ptr = val;
+
+out:
+ return (error);
+}
+
+/*
+ * Read an SMBus block (up to "len" bytes) from the battery via the EC.
+ *
+ * Same EC handshake as acpi_smbus_read_2(), but using block-read
+ * protocol 0x0B.  The buffer is zeroed first, then filled (back to
+ * front) with min(len, reported block length) bytes.  Returns 0 on
+ * success or an errno on failure.
+ */
+static int
+acpi_smbus_read_multi_1(struct acpi_smbat_softc *sc, uint8_t addr, uint8_t cmd,
+ uint8_t *ptr, uint16_t len)
+{
+ UINT64 val;
+ uint8_t to;
+ int error;
+
+ ACPI_SERIAL_ASSERT(smbat);
+
+ /* Optional anti-keystroke-loss delay; see batt_sleep_ms above. */
+ if (batt_sleep_ms)
+ AcpiOsSleep(batt_sleep_ms);
+
+ val = addr;
+ error = ACPI_EC_WRITE(sc->ec_dev, sc->sb_base_addr + SMBUS_ADDR,
+ val, 1);
+ if (error)
+ goto out;
+
+ val = cmd;
+ error = ACPI_EC_WRITE(sc->ec_dev, sc->sb_base_addr + SMBUS_CMD,
+ val, 1);
+ if (error)
+ goto out;
+
+ /* Writing the protocol register kicks off the transaction. */
+ val = 0x0B /* | 0x80 if PEC */ ;
+ error = ACPI_EC_WRITE(sc->ec_dev, sc->sb_base_addr + SMBUS_PRTCL,
+ val, 1);
+ if (error)
+ goto out;
+
+ if (batt_sleep_ms)
+ AcpiOsSleep(batt_sleep_ms);
+
+ /* Poll for completion: the EC zeroes PRTCL when it is done. */
+ for (to = SMBUS_TIMEOUT; to != 0; to--) {
+ error = ACPI_EC_READ(sc->ec_dev, sc->sb_base_addr + SMBUS_PRTCL,
+ &val, 1);
+ if (error)
+ goto out;
+ if (val == 0)
+ break;
+ AcpiOsSleep(10);
+ }
+ if (to == 0) {
+ error = ETIMEDOUT;
+ goto out;
+ }
+
+ error = ACPI_EC_READ(sc->ec_dev, sc->sb_base_addr + SMBUS_STS, &val, 1);
+ if (error)
+ goto out;
+ if (val & SMBUS_STS_MASK) {
+ printf("%s: AE_ERROR 0x%x\n",
+ __FUNCTION__, (int)(val & SMBUS_STS_MASK));
+ error = EIO;
+ goto out;
+ }
+
+ /* get length */
+ /* NOTE(review): the +1 maps the 5-bit count 0..31 onto 1..32 bytes. */
+ error = ACPI_EC_READ(sc->ec_dev, sc->sb_base_addr + SMBUS_BCNT,
+ &val, 1);
+ if (error)
+ goto out;
+ val = (val & 0x1f) + 1;
+
+ /* Zero the caller's buffer, then cap len at the block length. */
+ bzero(ptr, len);
+ if (len > val)
+ len = val;
+
+ if (batt_sleep_ms)
+ AcpiOsSleep(batt_sleep_ms);
+
+ /* Copy data bytes out one at a time, last byte first. */
+ while (len--) {
+ error = ACPI_EC_READ(sc->ec_dev, sc->sb_base_addr + SMBUS_DATA
+ + len, &val, 1);
+ if (error)
+ goto out;
+
+ ptr[len] = val;
+ if (batt_sleep_ms)
+ AcpiOsSleep(batt_sleep_ms);
+ }
+
+out:
+ return (error);
+}
+
+/*
+ * Retrieve battery status (the _BST equivalent) from the smart battery.
+ *
+ * Results are cached in sc->bst and re-read only once the previous
+ * snapshot has expired.  Returns 0 on success, copying the cached data
+ * into *bst, or ENXIO if any SMBus transaction failed.
+ *
+ * Fixes vs. the previous revision:
+ * - "val" is now uint16_t, matching acpi_smbus_read_2()'s out pointer
+ *   (passing an int16_t * was an incompatible-pointer-type bug); signed
+ *   interpretation of the current reading is done with explicit casts.
+ * - When charging, clear ACPI_BATT_STAT_DISCHARG from bst.state rather
+ *   than SMBATT_BS_DISCHARGING (an SMBus status bit that does not
+ *   belong in the ACPI state word).
+ * - Dropped the unused "cap_units" local.
+ */
+static int
+acpi_smbat_get_bst(device_t dev, struct acpi_bst *bst)
+{
+	struct acpi_smbat_softc *sc;
+	int error;
+	uint32_t factor;
+	uint16_t val;
+	uint8_t addr;
+
+	ACPI_SERIAL_BEGIN(smbat);
+
+	addr = SMBATT_ADDRESS;
+	error = ENXIO;
+	sc = device_get_softc(dev);
+
+	/* Serve the cached snapshot while it is still fresh. */
+	if (!acpi_smbat_info_expired(&sc->bst_lastupdated)) {
+		error = 0;
+		goto out;
+	}
+
+	/* CAPACITY_MODE set means 10 mWh units, otherwise mAh. */
+	if (acpi_smbus_read_2(sc, addr, SMBATT_CMD_BATTERY_MODE, &val))
+		goto out;
+	if (val & SMBATT_BM_CAPACITY_MODE)
+		factor = 10;
+	else
+		factor = 1;
+
+	/* get battery status */
+	if (acpi_smbus_read_2(sc, addr, SMBATT_CMD_BATTERY_STATUS, &val))
+		goto out;
+
+	sc->bst.state = 0;
+	if (val & SMBATT_BS_DISCHARGING)
+		sc->bst.state |= ACPI_BATT_STAT_DISCHARG;
+
+	if (val & SMBATT_BS_REMAINING_CAPACITY_ALARM)
+		sc->bst.state |= ACPI_BATT_STAT_CRITICAL;
+
+	/*
+	 * The current register is a signed 16-bit value: negative means
+	 * discharging, positive means charging.
+	 */
+	if (acpi_smbus_read_2(sc, addr, SMBATT_CMD_CURRENT, &val))
+		goto out;
+
+	if ((int16_t)val > 0) {
+		sc->bst.rate = val * factor;
+		sc->bst.state &= ~ACPI_BATT_STAT_DISCHARG;
+		sc->bst.state |= ACPI_BATT_STAT_CHARGING;
+	} else if ((int16_t)val < 0)
+		sc->bst.rate = (-(int16_t)val) * factor;
+	else
+		sc->bst.rate = 0;
+
+	if (acpi_smbus_read_2(sc, addr, SMBATT_CMD_REMAINING_CAPACITY, &val))
+		goto out;
+	sc->bst.cap = val * factor;
+
+	if (acpi_smbus_read_2(sc, addr, SMBATT_CMD_VOLTAGE, &val))
+		goto out;
+	sc->bst.volt = val;
+
+	acpi_smbat_info_updated(&sc->bst_lastupdated);
+	error = 0;
+
+out:
+	if (error == 0)
+		memcpy(bst, &sc->bst, sizeof(sc->bst));
+	ACPI_SERIAL_END(smbat);
+	return (error);
+}
+
+/*
+ * Retrieve battery information (the _BIF equivalent) from the smart
+ * battery and cache it in sc->bif.  Data is refreshed only once the
+ * previous snapshot has expired.  Returns 0 on success, copying the
+ * cached data into *bif, or ENXIO if any SMBus transaction failed.
+ */
+static int
+acpi_smbat_get_bif(device_t dev, struct acpi_bif *bif)
+{
+ struct acpi_smbat_softc *sc;
+ int error;
+ uint32_t factor;
+ uint16_t val;
+ uint8_t addr;
+
+ ACPI_SERIAL_BEGIN(smbat);
+
+ addr = SMBATT_ADDRESS;
+ error = ENXIO;
+ sc = device_get_softc(dev);
+
+ /* Serve the cached snapshot while it is still fresh. */
+ if (!acpi_smbat_info_expired(&sc->bif_lastupdated)) {
+ error = 0;
+ goto out;
+ }
+
+ /* CAPACITY_MODE set means 10 mWh units, otherwise mAh. */
+ if (acpi_smbus_read_2(sc, addr, SMBATT_CMD_BATTERY_MODE, &val))
+ goto out;
+ if (val & SMBATT_BM_CAPACITY_MODE) {
+ factor = 10;
+ sc->bif.units = ACPI_BIF_UNITS_MW;
+ } else {
+ factor = 1;
+ sc->bif.units = ACPI_BIF_UNITS_MA;
+ }
+
+ if (acpi_smbus_read_2(sc, addr, SMBATT_CMD_DESIGN_CAPACITY, &val))
+ goto out;
+ sc->bif.dcap = val * factor;
+
+ if (acpi_smbus_read_2(sc, addr, SMBATT_CMD_FULL_CHARGE_CAPACITY, &val))
+ goto out;
+ sc->bif.lfcap = val * factor;
+ sc->bif.btech = 1; /* secondary (rechargeable) */
+
+ if (acpi_smbus_read_2(sc, addr, SMBATT_CMD_DESIGN_VOLTAGE, &val))
+ goto out;
+ sc->bif.dvol = val;
+
+ /* Warning/low levels are not reported; use 10% of design capacity. */
+ sc->bif.wcap = sc->bif.dcap / 10;
+ sc->bif.lcap = sc->bif.dcap / 10;
+
+ sc->bif.gra1 = factor; /* not supported */
+ sc->bif.gra2 = factor; /* not supported */
+
+ if (acpi_smbus_read_multi_1(sc, addr, SMBATT_CMD_DEVICE_NAME,
+ sc->bif.model, sizeof(sc->bif.model)))
+ goto out;
+
+ if (acpi_smbus_read_2(sc, addr, SMBATT_CMD_SERIAL_NUMBER, &val))
+ goto out;
+ snprintf(sc->bif.serial, sizeof(sc->bif.serial), "0x%04x", val);
+
+ if (acpi_smbus_read_multi_1(sc, addr, SMBATT_CMD_DEVICE_CHEMISTRY,
+ sc->bif.type, sizeof(sc->bif.type)))
+ goto out;
+
+ if (acpi_smbus_read_multi_1(sc, addr, SMBATT_CMD_MANUFACTURER_DATA,
+ sc->bif.oeminfo, sizeof(sc->bif.oeminfo)))
+ goto out;
+
+ /* XXX check if device was replugged during read? */
+
+ acpi_smbat_info_updated(&sc->bif_lastupdated);
+ error = 0;
+
+out:
+ if (error == 0)
+ memcpy(bif, &sc->bif, sizeof(sc->bif));
+ ACPI_SERIAL_END(smbat);
+ return (error);
+}
diff --git a/sys/dev/acpica/acpi_smbus.h b/sys/dev/acpica/acpi_smbus.h
new file mode 100644
index 0000000..dd0d9b2
--- /dev/null
+++ b/sys/dev/acpica/acpi_smbus.h
@@ -0,0 +1,285 @@
+/*-
+ * Copyright (c) 2005 Hans Petter Selasky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ACPI_SMBUS_H_
+#define _ACPI_SMBUS_H_
+
+/*
+ * System Management Bus register offsets
+ */
+#define SMBUS_PRTCL 0
+#define SMBUS_STS 1
+#define SMBUS_STS_MASK 0x1f
+#define SMBUS_ADDR 2
+#define SMBUS_CMD 3
+#define SMBUS_DATA 4 /* 32 bytes */
+#define SMBUS_BCNT 36
+#define SMBUS_ALRM_ADDR 37
+#define SMBUS_ALRM_DATA 38 /* 2 bytes */
+
+/*
+ * Smart-Battery commands and definitions
+ */
+
+/* Base address */
+#define SMBATT_ADDRESS 0x16
+
+
+/* access: READ WRITE WORD */
+#define SMBATT_CMD_MANUFACTURER_ACCESS 0
+
+/*
+ * access: READ WRITE WORD
+ * unit : mAh (CAPACITY_MODE=0) or 10 mWh (CAPACITY_MODE=1)
+ * range : 0 .. 65535 inclusively
+ */
+#define SMBATT_CMD_REMAINING_CAPACITY_ALARM 0x1
+
+/*
+ * access: READ WRITE WORD
+ * unit : minutes
+ * range : 0 .. 65535 inclusively
+ */
+#define SMBATT_CMD_REMAINING_TIME_ALARM 0x2
+
+/* access: READ WRITE WORD */
+#define SMBATT_CMD_BATTERY_MODE 0x3
+
+#define SMBATT_BM_INTERNAL_CHARGE_CONTROLLER (1 << 0) /* READ */
+#define SMBATT_BM_PRIMARY_BATTERY_SUPPORT (1 << 1) /* READ */
+#define SMBATT_BM_CONDITION_FLAG (1 << 7) /* READ */
+#define SMBATT_BM_CHARGE_CONTROLLER_ENABLED (1 << 8) /* READ WRITE */
+#define SMBATT_BM_PRIMARY_BATTERY (1 << 9) /* READ WRITE */
+#define SMBATT_BM_ALARM_MODE (1 << 13) /* READ WRITE */
+#define SMBATT_BM_CHARGER_MODE (1 << 14) /* READ WRITE */
+#define SMBATT_BM_CAPACITY_MODE (1 << 15) /* READ WRITE */
+
+/*
+ * access: READ WRITE WORD
+ * unit : mAh (CAPACITY_MODE=0) or 10 mWh (CAPACITY_MODE=1)
+ * range : signed WORD
+ */
+#define SMBATT_CMD_AT_RATE 0x4
+
+/*
+ * access: READ WORD
+ * unit : minutes
+ * range : 0 .. 65534, 65535 has special meaning
+ */
+#define SMBATT_CMD_AT_RATE_TIME_TO_FULL 0x5
+
+/*
+ * access: READ WORD
+ * unit : minutes
+ * range : 0 .. 65534, 65535 has special meaning
+ */
+#define SMBATT_CMD_AT_RATE_TIME_TO_EMPTY 0x6
+
+/* access: READ WORD */
+#define SMBATT_CMD_AT_RATE_OK 0x7
+
+/*
+ * access: READ WORD
+ * unit : 0.1 degrees Kelvin
+ * range : 0 .. 6553.5 Kelvin
+ */
+#define SMBATT_CMD_TEMPERATURE 0x8
+
+/*
+ * access: READ WORD
+ * unit : mV
+ * range : 0 .. 65535 inclusively
+ */
+#define SMBATT_CMD_VOLTAGE 0x9
+
+/*
+ * access: READ WORD
+ * unit : mA
+ * range : signed WORD
+ */
+#define SMBATT_CMD_CURRENT 0xa
+
+/*
+ * access: READ WORD
+ * unit : mA
+ * range : signed WORD
+ */
+#define SMBATT_CMD_AVERAGE_CURRENT 0xb
+
+/*
+ * access: READ WORD
+ * unit : percent
+ * range : 0..100 inclusively
+ */
+#define SMBATT_CMD_MAX_ERROR 0xc
+
+/*
+ * access: READ WORD
+ * unit : percent
+ * range : 0..100 inclusively
+ */
+#define SMBATT_CMD_RELATIVE_STATE_OF_CHARGE 0xd
+
+/*
+ * access: READ WORD
+ * unit : percent
+ * range : 0..100 inclusively
+ */
+#define SMBATT_CMD_ABSOLUTE_STATE_OF_CHARGE 0xe
+
+/*
+ * access: READ WORD
+ * unit : mAh (CAPACITY_MODE=0) or 10 mWh (CAPACITY_MODE=1)
+ * range : 0..65535 inclusively
+ */
+#define SMBATT_CMD_REMAINING_CAPACITY 0xf
+
+/*
+ * access: READ WORD
+ * unit : mAh (CAPACITY_MODE=0) or 10 mWh (CAPACITY_MODE=1)
+ * range : 0..65535 inclusively
+ */
+#define SMBATT_CMD_FULL_CHARGE_CAPACITY 0x10
+
+/*
+ * access: READ WORD
+ * unit : minutes
+ * range : 0..65534, 65535 is reserved
+ */
+#define SMBATT_CMD_RUN_TIME_TO_EMPTY 0x11
+
+/*
+ * access: READ WORD
+ * unit : minutes
+ * range : 0..65534, 65535 is reserved
+ */
+#define SMBATT_CMD_AVERAGE_TIME_TO_EMPTY 0x12
+
+/*
+ * access: READ WORD
+ * unit : minutes
+ * range : 0..65534, 65535 is reserved
+ */
+#define SMBATT_CMD_AVERAGE_TIME_TO_FULL 0x13
+
+/*
+ * access: READ WORD
+ * unit : mA
+ */
+#define SMBATT_CMD_CHARGING_CURRENT 0x14
+
+/*
+ * access: READ WORD
+ * unit : mV
+ * range : 0 .. 65534, 65535 reserved
+ */
+#define SMBATT_CMD_CHARGING_VOLTAGE 0x15
+
+/* access: READ WORD */
+#define SMBATT_CMD_BATTERY_STATUS 0x16
+
+/* alarm bits */
+#define SMBATT_BS_OVER_CHARGED_ALARM (1 << 15)
+#define SMBATT_BS_TERMINATE_CHARGE_ALARM (1 << 14)
+#define SMBATT_BS_RESERVED_2 (1 << 13)
+#define SMBATT_BS_OVER_TEMP_ALARM (1 << 12)
+#define SMBATT_BS_TERMINATE_DISCHARGE_ALARM (1 << 11)
+#define SMBATT_BS_RESERVED_1 (1 << 10)
+#define SMBATT_BS_REMAINING_CAPACITY_ALARM (1 << 9)
+#define SMBATT_BS_REMAINING_TIME_ALARM (1 << 8)
+
+/* status bits */
+#define SMBATT_BS_INITIALIZED (1 << 7)
+#define SMBATT_BS_DISCHARGING (1 << 6)
+#define SMBATT_BS_FULLY_CHARGED (1 << 5)
+#define SMBATT_BS_FULLY_DISCHARGED (1 << 4)
+
+/* error bits */
+#define SMBATT_BS_GET_ERROR(x) ((x) & 0xf)
+#define SMBATT_BS_ERROR_OK 0
+#define SMBATT_BS_ERROR_BUSY 1
+#define SMBATT_BS_ERROR_RESERVED_COMMAND 2
+#define SMBATT_BS_ERROR_UNSUPPORTED_COMMAND 3
+#define SMBATT_BS_ERROR_ACCESS_DENIED 4
+#define SMBATT_BS_ERROR_OVER_UNDER_FLOW 5
+#define SMBATT_BS_ERROR_BADSIZE 6
+#define SMBATT_BS_ERROR_UNKNOWN 7
+
+/*
+ * access: READ WORD
+ * unit : cycle(s)
+ * range : 0 .. 65534, 65535 reserved
+ */
+#define SMBATT_CMD_CYCLE_COUNT 0x17
+
+/*
+ * access: READ WORD
+ * unit : mAh (CAPACITY_MODE=0) or 10 mWh (CAPACITY_MODE=1)
+ * range : 0..65535 inclusively
+ */
+#define SMBATT_CMD_DESIGN_CAPACITY 0x18
+
+/*
+ * access: READ WORD
+ * unit : mV
+ * range : 0..65535 mV
+ */
+#define SMBATT_CMD_DESIGN_VOLTAGE 0x19
+
+/* access: READ WORD */
+#define SMBATT_CMD_SPECIFICATION_INFO 0x1a
+
+#define SMBATT_SI_GET_REVISION(x) (((x) >> 0) & 0xf)
+#define SMBATT_SI_GET_VERSION(x) (((x) >> 4) & 0xf)
+#define SMBATT_SI_GET_VSCALE(x) (((x) >> 8) & 0xf)
+#define SMBATT_SI_GET_IPSCALE(x) (((x) >> 12) & 0xf)
+
+/* access: READ WORD */
+#define SMBATT_CMD_MANUFACTURE_DATE 0x1b
+
+#define SMBATT_MD_GET_DAY(x) (((x) >> 0) & 0x1f)
+#define SMBATT_MD_GET_MONTH(x) (((x) >> 5) & 0xf)
+#define SMBATT_MD_GET_YEAR(x) ((((x) >> 9) & 0x7f) + 1980)
+
+/* access: READ WORD */
+#define SMBATT_CMD_SERIAL_NUMBER 0x1c
+
+/* access: READ BLOCK */
+#define SMBATT_CMD_MANUFACTURER_NAME 0x20
+
+/* access: READ BLOCK */
+#define SMBATT_CMD_DEVICE_NAME 0x21
+
+/* access: READ BLOCK */
+#define SMBATT_CMD_DEVICE_CHEMISTRY 0x22
+
+/* access: READ BLOCK */
+#define SMBATT_CMD_MANUFACTURER_DATA 0x23
+
+#endif /* !_ACPI_SMBUS_H_ */
diff --git a/sys/dev/acpica/acpi_thermal.c b/sys/dev/acpica/acpi_thermal.c
new file mode 100644
index 0000000..32e5c2d
--- /dev/null
+++ b/sys/dev/acpica/acpi_thermal.c
@@ -0,0 +1,1195 @@
+/*-
+ * Copyright (c) 2000, 2001 Michael Smith
+ * Copyright (c) 2000 BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_acpi.h"
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/cpu.h>
+#include <sys/kthread.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/proc.h>
+#include <sys/reboot.h>
+#include <sys/sysctl.h>
+#include <sys/unistd.h>
+#include <sys/power.h>
+
+#include "cpufreq_if.h"
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+
+#include <dev/acpica/acpivar.h>
+
+/* Hooks for the ACPI CA debugging infrastructure */
+#define _COMPONENT ACPI_THERMAL
+ACPI_MODULE_NAME("THERMAL")
+
+#define TZ_ZEROC 2732
+#define TZ_KELVTOC(x) (((x) - TZ_ZEROC) / 10), abs(((x) - TZ_ZEROC) % 10)
+
+#define TZ_NOTIFY_TEMPERATURE 0x80 /* Temperature changed. */
+#define TZ_NOTIFY_LEVELS 0x81 /* Cooling levels changed. */
+#define TZ_NOTIFY_DEVICES 0x82 /* Device lists changed. */
+#define TZ_NOTIFY_CRITICAL 0xcc /* Fake notify that _CRT/_HOT reached. */
+
+/* Check for temperature changes every 10 seconds by default */
+#define TZ_POLLRATE 10
+
+/* Make sure the reported temperature is valid for this number of polls. */
+#define TZ_VALIDCHECKS 3
+
+/* Notify the user we will be shutting down in one more poll cycle. */
+#define TZ_NOTIFYCOUNT (TZ_VALIDCHECKS - 1)
+
+/* ACPI spec defines this */
+#define TZ_NUMLEVELS 10
+/*
+ * Cached thermal zone parameters; field names mirror the ACPI objects
+ * they presumably come from (_ACx, _ALx, _CRT, _HOT, _PSL, _PSV,
+ * _TC1, _TC2, _TSP, _TZP) -- confirm against acpi_tz_establish().
+ */
+struct acpi_tz_zone {
+ int ac[TZ_NUMLEVELS]; /* active cooling trip points */
+ ACPI_BUFFER al[TZ_NUMLEVELS]; /* active cooling device lists */
+ int crt; /* critical trip point */
+ int hot; /* hot (suspend) trip point */
+ ACPI_BUFFER psl; /* passive cooling device list */
+ int psv; /* passive cooling trip point */
+ int tc1; /* passive cooling constant 1 */
+ int tc2; /* passive cooling constant 2 */
+ int tsp; /* passive sampling period */
+ int tzp; /* zone polling period */
+};
+
+struct acpi_tz_softc {
+ device_t tz_dev;
+ ACPI_HANDLE tz_handle; /*Thermal zone handle*/
+ int tz_temperature; /*Current temperature*/
+ int tz_active; /*Current active cooling*/
+#define TZ_ACTIVE_NONE -1
+#define TZ_ACTIVE_UNKNOWN -2
+ int tz_requested; /*Minimum active cooling*/
+ int tz_thflags; /*Current temp-related flags*/
+#define TZ_THFLAG_NONE 0
+#define TZ_THFLAG_PSV (1<<0)
+#define TZ_THFLAG_HOT (1<<2)
+#define TZ_THFLAG_CRT (1<<3)
+ int tz_flags;
+#define TZ_FLAG_NO_SCP (1<<0) /*No _SCP method*/
+#define TZ_FLAG_GETPROFILE (1<<1) /*Get power_profile in timeout*/
+#define TZ_FLAG_GETSETTINGS (1<<2) /*Get devs/setpoints*/
+ struct timespec tz_cooling_started;
+ /*Current cooling starting time*/
+
+ struct sysctl_ctx_list tz_sysctl_ctx; /*Per-zone sysctl context*/
+ struct sysctl_oid *tz_sysctl_tree; /*Per-zone sysctl subtree*/
+ eventhandler_tag tz_event;
+
+ struct acpi_tz_zone tz_zone; /*Thermal zone parameters*/
+ int tz_validchecks;
+
+ /* passive cooling */
+ struct proc *tz_cooling_proc;
+ int tz_cooling_proc_running;
+ int tz_cooling_enabled;
+ int tz_cooling_active;
+ int tz_cooling_updated;
+ int tz_cooling_saved_freq;
+};
+
+#define CPUFREQ_MAX_LEVELS 64 /* XXX cpufreq should export this */
+
+static int acpi_tz_probe(device_t dev);
+static int acpi_tz_attach(device_t dev);
+static int acpi_tz_establish(struct acpi_tz_softc *sc);
+static void acpi_tz_monitor(void *Context);
+static void acpi_tz_switch_cooler_off(ACPI_OBJECT *obj, void *arg);
+static void acpi_tz_switch_cooler_on(ACPI_OBJECT *obj, void *arg);
+static void acpi_tz_getparam(struct acpi_tz_softc *sc, char *node,
+ int *data);
+static void acpi_tz_sanity(struct acpi_tz_softc *sc, int *val, char *what);
+static int acpi_tz_active_sysctl(SYSCTL_HANDLER_ARGS);
+static int acpi_tz_cooling_sysctl(SYSCTL_HANDLER_ARGS);
+static int acpi_tz_temp_sysctl(SYSCTL_HANDLER_ARGS);
+static int acpi_tz_passive_sysctl(SYSCTL_HANDLER_ARGS);
+static void acpi_tz_notify_handler(ACPI_HANDLE h, UINT32 notify,
+ void *context);
+static void acpi_tz_signal(struct acpi_tz_softc *sc, int flags);
+static void acpi_tz_timeout(struct acpi_tz_softc *sc, int flags);
+static void acpi_tz_power_profile(void *arg);
+static void acpi_tz_thread(void *arg);
+static int acpi_tz_cooling_is_available(struct acpi_tz_softc *sc);
+static int acpi_tz_cooling_thread_start(struct acpi_tz_softc *sc);
+
+static device_method_t acpi_tz_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, acpi_tz_probe),
+ DEVMETHOD(device_attach, acpi_tz_attach),
+
+ {0, 0}
+};
+
+static driver_t acpi_tz_driver = {
+ "acpi_tz",
+ acpi_tz_methods,
+ sizeof(struct acpi_tz_softc),
+};
+
+static devclass_t acpi_tz_devclass;
+DRIVER_MODULE(acpi_tz, acpi, acpi_tz_driver, acpi_tz_devclass, 0, 0);
+MODULE_DEPEND(acpi_tz, acpi, 1, 1, 1);
+
+static struct sysctl_ctx_list acpi_tz_sysctl_ctx;
+static struct sysctl_oid *acpi_tz_sysctl_tree;
+
+/* Minimum cooling run time */
+static int acpi_tz_min_runtime;
+static int acpi_tz_polling_rate = TZ_POLLRATE;
+static int acpi_tz_override;
+
+/* Timezone polling thread */
+static struct proc *acpi_tz_proc;
+ACPI_LOCK_DECL(thermal, "ACPI thermal zone");
+
+static int acpi_tz_cooling_unit = -1;
+
+/*
+ * Probe: claim any ACPI thermal-zone object unless thermal support is
+ * administratively disabled.  Returns probe priority -10 on a match.
+ */
+static int
+acpi_tz_probe(device_t dev)
+{
+
+	if (acpi_disabled("thermal"))
+		return (ENXIO);
+	if (acpi_get_type(dev) != ACPI_TYPE_THERMAL)
+		return (ENXIO);
+	device_set_desc(dev, "Thermal Zone");
+	return (-10);
+}
+
/*
 * Attach routine: initialize per-zone state, parse the zone's setpoints,
 * register for Notify events, build the sysctl tree, and start the shared
 * monitor thread (plus, for the first capable zone, a passive cooling
 * thread).  Returns 0 on success or an errno; on failure all handlers and
 * sysctl state registered here are torn down again.
 */
static int
acpi_tz_attach(device_t dev)
{
    struct acpi_tz_softc *sc;
    struct acpi_softc *acpi_sc;
    int error;
    char oidname[8];

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->tz_dev = dev;
    sc->tz_handle = acpi_get_handle(dev);
    sc->tz_requested = TZ_ACTIVE_NONE;
    sc->tz_active = TZ_ACTIVE_UNKNOWN;
    sc->tz_thflags = TZ_THFLAG_NONE;
    sc->tz_cooling_proc = NULL;
    sc->tz_cooling_proc_running = FALSE;
    sc->tz_cooling_active = FALSE;
    sc->tz_cooling_updated = FALSE;
    sc->tz_cooling_enabled = FALSE;

    /*
     * Parse the current state of the thermal zone and build control
     * structures.  We don't need to worry about interference with the
     * control thread since we haven't fully attached this device yet.
     */
    if ((error = acpi_tz_establish(sc)) != 0)
	return (error);

    /*
     * Register for any Notify events sent to this zone.
     */
    AcpiInstallNotifyHandler(sc->tz_handle, ACPI_DEVICE_NOTIFY,
			     acpi_tz_notify_handler, sc);

    /*
     * Create our sysctl nodes.  The nodes shared by all zones (min_runtime,
     * polling_rate, user_override) are created once, by unit 0, under a
     * "thermal" node hung off the parent acpi device's sysctl tree.
     *
     * XXX we need a mechanism for adding nodes under ACPI.
     */
    if (device_get_unit(dev) == 0) {
	acpi_sc = acpi_device_get_parent_softc(dev);
	sysctl_ctx_init(&acpi_tz_sysctl_ctx);
	acpi_tz_sysctl_tree = SYSCTL_ADD_NODE(&acpi_tz_sysctl_ctx,
			      SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree),
			      OID_AUTO, "thermal", CTLFLAG_RD, 0, "");
	SYSCTL_ADD_INT(&acpi_tz_sysctl_ctx,
		       SYSCTL_CHILDREN(acpi_tz_sysctl_tree),
		       OID_AUTO, "min_runtime", CTLFLAG_RW,
		       &acpi_tz_min_runtime, 0,
		       "minimum cooling run time in sec");
	SYSCTL_ADD_INT(&acpi_tz_sysctl_ctx,
		       SYSCTL_CHILDREN(acpi_tz_sysctl_tree),
		       OID_AUTO, "polling_rate", CTLFLAG_RW,
		       &acpi_tz_polling_rate, 0, "monitor polling interval in seconds");
	SYSCTL_ADD_INT(&acpi_tz_sysctl_ctx,
		       SYSCTL_CHILDREN(acpi_tz_sysctl_tree), OID_AUTO,
		       "user_override", CTLFLAG_RW, &acpi_tz_override, 0,
		       "allow override of thermal settings");
    }
    /* Per-zone nodes live under thermal.tz%d. */
    sysctl_ctx_init(&sc->tz_sysctl_ctx);
    sprintf(oidname, "tz%d", device_get_unit(dev));
    sc->tz_sysctl_tree = SYSCTL_ADD_NODE(&sc->tz_sysctl_ctx,
					 SYSCTL_CHILDREN(acpi_tz_sysctl_tree),
					 OID_AUTO, oidname, CTLFLAG_RD, 0, "");
    SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
		    OID_AUTO, "temperature", CTLTYPE_INT | CTLFLAG_RD,
		    &sc->tz_temperature, 0, sysctl_handle_int,
		    "IK", "current thermal zone temperature");
    SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
		    OID_AUTO, "active", CTLTYPE_INT | CTLFLAG_RW,
		    sc, 0, acpi_tz_active_sysctl, "I", "cooling is active");
    SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
		    OID_AUTO, "passive_cooling", CTLTYPE_INT | CTLFLAG_RW,
		    sc, 0, acpi_tz_cooling_sysctl, "I",
		    "enable passive (speed reduction) cooling");

    SYSCTL_ADD_INT(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
		   OID_AUTO, "thermal_flags", CTLFLAG_RD,
		   &sc->tz_thflags, 0, "thermal zone flags");
    /* Setpoint nodes pass the field offset in oid_arg2 (see temp_sysctl). */
    SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
		    OID_AUTO, "_PSV", CTLTYPE_INT | CTLFLAG_RW,
		    sc, offsetof(struct acpi_tz_softc, tz_zone.psv),
		    acpi_tz_temp_sysctl, "IK", "passive cooling temp setpoint");
    SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
		    OID_AUTO, "_HOT", CTLTYPE_INT | CTLFLAG_RW,
		    sc, offsetof(struct acpi_tz_softc, tz_zone.hot),
		    acpi_tz_temp_sysctl, "IK",
		    "too hot temp setpoint (suspend now)");
    SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
		    OID_AUTO, "_CRT", CTLTYPE_INT | CTLFLAG_RW,
		    sc, offsetof(struct acpi_tz_softc, tz_zone.crt),
		    acpi_tz_temp_sysctl, "IK",
		    "critical temp setpoint (shutdown now)");
    SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
		    OID_AUTO, "_ACx", CTLTYPE_INT | CTLFLAG_RD,
		    &sc->tz_zone.ac, sizeof(sc->tz_zone.ac),
		    sysctl_handle_opaque, "IK", "");
    SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
		    OID_AUTO, "_TC1", CTLTYPE_INT | CTLFLAG_RW,
		    sc, offsetof(struct acpi_tz_softc, tz_zone.tc1),
		    acpi_tz_passive_sysctl, "I",
		    "thermal constant 1 for passive cooling");
    SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
		    OID_AUTO, "_TC2", CTLTYPE_INT | CTLFLAG_RW,
		    sc, offsetof(struct acpi_tz_softc, tz_zone.tc2),
		    acpi_tz_passive_sysctl, "I",
		    "thermal constant 2 for passive cooling");
    SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
		    OID_AUTO, "_TSP", CTLTYPE_INT | CTLFLAG_RW,
		    sc, offsetof(struct acpi_tz_softc, tz_zone.tsp),
		    acpi_tz_passive_sysctl, "I",
		    "thermal sampling period for passive cooling");

    /*
     * Create thread to service all of the thermal zones.  Register
     * our power profile event handler.
     */
    sc->tz_event = EVENTHANDLER_REGISTER(power_profile_change,
	acpi_tz_power_profile, sc, 0);
    if (acpi_tz_proc == NULL) {
	error = kproc_create(acpi_tz_thread, NULL, &acpi_tz_proc,
	    RFHIGHPID, 0, "acpi_thermal");
	if (error != 0) {
	    /* NOTE(review): message lacks a trailing newline. */
	    device_printf(sc->tz_dev, "could not create thread - %d", error);
	    goto out;
	}
    }

    /*
     * Create a thread to handle passive cooling for 1st zone which
     * has _PSV, _TSP, _TC1 and _TC2.  Users can enable it for other
     * zones manually for now.
     *
     * XXX We enable only one zone to avoid multiple zones conflict
     * with each other since cpufreq currently sets all CPUs to the
     * given frequency whereas it's possible for different thermal
     * zones to specify independent settings for multiple CPUs.
     */
    if (acpi_tz_cooling_unit < 0 && acpi_tz_cooling_is_available(sc))
	sc->tz_cooling_enabled = TRUE;
    if (sc->tz_cooling_enabled) {
	error = acpi_tz_cooling_thread_start(sc);
	if (error != 0) {
	    sc->tz_cooling_enabled = FALSE;
	    goto out;
	}
	acpi_tz_cooling_unit = device_get_unit(dev);
    }

    /*
     * Flag the event handler for a manual invocation by our timeout.
     * We defer it like this so that the rest of the subsystem has time
     * to come up.  Don't bother evaluating/printing the temperature at
     * this point; on many systems it'll be bogus until the EC is running.
     */
    sc->tz_flags |= TZ_FLAG_GETPROFILE;

out:
    /* Undo the registrations above on failure; success falls through. */
    if (error != 0) {
	EVENTHANDLER_DEREGISTER(power_profile_change, sc->tz_event);
	AcpiRemoveNotifyHandler(sc->tz_handle, ACPI_DEVICE_NOTIFY,
	    acpi_tz_notify_handler);
	sysctl_ctx_free(&sc->tz_sysctl_ctx);
    }
    return_VALUE (error);
}
+
+/*
+ * Parse the current state of this thermal zone and set up to use it.
+ *
+ * Note that we may have previous state, which will have to be discarded.
+ */
+static int
+acpi_tz_establish(struct acpi_tz_softc *sc)
+{
+ ACPI_OBJECT *obj;
+ int i;
+ char nbuf[8];
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ /* Erase any existing state. */
+ for (i = 0; i < TZ_NUMLEVELS; i++)
+ if (sc->tz_zone.al[i].Pointer != NULL)
+ AcpiOsFree(sc->tz_zone.al[i].Pointer);
+ if (sc->tz_zone.psl.Pointer != NULL)
+ AcpiOsFree(sc->tz_zone.psl.Pointer);
+
+ /*
+ * XXX: We initialize only ACPI_BUFFER to avoid race condition
+ * with passive cooling thread which refers psv, tc1, tc2 and tsp.
+ */
+ bzero(sc->tz_zone.ac, sizeof(sc->tz_zone.ac));
+ bzero(sc->tz_zone.al, sizeof(sc->tz_zone.al));
+ bzero(&sc->tz_zone.psl, sizeof(sc->tz_zone.psl));
+
+ /* Evaluate thermal zone parameters. */
+ for (i = 0; i < TZ_NUMLEVELS; i++) {
+ sprintf(nbuf, "_AC%d", i);
+ acpi_tz_getparam(sc, nbuf, &sc->tz_zone.ac[i]);
+ sprintf(nbuf, "_AL%d", i);
+ sc->tz_zone.al[i].Length = ACPI_ALLOCATE_BUFFER;
+ sc->tz_zone.al[i].Pointer = NULL;
+ AcpiEvaluateObject(sc->tz_handle, nbuf, NULL, &sc->tz_zone.al[i]);
+ obj = (ACPI_OBJECT *)sc->tz_zone.al[i].Pointer;
+ if (obj != NULL) {
+ /* Should be a package containing a list of power objects */
+ if (obj->Type != ACPI_TYPE_PACKAGE) {
+ device_printf(sc->tz_dev, "%s has unknown type %d, rejecting\n",
+ nbuf, obj->Type);
+ return_VALUE (ENXIO);
+ }
+ }
+ }
+ acpi_tz_getparam(sc, "_CRT", &sc->tz_zone.crt);
+ acpi_tz_getparam(sc, "_HOT", &sc->tz_zone.hot);
+ sc->tz_zone.psl.Length = ACPI_ALLOCATE_BUFFER;
+ sc->tz_zone.psl.Pointer = NULL;
+ AcpiEvaluateObject(sc->tz_handle, "_PSL", NULL, &sc->tz_zone.psl);
+ acpi_tz_getparam(sc, "_PSV", &sc->tz_zone.psv);
+ acpi_tz_getparam(sc, "_TC1", &sc->tz_zone.tc1);
+ acpi_tz_getparam(sc, "_TC2", &sc->tz_zone.tc2);
+ acpi_tz_getparam(sc, "_TSP", &sc->tz_zone.tsp);
+ acpi_tz_getparam(sc, "_TZP", &sc->tz_zone.tzp);
+
+ /*
+ * Sanity-check the values we've been given.
+ *
+ * XXX what do we do about systems that give us the same value for
+ * more than one of these setpoints?
+ */
+ acpi_tz_sanity(sc, &sc->tz_zone.crt, "_CRT");
+ acpi_tz_sanity(sc, &sc->tz_zone.hot, "_HOT");
+ acpi_tz_sanity(sc, &sc->tz_zone.psv, "_PSV");
+ for (i = 0; i < TZ_NUMLEVELS; i++)
+ acpi_tz_sanity(sc, &sc->tz_zone.ac[i], "_ACx");
+
+ return_VALUE (0);
+}
+
+static char *aclevel_string[] = {
+ "NONE", "_AC0", "_AC1", "_AC2", "_AC3", "_AC4",
+ "_AC5", "_AC6", "_AC7", "_AC8", "_AC9"
+};
+
+static __inline const char *
+acpi_tz_aclevel_string(int active)
+{
+ if (active < -1 || active >= TZ_NUMLEVELS)
+ return (aclevel_string[0]);
+
+ return (aclevel_string[active + 1]);
+}
+
+/*
+ * Get the current temperature.
+ */
+static int
+acpi_tz_get_temperature(struct acpi_tz_softc *sc)
+{
+ int temp;
+ ACPI_STATUS status;
+ static char *tmp_name = "_TMP";
+
+ ACPI_FUNCTION_NAME ("acpi_tz_get_temperature");
+
+ /* Evaluate the thermal zone's _TMP method. */
+ status = acpi_GetInteger(sc->tz_handle, tmp_name, &temp);
+ if (ACPI_FAILURE(status)) {
+ ACPI_VPRINT(sc->tz_dev, acpi_device_get_parent_softc(sc->tz_dev),
+ "error fetching current temperature -- %s\n",
+ AcpiFormatException(status));
+ return (FALSE);
+ }
+
+ /* Check it for validity. */
+ acpi_tz_sanity(sc, &temp, tmp_name);
+ if (temp == -1)
+ return (FALSE);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_VALUES, "got %d.%dC\n", TZ_KELVTOC(temp)));
+ sc->tz_temperature = temp;
+ return (TRUE);
+}
+
+/*
+ * Evaluate the condition of a thermal zone, take appropriate actions.
+ */
+static void
+acpi_tz_monitor(void *Context)
+{
+ struct acpi_tz_softc *sc;
+ struct timespec curtime;
+ int temp;
+ int i;
+ int newactive, newflags;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ sc = (struct acpi_tz_softc *)Context;
+
+ /* Get the current temperature. */
+ if (!acpi_tz_get_temperature(sc)) {
+ /* XXX disable zone? go to max cooling? */
+ return_VOID;
+ }
+ temp = sc->tz_temperature;
+
+ /*
+ * Work out what we ought to be doing right now.
+ *
+ * Note that the _ACx levels sort from hot to cold.
+ */
+ newactive = TZ_ACTIVE_NONE;
+ for (i = TZ_NUMLEVELS - 1; i >= 0; i--) {
+ if (sc->tz_zone.ac[i] != -1 && temp >= sc->tz_zone.ac[i]) {
+ newactive = i;
+ if (sc->tz_active != newactive) {
+ ACPI_VPRINT(sc->tz_dev,
+ acpi_device_get_parent_softc(sc->tz_dev),
+ "_AC%d: temperature %d.%d >= setpoint %d.%d\n", i,
+ TZ_KELVTOC(temp), TZ_KELVTOC(sc->tz_zone.ac[i]));
+ }
+ }
+ }
+
+ /*
+ * We are going to get _ACx level down (colder side), but give a guaranteed
+ * minimum cooling run time if requested.
+ */
+ if (acpi_tz_min_runtime > 0 && sc->tz_active != TZ_ACTIVE_NONE &&
+ sc->tz_active != TZ_ACTIVE_UNKNOWN &&
+ (newactive == TZ_ACTIVE_NONE || newactive > sc->tz_active)) {
+
+ getnanotime(&curtime);
+ timespecsub(&curtime, &sc->tz_cooling_started);
+ if (curtime.tv_sec < acpi_tz_min_runtime)
+ newactive = sc->tz_active;
+ }
+
+ /* Handle user override of active mode */
+ if (sc->tz_requested != TZ_ACTIVE_NONE && (newactive == TZ_ACTIVE_NONE
+ || sc->tz_requested < newactive))
+ newactive = sc->tz_requested;
+
+ /* update temperature-related flags */
+ newflags = TZ_THFLAG_NONE;
+ if (sc->tz_zone.psv != -1 && temp >= sc->tz_zone.psv)
+ newflags |= TZ_THFLAG_PSV;
+ if (sc->tz_zone.hot != -1 && temp >= sc->tz_zone.hot)
+ newflags |= TZ_THFLAG_HOT;
+ if (sc->tz_zone.crt != -1 && temp >= sc->tz_zone.crt)
+ newflags |= TZ_THFLAG_CRT;
+
+ /* If the active cooling state has changed, we have to switch things. */
+ if (sc->tz_active == TZ_ACTIVE_UNKNOWN) {
+ /*
+ * We don't know which cooling device is on or off,
+ * so stop them all, because we now know which
+ * should be on (if any).
+ */
+ for (i = 0; i < TZ_NUMLEVELS; i++) {
+ if (sc->tz_zone.al[i].Pointer != NULL) {
+ acpi_ForeachPackageObject(
+ (ACPI_OBJECT *)sc->tz_zone.al[i].Pointer,
+ acpi_tz_switch_cooler_off, sc);
+ }
+ }
+ /* now we know that all devices are off */
+ sc->tz_active = TZ_ACTIVE_NONE;
+ }
+
+ if (newactive != sc->tz_active) {
+ /* Turn off the cooling devices that are on, if any are */
+ if (sc->tz_active != TZ_ACTIVE_NONE)
+ acpi_ForeachPackageObject(
+ (ACPI_OBJECT *)sc->tz_zone.al[sc->tz_active].Pointer,
+ acpi_tz_switch_cooler_off, sc);
+
+ /* Turn on cooling devices that are required, if any are */
+ if (newactive != TZ_ACTIVE_NONE) {
+ acpi_ForeachPackageObject(
+ (ACPI_OBJECT *)sc->tz_zone.al[newactive].Pointer,
+ acpi_tz_switch_cooler_on, sc);
+ }
+ ACPI_VPRINT(sc->tz_dev, acpi_device_get_parent_softc(sc->tz_dev),
+ "switched from %s to %s: %d.%dC\n",
+ acpi_tz_aclevel_string(sc->tz_active),
+ acpi_tz_aclevel_string(newactive), TZ_KELVTOC(temp));
+ sc->tz_active = newactive;
+ getnanotime(&sc->tz_cooling_started);
+ }
+
+ /* XXX (de)activate any passive cooling that may be required. */
+
+ /*
+ * If the temperature is at _HOT or _CRT, increment our event count.
+ * If it has occurred enough times, shutdown the system. This is
+ * needed because some systems will report an invalid high temperature
+ * for one poll cycle. It is suspected this is due to the embedded
+ * controller timing out. A typical value is 138C for one cycle on
+ * a system that is otherwise 65C.
+ *
+ * If we're almost at that threshold, notify the user through devd(8).
+ */
+ if ((newflags & (TZ_THFLAG_HOT | TZ_THFLAG_CRT)) != 0) {
+ sc->tz_validchecks++;
+ if (sc->tz_validchecks == TZ_VALIDCHECKS) {
+ device_printf(sc->tz_dev,
+ "WARNING - current temperature (%d.%dC) exceeds safe limits\n",
+ TZ_KELVTOC(sc->tz_temperature));
+ shutdown_nice(RB_POWEROFF);
+ } else if (sc->tz_validchecks == TZ_NOTIFYCOUNT)
+ acpi_UserNotify("Thermal", sc->tz_handle, TZ_NOTIFY_CRITICAL);
+ } else {
+ sc->tz_validchecks = 0;
+ }
+ sc->tz_thflags = newflags;
+
+ return_VOID;
+}
+
+/*
+ * Given an object, verify that it's a reference to a device of some sort,
+ * and try to switch it off.
+ */
+static void
+acpi_tz_switch_cooler_off(ACPI_OBJECT *obj, void *arg)
+{
+ ACPI_HANDLE cooler;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ cooler = acpi_GetReference(NULL, obj);
+ if (cooler == NULL) {
+ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "can't get handle\n"));
+ return_VOID;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "called to turn %s off\n",
+ acpi_name(cooler)));
+ acpi_pwr_switch_consumer(cooler, ACPI_STATE_D3);
+
+ return_VOID;
+}
+
+/*
+ * Given an object, verify that it's a reference to a device of some sort,
+ * and try to switch it on.
+ *
+ * XXX replication of off/on function code is bad.
+ */
+static void
+acpi_tz_switch_cooler_on(ACPI_OBJECT *obj, void *arg)
+{
+ struct acpi_tz_softc *sc = (struct acpi_tz_softc *)arg;
+ ACPI_HANDLE cooler;
+ ACPI_STATUS status;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ cooler = acpi_GetReference(NULL, obj);
+ if (cooler == NULL) {
+ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "can't get handle\n"));
+ return_VOID;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "called to turn %s on\n",
+ acpi_name(cooler)));
+ status = acpi_pwr_switch_consumer(cooler, ACPI_STATE_D0);
+ if (ACPI_FAILURE(status)) {
+ ACPI_VPRINT(sc->tz_dev, acpi_device_get_parent_softc(sc->tz_dev),
+ "failed to activate %s - %s\n", acpi_name(cooler),
+ AcpiFormatException(status));
+ }
+
+ return_VOID;
+}
+
+/*
+ * Read/debug-print a parameter, default it to -1.
+ */
+static void
+acpi_tz_getparam(struct acpi_tz_softc *sc, char *node, int *data)
+{
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ if (ACPI_FAILURE(acpi_GetInteger(sc->tz_handle, node, data))) {
+ *data = -1;
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_VALUES, "%s.%s = %d\n",
+ acpi_name(sc->tz_handle), node, *data));
+ }
+
+ return_VOID;
+}
+
+/*
+ * Sanity-check a temperature value. Assume that setpoints
+ * should be between 0C and 200C.
+ */
+static void
+acpi_tz_sanity(struct acpi_tz_softc *sc, int *val, char *what)
+{
+ if (*val != -1 && (*val < TZ_ZEROC || *val > TZ_ZEROC + 2000)) {
+ device_printf(sc->tz_dev, "%s value is absurd, ignored (%d.%dC)\n",
+ what, TZ_KELVTOC(*val));
+ *val = -1;
+ }
+}
+
+/*
+ * Respond to a sysctl on the active state node.
+ */
+static int
+acpi_tz_active_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ struct acpi_tz_softc *sc;
+ int active;
+ int error;
+
+ sc = (struct acpi_tz_softc *)oidp->oid_arg1;
+ active = sc->tz_active;
+ error = sysctl_handle_int(oidp, &active, 0, req);
+
+ /* Error or no new value */
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+ if (active < -1 || active >= TZ_NUMLEVELS)
+ return (EINVAL);
+
+ /* Set new preferred level and re-switch */
+ sc->tz_requested = active;
+ acpi_tz_signal(sc, 0);
+ return (0);
+}
+
+static int
+acpi_tz_cooling_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ struct acpi_tz_softc *sc;
+ int enabled, error;
+
+ sc = (struct acpi_tz_softc *)oidp->oid_arg1;
+ enabled = sc->tz_cooling_enabled;
+ error = sysctl_handle_int(oidp, &enabled, 0, req);
+
+ /* Error or no new value */
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+ if (enabled != TRUE && enabled != FALSE)
+ return (EINVAL);
+
+ if (enabled) {
+ if (acpi_tz_cooling_is_available(sc))
+ error = acpi_tz_cooling_thread_start(sc);
+ else
+ error = ENODEV;
+ if (error)
+ enabled = FALSE;
+ }
+ sc->tz_cooling_enabled = enabled;
+ return (error);
+}
+
+static int
+acpi_tz_temp_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ struct acpi_tz_softc *sc;
+ int temp, *temp_ptr;
+ int error;
+
+ sc = oidp->oid_arg1;
+ temp_ptr = (int *)((uintptr_t)sc + oidp->oid_arg2);
+ temp = *temp_ptr;
+ error = sysctl_handle_int(oidp, &temp, 0, req);
+
+ /* Error or no new value */
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+
+ /* Only allow changing settings if override is set. */
+ if (!acpi_tz_override)
+ return (EPERM);
+
+ /* Check user-supplied value for sanity. */
+ acpi_tz_sanity(sc, &temp, "user-supplied temp");
+ if (temp == -1)
+ return (EINVAL);
+
+ *temp_ptr = temp;
+ return (0);
+}
+
+static int
+acpi_tz_passive_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ struct acpi_tz_softc *sc;
+ int val, *val_ptr;
+ int error;
+
+ sc = oidp->oid_arg1;
+ val_ptr = (int *)((uintptr_t)sc + oidp->oid_arg2);
+ val = *val_ptr;
+ error = sysctl_handle_int(oidp, &val, 0, req);
+
+ /* Error or no new value */
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+
+ /* Only allow changing settings if override is set. */
+ if (!acpi_tz_override)
+ return (EPERM);
+
+ *val_ptr = val;
+ return (0);
+}
+
+static void
+acpi_tz_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context)
+{
+ struct acpi_tz_softc *sc = (struct acpi_tz_softc *)context;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ switch (notify) {
+ case TZ_NOTIFY_TEMPERATURE:
+ /* Temperature change occurred */
+ acpi_tz_signal(sc, 0);
+ break;
+ case TZ_NOTIFY_DEVICES:
+ case TZ_NOTIFY_LEVELS:
+ /* Zone devices/setpoints changed */
+ acpi_tz_signal(sc, TZ_FLAG_GETSETTINGS);
+ break;
+ default:
+ ACPI_VPRINT(sc->tz_dev, acpi_device_get_parent_softc(sc->tz_dev),
+ "unknown Notify event 0x%x\n", notify);
+ break;
+ }
+
+ acpi_UserNotify("Thermal", h, notify);
+
+ return_VOID;
+}
+
/*
 * Post event flag(s) for a zone and wake the monitor thread.
 * Safe to call from Notify handlers and eventhandlers; the thermal lock
 * protects only the flag update.
 */
static void
acpi_tz_signal(struct acpi_tz_softc *sc, int flags)
{
    ACPI_LOCK(thermal);
    sc->tz_flags |= flags;
    ACPI_UNLOCK(thermal);
    /* acpi_tz_thread() sleeps on &acpi_tz_proc; wake it for an early poll. */
    wakeup(&acpi_tz_proc);
}
+
+/*
+ * Notifies can be generated asynchronously but have also been seen to be
+ * triggered by other thermal methods. One system generates a notify of
+ * 0x81 when the fan is turned on or off. Another generates it when _SCP
+ * is called. To handle these situations, we check the zone via
+ * acpi_tz_monitor() before evaluating changes to setpoints or the cooling
+ * policy.
+ */
+static void
+acpi_tz_timeout(struct acpi_tz_softc *sc, int flags)
+{
+
+ /* Check the current temperature and take action based on it */
+ acpi_tz_monitor(sc);
+
+ /* If requested, get the power profile settings. */
+ if (flags & TZ_FLAG_GETPROFILE)
+ acpi_tz_power_profile(sc);
+
+ /*
+ * If requested, check for new devices/setpoints. After finding them,
+ * check if we need to switch fans based on the new values.
+ */
+ if (flags & TZ_FLAG_GETSETTINGS) {
+ acpi_tz_establish(sc);
+ acpi_tz_monitor(sc);
+ }
+
+ /* XXX passive cooling actions? */
+}
+
+/*
+ * System power profile may have changed; fetch and notify the
+ * thermal zone accordingly.
+ *
+ * Since this can be called from an arbitrary eventhandler, it needs
+ * to get the ACPI lock itself.
+ */
+static void
+acpi_tz_power_profile(void *arg)
+{
+ ACPI_STATUS status;
+ struct acpi_tz_softc *sc = (struct acpi_tz_softc *)arg;
+ int state;
+
+ state = power_profile_get_state();
+ if (state != POWER_PROFILE_PERFORMANCE && state != POWER_PROFILE_ECONOMY)
+ return;
+
+ /* check that we haven't decided there's no _SCP method */
+ if ((sc->tz_flags & TZ_FLAG_NO_SCP) == 0) {
+
+ /* Call _SCP to set the new profile */
+ status = acpi_SetInteger(sc->tz_handle, "_SCP",
+ (state == POWER_PROFILE_PERFORMANCE) ? 0 : 1);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND)
+ ACPI_VPRINT(sc->tz_dev,
+ acpi_device_get_parent_softc(sc->tz_dev),
+ "can't evaluate %s._SCP - %s\n",
+ acpi_name(sc->tz_handle),
+ AcpiFormatException(status));
+ sc->tz_flags |= TZ_FLAG_NO_SCP;
+ } else {
+ /* We have to re-evaluate the entire zone now */
+ acpi_tz_signal(sc, TZ_FLAG_GETSETTINGS);
+ }
+ }
+}
+
+/*
+ * Thermal zone monitor thread.
+ */
+static void
+acpi_tz_thread(void *arg)
+{
+ device_t *devs;
+ int devcount, i;
+ int flags;
+ struct acpi_tz_softc **sc;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ devs = NULL;
+ devcount = 0;
+ sc = NULL;
+
+ for (;;) {
+ /* If the number of devices has changed, re-evaluate. */
+ if (devclass_get_count(acpi_tz_devclass) != devcount) {
+ if (devs != NULL) {
+ free(devs, M_TEMP);
+ free(sc, M_TEMP);
+ }
+ devclass_get_devices(acpi_tz_devclass, &devs, &devcount);
+ sc = malloc(sizeof(struct acpi_tz_softc *) * devcount, M_TEMP,
+ M_WAITOK | M_ZERO);
+ for (i = 0; i < devcount; i++)
+ sc[i] = device_get_softc(devs[i]);
+ }
+
+ /* Check for temperature events and act on them. */
+ for (i = 0; i < devcount; i++) {
+ ACPI_LOCK(thermal);
+ flags = sc[i]->tz_flags;
+ sc[i]->tz_flags &= TZ_FLAG_NO_SCP;
+ ACPI_UNLOCK(thermal);
+ acpi_tz_timeout(sc[i], flags);
+ }
+
+ /* If more work to do, don't go to sleep yet. */
+ ACPI_LOCK(thermal);
+ for (i = 0; i < devcount; i++) {
+ if (sc[i]->tz_flags & ~TZ_FLAG_NO_SCP)
+ break;
+ }
+
+ /*
+ * If we have no more work, sleep for a while, setting PDROP so that
+ * the mutex will not be reacquired. Otherwise, drop the mutex and
+ * loop to handle more events.
+ */
+ if (i == devcount)
+ msleep(&acpi_tz_proc, &thermal_mutex, PZERO | PDROP, "tzpoll",
+ hz * acpi_tz_polling_rate);
+ else
+ ACPI_UNLOCK(thermal);
+ }
+}
+
+static int
+acpi_tz_cpufreq_restore(struct acpi_tz_softc *sc)
+{
+ device_t dev;
+ int error;
+
+ if (!sc->tz_cooling_updated)
+ return (0);
+ if ((dev = devclass_get_device(devclass_find("cpufreq"), 0)) == NULL)
+ return (ENXIO);
+ ACPI_VPRINT(sc->tz_dev, acpi_device_get_parent_softc(sc->tz_dev),
+ "temperature %d.%dC: resuming previous clock speed (%d MHz)\n",
+ TZ_KELVTOC(sc->tz_temperature), sc->tz_cooling_saved_freq);
+ error = CPUFREQ_SET(dev, NULL, CPUFREQ_PRIO_KERN);
+ if (error == 0)
+ sc->tz_cooling_updated = FALSE;
+ return (error);
+}
+
+static int
+acpi_tz_cpufreq_update(struct acpi_tz_softc *sc, int req)
+{
+ device_t dev;
+ struct cf_level *levels;
+ int num_levels, error, freq, desired_freq, perf, i;
+
+ levels = malloc(CPUFREQ_MAX_LEVELS * sizeof(*levels), M_TEMP, M_NOWAIT);
+ if (levels == NULL)
+ return (ENOMEM);
+
+ /*
+ * Find the main device, cpufreq0. We don't yet support independent
+ * CPU frequency control on SMP.
+ */
+ if ((dev = devclass_get_device(devclass_find("cpufreq"), 0)) == NULL) {
+ error = ENXIO;
+ goto out;
+ }
+
+ /* Get the current frequency. */
+ error = CPUFREQ_GET(dev, &levels[0]);
+ if (error)
+ goto out;
+ freq = levels[0].total_set.freq;
+
+ /* Get the current available frequency levels. */
+ num_levels = CPUFREQ_MAX_LEVELS;
+ error = CPUFREQ_LEVELS(dev, levels, &num_levels);
+ if (error) {
+ if (error == E2BIG)
+ printf("cpufreq: need to increase CPUFREQ_MAX_LEVELS\n");
+ goto out;
+ }
+
+ /* Calculate the desired frequency as a percent of the max frequency. */
+ perf = 100 * freq / levels[0].total_set.freq - req;
+ if (perf < 0)
+ perf = 0;
+ else if (perf > 100)
+ perf = 100;
+ desired_freq = levels[0].total_set.freq * perf / 100;
+
+ if (desired_freq < freq) {
+ /* Find the closest available frequency, rounding down. */
+ for (i = 0; i < num_levels; i++)
+ if (levels[i].total_set.freq <= desired_freq)
+ break;
+
+ /* If we didn't find a relevant setting, use the lowest. */
+ if (i == num_levels)
+ i--;
+ } else {
+ /* If we didn't decrease frequency yet, don't increase it. */
+ if (!sc->tz_cooling_updated) {
+ sc->tz_cooling_active = FALSE;
+ goto out;
+ }
+
+ /* Use saved cpu frequency as maximum value. */
+ if (desired_freq > sc->tz_cooling_saved_freq)
+ desired_freq = sc->tz_cooling_saved_freq;
+
+ /* Find the closest available frequency, rounding up. */
+ for (i = num_levels - 1; i >= 0; i--)
+ if (levels[i].total_set.freq >= desired_freq)
+ break;
+
+ /* If we didn't find a relevant setting, use the highest. */
+ if (i == -1)
+ i++;
+
+ /* If we're going to the highest frequency, restore the old setting. */
+ if (i == 0 || desired_freq == sc->tz_cooling_saved_freq) {
+ error = acpi_tz_cpufreq_restore(sc);
+ if (error == 0)
+ sc->tz_cooling_active = FALSE;
+ goto out;
+ }
+ }
+
+ /* If we are going to a new frequency, activate it. */
+ if (levels[i].total_set.freq != freq) {
+ ACPI_VPRINT(sc->tz_dev, acpi_device_get_parent_softc(sc->tz_dev),
+ "temperature %d.%dC: %screasing clock speed "
+ "from %d MHz to %d MHz\n",
+ TZ_KELVTOC(sc->tz_temperature),
+ (freq > levels[i].total_set.freq) ? "de" : "in",
+ freq, levels[i].total_set.freq);
+ error = CPUFREQ_SET(dev, &levels[i], CPUFREQ_PRIO_KERN);
+ if (error == 0 && !sc->tz_cooling_updated) {
+ sc->tz_cooling_saved_freq = freq;
+ sc->tz_cooling_updated = TRUE;
+ }
+ }
+
+out:
+ if (levels)
+ free(levels, M_TEMP);
+ return (error);
+}
+
+/*
+ * Passive cooling thread; monitors current temperature according to the
+ * cooling interval and calculates whether to scale back CPU frequency.
+ */
+static void
+acpi_tz_cooling_thread(void *arg)
+{
+ struct acpi_tz_softc *sc;
+ int error, perf, curr_temp, prev_temp;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ sc = (struct acpi_tz_softc *)arg;
+
+ prev_temp = sc->tz_temperature;
+ while (sc->tz_cooling_enabled) {
+ if (sc->tz_cooling_active)
+ (void)acpi_tz_get_temperature(sc);
+ curr_temp = sc->tz_temperature;
+ if (curr_temp >= sc->tz_zone.psv)
+ sc->tz_cooling_active = TRUE;
+ if (sc->tz_cooling_active) {
+ perf = sc->tz_zone.tc1 * (curr_temp - prev_temp) +
+ sc->tz_zone.tc2 * (curr_temp - sc->tz_zone.psv);
+ perf /= 10;
+
+ if (perf != 0) {
+ error = acpi_tz_cpufreq_update(sc, perf);
+
+ /*
+ * If error and not simply a higher priority setting was
+ * active, disable cooling.
+ */
+ if (error != 0 && error != EPERM) {
+ device_printf(sc->tz_dev,
+ "failed to set new freq, disabling passive cooling\n");
+ sc->tz_cooling_enabled = FALSE;
+ }
+ }
+ }
+ prev_temp = curr_temp;
+ tsleep(&sc->tz_cooling_proc, PZERO, "cooling",
+ hz * sc->tz_zone.tsp / 10);
+ }
+ if (sc->tz_cooling_active) {
+ acpi_tz_cpufreq_restore(sc);
+ sc->tz_cooling_active = FALSE;
+ }
+ sc->tz_cooling_proc = NULL;
+ ACPI_LOCK(thermal);
+ sc->tz_cooling_proc_running = FALSE;
+ ACPI_UNLOCK(thermal);
+ kproc_exit(0);
+}
+
+/*
+ * TODO: We ignore _PSL (list of cooling devices) since cpufreq enumerates
+ * all CPUs for us. However, it's possible in the future _PSL will
+ * reference non-CPU devices so we may want to support it then.
+ */
+static int
+acpi_tz_cooling_is_available(struct acpi_tz_softc *sc)
+{
+ return (sc->tz_zone.tc1 != -1 && sc->tz_zone.tc2 != -1 &&
+ sc->tz_zone.tsp != -1 && sc->tz_zone.tsp != 0 &&
+ sc->tz_zone.psv != -1);
+}
+
+static int
+acpi_tz_cooling_thread_start(struct acpi_tz_softc *sc)
+{
+ int error;
+
+ ACPI_LOCK(thermal);
+ if (sc->tz_cooling_proc_running) {
+ ACPI_UNLOCK(thermal);
+ return (0);
+ }
+ sc->tz_cooling_proc_running = TRUE;
+ ACPI_UNLOCK(thermal);
+ error = 0;
+ if (sc->tz_cooling_proc == NULL) {
+ error = kproc_create(acpi_tz_cooling_thread, sc,
+ &sc->tz_cooling_proc, RFHIGHPID, 0, "acpi_cooling%d",
+ device_get_unit(sc->tz_dev));
+ if (error != 0) {
+ device_printf(sc->tz_dev, "could not create thread - %d", error);
+ ACPI_LOCK(thermal);
+ sc->tz_cooling_proc_running = FALSE;
+ ACPI_UNLOCK(thermal);
+ }
+ }
+ return (error);
+}
diff --git a/sys/dev/acpica/acpi_throttle.c b/sys/dev/acpica/acpi_throttle.c
new file mode 100644
index 0000000..40476e0
--- /dev/null
+++ b/sys/dev/acpica/acpi_throttle.c
@@ -0,0 +1,443 @@
+/*-
+ * Copyright (c) 2003-2005 Nate Lawson (SDG)
+ * Copyright (c) 2001 Michael Smith
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_acpi.h"
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/cpu.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+
+#include <machine/bus.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+
+#include <dev/acpica/acpivar.h>
+#include <dev/pci/pcivar.h>
+
+#include "cpufreq_if.h"
+
+/*
+ * Throttling provides relative frequency control. It involves modulating
+ * the clock so that the CPU is active for only a fraction of the normal
+ * clock cycle. It does not change voltage and so is less efficient than
+ * other mechanisms. Since it is relative, it can be used in addition to
+ * absolute cpufreq drivers. We support the ACPI 2.0 specification.
+ */
+
+/* Per-CPU throttle driver state. */
+struct acpi_throttle_softc {
+ device_t cpu_dev; /* Our device (child of an ACPI CPU). */
+ ACPI_HANDLE cpu_handle; /* ACPI handle for the processor. */
+ uint32_t cpu_p_blk; /* ACPI P_BLK location */
+ uint32_t cpu_p_blk_len; /* P_BLK length (must be 6). */
+ struct resource *cpu_p_cnt; /* Throttling control register */
+ int cpu_p_type; /* Resource type for cpu_p_cnt. */
+ uint32_t cpu_thr_state; /* Current throttle setting. */
+};
+
+#define THR_GET_REG(reg) \
+ (bus_space_read_4(rman_get_bustag((reg)), \
+ rman_get_bushandle((reg)), 0))
+#define THR_SET_REG(reg, val) \
+ (bus_space_write_4(rman_get_bustag((reg)), \
+ rman_get_bushandle((reg)), 0, (val)))
+
+/*
+ * Speeds are stored in counts, from 1 to CPU_MAX_SPEED, and
+ * reported to the user in hundredths of a percent.
+ */
+#define CPU_MAX_SPEED (1 << cpu_duty_width)
+#define CPU_SPEED_PERCENT(x) ((10000 * (x)) / CPU_MAX_SPEED)
+#define CPU_SPEED_PRINTABLE(x) (CPU_SPEED_PERCENT(x) / 10), \
+ (CPU_SPEED_PERCENT(x) % 10)
+#define CPU_P_CNT_THT_EN (1<<4)
+#define CPU_QUIRK_NO_THROTTLE (1<<1) /* Throttling is not usable. */
+
+#define PCI_VENDOR_INTEL 0x8086
+#define PCI_DEVICE_82371AB_3 0x7113 /* PIIX4 chipset for quirks. */
+#define PCI_REVISION_A_STEP 0
+#define PCI_REVISION_B_STEP 1
+
+static uint32_t cpu_duty_offset; /* Offset in P_CNT of throttle val. */
+static uint32_t cpu_duty_width; /* Bit width of throttle value. */
+static int thr_rid; /* Driver-wide resource id. */
+static int thr_quirks; /* Indicate any hardware bugs. */
+
+static void acpi_throttle_identify(driver_t *driver, device_t parent);
+static int acpi_throttle_probe(device_t dev);
+static int acpi_throttle_attach(device_t dev);
+static int acpi_throttle_evaluate(struct acpi_throttle_softc *sc);
+static int acpi_throttle_quirks(struct acpi_throttle_softc *sc);
+static int acpi_thr_settings(device_t dev, struct cf_setting *sets,
+ int *count);
+static int acpi_thr_set(device_t dev, const struct cf_setting *set);
+static int acpi_thr_get(device_t dev, struct cf_setting *set);
+static int acpi_thr_type(device_t dev, int *type);
+
+static device_method_t acpi_throttle_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_identify, acpi_throttle_identify),
+ DEVMETHOD(device_probe, acpi_throttle_probe),
+ DEVMETHOD(device_attach, acpi_throttle_attach),
+
+ /* cpufreq interface */
+ DEVMETHOD(cpufreq_drv_set, acpi_thr_set),
+ DEVMETHOD(cpufreq_drv_get, acpi_thr_get),
+ DEVMETHOD(cpufreq_drv_type, acpi_thr_type),
+ DEVMETHOD(cpufreq_drv_settings, acpi_thr_settings),
+ {0, 0}
+};
+
+static driver_t acpi_throttle_driver = {
+ "acpi_throttle",
+ acpi_throttle_methods,
+ sizeof(struct acpi_throttle_softc),
+};
+
+static devclass_t acpi_throttle_devclass;
+DRIVER_MODULE(acpi_throttle, cpu, acpi_throttle_driver, acpi_throttle_devclass,
+ 0, 0);
+
+/*
+ * Identify routine: add an acpi_throttle child under each ACPI processor
+ * that advertises throttling support, either via a usable P_BLK in its
+ * Processor object or via a _PTC method.
+ */
+static void
+acpi_throttle_identify(driver_t *driver, device_t parent)
+{
+ ACPI_BUFFER buf;
+ ACPI_HANDLE handle;
+ ACPI_OBJECT *obj;
+
+ /* Make sure we're not being doubly invoked. */
+ if (device_find_child(parent, "acpi_throttle", -1))
+ return;
+
+ /* Check for a valid duty width and parent CPU type. */
+ handle = acpi_get_handle(parent);
+ if (handle == NULL)
+ return;
+ if (AcpiGbl_FADT.DutyWidth == 0 ||
+ acpi_get_type(parent) != ACPI_TYPE_PROCESSOR)
+ return;
+
+ /*
+ * Add a child if there's a non-NULL P_BLK and correct length, or
+ * if the _PTC method is present.
+ */
+ buf.Pointer = NULL;
+ buf.Length = ACPI_ALLOCATE_BUFFER;
+ /* Evaluate the Processor object itself to inspect its P_BLK fields. */
+ if (ACPI_FAILURE(AcpiEvaluateObject(handle, NULL, NULL, &buf)))
+ return;
+ obj = (ACPI_OBJECT *)buf.Pointer;
+ /* The NULL result buffer _PTC call only probes for its existence. */
+ if ((obj->Processor.PblkAddress && obj->Processor.PblkLength >= 4) ||
+ ACPI_SUCCESS(AcpiEvaluateObject(handle, "_PTC", NULL, NULL))) {
+ if (BUS_ADD_CHILD(parent, 0, "acpi_throttle", -1) == NULL)
+ device_printf(parent, "add throttle child failed\n");
+ }
+ AcpiOsFree(obj);
+}
+
+static int
+acpi_throttle_probe(device_t dev)
+{
+
+ /* Honor hints that disable this driver. */
+ if (resource_disabled("acpi_throttle", 0))
+ return (ENXIO);
+
+ /*
+ * ACPI throttling works (on i386 at least) by having the chipset
+ * modulate the STPCLK# pin according to the duty cycle.  The p4tcc
+ * driver uses the same mechanism internal to the CPU, so the two
+ * must not be active at once: refuse to probe when an enabled
+ * p4tcc sibling exists on our parent.
+ */
+ if (device_find_child(device_get_parent(dev), "p4tcc", -1) != NULL &&
+ resource_disabled("p4tcc", 0) == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "ACPI CPU Throttling");
+ return (0);
+}
+
+/*
+ * Attach: read the P_BLK address/length from the Processor object, apply
+ * chipset quirks (first unit only), map the throttle control register,
+ * force full speed, and register with cpufreq(4).
+ */
+static int
+acpi_throttle_attach(device_t dev)
+{
+ struct acpi_throttle_softc *sc;
+ struct cf_setting set;
+ ACPI_BUFFER buf;
+ ACPI_OBJECT *obj;
+ ACPI_STATUS status;
+ int error;
+
+ sc = device_get_softc(dev);
+ sc->cpu_dev = dev;
+ sc->cpu_handle = acpi_get_handle(dev);
+
+ /* Evaluate our own Processor object to fetch the P_BLK fields. */
+ buf.Pointer = NULL;
+ buf.Length = ACPI_ALLOCATE_BUFFER;
+ status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
+ if (ACPI_FAILURE(status)) {
+ device_printf(dev, "attach failed to get Processor obj - %s\n",
+ AcpiFormatException(status));
+ return (ENXIO);
+ }
+ obj = (ACPI_OBJECT *)buf.Pointer;
+ sc->cpu_p_blk = obj->Processor.PblkAddress;
+ sc->cpu_p_blk_len = obj->Processor.PblkLength;
+ AcpiOsFree(obj);
+
+ /* If this is the first device probed, check for quirks. */
+ if (device_get_unit(dev) == 0)
+ acpi_throttle_quirks(sc);
+
+ /* Attempt to attach the actual throttling register. */
+ error = acpi_throttle_evaluate(sc);
+ if (error)
+ return (error);
+
+ /*
+ * Set our initial frequency to the highest since some systems
+ * seem to boot with this at the lowest setting.
+ */
+ set.freq = 10000;
+ acpi_thr_set(dev, &set);
+
+ /* Everything went ok, register with cpufreq(4). */
+ cpufreq_register(dev);
+ return (0);
+}
+
+/*
+ * Locate and map the P_CNT (throttle control) register, preferring the
+ * address given by the optional _PTC method and falling back to the
+ * processor's P_BLK.  Also caches the FADT duty offset/width (driver-wide
+ * globals, initialized once by unit 0) and sanity-checks them.
+ *
+ * Returns 0 on success or ENXIO if throttling is unsupported/unusable.
+ */
+static int
+acpi_throttle_evaluate(struct acpi_throttle_softc *sc)
+{
+ uint32_t duty_end;
+ ACPI_BUFFER buf;
+ ACPI_OBJECT obj;
+ ACPI_GENERIC_ADDRESS gas;
+ ACPI_STATUS status;
+
+ /* Get throttling parameters from the FADT. 0 means not supported. */
+ if (device_get_unit(sc->cpu_dev) == 0) {
+ cpu_duty_offset = AcpiGbl_FADT.DutyOffset;
+ cpu_duty_width = AcpiGbl_FADT.DutyWidth;
+ }
+ if (cpu_duty_width == 0 || (thr_quirks & CPU_QUIRK_NO_THROTTLE) != 0)
+ return (ENXIO);
+
+ /* Validate the duty offset/width. */
+ duty_end = cpu_duty_offset + cpu_duty_width - 1;
+ if (duty_end > 31) {
+ device_printf(sc->cpu_dev,
+ "CLK_VAL field overflows P_CNT register\n");
+ return (ENXIO);
+ }
+ /* Bit 4 of P_CNT is THT_EN; the CLK_VAL field must not cover it. */
+ if (cpu_duty_offset <= 4 && duty_end >= 4) {
+ device_printf(sc->cpu_dev,
+ "CLK_VAL field overlaps THT_EN bit\n");
+ return (ENXIO);
+ }
+
+ /*
+ * First try the optional _PTC method for the P_CNT location.  If it
+ * is absent or unusable, we fall back to the processor's P_BLK below.
+ *
+ * Note that some systems seem to duplicate the P_BLK pointer
+ * across multiple CPUs, so not getting the resource is not fatal.
+ */
+ buf.Pointer = &obj;
+ buf.Length = sizeof(obj);
+ status = AcpiEvaluateObject(sc->cpu_handle, "_PTC", NULL, &buf);
+ if (ACPI_SUCCESS(status)) {
+ if (obj.Buffer.Length < sizeof(ACPI_GENERIC_ADDRESS) + 3) {
+ device_printf(sc->cpu_dev, "_PTC buffer too small\n");
+ return (ENXIO);
+ }
+ /* The GAS follows a 3-byte resource header in the buffer. */
+ memcpy(&gas, obj.Buffer.Pointer + 3, sizeof(gas));
+ acpi_bus_alloc_gas(sc->cpu_dev, &sc->cpu_p_type, &thr_rid,
+ &gas, &sc->cpu_p_cnt, 0);
+ if (sc->cpu_p_cnt != NULL && bootverbose) {
+ device_printf(sc->cpu_dev, "P_CNT from _PTC %#jx\n",
+ gas.Address);
+ }
+ }
+
+ /* If _PTC not present or other failure, try the P_BLK. */
+ if (sc->cpu_p_cnt == NULL) {
+ /*
+ * The spec says P_BLK must be 6 bytes long. However, some
+ * systems use it to indicate a fractional set of features
+ * present so we take anything >= 4.
+ */
+ if (sc->cpu_p_blk_len < 4)
+ return (ENXIO);
+ gas.Address = sc->cpu_p_blk;
+ gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
+ gas.BitWidth = 32;
+ acpi_bus_alloc_gas(sc->cpu_dev, &sc->cpu_p_type, &thr_rid,
+ &gas, &sc->cpu_p_cnt, 0);
+ if (sc->cpu_p_cnt != NULL) {
+ if (bootverbose)
+ device_printf(sc->cpu_dev,
+ "P_CNT from P_BLK %#x\n", sc->cpu_p_blk);
+ } else {
+ device_printf(sc->cpu_dev, "failed to attach P_CNT\n");
+ return (ENXIO);
+ }
+ }
+ /* Consume this resource id so the next CPU uses a fresh one. */
+ thr_rid++;
+
+ return (0);
+}
+
+/*
+ * Probe for chipset bugs that make throttling unusable and record them
+ * in the driver-wide thr_quirks flags.
+ */
+static int
+acpi_throttle_quirks(struct acpi_throttle_softc *sc)
+{
+ device_t piix4;
+ int rev;
+
+ /*
+ * Disable throttling control on PIIX4 A and B-step parts.
+ * See specification changes #13 ("Manual Throttle Duty Cycle")
+ * and #14 ("Enabling and Disabling Manual Throttle"), plus
+ * erratum #5 ("STPCLK# Deassertion Time") from the January
+ * 2002 PIIX4 specification update. Note that few (if any)
+ * mobile systems ever used this part.
+ */
+ piix4 = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
+ if (piix4 != NULL) {
+ rev = pci_get_revid(piix4);
+ if (rev == PCI_REVISION_A_STEP || rev == PCI_REVISION_B_STEP)
+ thr_quirks |= CPU_QUIRK_NO_THROTTLE;
+ }
+
+ return (0);
+}
+
+/*
+ * cpufreq settings method: report every duty-cycle step, fastest first,
+ * as a relative percentage (hundredths of a percent).
+ */
+static int
+acpi_thr_settings(device_t dev, struct cf_setting *sets, int *count)
+{
+ int i;
+
+ if (sets == NULL || count == NULL)
+ return (EINVAL);
+ if (*count < CPU_MAX_SPEED)
+ return (E2BIG);
+
+ /* Return a list of valid settings for this driver. */
+ memset(sets, CPUFREQ_VAL_UNKNOWN, sizeof(*sets) * CPU_MAX_SPEED);
+ for (i = 0; i < CPU_MAX_SPEED; i++) {
+ sets[i].freq = CPU_SPEED_PERCENT(CPU_MAX_SPEED - i);
+ sets[i].dev = dev;
+ }
+ *count = CPU_MAX_SPEED;
+
+ return (0);
+}
+
+/*
+ * cpufreq set method: program the requested duty cycle into P_CNT.
+ *
+ * The write sequence (disable THT_EN, update CLK_VAL, re-enable THT_EN)
+ * follows the ACPI duty-cycle programming model; at full speed throttling
+ * is simply left disabled.
+ */
+static int
+acpi_thr_set(device_t dev, const struct cf_setting *set)
+{
+ struct acpi_throttle_softc *sc;
+ uint32_t clk_val, p_cnt, speed;
+
+ if (set == NULL)
+ return (EINVAL);
+ sc = device_get_softc(dev);
+
+ /*
+ * Validate requested state converts to a duty cycle that is an
+ * integer from [1 .. CPU_MAX_SPEED].
+ */
+ speed = set->freq * CPU_MAX_SPEED / 10000;
+ if (speed * 10000 != set->freq * CPU_MAX_SPEED ||
+ speed < 1 || speed > CPU_MAX_SPEED)
+ return (EINVAL);
+
+ /* If we're at this setting, don't bother applying it again. */
+ if (speed == sc->cpu_thr_state)
+ return (0);
+
+ /* Get the current P_CNT value and disable throttling */
+ p_cnt = THR_GET_REG(sc->cpu_p_cnt);
+ p_cnt &= ~CPU_P_CNT_THT_EN;
+ THR_SET_REG(sc->cpu_p_cnt, p_cnt);
+
+ /* If we're at maximum speed, that's all */
+ if (speed < CPU_MAX_SPEED) {
+ /* Mask the old CLK_VAL off and OR in the new value */
+ clk_val = (CPU_MAX_SPEED - 1) << cpu_duty_offset;
+ p_cnt &= ~clk_val;
+ p_cnt |= (speed << cpu_duty_offset);
+
+ /* Write the new P_CNT value and then enable throttling */
+ THR_SET_REG(sc->cpu_p_cnt, p_cnt);
+ p_cnt |= CPU_P_CNT_THT_EN;
+ THR_SET_REG(sc->cpu_p_cnt, p_cnt);
+ }
+ /* Cache the setting so redundant writes are skipped next time. */
+ sc->cpu_thr_state = speed;
+
+ return (0);
+}
+
+/*
+ * cpufreq get method: read the CLK_VAL field back out of P_CNT and
+ * report it as a relative percentage.  Note that a CLK_VAL of 0 means
+ * throttling is disabled (full speed); the raw field value is cached
+ * in cpu_thr_state.
+ */
+static int
+acpi_thr_get(device_t dev, struct cf_setting *set)
+{
+ struct acpi_throttle_softc *sc;
+ uint32_t p_cnt, clk_val;
+
+ if (set == NULL)
+ return (EINVAL);
+ sc = device_get_softc(dev);
+
+ /* Get the current throttling setting from P_CNT. */
+ p_cnt = THR_GET_REG(sc->cpu_p_cnt);
+ clk_val = (p_cnt >> cpu_duty_offset) & (CPU_MAX_SPEED - 1);
+ sc->cpu_thr_state = clk_val;
+
+ memset(set, CPUFREQ_VAL_UNKNOWN, sizeof(*set));
+ set->freq = CPU_SPEED_PERCENT(clk_val);
+ set->dev = dev;
+
+ return (0);
+}
+
+/*
+ * cpufreq type method: throttling scales the clock relative to whatever
+ * absolute frequency is currently set, so report CPUFREQ_TYPE_RELATIVE.
+ */
+static int
+acpi_thr_type(device_t dev, int *type)
+{
+
+ if (type == NULL)
+ return (EINVAL);
+ *type = CPUFREQ_TYPE_RELATIVE;
+
+ return (0);
+}
diff --git a/sys/dev/acpica/acpi_timer.c b/sys/dev/acpica/acpi_timer.c
new file mode 100644
index 0000000..9f61cd8
--- /dev/null
+++ b/sys/dev/acpica/acpi_timer.c
@@ -0,0 +1,456 @@
+/*-
+ * Copyright (c) 2000, 2001 Michael Smith
+ * Copyright (c) 2000 BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_acpi.h"
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/eventhandler.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/sysctl.h>
+#include <sys/timetc.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/rman.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+
+#include <dev/acpica/acpivar.h>
+#include <dev/pci/pcivar.h>
+
+/*
+ * A timecounter based on the free-running ACPI timer.
+ *
+ * Based on the i386-only mp_clock.c by <phk@FreeBSD.ORG>.
+ */
+
+/* Hooks for the ACPI CA debugging infrastructure */
+#define _COMPONENT ACPI_TIMER
+ACPI_MODULE_NAME("TIMER")
+
+static device_t acpi_timer_dev;
+static struct resource *acpi_timer_reg;
+static bus_space_handle_t acpi_timer_bsh;
+static bus_space_tag_t acpi_timer_bst;
+static eventhandler_tag acpi_timer_eh;
+
+static u_int acpi_timer_frequency = 14318182 / 4;
+
+static void acpi_timer_identify(driver_t *driver, device_t parent);
+static int acpi_timer_probe(device_t dev);
+static int acpi_timer_attach(device_t dev);
+static void acpi_timer_resume_handler(struct timecounter *);
+static void acpi_timer_suspend_handler(struct timecounter *);
+static u_int acpi_timer_get_timecount(struct timecounter *tc);
+static u_int acpi_timer_get_timecount_safe(struct timecounter *tc);
+static int acpi_timer_sysctl_freq(SYSCTL_HANDLER_ARGS);
+static void acpi_timer_boot_test(void);
+
+static int acpi_timer_test(void);
+
+static device_method_t acpi_timer_methods[] = {
+ DEVMETHOD(device_identify, acpi_timer_identify),
+ DEVMETHOD(device_probe, acpi_timer_probe),
+ DEVMETHOD(device_attach, acpi_timer_attach),
+
+ {0, 0}
+};
+
+static driver_t acpi_timer_driver = {
+ "acpi_timer",
+ acpi_timer_methods,
+ 0,
+};
+
+static devclass_t acpi_timer_devclass;
+DRIVER_MODULE(acpi_timer, acpi, acpi_timer_driver, acpi_timer_devclass, 0, 0);
+MODULE_DEPEND(acpi_timer, acpi, 1, 1, 1);
+
+static struct timecounter acpi_timer_timecounter = {
+ acpi_timer_get_timecount_safe, /* get_timecount function */
+ 0, /* no poll_pps */
+ 0, /* no default counter_mask */
+ 0, /* no default frequency */
+ "ACPI", /* name */
+ -1 /* quality (chosen later) */
+};
+
+/* Read the free-running PM timer counter via the mapped bus handle. */
+static __inline uint32_t
+acpi_timer_read(void)
+{
+
+ return (bus_space_read_4(acpi_timer_bst, acpi_timer_bsh, 0));
+}
+
+/*
+ * Locate the ACPI timer using the FADT, set up and allocate the I/O resources
+ * we will be using.
+ *
+ * Only a single acpi_timer0 device is ever created; the resource range is
+ * recorded here and actually allocated in probe/attach.
+ */
+static void
+acpi_timer_identify(driver_t *driver, device_t parent)
+{
+ device_t dev;
+ u_long rlen, rstart;
+ int rid, rtype;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ /* Skip if disabled by hint/quirk or already identified. */
+ if (acpi_disabled("timer") || (acpi_quirks & ACPI_Q_TIMER) ||
+ acpi_timer_dev)
+ return_VOID;
+
+ if ((dev = BUS_ADD_CHILD(parent, 2, "acpi_timer", 0)) == NULL) {
+ device_printf(parent, "could not add acpi_timer0\n");
+ return_VOID;
+ }
+ acpi_timer_dev = dev;
+
+ /* The timer block may live in memory or I/O port space. */
+ switch (AcpiGbl_FADT.XPmTimerBlock.SpaceId) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ rtype = SYS_RES_MEMORY;
+ break;
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ rtype = SYS_RES_IOPORT;
+ break;
+ default:
+ return_VOID;
+ }
+ rid = 0;
+ rlen = AcpiGbl_FADT.PmTimerLength;
+ rstart = AcpiGbl_FADT.XPmTimerBlock.Address;
+ if (bus_set_resource(dev, rtype, rid, rstart, rlen))
+ device_printf(dev, "couldn't set resource (%s 0x%lx+0x%lx)\n",
+ (rtype == SYS_RES_IOPORT) ? "port" : "mem", rstart, rlen);
+ return_VOID;
+}
+
+/*
+ * Probe: map the timer register, run consistency tests on the counter,
+ * and register either the "ACPI-fast" or "ACPI-safe" timecounter based
+ * on the results.  The resource is released again at the end; attach
+ * re-allocates it.
+ */
+static int
+acpi_timer_probe(device_t dev)
+{
+ char desc[40];
+ int i, j, rid, rtype;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ if (dev != acpi_timer_dev)
+ return (ENXIO);
+
+ switch (AcpiGbl_FADT.XPmTimerBlock.SpaceId) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ rtype = SYS_RES_MEMORY;
+ break;
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ rtype = SYS_RES_IOPORT;
+ break;
+ default:
+ return (ENXIO);
+ }
+ rid = 0;
+ acpi_timer_reg = bus_alloc_resource_any(dev, rtype, &rid, RF_ACTIVE);
+ if (acpi_timer_reg == NULL) {
+ device_printf(dev, "couldn't allocate resource (%s 0x%lx)\n",
+ (rtype == SYS_RES_IOPORT) ? "port" : "mem",
+ (u_long)AcpiGbl_FADT.XPmTimerBlock.Address);
+ return (ENXIO);
+ }
+ acpi_timer_bsh = rman_get_bushandle(acpi_timer_reg);
+ acpi_timer_bst = rman_get_bustag(acpi_timer_reg);
+ /* Counter is 32 or 24 bits wide depending on the FADT flag. */
+ if (AcpiGbl_FADT.Flags & ACPI_FADT_32BIT_TIMER)
+ acpi_timer_timecounter.tc_counter_mask = 0xffffffff;
+ else
+ acpi_timer_timecounter.tc_counter_mask = 0x00ffffff;
+ acpi_timer_timecounter.tc_frequency = acpi_timer_frequency;
+ if (testenv("debug.acpi.timer_test"))
+ acpi_timer_boot_test();
+
+ /*
+ * If all tests of the counter succeed, use the ACPI-fast method. If
+ * at least one failed, default to using the safe routine, which reads
+ * the timer multiple times to get a consistent value before returning.
+ */
+ j = 0;
+ if (bootverbose)
+ printf("ACPI timer:");
+ for (i = 0; i < 10; i++)
+ j += acpi_timer_test();
+ if (bootverbose)
+ printf(" -> %d\n", j);
+ if (j == 10) {
+ acpi_timer_timecounter.tc_name = "ACPI-fast";
+ acpi_timer_timecounter.tc_get_timecount = acpi_timer_get_timecount;
+ acpi_timer_timecounter.tc_quality = 900;
+ } else {
+ acpi_timer_timecounter.tc_name = "ACPI-safe";
+ acpi_timer_timecounter.tc_get_timecount = acpi_timer_get_timecount_safe;
+ acpi_timer_timecounter.tc_quality = 850;
+ }
+ tc_init(&acpi_timer_timecounter);
+
+ /* Use a bounded formatter; desc is a small fixed-size stack buffer. */
+ snprintf(desc, sizeof(desc), "%d-bit timer at %u.%06uMHz",
+ (AcpiGbl_FADT.Flags & ACPI_FADT_32BIT_TIMER) != 0 ? 32 : 24,
+ acpi_timer_frequency / 1000000, acpi_timer_frequency % 1000000);
+ device_set_desc_copy(dev, desc);
+
+ /* Release the resource, we'll allocate it again during attach. */
+ bus_release_resource(dev, rtype, rid, acpi_timer_reg);
+ return (0);
+}
+
+/*
+ * Attach: re-allocate the timer register resource (released at the end
+ * of probe) and hook the suspend event so the timecounter can be swapped
+ * across a suspend/resume cycle.
+ */
+static int
+acpi_timer_attach(device_t dev)
+{
+ int rid, rtype;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ switch (AcpiGbl_FADT.XPmTimerBlock.SpaceId) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ rtype = SYS_RES_MEMORY;
+ break;
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ rtype = SYS_RES_IOPORT;
+ break;
+ default:
+ return (ENXIO);
+ }
+ rid = 0;
+ acpi_timer_reg = bus_alloc_resource_any(dev, rtype, &rid, RF_ACTIVE);
+ if (acpi_timer_reg == NULL)
+ return (ENXIO);
+ acpi_timer_bsh = rman_get_bushandle(acpi_timer_reg);
+ acpi_timer_bst = rman_get_bustag(acpi_timer_reg);
+
+ /* Register suspend event handler. */
+ if (EVENTHANDLER_REGISTER(power_suspend, acpi_timer_suspend_handler,
+ &acpi_timer_timecounter, EVENTHANDLER_PRI_LAST) == NULL)
+ device_printf(dev, "failed to register suspend event handler\n");
+
+ return (0);
+}
+
+/*
+ * Resume handler: restore the timecounter that was active before suspend
+ * (passed as newtc by the suspend handler's EVENTHANDLER_REGISTER call).
+ * The counter is read twice to prime it before switching back.
+ */
+static void
+acpi_timer_resume_handler(struct timecounter *newtc)
+{
+ struct timecounter *tc;
+
+ tc = timecounter;
+ if (tc != newtc) {
+ if (bootverbose)
+ device_printf(acpi_timer_dev,
+ "restoring timecounter, %s -> %s\n",
+ tc->tc_name, newtc->tc_name);
+ (void)newtc->tc_get_timecount(newtc);
+ (void)newtc->tc_get_timecount(newtc);
+ timecounter = newtc;
+ }
+}
+
+/*
+ * Suspend handler: switch the system timecounter to the ACPI timer for
+ * the duration of suspend (other counters may stop across S-states),
+ * remembering the previous counter so the matching resume handler can
+ * restore it.
+ */
+static void
+acpi_timer_suspend_handler(struct timecounter *newtc)
+{
+ struct timecounter *tc;
+
+ /* Deregister existing resume event handler. */
+ if (acpi_timer_eh != NULL) {
+ EVENTHANDLER_DEREGISTER(power_resume, acpi_timer_eh);
+ acpi_timer_eh = NULL;
+ }
+
+ KASSERT(newtc == &acpi_timer_timecounter,
+ ("acpi_timer_suspend_handler: wrong timecounter"));
+
+ tc = timecounter;
+ if (tc != newtc) {
+ if (bootverbose)
+ device_printf(acpi_timer_dev,
+ "switching timecounter, %s -> %s\n",
+ tc->tc_name, newtc->tc_name);
+ /* Prime the ACPI counter with two reads before switching. */
+ (void)acpi_timer_read();
+ (void)acpi_timer_read();
+ timecounter = newtc;
+ /* Arrange for the old counter (tc) to be restored on resume. */
+ acpi_timer_eh = EVENTHANDLER_REGISTER(power_resume,
+ acpi_timer_resume_handler, tc, EVENTHANDLER_PRI_LAST);
+ }
+}
+
+/*
+ * Fetch current time value from reliable hardware.
+ * Used when acpi_timer_test() passed and a single read is trustworthy.
+ */
+static u_int
+acpi_timer_get_timecount(struct timecounter *tc)
+{
+ return (acpi_timer_read());
+}
+
+/*
+ * Fetch current time value from hardware that may not correctly
+ * latch the counter.  Read until three consecutive samples are
+ * monotonic and return the middle one; this guards against a bad
+ * read in either direction, which a two-sample monotonicity check
+ * alone would not.
+ */
+static u_int
+acpi_timer_get_timecount_safe(struct timecounter *tc)
+{
+ u_int prev, cur, next;
+
+ cur = acpi_timer_read();
+ next = acpi_timer_read();
+ do {
+ prev = cur;
+ cur = next;
+ next = acpi_timer_read();
+ } while (prev > cur || cur > next);
+
+ return (cur);
+}
+
+/*
+ * Timecounter frequency adjustment interface (machdep.acpi_timer_freq).
+ * Allows overriding the assumed 3.579545 MHz PM timer rate.
+ */
+static int
+acpi_timer_sysctl_freq(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ u_int freq;
+
+ /* A zero frequency means the timer was never initialized. */
+ if (acpi_timer_timecounter.tc_frequency == 0)
+ return (EOPNOTSUPP);
+ freq = acpi_timer_frequency;
+ error = sysctl_handle_int(oidp, &freq, 0, req);
+ if (error == 0 && req->newptr != NULL) {
+ acpi_timer_frequency = freq;
+ acpi_timer_timecounter.tc_frequency = acpi_timer_frequency;
+ }
+
+ return (error);
+}
+
+SYSCTL_PROC(_machdep, OID_AUTO, acpi_timer_freq, CTLTYPE_INT | CTLFLAG_RW,
+ 0, sizeof(u_int), acpi_timer_sysctl_freq, "I", "ACPI timer frequency");
+
+/*
+ * Some ACPI timers are known or believed to suffer from implementation
+ * problems which can lead to erroneous values being read. This function
+ * tests for consistent results from the timer and returns 1 if it believes
+ * the timer is consistent, otherwise it returns 0.
+ *
+ * It appears the cause is that the counter is not latched to the PCI bus
+ * clock when read:
+ *
+ * ] 20. ACPI Timer Errata
+ * ]
+ * ] Problem: The power management timer may return improper result when
+ * ] read. Although the timer value settles properly after incrementing,
+ * ] while incrementing there is a 3nS window every 69.8nS where the
+ * ] timer value is indeterminate (a 4.2% chance that the data will be
+ * ] incorrect when read). As a result, the ACPI free running count up
+ * ] timer specification is violated due to erroneous reads. Implication:
+ * ] System hangs due to the "inaccuracy" of the timer when used by
+ * ] software for time critical events and delays.
+ * ]
+ * ] Workaround: Read the register twice and compare.
+ * ] Status: This will not be fixed in the PIIX4 or PIIX4E, it is fixed
+ * ] in the PIIX4M.
+ */
+#define N 2000
+/*
+ * Run N back-to-back reads of the counter with interrupts disabled and
+ * check that the deltas are plausible.  Returns 1 if the timer looks
+ * consistent, 0 otherwise.  The forward declaration uses (void), so the
+ * definition does too rather than an old-style empty parameter list.
+ */
+static int
+acpi_timer_test(void)
+{
+ uint32_t last, this;
+ int delta, max, max2, min, n;
+ register_t s;
+
+ min = INT32_MAX;
+ max = max2 = 0;
+
+ /* Test the timer with interrupts disabled to get accurate results. */
+ s = intr_disable();
+ last = acpi_timer_read();
+ for (n = 0; n < N; n++) {
+ this = acpi_timer_read();
+ delta = acpi_TimerDelta(this, last);
+ /* Track the smallest and two largest deltas seen. */
+ if (delta > max) {
+ max2 = max;
+ max = delta;
+ } else if (delta > max2)
+ max2 = delta;
+ if (delta < min)
+ min = delta;
+ last = this;
+ }
+ intr_restore(s);
+
+ /*
+ * Fail on excessive jitter (ignored under virtualization, where
+ * scheduling noise is expected) or on impossible delta values.
+ */
+ delta = max2 - min;
+ if ((max - min > 8 || delta > 3) && vm_guest == VM_GUEST_NO)
+ n = 0;
+ else if (min < 0 || max == 0 || max2 == 0)
+ n = 0;
+ else
+ n = 1;
+ if (bootverbose)
+ printf(" %d/%d", n, delta);
+
+ return (n);
+}
+#undef N
+
+/*
+ * Test harness for verifying ACPI timer behaviour.
+ * Boot with debug.acpi.timer_test set to invoke this.
+ *
+ * NOTE: this loops forever, continuously checking that a sliding window
+ * of three reads stays monotonic; the machine must be rebooted to exit.
+ */
+static void
+acpi_timer_boot_test(void)
+{
+ uint32_t u1, u2, u3;
+
+ u1 = acpi_timer_read();
+ u2 = acpi_timer_read();
+ u3 = acpi_timer_read();
+
+ device_printf(acpi_timer_dev, "timer test in progress, reboot to quit.\n");
+ for (;;) {
+ /*
+ * The failure case is where u3 > u1, but u2 does not fall between
+ * the two, ie. it contains garbage.
+ */
+ if (u3 > u1) {
+ if (u2 < u1 || u2 > u3)
+ device_printf(acpi_timer_dev,
+ "timer is not monotonic: 0x%08x,0x%08x,0x%08x\n",
+ u1, u2, u3);
+ }
+ u1 = u2;
+ u2 = u3;
+ u3 = acpi_timer_read();
+ }
+}
diff --git a/sys/dev/acpica/acpi_video.c b/sys/dev/acpica/acpi_video.c
new file mode 100644
index 0000000..3954892
--- /dev/null
+++ b/sys/dev/acpica/acpi_video.c
@@ -0,0 +1,1081 @@
+/*-
+ * Copyright (c) 2002-2003 Taku YAMAMOTO <taku@cent.saitama-u.ac.jp>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: acpi_vid.c,v 1.4 2003/10/13 10:07:36 taku Exp $
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/power.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+
+#include <dev/acpica/acpivar.h>
+
+/* ACPI video extension driver. */
+struct acpi_video_output {
+ ACPI_HANDLE handle;
+ UINT32 adr;
+ STAILQ_ENTRY(acpi_video_output) vo_next;
+ struct {
+ int num;
+ STAILQ_ENTRY(acpi_video_output) next;
+ } vo_unit;
+ int vo_brightness;
+ int vo_fullpower;
+ int vo_economy;
+ int vo_numlevels;
+ int *vo_levels;
+ struct sysctl_ctx_list vo_sysctl_ctx;
+ struct sysctl_oid *vo_sysctl_tree;
+};
+
+STAILQ_HEAD(acpi_video_output_queue, acpi_video_output);
+
+struct acpi_video_softc {
+ device_t device;
+ ACPI_HANDLE handle;
+ struct acpi_video_output_queue vid_outputs;
+ eventhandler_tag vid_pwr_evh;
+};
+
+/* interfaces */
+static int acpi_video_modevent(struct module*, int, void *);
+static void acpi_video_identify(driver_t *driver, device_t parent);
+static int acpi_video_probe(device_t);
+static int acpi_video_attach(device_t);
+static int acpi_video_detach(device_t);
+static int acpi_video_resume(device_t);
+static int acpi_video_shutdown(device_t);
+static void acpi_video_notify_handler(ACPI_HANDLE, UINT32, void *);
+static void acpi_video_power_profile(void *);
+static void acpi_video_bind_outputs(struct acpi_video_softc *);
+static struct acpi_video_output *acpi_video_vo_init(UINT32);
+static void acpi_video_vo_bind(struct acpi_video_output *, ACPI_HANDLE);
+static void acpi_video_vo_destroy(struct acpi_video_output *);
+static int acpi_video_vo_check_level(struct acpi_video_output *, int);
+static void acpi_video_vo_notify_handler(ACPI_HANDLE, UINT32, void *);
+static int acpi_video_vo_active_sysctl(SYSCTL_HANDLER_ARGS);
+static int acpi_video_vo_bright_sysctl(SYSCTL_HANDLER_ARGS);
+static int acpi_video_vo_presets_sysctl(SYSCTL_HANDLER_ARGS);
+static int acpi_video_vo_levels_sysctl(SYSCTL_HANDLER_ARGS);
+
+/* operations */
+static void vid_set_switch_policy(ACPI_HANDLE, UINT32);
+static int vid_enum_outputs(ACPI_HANDLE,
+ void(*)(ACPI_HANDLE, UINT32, void *), void *);
+static int vo_get_brightness_levels(ACPI_HANDLE, int **);
+static int vo_get_brightness(ACPI_HANDLE);
+static void vo_set_brightness(ACPI_HANDLE, int);
+static UINT32 vo_get_device_status(ACPI_HANDLE);
+static UINT32 vo_get_graphics_state(ACPI_HANDLE);
+static void vo_set_device_state(ACPI_HANDLE, UINT32);
+
+/* events */
+#define VID_NOTIFY_SWITCHED 0x80
+#define VID_NOTIFY_REPROBE 0x81
+#define VID_NOTIFY_CYCLE_BRN 0x85
+#define VID_NOTIFY_INC_BRN 0x86
+#define VID_NOTIFY_DEC_BRN 0x87
+#define VID_NOTIFY_ZERO_BRN 0x88
+
+/* _DOS (Enable/Disable Output Switching) argument bits */
+#define DOS_SWITCH_MASK 3
+#define DOS_SWITCH_BY_OSPM 0
+#define DOS_SWITCH_BY_BIOS 1
+#define DOS_SWITCH_LOCKED 2
+#define DOS_BRIGHTNESS_BY_OSPM (1 << 2)
+
+/* _DOD and subdev's _ADR */
+#define DOD_DEVID_MASK 0x0f00
+#define DOD_DEVID_MASK_FULL 0xffff
+#define DOD_DEVID_MASK_DISPIDX 0x000f
+#define DOD_DEVID_MASK_DISPPORT 0x00f0
+#define DOD_DEVID_MONITOR 0x0100
+#define DOD_DEVID_LCD 0x0110
+#define DOD_DEVID_TV 0x0200
+#define DOD_DEVID_EXT 0x0300
+#define DOD_DEVID_INTDFP 0x0400
+#define DOD_BIOS (1 << 16)
+#define DOD_NONVGA (1 << 17)
+#define DOD_HEAD_ID_SHIFT 18
+#define DOD_HEAD_ID_BITS 3
+#define DOD_HEAD_ID_MASK \
+ (((1 << DOD_HEAD_ID_BITS) - 1) << DOD_HEAD_ID_SHIFT)
+#define DOD_DEVID_SCHEME_STD (1 << 31)
+
+/* _BCL related constants */
+#define BCL_FULLPOWER 0
+#define BCL_ECONOMY 1
+
+/* _DCS (Device Current Status) value bits and masks. */
+#define DCS_EXISTS (1 << 0)
+#define DCS_ACTIVE (1 << 1)
+#define DCS_READY (1 << 2)
+#define DCS_FUNCTIONAL (1 << 3)
+#define DCS_ATTACHED (1 << 4)
+
+/* _DSS (Device Set Status) argument bits and masks. */
+#define DSS_INACTIVE 0
+#define DSS_ACTIVE (1 << 0)
+#define DSS_SETNEXT (1 << 30)
+#define DSS_COMMIT (1 << 31)
+
+static device_method_t acpi_video_methods[] = {
+ DEVMETHOD(device_identify, acpi_video_identify),
+ DEVMETHOD(device_probe, acpi_video_probe),
+ DEVMETHOD(device_attach, acpi_video_attach),
+ DEVMETHOD(device_detach, acpi_video_detach),
+ DEVMETHOD(device_resume, acpi_video_resume),
+ DEVMETHOD(device_shutdown, acpi_video_shutdown),
+ { 0, 0 }
+};
+
+static driver_t acpi_video_driver = {
+ "acpi_video",
+ acpi_video_methods,
+ sizeof(struct acpi_video_softc),
+};
+
+static devclass_t acpi_video_devclass;
+
+DRIVER_MODULE(acpi_video, vgapci, acpi_video_driver, acpi_video_devclass,
+ acpi_video_modevent, NULL);
+MODULE_DEPEND(acpi_video, acpi, 1, 1, 1);
+
+static struct sysctl_ctx_list acpi_video_sysctl_ctx;
+static struct sysctl_oid *acpi_video_sysctl_tree;
+static struct acpi_video_output_queue crt_units, tv_units,
+ ext_units, lcd_units, other_units;
+
+/*
+ * The 'video' lock protects the hierarchy of video output devices
+ * (the video "bus"). The 'video_output' lock protects per-output
+ * data and is equivalent to a softc lock for each video output.
+ */
+ACPI_SERIAL_DECL(video, "ACPI video");
+ACPI_SERIAL_DECL(video_output, "ACPI video output");
+static MALLOC_DEFINE(M_ACPIVIDEO, "acpivideo", "ACPI video extension");
+
+static int
+acpi_video_modevent(struct module *mod __unused, int evt, void *cookie __unused)
+{
+	int error = 0;
+
+	switch (evt) {
+	case MOD_LOAD:
+		/* Set up the shared sysctl context and per-type unit queues. */
+		sysctl_ctx_init(&acpi_video_sysctl_ctx);
+		STAILQ_INIT(&crt_units);
+		STAILQ_INIT(&tv_units);
+		STAILQ_INIT(&ext_units);
+		STAILQ_INIT(&lcd_units);
+		STAILQ_INIT(&other_units);
+		break;
+	case MOD_UNLOAD:
+		/* Tear down every sysctl node created under our context. */
+		sysctl_ctx_free(&acpi_video_sysctl_ctx);
+		acpi_video_sysctl_tree = NULL;
+		break;
+	default:
+		error = EINVAL;
+		break;
+	}
+
+	return (error);
+}
+
+static void
+acpi_video_identify(driver_t *driver, device_t parent)
+{
+
+	/* Add a single acpi_video child unless one already exists. */
+	if (device_find_child(parent, "acpi_video", -1) != NULL)
+		return;
+	device_add_child(parent, "acpi_video", -1);
+}
+
+static int
+acpi_video_probe(device_t dev)
+{
+	ACPI_HANDLE devh, h;
+	ACPI_OBJECT_TYPE t_dos;
+
+	devh = acpi_get_handle(dev);
+	/* Require both _DOD and _DOS; _DOS must be a callable method. */
+	if (acpi_disabled("video"))
+		return (ENXIO);
+	if (ACPI_FAILURE(AcpiGetHandle(devh, "_DOD", &h)))
+		return (ENXIO);
+	if (ACPI_FAILURE(AcpiGetHandle(devh, "_DOS", &h)))
+		return (ENXIO);
+	if (ACPI_FAILURE(AcpiGetType(h, &t_dos)) || t_dos != ACPI_TYPE_METHOD)
+		return (ENXIO);
+
+	device_set_desc(dev, "ACPI video extension");
+	return (0);
+}
+
+/*
+ * Attach: create the shared hw.acpi.video sysctl node (first instance
+ * only), register for device notifies and power-profile changes,
+ * enumerate the outputs listed by _DOD and claim output switching plus
+ * brightness control for the OS via _DOS.
+ */
+static int
+acpi_video_attach(device_t dev)
+{
+	struct acpi_softc *acpi_sc;
+	struct acpi_video_softc *sc;
+
+	sc = device_get_softc(dev);
+
+	acpi_sc = devclass_get_softc(devclass_find("acpi"), 0);
+	if (acpi_sc == NULL)
+		return (ENXIO);
+	ACPI_SERIAL_BEGIN(video);
+	/* The "video" sysctl node is shared by all driver instances. */
+	if (acpi_video_sysctl_tree == NULL) {
+		acpi_video_sysctl_tree = SYSCTL_ADD_NODE(&acpi_video_sysctl_ctx,
+		    SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree),
+		    OID_AUTO, "video", CTLFLAG_RD, 0,
+		    "video extension control");
+	}
+	ACPI_SERIAL_END(video);
+
+	sc->device = dev;
+	sc->handle = acpi_get_handle(dev);
+	STAILQ_INIT(&sc->vid_outputs);
+
+	AcpiInstallNotifyHandler(sc->handle, ACPI_DEVICE_NOTIFY,
+	    acpi_video_notify_handler, sc);
+	sc->vid_pwr_evh = EVENTHANDLER_REGISTER(power_profile_change,
+	    acpi_video_power_profile, sc, 0);
+
+	ACPI_SERIAL_BEGIN(video);
+	acpi_video_bind_outputs(sc);
+	ACPI_SERIAL_END(video);
+
+	/*
+	 * Notify the BIOS that we want to switch both active outputs and
+	 * brightness levels.
+	 */
+	vid_set_switch_policy(sc->handle, DOS_SWITCH_BY_OSPM |
+	    DOS_BRIGHTNESS_BY_OSPM);
+
+	/* Apply the presets matching the current power profile. */
+	acpi_video_power_profile(sc);
+
+	return (0);
+}
+
+/*
+ * Detach: hand output switching back to the BIOS, unhook the power
+ * profile and notify handlers, then destroy every output bound to
+ * this softc.
+ */
+static int
+acpi_video_detach(device_t dev)
+{
+	struct acpi_video_softc *sc;
+	struct acpi_video_output *vo, *vn;
+
+	sc = device_get_softc(dev);
+
+	vid_set_switch_policy(sc->handle, DOS_SWITCH_BY_BIOS);
+	EVENTHANDLER_DEREGISTER(power_profile_change, sc->vid_pwr_evh);
+	AcpiRemoveNotifyHandler(sc->handle, ACPI_DEVICE_NOTIFY,
+	    acpi_video_notify_handler);
+
+	ACPI_SERIAL_BEGIN(video);
+	/* _SAFE traversal: acpi_video_vo_destroy() frees the element. */
+	STAILQ_FOREACH_SAFE(vo, &sc->vid_outputs, vo_next, vn) {
+		acpi_video_vo_destroy(vo);
+	}
+	ACPI_SERIAL_END(video);
+
+	return (0);
+}
+
+/*
+ * Resume: re-apply the current brightness level to every active
+ * internal panel (legacy LCD or integrated digital flat panel), since
+ * firmware may have reset it across the sleep transition.
+ */
+static int
+acpi_video_resume(device_t dev)
+{
+	struct acpi_video_softc *sc;
+	struct acpi_video_output *vo, *vn;
+	int level;
+
+	sc = device_get_softc(dev);
+
+	/* Restore brightness level */
+	ACPI_SERIAL_BEGIN(video);
+	ACPI_SERIAL_BEGIN(video_output);
+	STAILQ_FOREACH_SAFE(vo, &sc->vid_outputs, vo_next, vn) {
+		/* Only internal panels carry a brightness setting. */
+		if ((vo->adr & DOD_DEVID_MASK_FULL) != DOD_DEVID_LCD &&
+		    (vo->adr & DOD_DEVID_MASK) != DOD_DEVID_INTDFP)
+			continue;
+
+		/* Skip outputs that are currently switched off. */
+		if ((vo_get_device_status(vo->handle) & DCS_ACTIVE) == 0)
+			continue;
+
+		/* Rewrite whatever level _BQC reports, if it is sane. */
+		level = vo_get_brightness(vo->handle);
+		if (level != -1)
+			vo_set_brightness(vo->handle, level);
+	}
+	ACPI_SERIAL_END(video_output);
+	ACPI_SERIAL_END(video);
+
+	return (0);
+}
+
+static int
+acpi_video_shutdown(device_t dev)
+{
+	struct acpi_video_softc *sc = device_get_softc(dev);
+
+	/* Return output-switching control to the BIOS before going down. */
+	vid_set_switch_policy(sc->handle, DOS_SWITCH_BY_BIOS);
+	return (0);
+}
+
+/*
+ * Handle notifies delivered to the video bus device.
+ *
+ * VID_NOTIFY_SWITCHED: the user requested an output switch.  For each
+ * output whose next-state hint (_DGS) differs from its current state
+ * (_DCS), program the new state with _DSS; DSS_COMMIT is OR'ed in only
+ * for the last changed output so the whole switch takes effect at once.
+ *
+ * VID_NOTIFY_REPROBE: the set of outputs may have changed (e.g. after
+ * docking).  Re-run _DOD enumeration and destroy any output that was
+ * not re-bound to a handle.
+ */
+static void
+acpi_video_notify_handler(ACPI_HANDLE handle, UINT32 notify, void *context)
+{
+	struct acpi_video_softc *sc;
+	struct acpi_video_output *vo, *vo_tmp;
+	ACPI_HANDLE lasthand;
+	UINT32 dcs, dss, dss_p;
+
+	sc = (struct acpi_video_softc *)context;
+
+	switch (notify) {
+	case VID_NOTIFY_SWITCHED:
+		dss_p = 0;
+		lasthand = NULL;
+		ACPI_SERIAL_BEGIN(video);
+		ACPI_SERIAL_BEGIN(video_output);
+		STAILQ_FOREACH(vo, &sc->vid_outputs, vo_next) {
+			dss = vo_get_graphics_state(vo->handle);
+			dcs = vo_get_device_status(vo->handle);
+			/* An output that is not ready is forced inactive. */
+			if (!(dcs & DCS_READY))
+				dss = DSS_INACTIVE;
+			/* Only touch outputs whose state actually changes. */
+			if (((dcs & DCS_ACTIVE) && dss == DSS_INACTIVE) ||
+			    (!(dcs & DCS_ACTIVE) && dss == DSS_ACTIVE)) {
+				/* Defer the previous _DSS so we can commit last. */
+				if (lasthand != NULL)
+					vo_set_device_state(lasthand, dss_p);
+				dss_p = dss;
+				lasthand = vo->handle;
+			}
+		}
+		if (lasthand != NULL)
+			vo_set_device_state(lasthand, dss_p|DSS_COMMIT);
+		ACPI_SERIAL_END(video_output);
+		ACPI_SERIAL_END(video);
+		break;
+	case VID_NOTIFY_REPROBE:
+		/* Clear all handles; bind_outputs refills those still present. */
+		ACPI_SERIAL_BEGIN(video);
+		STAILQ_FOREACH(vo, &sc->vid_outputs, vo_next)
+			vo->handle = NULL;
+		acpi_video_bind_outputs(sc);
+		STAILQ_FOREACH_SAFE(vo, &sc->vid_outputs, vo_next, vo_tmp) {
+			if (vo->handle == NULL) {
+				STAILQ_REMOVE(&sc->vid_outputs, vo,
+				    acpi_video_output, vo_next);
+				acpi_video_vo_destroy(vo);
+			}
+		}
+		ACPI_SERIAL_END(video);
+		break;
+	default:
+		device_printf(sc->device, "unknown notify event 0x%x\n",
+		    notify);
+	}
+}
+
+/*
+ * Power-profile change handler: on a switch between performance and
+ * economy, push the matching preset brightness to every output that
+ * supports levels and whose brightness is not pinned by the user
+ * (vo_brightness == -1 means "follow the profile presets").
+ */
+static void
+acpi_video_power_profile(void *context)
+{
+	int state;
+	struct acpi_video_softc *sc;
+	struct acpi_video_output *vo;
+
+	sc = context;
+	state = power_profile_get_state();
+	if (state != POWER_PROFILE_PERFORMANCE &&
+	    state != POWER_PROFILE_ECONOMY)
+		return;
+
+	ACPI_SERIAL_BEGIN(video);
+	ACPI_SERIAL_BEGIN(video_output);
+	STAILQ_FOREACH(vo, &sc->vid_outputs, vo_next) {
+		if (vo->vo_levels != NULL && vo->vo_brightness == -1)
+			vo_set_brightness(vo->handle,
+			    state == POWER_PROFILE_ECONOMY ?
+			    vo->vo_economy : vo->vo_fullpower);
+	}
+	ACPI_SERIAL_END(video_output);
+	ACPI_SERIAL_END(video);
+}
+
+static void
+acpi_video_bind_outputs_subr(ACPI_HANDLE handle, UINT32 adr, void *context)
+{
+	struct acpi_video_softc *sc = context;
+	struct acpi_video_output *out;
+
+	ACPI_SERIAL_ASSERT(video);
+
+	/* Re-bind an already-known output whose _ADR matches. */
+	STAILQ_FOREACH(out, &sc->vid_outputs, vo_next) {
+		if (out->adr == adr) {
+			acpi_video_vo_bind(out, handle);
+			return;
+		}
+	}
+
+	/* Otherwise create a fresh output object and queue it. */
+	out = acpi_video_vo_init(adr);
+	if (out == NULL)
+		return;
+	acpi_video_vo_bind(out, handle);
+	STAILQ_INSERT_TAIL(&sc->vid_outputs, out, vo_next);
+}
+
+static void
+acpi_video_bind_outputs(struct acpi_video_softc *sc)
+{
+
+	ACPI_SERIAL_ASSERT(video);
+	/* Walk _DOD and (re)bind every reported output to this softc. */
+	vid_enum_outputs(sc->handle, acpi_video_bind_outputs_subr, sc);
+}
+
+/*
+ * Allocate and initialize an output object for the _DOD entry 'adr'.
+ *
+ * The output is classified by its device ID into a per-type unit queue
+ * (crt/lcd/tv/ext/out) which determines the sysctl node name, e.g.
+ * hw.acpi.video.lcd0.  The fullpower/economy presets may be seeded from
+ * the hw.acpi.video.<name>.{fullpower,economy} tunables.  Returns NULL
+ * if allocation fails.
+ */
+static struct acpi_video_output *
+acpi_video_vo_init(UINT32 adr)
+{
+	struct acpi_video_output *vn, *vo, *vp;
+	int n, x;
+	char name[8], env[32];
+	const char *type, *desc;
+	struct acpi_video_output_queue *voqh;
+
+	ACPI_SERIAL_ASSERT(video);
+
+	switch (adr & DOD_DEVID_MASK) {
+	case DOD_DEVID_MONITOR:
+		if ((adr & DOD_DEVID_MASK_FULL) == DOD_DEVID_LCD) {
+			/* DOD_DEVID_LCD is a common, backward compatible ID */
+			desc = "Internal/Integrated Digital Flat Panel";
+			type = "lcd";
+			voqh = &lcd_units;
+		} else {
+			desc = "VGA CRT or VESA Compatible Analog Monitor";
+			type = "crt";
+			voqh = &crt_units;
+		}
+		break;
+	case DOD_DEVID_TV:
+		desc = "TV/HDTV or Analog-Video Monitor";
+		type = "tv";
+		voqh = &tv_units;
+		break;
+	case DOD_DEVID_EXT:
+		desc = "External Digital Monitor";
+		type = "ext";
+		voqh = &ext_units;
+		break;
+	case DOD_DEVID_INTDFP:
+		desc = "Internal/Integrated Digital Flat Panel";
+		type = "lcd";
+		voqh = &lcd_units;
+		break;
+	default:
+		desc = "unknown output";
+		type = "out";
+		voqh = &other_units;
+	}
+
+	/* Find the first free unit number on this type's (sorted) queue. */
+	n = 0;
+	vp = NULL;
+	STAILQ_FOREACH(vn, voqh, vo_unit.next) {
+		if (vn->vo_unit.num != n)
+			break;
+		vp = vn;
+		n++;
+	}
+
+	snprintf(name, sizeof(name), "%s%d", type, n);
+
+	vo = malloc(sizeof(*vo), M_ACPIVIDEO, M_NOWAIT);
+	if (vo != NULL) {
+		vo->handle = NULL;
+		vo->adr = adr;
+		vo->vo_unit.num = n;
+		vo->vo_brightness = -1;
+		vo->vo_fullpower = -1;	/* TODO: override with tunables */
+		vo->vo_economy = -1;
+		vo->vo_numlevels = 0;
+		vo->vo_levels = NULL;
+		/*
+		 * malloc() without M_ZERO: vo_sysctl_tree must be set here,
+		 * or the "!= NULL" test below reads uninitialized memory
+		 * whenever the global sysctl tree is absent.
+		 */
+		vo->vo_sysctl_tree = NULL;
+		snprintf(env, sizeof(env), "hw.acpi.video.%s.fullpower", name);
+		if (getenv_int(env, &x))
+			vo->vo_fullpower = x;
+		snprintf(env, sizeof(env), "hw.acpi.video.%s.economy", name);
+		if (getenv_int(env, &x))
+			vo->vo_economy = x;
+
+		sysctl_ctx_init(&vo->vo_sysctl_ctx);
+		if (vp != NULL)
+			STAILQ_INSERT_AFTER(voqh, vp, vo, vo_unit.next);
+		else
+			STAILQ_INSERT_TAIL(voqh, vo, vo_unit.next);
+		if (acpi_video_sysctl_tree != NULL)
+			vo->vo_sysctl_tree =
+			    SYSCTL_ADD_NODE(&vo->vo_sysctl_ctx,
+				SYSCTL_CHILDREN(acpi_video_sysctl_tree),
+				OID_AUTO, name, CTLFLAG_RD, 0, desc);
+		if (vo->vo_sysctl_tree != NULL) {
+			SYSCTL_ADD_PROC(&vo->vo_sysctl_ctx,
+			    SYSCTL_CHILDREN(vo->vo_sysctl_tree),
+			    OID_AUTO, "active",
+			    CTLTYPE_INT|CTLFLAG_RW, vo, 0,
+			    acpi_video_vo_active_sysctl, "I",
+			    "current activity of this device");
+			SYSCTL_ADD_PROC(&vo->vo_sysctl_ctx,
+			    SYSCTL_CHILDREN(vo->vo_sysctl_tree),
+			    OID_AUTO, "brightness",
+			    CTLTYPE_INT|CTLFLAG_RW, vo, 0,
+			    acpi_video_vo_bright_sysctl, "I",
+			    "current brightness level");
+			SYSCTL_ADD_PROC(&vo->vo_sysctl_ctx,
+			    SYSCTL_CHILDREN(vo->vo_sysctl_tree),
+			    OID_AUTO, "fullpower",
+			    CTLTYPE_INT|CTLFLAG_RW, vo,
+			    POWER_PROFILE_PERFORMANCE,
+			    acpi_video_vo_presets_sysctl, "I",
+			    "preset level for full power mode");
+			SYSCTL_ADD_PROC(&vo->vo_sysctl_ctx,
+			    SYSCTL_CHILDREN(vo->vo_sysctl_tree),
+			    OID_AUTO, "economy",
+			    CTLTYPE_INT|CTLFLAG_RW, vo,
+			    POWER_PROFILE_ECONOMY,
+			    acpi_video_vo_presets_sysctl, "I",
+			    "preset level for economy mode");
+			SYSCTL_ADD_PROC(&vo->vo_sysctl_ctx,
+			    SYSCTL_CHILDREN(vo->vo_sysctl_tree),
+			    OID_AUTO, "levels",
+			    CTLTYPE_INT | CTLFLAG_RD, vo, 0,
+			    acpi_video_vo_levels_sysctl, "I",
+			    "supported brightness levels");
+		} else
+			printf("%s: sysctl node creation failed\n", type);
+	} else
+		printf("%s: softc allocation failed\n", type);
+
+	if (bootverbose) {
+		printf("found %s(%x)", desc, adr & DOD_DEVID_MASK_FULL);
+		printf(", idx#%x", adr & DOD_DEVID_MASK_DISPIDX);
+		printf(", port#%x", (adr & DOD_DEVID_MASK_DISPPORT) >> 4);
+		if (adr & DOD_BIOS)
+			printf(", detectable by BIOS");
+		if (adr & DOD_NONVGA)
+			printf(" (Non-VGA output device whose power "
+			    "is related to the VGA device)");
+		printf(", head #%d\n",
+			(adr & DOD_HEAD_ID_MASK) >> DOD_HEAD_ID_SHIFT);
+	}
+	return (vo);
+}
+
+/*
+ * Bind a namespace handle to an output object: refresh the _BCL level
+ * list, validate (or default) the profile presets against it, and
+ * install the per-output notify handler when brightness is supported.
+ */
+static void
+acpi_video_vo_bind(struct acpi_video_output *vo, ACPI_HANDLE handle)
+{
+
+	ACPI_SERIAL_BEGIN(video_output);
+	/* Drop any stale level list from a previous binding. */
+	if (vo->vo_levels != NULL)
+		AcpiOsFree(vo->vo_levels);
+	vo->handle = handle;
+	vo->vo_numlevels = vo_get_brightness_levels(handle, &vo->vo_levels);
+	if (vo->vo_numlevels >= 2) {
+		/* Presets must be levels the hardware actually reports. */
+		if (vo->vo_fullpower == -1
+		    || acpi_video_vo_check_level(vo, vo->vo_fullpower) != 0)
+			/* XXX - can't deal with rebinding... */
+			vo->vo_fullpower = vo->vo_levels[BCL_FULLPOWER];
+		if (vo->vo_economy == -1
+		    || acpi_video_vo_check_level(vo, vo->vo_economy) != 0)
+			/* XXX - see above. */
+			vo->vo_economy = vo->vo_levels[BCL_ECONOMY];
+	}
+	/* Brightness hotkey notifies arrive on the output device itself. */
+	if (vo->vo_levels != NULL)
+		AcpiInstallNotifyHandler(handle, ACPI_DEVICE_NOTIFY,
+		    acpi_video_vo_notify_handler, vo);
+	ACPI_SERIAL_END(video_output);
+}
+
+/*
+ * Tear down an output: free its sysctl subtree, remove the notify
+ * handler and level list, unlink it from its per-type unit queue and
+ * free the structure.  Caller holds the 'video' serial lock.
+ */
+static void
+acpi_video_vo_destroy(struct acpi_video_output *vo)
+{
+	struct acpi_video_output_queue *voqh;
+
+	ACPI_SERIAL_ASSERT(video);
+	if (vo->vo_sysctl_tree != NULL) {
+		vo->vo_sysctl_tree = NULL;
+		sysctl_ctx_free(&vo->vo_sysctl_ctx);
+	}
+	if (vo->vo_levels != NULL) {
+		AcpiRemoveNotifyHandler(vo->handle, ACPI_DEVICE_NOTIFY,
+		    acpi_video_vo_notify_handler);
+		AcpiOsFree(vo->vo_levels);
+	}
+
+	/*
+	 * Select the same unit queue acpi_video_vo_init() inserted this
+	 * output on.  Note that a legacy LCD reports class
+	 * DOD_DEVID_MONITOR with full ID DOD_DEVID_LCD and lives on
+	 * lcd_units, not crt_units; removing it from the wrong queue
+	 * would corrupt both lists.
+	 */
+	switch (vo->adr & DOD_DEVID_MASK) {
+	case DOD_DEVID_MONITOR:
+		if ((vo->adr & DOD_DEVID_MASK_FULL) == DOD_DEVID_LCD)
+			voqh = &lcd_units;
+		else
+			voqh = &crt_units;
+		break;
+	case DOD_DEVID_TV:
+		voqh = &tv_units;
+		break;
+	case DOD_DEVID_EXT:
+		voqh = &ext_units;
+		break;
+	case DOD_DEVID_INTDFP:
+		voqh = &lcd_units;
+		break;
+	default:
+		voqh = &other_units;
+	}
+	STAILQ_REMOVE(voqh, vo, acpi_video_output, vo_unit.next);
+	free(vo, M_ACPIVIDEO);
+}
+
+static int
+acpi_video_vo_check_level(struct acpi_video_output *vo, int level)
+{
+	int idx;
+
+	ACPI_SERIAL_ASSERT(video_output);
+	if (vo->vo_levels == NULL)
+		return (ENODEV);
+	/* Accept only levels that _BCL actually reported. */
+	for (idx = 0; idx < vo->vo_numlevels; idx++) {
+		if (vo->vo_levels[idx] == level)
+			return (0);
+	}
+	return (EINVAL);
+}
+
+/*
+ * Handle brightness hotkey notifies on an individual output: cycle,
+ * step up, step down, or jump to minimum.  The next level is computed
+ * from the _BCL table (via vo_levels) and programmed with _BCM.
+ * Entries 0 and 1 of vo_levels are the fullpower/economy presets, so
+ * cycling operates on indices 2 and up.
+ */
+static void
+acpi_video_vo_notify_handler(ACPI_HANDLE handle, UINT32 notify, void *context)
+{
+	struct acpi_video_output *vo;
+	int i, j, level, new_level;
+
+	vo = context;
+	ACPI_SERIAL_BEGIN(video_output);
+	/* Ignore notifies for a handle we are no longer bound to. */
+	if (vo->handle != handle)
+		goto out;
+
+	switch (notify) {
+	case VID_NOTIFY_CYCLE_BRN:
+		/* Cycling needs at least two selectable levels past the presets. */
+		if (vo->vo_numlevels <= 3)
+			goto out;
+		/* FALLTHROUGH */
+	case VID_NOTIFY_INC_BRN:
+	case VID_NOTIFY_DEC_BRN:
+	case VID_NOTIFY_ZERO_BRN:
+		if (vo->vo_levels == NULL)
+			goto out;
+		level = vo_get_brightness(handle);
+		if (level < 0)
+			goto out;
+		break;
+	default:
+		printf("unknown notify event 0x%x from %s\n",
+		    notify, acpi_name(handle));
+		goto out;
+	}
+
+	new_level = level;
+	switch (notify) {
+	case VID_NOTIFY_CYCLE_BRN:
+		/* Advance to the next listed level, wrapping to index 2. */
+		for (i = 2; i < vo->vo_numlevels; i++)
+			if (vo->vo_levels[i] == level) {
+				new_level = vo->vo_numlevels > i + 1 ?
+				    vo->vo_levels[i + 1] : vo->vo_levels[2];
+				break;
+			}
+		break;
+	case VID_NOTIFY_INC_BRN:
+	case VID_NOTIFY_DEC_BRN:
+		/* Pick the closest listed level above (INC) or below (DEC). */
+		for (i = 0; i < vo->vo_numlevels; i++) {
+			j = vo->vo_levels[i];
+			if (notify == VID_NOTIFY_INC_BRN) {
+				if (j > level &&
+				    (j < new_level || level == new_level))
+					new_level = j;
+			} else {
+				if (j < level &&
+				    (j > new_level || level == new_level))
+					new_level = j;
+			}
+		}
+		break;
+	case VID_NOTIFY_ZERO_BRN:
+		/* Jump to level 0, but only if the table lists it. */
+		for (i = 0; i < vo->vo_numlevels; i++)
+			if (vo->vo_levels[i] == 0) {
+				new_level = 0;
+				break;
+			}
+		break;
+	}
+	if (new_level != level) {
+		vo_set_brightness(handle, new_level);
+		vo->vo_brightness = new_level;
+	}
+
+out:
+	ACPI_SERIAL_END(video_output);
+}
+
+/* ARGSUSED */
+/*
+ * sysctl handler for hw.acpi.video.<out>.active: reads whether the
+ * output is currently active (_DCS) and allows switching it on or off
+ * via _DSS (with DSS_COMMIT so the change takes effect immediately).
+ */
+static int
+acpi_video_vo_active_sysctl(SYSCTL_HANDLER_ARGS)
+{
+	struct acpi_video_output *vo;
+	int state, err;
+
+	vo = (struct acpi_video_output *)arg1;
+	if (vo->handle == NULL)
+		return (ENXIO);
+	ACPI_SERIAL_BEGIN(video_output);
+	state = (vo_get_device_status(vo->handle) & DCS_ACTIVE) ? 1 : 0;
+	err = sysctl_handle_int(oidp, &state, 0, req);
+	/* Read-only access or copy-in error: nothing further to do. */
+	if (err != 0 || req->newptr == NULL)
+		goto out;
+	vo_set_device_state(vo->handle,
+	    DSS_COMMIT | (state ? DSS_ACTIVE : DSS_INACTIVE));
+out:
+	ACPI_SERIAL_END(video_output);
+	return (err);
+}
+
+/* ARGSUSED */
+/*
+ * sysctl handler for hw.acpi.video.<out>.brightness.  Reading returns
+ * the pinned level, or the active profile's preset when brightness is
+ * following the profile (-1).  Writing a level listed by _BCL pins the
+ * brightness there; writing -1 reverts to following the presets.
+ */
+static int
+acpi_video_vo_bright_sysctl(SYSCTL_HANDLER_ARGS)
+{
+	struct acpi_video_output *vo;
+	int level, preset, err;
+
+	vo = (struct acpi_video_output *)arg1;
+	ACPI_SERIAL_BEGIN(video_output);
+	if (vo->handle == NULL) {
+		err = ENXIO;
+		goto out;
+	}
+	if (vo->vo_levels == NULL) {
+		err = ENODEV;
+		goto out;
+	}
+
+	/* Report the preset when brightness tracks the power profile. */
+	preset = (power_profile_get_state() == POWER_PROFILE_ECONOMY) ?
+	    vo->vo_economy : vo->vo_fullpower;
+	level = vo->vo_brightness;
+	if (level == -1)
+		level = preset;
+
+	err = sysctl_handle_int(oidp, &level, 0, req);
+	if (err != 0 || req->newptr == NULL)
+		goto out;
+	if (level < -1 || level > 100) {
+		err = EINVAL;
+		goto out;
+	}
+
+	/* A concrete level must be one the hardware reported via _BCL. */
+	if (level != -1 && (err = acpi_video_vo_check_level(vo, level)))
+		goto out;
+	vo->vo_brightness = level;
+	vo_set_brightness(vo->handle, (level == -1) ? preset : level);
+
+out:
+	ACPI_SERIAL_END(video_output);
+	return (err);
+}
+
+/*
+ * sysctl handler for the fullpower/economy presets; arg2 selects which
+ * (POWER_PROFILE_PERFORMANCE or POWER_PROFILE_ECONOMY).  Writing -1
+ * restores the corresponding _BCL default.  A newly written preset is
+ * applied immediately when brightness follows the profile and the
+ * current profile matches arg2.
+ */
+static int
+acpi_video_vo_presets_sysctl(SYSCTL_HANDLER_ARGS)
+{
+	struct acpi_video_output *vo;
+	int i, level, *preset, err;
+
+	vo = (struct acpi_video_output *)arg1;
+	ACPI_SERIAL_BEGIN(video_output);
+	if (vo->handle == NULL) {
+		err = ENXIO;
+		goto out;
+	}
+	if (vo->vo_levels == NULL) {
+		err = ENODEV;
+		goto out;
+	}
+	preset = (arg2 == POWER_PROFILE_ECONOMY) ?
+	    &vo->vo_economy : &vo->vo_fullpower;
+	level = *preset;
+	err = sysctl_handle_int(oidp, &level, 0, req);
+	if (err != 0 || req->newptr == NULL)
+		goto out;
+	if (level < -1 || level > 100) {
+		err = EINVAL;
+		goto out;
+	}
+	if (level == -1) {
+		/* Fall back to the _BCL default for this preset. */
+		i = (arg2 == POWER_PROFILE_ECONOMY) ?
+		    BCL_ECONOMY : BCL_FULLPOWER;
+		level = vo->vo_levels[i];
+	} else if ((err = acpi_video_vo_check_level(vo, level)) != 0)
+		goto out;
+
+	/* Apply now if this preset governs the current brightness. */
+	if (vo->vo_brightness == -1 && (power_profile_get_state() == arg2))
+		vo_set_brightness(vo->handle, level);
+	*preset = level;
+
+out:
+	ACPI_SERIAL_END(video_output);
+	return (err);
+}
+
+/* ARGSUSED */
+static int
+acpi_video_vo_levels_sysctl(SYSCTL_HANDLER_ARGS)
+{
+	struct acpi_video_output *vo = arg1;
+	int err;
+
+	ACPI_SERIAL_BEGIN(video_output);
+	if (vo->vo_levels == NULL) {
+		err = ENODEV;
+	} else if (req->newptr != NULL) {
+		/* The level list is read-only. */
+		err = EPERM;
+	} else {
+		err = sysctl_handle_opaque(oidp, vo->vo_levels,
+		    vo->vo_numlevels * sizeof(*vo->vo_levels), req);
+	}
+	ACPI_SERIAL_END(video_output);
+	return (err);
+}
+
+static void
+vid_set_switch_policy(ACPI_HANDLE handle, UINT32 policy)
+{
+	ACPI_STATUS rv;
+
+	/* _DOS tells the BIOS who handles output switching/brightness. */
+	rv = acpi_SetInteger(handle, "_DOS", policy);
+	if (ACPI_FAILURE(rv))
+		printf("can't evaluate %s._DOS - %s\n",
+		    acpi_name(handle), AcpiFormatException(rv));
+}
+
+struct enum_callback_arg {
+ void (*callback)(ACPI_HANDLE, UINT32, void *);
+ void *context;
+ ACPI_OBJECT *dod_pkg;
+ int count;
+};
+
+/*
+ * Namespace-walk callback: match a child device's _ADR against the
+ * entries of the video device's _DOD package and invoke the user
+ * callback for every match.  Always returns AE_OK so the walk
+ * continues past children without _ADR.
+ */
+static ACPI_STATUS
+vid_enum_outputs_subr(ACPI_HANDLE handle, UINT32 level __unused,
+    void *context, void **retp __unused)
+{
+	ACPI_STATUS status;
+	UINT32 adr, val;
+	struct enum_callback_arg *argset;
+	size_t i;
+
+	ACPI_SERIAL_ASSERT(video);
+	argset = context;
+	status = acpi_GetInteger(handle, "_ADR", &adr);
+	if (ACPI_FAILURE(status))
+		return (AE_OK);
+
+	for (i = 0; i < argset->dod_pkg->Package.Count; i++) {
+		/* Only the low 16 device-ID bits of a _DOD entry match _ADR. */
+		if (acpi_PkgInt32(argset->dod_pkg, i, &val) == 0 &&
+		    (val & DOD_DEVID_MASK_FULL) == adr) {
+			argset->callback(handle, val, argset->context);
+			argset->count++;
+		}
+	}
+
+	return (AE_OK);
+}
+
+/*
+ * Evaluate _DOD on the video device and enumerate its outputs.  With a
+ * callback, walks one level of the child namespace and invokes it for
+ * every child whose _ADR matches a _DOD entry, returning the number of
+ * matches.  With a NULL callback just returns the _DOD package size.
+ * Returns -1 if _DOD cannot be evaluated or is malformed.
+ */
+static int
+vid_enum_outputs(ACPI_HANDLE handle,
+    void (*callback)(ACPI_HANDLE, UINT32, void *), void *context)
+{
+	ACPI_STATUS status;
+	ACPI_BUFFER dod_buf;
+	ACPI_OBJECT *res;
+	struct enum_callback_arg argset;
+
+	ACPI_SERIAL_ASSERT(video);
+	dod_buf.Length = ACPI_ALLOCATE_BUFFER;
+	dod_buf.Pointer = NULL;
+	status = AcpiEvaluateObject(handle, "_DOD", NULL, &dod_buf);
+	if (ACPI_FAILURE(status)) {
+		/* A missing _DOD is not worth a console message. */
+		if (status != AE_NOT_FOUND)
+			printf("can't evaluate %s._DOD - %s\n",
+			    acpi_name(handle), AcpiFormatException(status));
+		argset.count = -1;
+		goto out;
+	}
+	res = (ACPI_OBJECT *)dod_buf.Pointer;
+	if (!ACPI_PKG_VALID(res, 1)) {
+		printf("evaluation of %s._DOD makes no sense\n",
+		    acpi_name(handle));
+		argset.count = -1;
+		goto out;
+	}
+	if (callback == NULL) {
+		argset.count = res->Package.Count;
+		goto out;
+	}
+	argset.callback = callback;
+	argset.context = context;
+	argset.dod_pkg = res;
+	argset.count = 0;
+	/* Depth 1: only immediate children can be output devices. */
+	status = AcpiWalkNamespace(ACPI_TYPE_DEVICE, handle, 1,
+	    vid_enum_outputs_subr, NULL, &argset, NULL);
+	if (ACPI_FAILURE(status))
+		printf("failed walking down %s - %s\n",
+		    acpi_name(handle), AcpiFormatException(status));
+out:
+	if (dod_buf.Pointer != NULL)
+		AcpiOsFree(dod_buf.Pointer);
+	return (argset.count);
+}
+
+/*
+ * Evaluate _BCL and return the number of supported brightness levels.
+ * On success the level array (allocated with AcpiOsAllocate(); freed by
+ * the caller with AcpiOsFree()) is returned via *levelp.  Returns 0 on
+ * failure or when fewer than two usable levels are found.
+ */
+static int
+vo_get_brightness_levels(ACPI_HANDLE handle, int **levelp)
+{
+	ACPI_STATUS status;
+	ACPI_BUFFER bcl_buf;
+	ACPI_OBJECT *res;
+	int num, i, n, *levels;
+
+	bcl_buf.Length = ACPI_ALLOCATE_BUFFER;
+	bcl_buf.Pointer = NULL;
+	status = AcpiEvaluateObject(handle, "_BCL", NULL, &bcl_buf);
+	if (ACPI_FAILURE(status)) {
+		if (status != AE_NOT_FOUND)
+			printf("can't evaluate %s._BCL - %s\n",
+			    acpi_name(handle), AcpiFormatException(status));
+		goto out;
+	}
+	res = (ACPI_OBJECT *)bcl_buf.Pointer;
+	if (!ACPI_PKG_VALID(res, 2)) {
+		printf("evaluation of %s._BCL makes no sense\n",
+		    acpi_name(handle));
+		goto out;
+	}
+	num = res->Package.Count;
+	if (num < 2 || levelp == NULL)
+		goto out;
+	levels = AcpiOsAllocate(num * sizeof(*levels));
+	if (levels == NULL)
+		goto out;
+	/* Copy the integer elements, skipping any malformed entries. */
+	for (i = 0, n = 0; i < num; i++)
+		if (acpi_PkgInt32(res, i, &levels[n]) == 0)
+			n++;
+	if (n < 2) {
+		AcpiOsFree(levels);
+		goto out;
+	}
+	/*
+	 * Done with the _BCL result buffer; free it here too.  The
+	 * original success path returned without releasing it, leaking
+	 * the buffer on every (re)bind of an output.
+	 */
+	AcpiOsFree(bcl_buf.Pointer);
+	*levelp = levels;
+	return (n);
+
+out:
+	if (bcl_buf.Pointer != NULL)
+		AcpiOsFree(bcl_buf.Pointer);
+	return (0);
+}
+
+static int
+vo_get_brightness(ACPI_HANDLE handle)
+{
+	ACPI_STATUS rv;
+	UINT32 current;
+
+	ACPI_SERIAL_ASSERT(video_output);
+	/* _BQC reports the panel's current brightness level. */
+	rv = acpi_GetInteger(handle, "_BQC", &current);
+	if (ACPI_FAILURE(rv)) {
+		printf("can't evaluate %s._BQC - %s\n", acpi_name(handle),
+		    AcpiFormatException(rv));
+		return (-1);
+	}
+	/* Levels are percentages; anything above 100 is bogus. */
+	return (current > 100 ? -1 : (int)current);
+}
+
+static void
+vo_set_brightness(ACPI_HANDLE handle, int level)
+{
+	ACPI_STATUS rv;
+
+	ACPI_SERIAL_ASSERT(video_output);
+	/* Program the panel brightness through _BCM. */
+	rv = acpi_SetInteger(handle, "_BCM", level);
+	if (ACPI_FAILURE(rv))
+		printf("can't evaluate %s._BCM - %s\n",
+		    acpi_name(handle), AcpiFormatException(rv));
+}
+
+static UINT32
+vo_get_device_status(ACPI_HANDLE handle)
+{
+	ACPI_STATUS rv;
+	UINT32 dcs = 0;
+
+	ACPI_SERIAL_ASSERT(video_output);
+	/* _DCS reports the output's current status bits; 0 on failure. */
+	rv = acpi_GetInteger(handle, "_DCS", &dcs);
+	if (ACPI_FAILURE(rv))
+		printf("can't evaluate %s._DCS - %s\n",
+		    acpi_name(handle), AcpiFormatException(rv));
+	return (dcs);
+}
+
+static UINT32
+vo_get_graphics_state(ACPI_HANDLE handle)
+{
+	ACPI_STATUS rv;
+	UINT32 dgs = 0;
+
+	/* _DGS reports the output's desired next state; 0 on failure. */
+	rv = acpi_GetInteger(handle, "_DGS", &dgs);
+	if (ACPI_FAILURE(rv))
+		printf("can't evaluate %s._DGS - %s\n",
+		    acpi_name(handle), AcpiFormatException(rv));
+	return (dgs);
+}
+
+static void
+vo_set_device_state(ACPI_HANDLE handle, UINT32 state)
+{
+	ACPI_STATUS rv;
+
+	ACPI_SERIAL_ASSERT(video_output);
+	/* Program the output's new state (DSS_* bits) through _DSS. */
+	rv = acpi_SetInteger(handle, "_DSS", state);
+	if (ACPI_FAILURE(rv))
+		printf("can't evaluate %s._DSS - %s\n",
+		    acpi_name(handle), AcpiFormatException(rv));
+}
diff --git a/sys/dev/acpica/acpiio.h b/sys/dev/acpica/acpiio.h
new file mode 100644
index 0000000..c65028f
--- /dev/null
+++ b/sys/dev/acpica/acpiio.h
@@ -0,0 +1,125 @@
+/*-
+ * Copyright (c) 1999 Takanori Watanabe <takawata@jp.freebsd.org>
+ * Copyright (c) 1999 Mitsuru IWASAKI <iwasaki@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ACPIIO_H_
+#define _ACPIIO_H_
+
+/*
+ * Core ACPI subsystem ioctls
+ */
+#define ACPIIO_SETSLPSTATE _IOW('P', 3, int) /* DEPRECATED */
+
+/* Request S1-5 sleep state. User is notified and then sleep proceeds. */
+#define ACPIIO_REQSLPSTATE _IOW('P', 4, int)
+
+/* Allow suspend to continue (0) or abort it (errno). */
+#define ACPIIO_ACKSLPSTATE _IOW('P', 5, int)
+
+struct acpi_battinfo {
+ int cap; /* percent */
+ int min; /* remaining time (in minutes) */
+ int state; /* battery state */
+ int rate; /* emptying rate */
+};
+
+#define ACPI_CMBAT_MAXSTRLEN 32
+struct acpi_bif {
+ uint32_t units; /* Units (mW or mA). */
+#define ACPI_BIF_UNITS_MW 0 /* Capacity in mWh, rate in mW. */
+#define ACPI_BIF_UNITS_MA 1 /* Capacity in mAh, rate in mA. */
+ uint32_t dcap; /* Design Capacity */
+ uint32_t lfcap; /* Last Full capacity */
+ uint32_t btech; /* Battery Technology */
+ uint32_t dvol; /* Design voltage (mV) */
+ uint32_t wcap; /* WARN capacity */
+ uint32_t lcap; /* Low capacity */
+ uint32_t gra1; /* Granularity 1 (Warn to Low) */
+ uint32_t gra2; /* Granularity 2 (Full to Warn) */
+ char model[ACPI_CMBAT_MAXSTRLEN]; /* model identifier */
+ char serial[ACPI_CMBAT_MAXSTRLEN]; /* Serial number */
+ char type[ACPI_CMBAT_MAXSTRLEN]; /* Type */
+	char	oeminfo[ACPI_CMBAT_MAXSTRLEN];	/* OEM information */
+};
+
+struct acpi_bst {
+ uint32_t state; /* Battery State */
+ uint32_t rate; /* Present Rate */
+ uint32_t cap; /* Remaining Capacity */
+ uint32_t volt; /* Present Voltage */
+};
+
+/*
+ * Note that the following definitions represent status bits for internal
+ * driver state. The first three of them (charging, discharging and critical)
+ * conveniently conform to ACPI specification of status returned by _BST
+ * method. Other definitions (not present, etc) are synthetic.
+ * Also note that according to the specification the charging and discharging
+ * status bits must not be set at the same time.
+ */
+#define ACPI_BATT_STAT_DISCHARG 0x0001
+#define ACPI_BATT_STAT_CHARGING 0x0002
+#define ACPI_BATT_STAT_CRITICAL 0x0004
+#define ACPI_BATT_STAT_INVALID \
+ (ACPI_BATT_STAT_DISCHARG | ACPI_BATT_STAT_CHARGING)
+#define ACPI_BATT_STAT_BST_MASK \
+ (ACPI_BATT_STAT_INVALID | ACPI_BATT_STAT_CRITICAL)
+#define ACPI_BATT_STAT_NOT_PRESENT ACPI_BATT_STAT_BST_MASK
+
+union acpi_battery_ioctl_arg {
+ int unit; /* Device unit or ACPI_BATTERY_ALL_UNITS. */
+
+ struct acpi_battinfo battinfo;
+
+ struct acpi_bif bif;
+ struct acpi_bst bst;
+};
+
+#define ACPI_BATTERY_ALL_UNITS (-1)
+#define ACPI_BATT_UNKNOWN 0xffffffff /* _BST or _BIF value unknown. */
+
+/* Common battery ioctls */
+#define ACPIIO_BATT_GET_UNITS _IOR('B', 0x01, int)
+#define ACPIIO_BATT_GET_BATTINFO _IOWR('B', 0x03, union acpi_battery_ioctl_arg)
+#define ACPIIO_BATT_GET_BIF _IOWR('B', 0x10, union acpi_battery_ioctl_arg)
+#define ACPIIO_BATT_GET_BST _IOWR('B', 0x11, union acpi_battery_ioctl_arg)
+
+/* Control Method battery ioctls (deprecated) */
+#define ACPIIO_CMBAT_GET_BIF ACPIIO_BATT_GET_BIF
+#define ACPIIO_CMBAT_GET_BST ACPIIO_BATT_GET_BST
+
+/* Get AC adapter status. */
+#define ACPIIO_ACAD_GET_STATUS _IOR('A', 1, int)
+
+#ifdef _KERNEL
+typedef int (*acpi_ioctl_fn)(u_long cmd, caddr_t addr, void *arg);
+extern int acpi_register_ioctl(u_long cmd, acpi_ioctl_fn fn, void *arg);
+extern void acpi_deregister_ioctl(u_long cmd, acpi_ioctl_fn fn);
+#endif
+
+#endif /* !_ACPIIO_H_ */
diff --git a/sys/dev/acpica/acpivar.h b/sys/dev/acpica/acpivar.h
new file mode 100644
index 0000000..81cff16
--- /dev/null
+++ b/sys/dev/acpica/acpivar.h
@@ -0,0 +1,493 @@
+/*-
+ * Copyright (c) 2000 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
+ * Copyright (c) 2000 Michael Smith <msmith@freebsd.org>
+ * Copyright (c) 2000 BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ACPIVAR_H_
+#define _ACPIVAR_H_
+
+#ifdef _KERNEL
+
+#include "acpi_if.h"
+#include "bus_if.h"
+#include <sys/eventhandler.h>
+#include <sys/ktr.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/selinfo.h>
+#include <sys/sx.h>
+#include <sys/sysctl.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+struct apm_clone_data;
+struct acpi_softc {
+ device_t acpi_dev;
+ struct cdev *acpi_dev_t;
+
+ int acpi_enabled;
+ int acpi_sstate;
+ int acpi_sleep_disabled;
+ int acpi_resources_reserved;
+
+ struct sysctl_ctx_list acpi_sysctl_ctx;
+ struct sysctl_oid *acpi_sysctl_tree;
+ int acpi_power_button_sx;
+ int acpi_sleep_button_sx;
+ int acpi_lid_switch_sx;
+
+ int acpi_standby_sx;
+ int acpi_suspend_sx;
+
+ int acpi_sleep_delay;
+ int acpi_s4bios;
+ int acpi_do_disable;
+ int acpi_verbose;
+ int acpi_handle_reboot;
+
+ vm_offset_t acpi_wakeaddr;
+ vm_paddr_t acpi_wakephys;
+
+ int acpi_next_sstate; /* Next suspend Sx state. */
+ struct apm_clone_data *acpi_clone; /* Pseudo-dev for devd(8). */
+ STAILQ_HEAD(,apm_clone_data) apm_cdevs; /* All apm/apmctl/acpi cdevs. */
+ struct callout susp_force_to; /* Force suspend if no acks. */
+};
+
+struct acpi_device {
+ /* ACPI ivars */
+ ACPI_HANDLE ad_handle;
+ void *ad_private;
+ int ad_flags;
+
+ /* Resources */
+ struct resource_list ad_rl;
+};
+
+/* Track device (/dev/{apm,apmctl} and /dev/acpi) notification status. */
+struct apm_clone_data {
+ STAILQ_ENTRY(apm_clone_data) entries;
+ struct cdev *cdev;
+ int flags;
+#define ACPI_EVF_NONE 0 /* /dev/apm semantics */
+#define ACPI_EVF_DEVD 1 /* /dev/acpi is handled via devd(8) */
+#define ACPI_EVF_WRITE 2 /* Device instance is opened writable. */
+ int notify_status;
+#define APM_EV_NONE 0 /* Device not yet aware of pending sleep. */
+#define APM_EV_NOTIFIED 1 /* Device saw next sleep state. */
+#define APM_EV_ACKED 2 /* Device agreed sleep can occur. */
+ struct acpi_softc *acpi_sc;
+ struct selinfo sel_read;
+};
+
+#define ACPI_PRW_MAX_POWERRES 8
+
+/* Parsed device wake (_PRW) information; filled in by acpi_parse_prw(). */
+struct acpi_prw_data {
+ ACPI_HANDLE gpe_handle;
+ int gpe_bit; /* GPE bit the device uses for wake. */
+ int lowest_wake; /* Lowest sleep state the device can wake from. */
+ ACPI_OBJECT power_res[ACPI_PRW_MAX_POWERRES]; /* Wake power resources. */
+ int power_res_count;
+};
+
+/* Flags for each device defined in the AML namespace. */
+#define ACPI_FLAG_WAKE_ENABLED 0x1
+
+/* Macros for extracting parts of a PCI address from an _ADR value. */
+#define ACPI_ADR_PCI_SLOT(adr) (((adr) & 0xffff0000) >> 16)
+#define ACPI_ADR_PCI_FUNC(adr) ((adr) & 0xffff)
+
+/*
+ * Entry points to ACPI from above are global functions defined in this
+ * file, sysctls, and I/O on the control device. Entry points from below
+ * are interrupts (the SCI), notifies, task queue threads, and the thermal
+ * zone polling thread.
+ *
+ * ACPI tables and global shared data are protected by a global lock
+ * (acpi_mutex).
+ *
+ * Each ACPI device can have its own driver-specific mutex for protecting
+ * shared access to local data. The ACPI_LOCK macros handle mutexes.
+ *
+ * Drivers that need to serialize access to functions (e.g., to route
+ * interrupts, get/set control paths, etc.) should use the sx lock macros
+ * (ACPI_SERIAL).
+ *
+ * ACPI-CA handles its own locking and should not be called with locks held.
+ *
+ * The most complicated path is:
+ * GPE -> EC runs _Qxx -> _Qxx reads EC space -> GPE
+ */
+extern struct mtx acpi_mutex;
+#define ACPI_LOCK(sys) mtx_lock(&sys##_mutex)
+#define ACPI_UNLOCK(sys) mtx_unlock(&sys##_mutex)
+#define ACPI_LOCK_ASSERT(sys) mtx_assert(&sys##_mutex, MA_OWNED);
+#define ACPI_LOCK_DECL(sys, name) \
+ static struct mtx sys##_mutex; \
+ MTX_SYSINIT(sys##_mutex, &sys##_mutex, name, MTX_DEF)
+#define ACPI_SERIAL_BEGIN(sys) sx_xlock(&sys##_sxlock)
+#define ACPI_SERIAL_END(sys) sx_xunlock(&sys##_sxlock)
+#define ACPI_SERIAL_ASSERT(sys) sx_assert(&sys##_sxlock, SX_XLOCKED);
+#define ACPI_SERIAL_DECL(sys, name) \
+ static struct sx sys##_sxlock; \
+ SX_SYSINIT(sys##_sxlock, &sys##_sxlock, name)
+
+/*
+ * ACPI CA does not define layers for non-ACPI CA drivers.
+ * We define some here within the range provided.
+ */
+#define ACPI_AC_ADAPTER 0x00010000
+#define ACPI_BATTERY 0x00020000
+#define ACPI_BUS 0x00040000
+#define ACPI_BUTTON 0x00080000
+#define ACPI_EC 0x00100000
+#define ACPI_FAN 0x00200000
+#define ACPI_POWERRES 0x00400000
+#define ACPI_PROCESSOR 0x00800000
+#define ACPI_THERMAL 0x01000000
+#define ACPI_TIMER 0x02000000
+#define ACPI_OEM 0x04000000
+
+/*
+ * Constants for different interrupt models used with acpi_SetIntrModel().
+ */
+#define ACPI_INTR_PIC 0
+#define ACPI_INTR_APIC 1
+#define ACPI_INTR_SAPIC 2
+
+/*
+ * Various features and capabilities for the acpi_get_features() method.
+ * In particular, these are used for the ACPI 3.0 _PDC and _OSC methods.
+ * See the Intel document titled "Intel Processor Vendor-Specific ACPI",
+ * number 302223-005.
+ */
+#define ACPI_CAP_PERF_MSRS (1 << 0) /* Intel SpeedStep PERF_CTL MSRs */
+#define ACPI_CAP_C1_IO_HALT (1 << 1) /* Intel C1 "IO then halt" sequence */
+#define ACPI_CAP_THR_MSRS (1 << 2) /* Intel OnDemand throttling MSRs */
+#define ACPI_CAP_SMP_SAME (1 << 3) /* MP C1, Px, and Tx (all the same) */
+#define ACPI_CAP_SMP_SAME_C3 (1 << 4) /* MP C2 and C3 (all the same) */
+#define ACPI_CAP_SMP_DIFF_PX (1 << 5) /* MP Px (different, using _PSD) */
+#define ACPI_CAP_SMP_DIFF_CX (1 << 6) /* MP Cx (different, using _CSD) */
+#define ACPI_CAP_SMP_DIFF_TX (1 << 7) /* MP Tx (different, using _TSD) */
+#define ACPI_CAP_SMP_C1_NATIVE (1 << 8) /* MP C1 support other than halt */
+#define ACPI_CAP_SMP_C3_NATIVE (1 << 9) /* MP C2 and C3 support */
+#define ACPI_CAP_PX_HW_COORD (1 << 11) /* Intel P-state HW coordination */
+
+/*
+ * Quirk flags.
+ *
+ * ACPI_Q_BROKEN: Disables all ACPI support.
+ * ACPI_Q_TIMER: Disables support for the ACPI timer.
+ * ACPI_Q_MADT_IRQ0: Specifies that ISA IRQ 0 is wired up to pin 0 of the
+ * first APIC and that the MADT should force that by ignoring the PC-AT
+ * compatible flag and ignoring overrides that redirect IRQ 0 to pin 2.
+ */
+extern int acpi_quirks;
+#define ACPI_Q_OK 0
+#define ACPI_Q_BROKEN (1 << 0)
+#define ACPI_Q_TIMER (1 << 1)
+#define ACPI_Q_MADT_IRQ0 (1 << 2)
+
+/*
+ * Note that the low ivar values are reserved to provide
+ * interface compatibility with ISA drivers which can also
+ * attach to ACPI.
+ */
+#define ACPI_IVAR_HANDLE 0x100
+#define ACPI_IVAR_UNUSED 0x101 /* Unused/reserved. */
+#define ACPI_IVAR_PRIVATE 0x102
+#define ACPI_IVAR_FLAGS 0x103
+
+/*
+ * Accessor functions for our ivars. Default value for BUS_READ_IVAR is
+ * (type) 0. The <sys/bus.h> accessor functions don't check return values.
+ */
+#define __ACPI_BUS_ACCESSOR(varp, var, ivarp, ivar, type) \
+ \
+static __inline type varp ## _get_ ## var(device_t dev) \
+{ \
+ uintptr_t v = 0; \
+ BUS_READ_IVAR(device_get_parent(dev), dev, \
+ ivarp ## _IVAR_ ## ivar, &v); \
+ return ((type) v); \
+} \
+ \
+static __inline void varp ## _set_ ## var(device_t dev, type t) \
+{ \
+ uintptr_t v = (uintptr_t) t; \
+ BUS_WRITE_IVAR(device_get_parent(dev), dev, \
+ ivarp ## _IVAR_ ## ivar, v); \
+}
+
+__ACPI_BUS_ACCESSOR(acpi, handle, ACPI, HANDLE, ACPI_HANDLE)
+__ACPI_BUS_ACCESSOR(acpi, private, ACPI, PRIVATE, void *)
+__ACPI_BUS_ACCESSOR(acpi, flags, ACPI, FLAGS, int)
+
+void acpi_fake_objhandler(ACPI_HANDLE h, void *data);
+/*
+ * Return the device_t associated with an ACPI handle, or NULL if no
+ * device pointer was attached.  acpi_fake_objhandler is the handler key
+ * under which the device pointer is stored on the handle (presumably via
+ * AcpiAttachData() at attach time -- see the attach code elsewhere).
+ */
+static __inline device_t
+acpi_get_device(ACPI_HANDLE handle)
+{
+ void *dev = NULL;
+ AcpiGetData(handle, acpi_fake_objhandler, &dev);
+ return ((device_t)dev);
+}
+
+/*
+ * Return the ACPI object type of the namespace object backing "dev", or
+ * ACPI_TYPE_NOT_FOUND if the device has no handle or AcpiGetType() fails.
+ */
+static __inline ACPI_OBJECT_TYPE
+acpi_get_type(device_t dev)
+{
+ ACPI_HANDLE h;
+ ACPI_OBJECT_TYPE t;
+
+ if ((h = acpi_get_handle(dev)) == NULL)
+	return (ACPI_TYPE_NOT_FOUND);
+ if (ACPI_FAILURE(AcpiGetType(h, &t)))
+	return (ACPI_TYPE_NOT_FOUND);
+ return (t);
+}
+
+/*
+ * Find the difference between two PM timer tick counts.  If the counter
+ * is only 24 bits wide (ACPI_FADT_32BIT_TIMER clear in the FADT) and
+ * "end" is numerically below "start", the counter wrapped: setting bit 24
+ * in "end" credits it with one full 24-bit period before subtracting.
+ * For a full 32-bit counter, unsigned subtraction handles a single
+ * wraparound naturally.
+ */
+static __inline uint32_t
+acpi_TimerDelta(uint32_t end, uint32_t start)
+{
+
+ if (end < start && (AcpiGbl_FADT.Flags & ACPI_FADT_32BIT_TIMER) == 0)
+	end |= 0x01000000;
+ return (end - start);
+}
+
+#ifdef ACPI_DEBUGGER
+void acpi_EnterDebugger(void);
+#endif
+
+#ifdef ACPI_DEBUG
+#include <sys/cons.h>
+#define STEP(x) do {printf x, printf("\n"); cngetc();} while (0)
+#else
+#define STEP(x)
+#endif
+
+#define ACPI_VPRINT(dev, acpi_sc, x...) do { \
+ if (acpi_get_verbose(acpi_sc)) \
+ device_printf(dev, x); \
+} while (0)
+
+/* Values for the device _STA (status) method. */
+#define ACPI_STA_PRESENT (1 << 0)
+#define ACPI_STA_ENABLED (1 << 1)
+#define ACPI_STA_SHOW_IN_UI (1 << 2)
+#define ACPI_STA_FUNCTIONAL (1 << 3)
+#define ACPI_STA_BATT_PRESENT (1 << 4)
+
+#define ACPI_DEVINFO_PRESENT(x, flags) \
+ (((x) & (flags)) == (flags))
+#define ACPI_DEVICE_PRESENT(x) \
+ ACPI_DEVINFO_PRESENT(x, ACPI_STA_PRESENT | ACPI_STA_FUNCTIONAL)
+#define ACPI_BATTERY_PRESENT(x) \
+ ACPI_DEVINFO_PRESENT(x, ACPI_STA_PRESENT | ACPI_STA_FUNCTIONAL | \
+ ACPI_STA_BATT_PRESENT)
+
+/* Callback function type for walking subtables within a table. */
+typedef void acpi_subtable_handler(ACPI_SUBTABLE_HEADER *, void *);
+
+BOOLEAN acpi_DeviceIsPresent(device_t dev);
+BOOLEAN acpi_BatteryIsPresent(device_t dev);
+ACPI_STATUS acpi_GetHandleInScope(ACPI_HANDLE parent, char *path,
+ ACPI_HANDLE *result);
+ACPI_BUFFER *acpi_AllocBuffer(int size);
+ACPI_STATUS acpi_ConvertBufferToInteger(ACPI_BUFFER *bufp,
+ UINT32 *number);
+ACPI_STATUS acpi_GetInteger(ACPI_HANDLE handle, char *path,
+ UINT32 *number);
+ACPI_STATUS acpi_SetInteger(ACPI_HANDLE handle, char *path,
+ UINT32 number);
+ACPI_STATUS acpi_ForeachPackageObject(ACPI_OBJECT *obj,
+ void (*func)(ACPI_OBJECT *comp, void *arg), void *arg);
+ACPI_STATUS acpi_FindIndexedResource(ACPI_BUFFER *buf, int index,
+ ACPI_RESOURCE **resp);
+ACPI_STATUS acpi_AppendBufferResource(ACPI_BUFFER *buf,
+ ACPI_RESOURCE *res);
+ACPI_STATUS acpi_OverrideInterruptLevel(UINT32 InterruptNumber);
+ACPI_STATUS acpi_SetIntrModel(int model);
+int acpi_ReqSleepState(struct acpi_softc *sc, int state);
+int acpi_AckSleepState(struct apm_clone_data *clone, int error);
+ACPI_STATUS acpi_SetSleepState(struct acpi_softc *sc, int state);
+int acpi_wake_set_enable(device_t dev, int enable);
+int acpi_parse_prw(ACPI_HANDLE h, struct acpi_prw_data *prw);
+ACPI_STATUS acpi_Startup(void);
+void acpi_UserNotify(const char *subsystem, ACPI_HANDLE h,
+ uint8_t notify);
+int acpi_bus_alloc_gas(device_t dev, int *type, int *rid,
+ ACPI_GENERIC_ADDRESS *gas, struct resource **res,
+ u_int flags);
+void acpi_walk_subtables(void *first, void *end,
+ acpi_subtable_handler *handler, void *arg);
+BOOLEAN acpi_MatchHid(ACPI_HANDLE h, const char *hid);
+
+struct acpi_parse_resource_set {
+ void (*set_init)(device_t dev, void *arg, void **context);
+ void (*set_done)(device_t dev, void *context);
+ void (*set_ioport)(device_t dev, void *context, uint64_t base,
+ uint64_t length);
+ void (*set_iorange)(device_t dev, void *context, uint64_t low,
+ uint64_t high, uint64_t length, uint64_t align);
+ void (*set_memory)(device_t dev, void *context, uint64_t base,
+ uint64_t length);
+ void (*set_memoryrange)(device_t dev, void *context, uint64_t low,
+ uint64_t high, uint64_t length, uint64_t align);
+ void (*set_irq)(device_t dev, void *context, uint8_t *irq,
+ int count, int trig, int pol);
+ void (*set_ext_irq)(device_t dev, void *context, uint32_t *irq,
+ int count, int trig, int pol);
+ void (*set_drq)(device_t dev, void *context, uint8_t *drq,
+ int count);
+ void (*set_start_dependent)(device_t dev, void *context,
+ int preference);
+ void (*set_end_dependent)(device_t dev, void *context);
+};
+
+extern struct acpi_parse_resource_set acpi_res_parse_set;
+
+int acpi_identify(void);
+void acpi_config_intr(device_t dev, ACPI_RESOURCE *res);
+ACPI_STATUS acpi_lookup_irq_resource(device_t dev, int rid,
+ struct resource *res, ACPI_RESOURCE *acpi_res);
+ACPI_STATUS acpi_parse_resources(device_t dev, ACPI_HANDLE handle,
+ struct acpi_parse_resource_set *set, void *arg);
+struct resource *acpi_alloc_sysres(device_t child, int type, int *rid,
+ u_long start, u_long end, u_long count, u_int flags);
+
+/* ACPI event handling */
+UINT32 acpi_event_power_button_sleep(void *context);
+UINT32 acpi_event_power_button_wake(void *context);
+UINT32 acpi_event_sleep_button_sleep(void *context);
+UINT32 acpi_event_sleep_button_wake(void *context);
+
+#define ACPI_EVENT_PRI_FIRST 0
+#define ACPI_EVENT_PRI_DEFAULT 10000
+#define ACPI_EVENT_PRI_LAST 20000
+
+typedef void (*acpi_event_handler_t)(void *, int);
+
+EVENTHANDLER_DECLARE(acpi_sleep_event, acpi_event_handler_t);
+EVENTHANDLER_DECLARE(acpi_wakeup_event, acpi_event_handler_t);
+
+/* Device power control. */
+ACPI_STATUS acpi_pwr_wake_enable(ACPI_HANDLE consumer, int enable);
+ACPI_STATUS acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state);
+int acpi_device_pwr_for_sleep(device_t bus, device_t dev,
+ int *dstate);
+
+/* APM emulation */
+void acpi_apm_init(struct acpi_softc *);
+
+/* Misc. */
+/*
+ * Return the softc of a device's parent, or NULL if the device has no
+ * parent.  NOTE(review): callers are presumably ACPI children whose
+ * parent is the acpi bus device, so the result is its acpi_softc.
+ */
+static __inline struct acpi_softc *
+acpi_device_get_parent_softc(device_t child)
+{
+ device_t parent;
+
+ parent = device_get_parent(child);
+ if (parent == NULL)
+	return (NULL);
+ return (device_get_softc(parent));
+}
+
+/* Return the bus verbose flag, or 0 if no softc is available. */
+static __inline int
+acpi_get_verbose(struct acpi_softc *sc)
+{
+ if (sc)
+	return (sc->acpi_verbose);
+ return (0);
+}
+
+char *acpi_name(ACPI_HANDLE handle);
+int acpi_avoid(ACPI_HANDLE handle);
+int acpi_disabled(char *subsys);
+int acpi_machdep_init(device_t dev);
+void acpi_install_wakeup_handler(struct acpi_softc *sc);
+int acpi_sleep_machdep(struct acpi_softc *sc, int state);
+int acpi_wakeup_machdep(struct acpi_softc *sc, int state,
+ int sleep_result, int intr_enabled);
+int acpi_table_quirks(int *quirks);
+int acpi_machdep_quirks(int *quirks);
+
+/* Battery Abstraction. */
+struct acpi_battinfo;
+
+int acpi_battery_register(device_t dev);
+int acpi_battery_remove(device_t dev);
+int acpi_battery_get_units(void);
+int acpi_battery_get_info_expire(void);
+int acpi_battery_bst_valid(struct acpi_bst *bst);
+int acpi_battery_bif_valid(struct acpi_bif *bif);
+int acpi_battery_get_battinfo(device_t dev,
+ struct acpi_battinfo *info);
+
+/* Embedded controller. */
+void acpi_ec_ecdt_probe(device_t);
+
+/* AC adapter interface. */
+int acpi_acad_get_acline(int *);
+
+/* Package manipulation convenience functions. */
+#define ACPI_PKG_VALID(pkg, size) \
+ ((pkg) != NULL && (pkg)->Type == ACPI_TYPE_PACKAGE && \
+ (pkg)->Package.Count >= (size))
+int acpi_PkgInt(ACPI_OBJECT *res, int idx, UINT64 *dst);
+int acpi_PkgInt32(ACPI_OBJECT *res, int idx, uint32_t *dst);
+int acpi_PkgStr(ACPI_OBJECT *res, int idx, void *dst, size_t size);
+int acpi_PkgGas(device_t dev, ACPI_OBJECT *res, int idx, int *type,
+ int *rid, struct resource **dst, u_int flags);
+ACPI_HANDLE acpi_GetReference(ACPI_HANDLE scope, ACPI_OBJECT *obj);
+
+/*
+ * Base level for BUS_ADD_CHILD. Special devices are added at orders less
+ * than this, and normal devices at or above this level. This keeps the
+ * probe order sorted so that things like sysresource are available before
+ * their children need them.
+ */
+#define ACPI_DEV_BASE_ORDER 100
+
+/* Default maximum number of tasks to enqueue. */
+#ifndef ACPI_MAX_TASKS
+#define ACPI_MAX_TASKS 32
+#endif
+
+/* Default number of task queue threads to start. */
+#ifndef ACPI_MAX_THREADS
+#define ACPI_MAX_THREADS 3
+#endif
+
+/* Use the device logging level for ktr(4). */
+#define KTR_ACPI KTR_DEV
+
+SYSCTL_DECL(_debug_acpi);
+
+#endif /* _KERNEL */
+#endif /* !_ACPIVAR_H_ */
OpenPOWER on IntegriCloud