Diffstat (limited to 'drivers/acpi')
-rw-r--r--  drivers/acpi/Kconfig  403
-rw-r--r--  drivers/acpi/Makefile  68
-rw-r--r--  drivers/acpi/ac.c  393
-rw-r--r--  drivers/acpi/acpi_memhotplug.c  575
-rw-r--r--  drivers/acpi/asus_acpi.c  1474
-rw-r--r--  drivers/acpi/battery.c  908
-rw-r--r--  drivers/acpi/blacklist.c  241
-rw-r--r--  drivers/acpi/bus.c  878
-rw-r--r--  drivers/acpi/button.c  549
-rw-r--r--  drivers/acpi/cm_sbs.c  106
-rw-r--r--  drivers/acpi/container.c  282
-rw-r--r--  drivers/acpi/debug.c  345
-rw-r--r--  drivers/acpi/dispatcher/Makefile  9
-rw-r--r--  drivers/acpi/dispatcher/dsfield.c  649
-rw-r--r--  drivers/acpi/dispatcher/dsinit.c  204
-rw-r--r--  drivers/acpi/dispatcher/dsmethod.c  623
-rw-r--r--  drivers/acpi/dispatcher/dsmthdat.c  717
-rw-r--r--  drivers/acpi/dispatcher/dsobject.c  812
-rw-r--r--  drivers/acpi/dispatcher/dsopcode.c  1429
-rw-r--r--  drivers/acpi/dispatcher/dsutils.c  868
-rw-r--r--  drivers/acpi/dispatcher/dswexec.c  745
-rw-r--r--  drivers/acpi/dispatcher/dswload.c  1202
-rw-r--r--  drivers/acpi/dispatcher/dswscope.c  213
-rw-r--r--  drivers/acpi/dispatcher/dswstate.c  752
-rw-r--r--  drivers/acpi/dock.c  1160
-rw-r--r--  drivers/acpi/ec.c  1106
-rw-r--r--  drivers/acpi/event.c  307
-rw-r--r--  drivers/acpi/events/Makefile  9
-rw-r--r--  drivers/acpi/events/evevent.c  312
-rw-r--r--  drivers/acpi/events/evgpe.c  725
-rw-r--r--  drivers/acpi/events/evgpeblk.c  1215
-rw-r--r--  drivers/acpi/events/evmisc.c  631
-rw-r--r--  drivers/acpi/events/evregion.c  1074
-rw-r--r--  drivers/acpi/events/evrgnini.c  688
-rw-r--r--  drivers/acpi/events/evsci.c  184
-rw-r--r--  drivers/acpi/events/evxface.c  820
-rw-r--r--  drivers/acpi/events/evxfevnt.c  719
-rw-r--r--  drivers/acpi/events/evxfregn.c  253
-rw-r--r--  drivers/acpi/executer/Makefile  10
-rw-r--r--  drivers/acpi/executer/exconfig.c  535
-rw-r--r--  drivers/acpi/executer/exconvrt.c  691
-rw-r--r--  drivers/acpi/executer/excreate.c  521
-rw-r--r--  drivers/acpi/executer/exdump.c  1059
-rw-r--r--  drivers/acpi/executer/exfield.c  339
-rw-r--r--  drivers/acpi/executer/exfldio.c  957
-rw-r--r--  drivers/acpi/executer/exmisc.c  725
-rw-r--r--  drivers/acpi/executer/exmutex.c  473
-rw-r--r--  drivers/acpi/executer/exnames.c  435
-rw-r--r--  drivers/acpi/executer/exoparg1.c  1049
-rw-r--r--  drivers/acpi/executer/exoparg2.c  604
-rw-r--r--  drivers/acpi/executer/exoparg3.c  272
-rw-r--r--  drivers/acpi/executer/exoparg6.c  340
-rw-r--r--  drivers/acpi/executer/exprep.c  589
-rw-r--r--  drivers/acpi/executer/exregion.c  498
-rw-r--r--  drivers/acpi/executer/exresnte.c  277
-rw-r--r--  drivers/acpi/executer/exresolv.c  550
-rw-r--r--  drivers/acpi/executer/exresop.c  700
-rw-r--r--  drivers/acpi/executer/exstore.c  715
-rw-r--r--  drivers/acpi/executer/exstoren.c  303
-rw-r--r--  drivers/acpi/executer/exstorob.c  208
-rw-r--r--  drivers/acpi/executer/exsystem.c  302
-rw-r--r--  drivers/acpi/executer/exutils.c  420
-rw-r--r--  drivers/acpi/fan.c  373
-rw-r--r--  drivers/acpi/glue.c  302
-rw-r--r--  drivers/acpi/hardware/Makefile  9
-rw-r--r--  drivers/acpi/hardware/hwacpi.c  184
-rw-r--r--  drivers/acpi/hardware/hwgpe.c  477
-rw-r--r--  drivers/acpi/hardware/hwregs.c  857
-rw-r--r--  drivers/acpi/hardware/hwsleep.c  643
-rw-r--r--  drivers/acpi/hardware/hwtimer.c  187
-rw-r--r--  drivers/acpi/namespace/Makefile  12
-rw-r--r--  drivers/acpi/namespace/nsaccess.c  674
-rw-r--r--  drivers/acpi/namespace/nsalloc.c  496
-rw-r--r--  drivers/acpi/namespace/nsdump.c  708
-rw-r--r--  drivers/acpi/namespace/nsdumpdv.c  140
-rw-r--r--  drivers/acpi/namespace/nseval.c  319
-rw-r--r--  drivers/acpi/namespace/nsinit.c  592
-rw-r--r--  drivers/acpi/namespace/nsload.c  314
-rw-r--r--  drivers/acpi/namespace/nsnames.c  264
-rw-r--r--  drivers/acpi/namespace/nsobject.c  440
-rw-r--r--  drivers/acpi/namespace/nsparse.c  203
-rw-r--r--  drivers/acpi/namespace/nspredef.c  900
-rw-r--r--  drivers/acpi/namespace/nssearch.c  414
-rw-r--r--  drivers/acpi/namespace/nsutils.c  996
-rw-r--r--  drivers/acpi/namespace/nswalk.c  295
-rw-r--r--  drivers/acpi/namespace/nsxfeval.c  811
-rw-r--r--  drivers/acpi/namespace/nsxfname.c  359
-rw-r--r--  drivers/acpi/namespace/nsxfobj.c  286
-rw-r--r--  drivers/acpi/numa.c  286
-rw-r--r--  drivers/acpi/osl.c  1382
-rw-r--r--  drivers/acpi/parser/Makefile  8
-rw-r--r--  drivers/acpi/parser/psargs.c  751
-rw-r--r--  drivers/acpi/parser/psloop.c  1087
-rw-r--r--  drivers/acpi/parser/psopcode.c  809
-rw-r--r--  drivers/acpi/parser/psparse.c  688
-rw-r--r--  drivers/acpi/parser/psscope.c  264
-rw-r--r--  drivers/acpi/parser/pstree.c  311
-rw-r--r--  drivers/acpi/parser/psutils.c  243
-rw-r--r--  drivers/acpi/parser/pswalk.c  109
-rw-r--r--  drivers/acpi/parser/psxface.c  351
-rw-r--r--  drivers/acpi/pci_bind.c  373
-rw-r--r--  drivers/acpi/pci_irq.c  621
-rw-r--r--  drivers/acpi/pci_link.c  969
-rw-r--r--  drivers/acpi/pci_root.c  388
-rw-r--r--  drivers/acpi/pci_slot.c  365
-rw-r--r--  drivers/acpi/power.c  795
-rw-r--r--  drivers/acpi/processor_core.c  1190
-rw-r--r--  drivers/acpi/processor_idle.c  1921
-rw-r--r--  drivers/acpi/processor_perflib.c  814
-rw-r--r--  drivers/acpi/processor_thermal.c  517
-rw-r--r--  drivers/acpi/processor_throttling.c  1277
-rw-r--r--  drivers/acpi/reboot.c  50
-rw-r--r--  drivers/acpi/resources/Makefile  10
-rw-r--r--  drivers/acpi/resources/rsaddr.c  380
-rw-r--r--  drivers/acpi/resources/rscalc.c  617
-rw-r--r--  drivers/acpi/resources/rscreate.c  467
-rw-r--r--  drivers/acpi/resources/rsdump.c  770
-rw-r--r--  drivers/acpi/resources/rsinfo.c  205
-rw-r--r--  drivers/acpi/resources/rsio.c  289
-rw-r--r--  drivers/acpi/resources/rsirq.c  265
-rw-r--r--  drivers/acpi/resources/rslist.c  202
-rw-r--r--  drivers/acpi/resources/rsmemory.c  235
-rw-r--r--  drivers/acpi/resources/rsmisc.c  560
-rw-r--r--  drivers/acpi/resources/rsutils.c  726
-rw-r--r--  drivers/acpi/resources/rsxface.c  570
-rw-r--r--  drivers/acpi/sbs.c  1043
-rw-r--r--  drivers/acpi/sbshc.c  330
-rw-r--r--  drivers/acpi/sbshc.h  33
-rw-r--r--  drivers/acpi/scan.c  1571
-rw-r--r--  drivers/acpi/sleep/Makefile  5
-rw-r--r--  drivers/acpi/sleep/main.c  696
-rw-r--r--  drivers/acpi/sleep/proc.c  526
-rw-r--r--  drivers/acpi/sleep/sleep.h  7
-rw-r--r--  drivers/acpi/sleep/wakeup.c  173
-rw-r--r--  drivers/acpi/system.c  649
-rw-r--r--  drivers/acpi/tables.c  319
-rw-r--r--  drivers/acpi/tables/Makefile  7
-rw-r--r--  drivers/acpi/tables/tbfadt.c  474
-rw-r--r--  drivers/acpi/tables/tbfind.c  139
-rw-r--r--  drivers/acpi/tables/tbinstal.c  573
-rw-r--r--  drivers/acpi/tables/tbutils.c  556
-rw-r--r--  drivers/acpi/tables/tbxface.c  734
-rw-r--r--  drivers/acpi/tables/tbxfroot.c  273
-rw-r--r--  drivers/acpi/thermal.c  1892
-rw-r--r--  drivers/acpi/toshiba_acpi.c  863
-rw-r--r--  drivers/acpi/utilities/Makefile  9
-rw-r--r--  drivers/acpi/utilities/utalloc.c  382
-rw-r--r--  drivers/acpi/utilities/utcache.c  314
-rw-r--r--  drivers/acpi/utilities/utcopy.c  969
-rw-r--r--  drivers/acpi/utilities/utdebug.c  654
-rw-r--r--  drivers/acpi/utilities/utdelete.c  676
-rw-r--r--  drivers/acpi/utilities/uteval.c  751
-rw-r--r--  drivers/acpi/utilities/utglobal.c  819
-rw-r--r--  drivers/acpi/utilities/utinit.c  151
-rw-r--r--  drivers/acpi/utilities/utmath.c  311
-rw-r--r--  drivers/acpi/utilities/utmisc.c  1090
-rw-r--r--  drivers/acpi/utilities/utmutex.c  341
-rw-r--r--  drivers/acpi/utilities/utobject.c  676
-rw-r--r--  drivers/acpi/utilities/utresrc.c  615
-rw-r--r--  drivers/acpi/utilities/utstate.c  346
-rw-r--r--  drivers/acpi/utilities/utxface.c  500
-rw-r--r--  drivers/acpi/utils.c  426
-rw-r--r--  drivers/acpi/video.c  2156
-rw-r--r--  drivers/acpi/video_detect.c  267
-rw-r--r--  drivers/acpi/wmi.c  747
165 files changed, 91403 insertions, 0 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
new file mode 100644
index 0000000..b0243fd
--- /dev/null
+++ b/drivers/acpi/Kconfig
@@ -0,0 +1,403 @@
+#
+# ACPI Configuration
+#
+
+menuconfig ACPI
+ bool "ACPI (Advanced Configuration and Power Interface) Support"
+ depends on !IA64_HP_SIM
+ depends on IA64 || X86
+ depends on PCI
+ depends on PM
+ select PNP
+ default y
+ ---help---
+ Advanced Configuration and Power Interface (ACPI) support for
+ Linux requires an ACPI compliant platform (hardware/firmware),
+ and assumes the presence of OS-directed configuration and power
+ management (OSPM) software. This option will enlarge your
+ kernel by about 70K.
+
+ Linux ACPI provides a robust functional replacement for several
+ legacy configuration and power management interfaces, including
+ the Plug-and-Play BIOS specification (PnP BIOS), the
+ MultiProcessor Specification (MPS), and the Advanced Power
+ Management (APM) specification. If both ACPI and APM support
+ are configured, whichever is loaded first shall be used.
+
+ The ACPI SourceForge project contains the latest source code,
+ documentation, tools, mailing list subscription, and other
+ information. This project is available at:
+ <http://sourceforge.net/projects/acpi>
+
+ Linux support for ACPI is based on Intel Corporation's ACPI
+ Component Architecture (ACPI CA). For more information see:
+ <http://developer.intel.com/technology/iapc/acpi>
+
+ ACPI is an open industry specification co-developed by Compaq,
+ Intel, Microsoft, Phoenix, and Toshiba. The specification is
+ available at:
+ <http://www.acpi.info>
+
+if ACPI
+
+config ACPI_SLEEP
+ bool
+ depends on SUSPEND || HIBERNATION
+ default y
+
+config ACPI_PROCFS
+ bool "Deprecated /proc/acpi files"
+ depends on PROC_FS
+ ---help---
+ For backwards compatibility, this option allows
+ deprecated /proc/acpi/ files to exist, even when
+ they have been replaced by functions in /sys.
+ The deprecated files (and their replacements) include:
+
+ /proc/acpi/sleep (/sys/power/state)
+ /proc/acpi/info (/sys/module/acpi/parameters/acpica_version)
+ /proc/acpi/dsdt (/sys/firmware/acpi/tables/DSDT)
+ /proc/acpi/fadt (/sys/firmware/acpi/tables/FACP)
+ /proc/acpi/debug_layer (/sys/module/acpi/parameters/debug_layer)
+ /proc/acpi/debug_level (/sys/module/acpi/parameters/debug_level)
+
+ This option has no effect on /proc/acpi/ files
+ and functions which do not yet exist in /sys.
+
+ Say N to delete /proc/acpi/ files that have moved to /sys/
+
+config ACPI_PROCFS_POWER
+ bool "Deprecated power /proc/acpi directories"
+ depends on PROC_FS
+ default y
+ ---help---
+ For backwards compatibility, this option allows
+ deprecated power /proc/acpi/ directories to exist, even when
+ they have been replaced by functions in /sys.
+ The deprecated directories (and their replacements) include:
+ /proc/acpi/battery/* (/sys/class/power_supply/*)
+ /proc/acpi/ac_adapter/* (/sys/class/power_supply/*)
+ This option has no effect on /proc/acpi/ directories
+ and functions which do not yet exist in /sys.
+
+ Say N to delete power /proc/acpi/ directories that have moved to /sys/
+
+config ACPI_SYSFS_POWER
+ bool "Future power /sys interface"
+ select POWER_SUPPLY
+ default y
+ ---help---
+ Say N to disable the power /sys interface.
+
+config ACPI_PROC_EVENT
+ bool "Deprecated /proc/acpi/event support"
+ depends on PROC_FS
+ default y
+ ---help---
+ A user-space daemon, acpid, traditionally read /proc/acpi/event
+ and handled all ACPI sub-system generated events.
+
+ These events are now delivered to user-space via
+ either the input layer, or as netlink events.
+
+ This build option enables the old code for the legacy
+ user-space implementation. After some time, this will
+ be moved under CONFIG_ACPI_PROCFS, and then deleted.
+
+ Say Y here to retain the old behaviour. Say N if your
+ user-space is newer than kernel 2.6.23 (September 2007).
+
+config ACPI_AC
+ tristate "AC Adapter"
+ depends on X86
+ default y
+ help
+ This driver adds support for the AC Adapter object, which indicates
+ whether a system is on AC power or not. If you have a system that
+ can switch between AC and battery power, say Y.
+
+config ACPI_BATTERY
+ tristate "Battery"
+ depends on X86
+ default y
+ help
+ This driver adds support for battery information through
+ /proc/acpi/battery. If you have a mobile system with a battery,
+ say Y.
+
+config ACPI_BUTTON
+ tristate "Button"
+ depends on INPUT
+ default y
+ help
+ This driver handles events on the power, sleep and lid buttons.
+ A daemon reads /proc/acpi/event and performs user-defined actions
+ such as shutting down the system. This is necessary for
+ software-controlled poweroff.
+
+config ACPI_VIDEO
+ tristate "Video"
+ depends on X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL
+ depends on INPUT
+ select THERMAL
+ help
+ This driver implements the ACPI Extensions For Display Adapters
+ for integrated graphics devices on the motherboard, as specified in
+ the ACPI 2.0 Specification, Appendix B. It allows basic control of
+ the display, such as defining the video POST device, retrieving EDID
+ information, or setting up a video output.
+ Note that this is a reference implementation only. It may or may not
+ work for your integrated video device.
+
+config ACPI_FAN
+ tristate "Fan"
+ select THERMAL
+ default y
+ help
+ This driver adds support for ACPI fan devices, allowing user-mode
+ applications to perform basic fan control (on, off, status).
+
+config ACPI_DOCK
+ bool "Dock"
+ depends on EXPERIMENTAL
+ help
+ This driver adds support for ACPI-controlled docking stations and removable
+ drive bays such as the IBM Ultrabay or the Dell Module Bay.
+
+config ACPI_PROCESSOR
+ tristate "Processor"
+ select THERMAL
+ default y
+ help
+ This driver installs ACPI as the idle handler for Linux and uses
+ ACPI C2 and C3 processor states to save power on systems that
+ support them. It is required by several flavors of cpufreq
+ performance-state drivers.
+
+config ACPI_HOTPLUG_CPU
+ bool
+ depends on ACPI_PROCESSOR && HOTPLUG_CPU
+ select ACPI_CONTAINER
+ default y
+
+config ACPI_THERMAL
+ tristate "Thermal Zone"
+ depends on ACPI_PROCESSOR
+ select THERMAL
+ default y
+ help
+ This driver adds support for ACPI thermal zones. Most mobile and
+ some desktop systems support ACPI thermal zones. It is HIGHLY
+ recommended that this option be enabled, as your processor(s)
+ may be damaged without it.
+
+config ACPI_NUMA
+ bool "NUMA support"
+ depends on NUMA
+ depends on (X86 || IA64)
+ default y if IA64_GENERIC || IA64_SGI_SN2
+
+config ACPI_WMI
+ tristate "WMI (EXPERIMENTAL)"
+ depends on X86
+ depends on EXPERIMENTAL
+ help
+ This driver adds support for the ACPI-WMI (Windows Management
+ Instrumentation) mapper device (PNP0C14) found on some systems.
+
+ ACPI-WMI is a proprietary extension to ACPI to expose parts of the
+ ACPI firmware to userspace - this is done through various vendor
+ defined methods and data blocks in a PNP0C14 device, which are then
+ made available for userspace to call.
+
+ The implementation of this in Linux currently only exposes this to
+ other kernel space drivers.
+
+ This driver is a required dependency to build the firmware specific
+ drivers needed on many machines, including Acer and HP laptops.
+
+ It is safe to enable this driver even if your DSDT doesn't define
+ any ACPI-WMI devices.
+
+config ACPI_ASUS
+ tristate "ASUS/Medion Laptop Extras"
+ depends on X86
+ select BACKLIGHT_CLASS_DEVICE
+ ---help---
+ This driver provides support for extra features of ACPI-compatible
+ ASUS laptops. As some Medion laptops are made by ASUS, it may also
+ support some Medion models (such as the 9675). It makes all
+ the extra buttons generate standard ACPI events that go through
+ /proc/acpi/event, and (on some models) adds support for changing the
+ display brightness and output, switching the LCD backlight on and off,
+ and, most importantly, allows you to blink those fancy LEDs intended
+ for reporting mail and wireless status.
+
+ Note: the display switching code is currently considered EXPERIMENTAL;
+ toying with these values may even lock your machine.
+
+ All settings are changed via /proc/acpi/asus directory entries. Owner
+ and group for these entries can be set with asus_uid and asus_gid
+ parameters.
+
+ More information and a userspace daemon for handling the extra buttons
+ are available at <http://sourceforge.net/projects/acpi4asus/>.
+
+ If you have an ACPI-compatible ASUS laptop, say Y or M here. This
+ driver is still under development, so if your laptop is unsupported or
+ something does not work quite as expected, please use the mailing list
+ available on the above page (acpi4asus-user@lists.sourceforge.net).
+
+ NOTE: This driver is deprecated and will probably be removed soon,
+ use asus-laptop instead.
+
+config ACPI_TOSHIBA
+ tristate "Toshiba Laptop Extras"
+ depends on X86 && INPUT
+ select INPUT_POLLDEV
+ select NET
+ select RFKILL
+ select BACKLIGHT_CLASS_DEVICE
+ ---help---
+ This driver adds support for access to certain system settings
+ on "legacy-free" Toshiba laptops. These laptops can be recognized by
+ their lack of a BIOS setup menu and APM support.
+
+ On these machines, all system configuration is handled through
+ ACPI. This driver is required for access to controls not covered
+ by the general ACPI drivers, such as LCD brightness, video output,
+ etc.
+
+ This driver differs from the non-ACPI Toshiba laptop driver (located
+ under "Processor type and features") in several aspects.
+ Configuration is accessed by reading and writing text files in the
+ /proc tree instead of by program interface to /dev. Furthermore, no
+ power management functions are exposed, as those are handled by the
+ general ACPI drivers.
+
+ More information about this driver is available at
+ <http://memebeam.org/toys/ToshibaAcpiDriver>.
+
+ If you have a legacy-free Toshiba laptop (such as the Libretto L1
+ series), say Y.
+
+config ACPI_CUSTOM_DSDT_FILE
+ string "Custom DSDT Table file to include"
+ default ""
+ depends on !STANDALONE
+ help
+ This option supports a custom DSDT by linking it into the kernel.
+ See Documentation/acpi/dsdt-override.txt
+
+ Enter the full path name to the file which includes the AmlCode
+ declaration.
+
+ If unsure, don't enter a file name.
+
+config ACPI_CUSTOM_DSDT
+ bool
+ default ACPI_CUSTOM_DSDT_FILE != ""
+
+config ACPI_BLACKLIST_YEAR
+ int "Disable ACPI for systems before Jan 1st this year" if X86_32
+ default 0
+ help
+ Enter a 4-digit year, e.g. 2001, to disable ACPI by default
+ on platforms with a DMI BIOS date before January 1st of that year.
+ "acpi=force" can be used to override this mechanism.
+
+ Enter 0 to disable this mechanism and allow ACPI to
+ run by default no matter what the year (default).
+
+config ACPI_DEBUG
+ bool "Debug Statements"
+ default n
+ help
+ The ACPI subsystem can produce debug output. Saying Y enables this
+ output and increases the kernel size by around 50K.
+
+ Use the acpi.debug_layer and acpi.debug_level kernel command-line
+ parameters documented in Documentation/acpi/debug.txt and
+ Documentation/kernel-parameters.txt to control the type and
+ amount of debug output.
+
+config ACPI_DEBUG_FUNC_TRACE
+ bool "Additionally enable ACPI function tracing"
+ default n
+ depends on ACPI_DEBUG
+ help
+ ACPI Debug Statements slow down ACPI processing. Function trace
+ is about half of the penalty and is rarely useful.
+
+config ACPI_PCI_SLOT
+ tristate "PCI slot detection driver"
+ default n
+ help
+ This driver will attempt to discover all PCI slots in your system,
+ and create entries in /sys/bus/pci/slots/. This feature can
+ help you correlate PCI bus addresses with the physical geography
+ of your slots. If you are unsure, say N.
+
+config ACPI_SYSTEM
+ bool
+ default y
+ help
+ This driver will enable your system to shut down using ACPI, and
+ dump your ACPI DSDT table using /proc/acpi/dsdt.
+
+config X86_PM_TIMER
+ bool "Power Management Timer Support" if EMBEDDED
+ depends on X86
+ default y
+ help
+ The Power Management Timer is available on all ACPI-capable systems,
+ in most cases even if ACPI is unusable or blacklisted.
+
+ This timing source is not affected by power management features
+ like aggressive processor idling, throttling, frequency and/or
+ voltage scaling, unlike the commonly used Time Stamp Counter
+ (TSC) timing source.
+
+ You should nearly always say Y here because many modern
+ systems require this timer.
+
+config ACPI_CONTAINER
+ tristate "ACPI0004,PNP0A05 and PNP0A06 Container Driver (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ default (ACPI_HOTPLUG_MEMORY || ACPI_HOTPLUG_CPU || ACPI_HOTPLUG_IO)
+ ---help---
+ This allows _physical_ insertion and removal of CPUs and memory.
+ This can be useful, for example, on NUMA machines that support
+ ACPI based physical hotplug of nodes, or non-NUMA machines that
+ support physical cpu/memory hot-plug.
+
+ If one selects "m", this driver can be loaded with
+ "modprobe acpi_container".
+
+config ACPI_HOTPLUG_MEMORY
+ tristate "Memory Hotplug"
+ depends on MEMORY_HOTPLUG
+ default n
+ help
+ This driver adds support for ACPI Memory Hotplug. This driver
+ provides support for fielding notifications on ACPI memory
+ devices (PNP0C80) which represent memory ranges that may be
+ onlined or offlined during runtime.
+
+ Enabling this driver assumes that your platform hardware
+ and firmware have support for hot-plugging physical memory. If
+ your system does not support physically adding or ripping out
+ memory DIMMs at some platform-defined granularity (individually
+ or as a bank) at runtime, then you need not enable this driver.
+
+ If one selects "m", this driver can be loaded using the following
+ command:
+ $>modprobe acpi_memhotplug
+
+config ACPI_SBS
+ tristate "Smart Battery System"
+ depends on X86
+ help
+ This driver adds support for the Smart Battery System, another
+ type of access to battery information, found on some laptops.
+
+endif # ACPI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
new file mode 100644
index 0000000..3c0c933
--- /dev/null
+++ b/drivers/acpi/Makefile
@@ -0,0 +1,68 @@
+#
+# Makefile for the Linux ACPI interpreter
+#
+
+export ACPI_CFLAGS
+
+ACPI_CFLAGS := -Os
+
+ifdef CONFIG_ACPI_DEBUG
+ ACPI_CFLAGS += -DACPI_DEBUG_OUTPUT
+endif
+
+EXTRA_CFLAGS += $(ACPI_CFLAGS)
+
+#
+# ACPI Boot-Time Table Parsing
+#
+obj-y += tables.o
+obj-$(CONFIG_X86) += blacklist.o
+
+#
+# ACPI Core Subsystem (Interpreter)
+#
+obj-y += osl.o utils.o reboot.o \
+ dispatcher/ events/ executer/ hardware/ \
+ namespace/ parser/ resources/ tables/ \
+ utilities/
+
+#
+# ACPI Bus and Device Drivers
+#
+processor-objs += processor_core.o processor_throttling.o \
+ processor_idle.o processor_thermal.o
+ifdef CONFIG_CPU_FREQ
+processor-objs += processor_perflib.o
+endif
+
+obj-y += sleep/
+obj-y += bus.o glue.o
+obj-y += scan.o
+# Keep EC driver first. Initialization of others depends on it.
+obj-y += ec.o
+obj-$(CONFIG_ACPI_AC) += ac.o
+obj-$(CONFIG_ACPI_BATTERY) += battery.o
+obj-$(CONFIG_ACPI_BUTTON) += button.o
+obj-$(CONFIG_ACPI_FAN) += fan.o
+obj-$(CONFIG_ACPI_DOCK) += dock.o
+obj-$(CONFIG_ACPI_VIDEO) += video.o
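+# video_detect.o is built into the kernel (obj-y) whenever ACPI_VIDEO is
+# enabled, whether as y or m.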
+ifdef CONFIG_ACPI_VIDEO
+obj-y += video_detect.o
+endif
+
+obj-y += pci_root.o pci_link.o pci_irq.o pci_bind.o
+obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o
+obj-$(CONFIG_ACPI_PROCESSOR) += processor.o
+obj-$(CONFIG_ACPI_CONTAINER) += container.o
+obj-$(CONFIG_ACPI_THERMAL) += thermal.o
+obj-y += power.o
+obj-$(CONFIG_ACPI_SYSTEM) += system.o event.o
+obj-$(CONFIG_ACPI_DEBUG) += debug.o
+obj-$(CONFIG_ACPI_NUMA) += numa.o
+obj-$(CONFIG_ACPI_WMI) += wmi.o
+obj-$(CONFIG_ACPI_ASUS) += asus_acpi.o
+obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
+obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += acpi_memhotplug.o
+obj-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
+obj-$(CONFIG_ACPI_SBS) += sbshc.o
+obj-$(CONFIG_ACPI_SBS) += sbs.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
new file mode 100644
index 0000000..9b917da
--- /dev/null
+++ b/drivers/acpi/ac.c
@@ -0,0 +1,393 @@
+/*
+ * acpi_ac.c - ACPI AC Adapter Driver ($Revision: 27 $)
+ *
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#ifdef CONFIG_ACPI_PROCFS_POWER
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#endif
+#ifdef CONFIG_ACPI_SYSFS_POWER
+#include <linux/power_supply.h>
+#endif
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#define ACPI_AC_CLASS "ac_adapter"
+#define ACPI_AC_DEVICE_NAME "AC Adapter"
+#define ACPI_AC_FILE_STATE "state"
+#define ACPI_AC_NOTIFY_STATUS 0x80
+#define ACPI_AC_STATUS_OFFLINE 0x00
+#define ACPI_AC_STATUS_ONLINE 0x01
+#define ACPI_AC_STATUS_UNKNOWN 0xFF
+
+#define _COMPONENT ACPI_AC_COMPONENT
+ACPI_MODULE_NAME("ac");
+
+MODULE_AUTHOR("Paul Diefenbaugh");
+MODULE_DESCRIPTION("ACPI AC Adapter Driver");
+MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_ACPI_PROCFS_POWER
+extern struct proc_dir_entry *acpi_lock_ac_dir(void);
+extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
+static int acpi_ac_open_fs(struct inode *inode, struct file *file);
+#endif
+
+static int acpi_ac_add(struct acpi_device *device);
+static int acpi_ac_remove(struct acpi_device *device, int type);
+static int acpi_ac_resume(struct acpi_device *device);
+
+static const struct acpi_device_id ac_device_ids[] = {
+ {"ACPI0003", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, ac_device_ids);
+
+static struct acpi_driver acpi_ac_driver = {
+ .name = "ac",
+ .class = ACPI_AC_CLASS,
+ .ids = ac_device_ids,
+ .ops = {
+ .add = acpi_ac_add,
+ .remove = acpi_ac_remove,
+ .resume = acpi_ac_resume,
+ },
+};
+
+struct acpi_ac {
+#ifdef CONFIG_ACPI_SYSFS_POWER
+ struct power_supply charger;
+#endif
+ struct acpi_device * device;
+ unsigned long long state;
+};
+
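+/* Map a power_supply pointer back to the acpi_ac structure that embeds it */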
+#define to_acpi_ac(x) container_of(x, struct acpi_ac, charger)
+
+#ifdef CONFIG_ACPI_PROCFS_POWER
+static const struct file_operations acpi_ac_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_ac_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+#ifdef CONFIG_ACPI_SYSFS_POWER
+static int get_ac_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct acpi_ac *ac = to_acpi_ac(psy);
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = ac->state;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static enum power_supply_property ac_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+#endif
+/* --------------------------------------------------------------------------
+ AC Adapter Management
+ -------------------------------------------------------------------------- */
+
+static int acpi_ac_get_state(struct acpi_ac *ac)
+{
+ acpi_status status = AE_OK;
+
+
+ if (!ac)
+ return -EINVAL;
+
+ status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL, &ac->state);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Error reading AC Adapter state"));
+ ac->state = ACPI_AC_STATUS_UNKNOWN;
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_ACPI_PROCFS_POWER
+/* --------------------------------------------------------------------------
+ FS Interface (/proc)
+ -------------------------------------------------------------------------- */
+
+static struct proc_dir_entry *acpi_ac_dir;
+
+static int acpi_ac_seq_show(struct seq_file *seq, void *offset)
+{
+ struct acpi_ac *ac = seq->private;
+
+
+ if (!ac)
+ return 0;
+
+ if (acpi_ac_get_state(ac)) {
+ seq_puts(seq, "ERROR: Unable to read AC Adapter state\n");
+ return 0;
+ }
+
+ seq_puts(seq, "state: ");
+ switch (ac->state) {
+ case ACPI_AC_STATUS_OFFLINE:
+ seq_puts(seq, "off-line\n");
+ break;
+ case ACPI_AC_STATUS_ONLINE:
+ seq_puts(seq, "on-line\n");
+ break;
+ default:
+ seq_puts(seq, "unknown\n");
+ break;
+ }
+
+ return 0;
+}
+
+static int acpi_ac_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_ac_seq_show, PDE(inode)->data);
+}
+
+static int acpi_ac_add_fs(struct acpi_device *device)
+{
+ struct proc_dir_entry *entry = NULL;
+
+
+ if (!acpi_device_dir(device)) {
+ acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
+ acpi_ac_dir);
+ if (!acpi_device_dir(device))
+ return -ENODEV;
+ acpi_device_dir(device)->owner = THIS_MODULE;
+ }
+
+ /* 'state' [R] */
+ entry = proc_create_data(ACPI_AC_FILE_STATE,
+ S_IRUGO, acpi_device_dir(device),
+ &acpi_ac_fops, acpi_driver_data(device));
+ if (!entry)
+ return -ENODEV;
+ return 0;
+}
+
+static int acpi_ac_remove_fs(struct acpi_device *device)
+{
+
+ if (acpi_device_dir(device)) {
+ remove_proc_entry(ACPI_AC_FILE_STATE, acpi_device_dir(device));
+
+ remove_proc_entry(acpi_device_bid(device), acpi_ac_dir);
+ acpi_device_dir(device) = NULL;
+ }
+
+ return 0;
+}
+#endif
+
+/* --------------------------------------------------------------------------
+ Driver Model
+ -------------------------------------------------------------------------- */
+
+static void acpi_ac_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct acpi_ac *ac = data;
+ struct acpi_device *device = NULL;
+
+
+ if (!ac)
+ return;
+
+ device = ac->device;
+ switch (event) {
+ default:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Unsupported event [0x%x]\n", event));
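+ /* fall through: refresh the adapter state for any notification */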
+ case ACPI_AC_NOTIFY_STATUS:
+ case ACPI_NOTIFY_BUS_CHECK:
+ case ACPI_NOTIFY_DEVICE_CHECK:
+ acpi_ac_get_state(ac);
+ acpi_bus_generate_proc_event(device, event, (u32) ac->state);
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+ dev_name(&device->dev), event,
+ (u32) ac->state);
+#ifdef CONFIG_ACPI_SYSFS_POWER
+ kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
+#endif
+ }
+
+ return;
+}
+
+static int acpi_ac_add(struct acpi_device *device)
+{
+ int result = 0;
+ acpi_status status = AE_OK;
+ struct acpi_ac *ac = NULL;
+
+
+ if (!device)
+ return -EINVAL;
+
+ ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL);
+ if (!ac)
+ return -ENOMEM;
+
+ ac->device = device;
+ strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_AC_CLASS);
+ device->driver_data = ac;
+
+ result = acpi_ac_get_state(ac);
+ if (result)
+ goto end;
+
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ result = acpi_ac_add_fs(device);
+#endif
+ if (result)
+ goto end;
+#ifdef CONFIG_ACPI_SYSFS_POWER
+ ac->charger.name = acpi_device_bid(device);
+ ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
+ ac->charger.properties = ac_props;
+ ac->charger.num_properties = ARRAY_SIZE(ac_props);
+ ac->charger.get_property = get_ac_property;
+ power_supply_register(&ac->device->dev, &ac->charger);
+#endif
+ status = acpi_install_notify_handler(device->handle,
+ ACPI_ALL_NOTIFY, acpi_ac_notify,
+ ac);
+ if (ACPI_FAILURE(status)) {
+ result = -ENODEV;
+ goto end;
+ }
+
+ printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
+ acpi_device_name(device), acpi_device_bid(device),
+ ac->state ? "on-line" : "off-line");
+
+ end:
+ if (result) {
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_ac_remove_fs(device);
+#endif
+ kfree(ac);
+ }
+
+ return result;
+}
+
+static int acpi_ac_resume(struct acpi_device *device)
+{
+ struct acpi_ac *ac;
+ unsigned old_state;
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+ ac = acpi_driver_data(device);
+ old_state = ac->state;
+ if (acpi_ac_get_state(ac))
+ return 0;
+#ifdef CONFIG_ACPI_SYSFS_POWER
+ if (old_state != ac->state)
+ kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
+#endif
+ return 0;
+}
+
+static int acpi_ac_remove(struct acpi_device *device, int type)
+{
+ acpi_status status = AE_OK;
+ struct acpi_ac *ac = NULL;
+
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ ac = acpi_driver_data(device);
+
+ status = acpi_remove_notify_handler(device->handle,
+ ACPI_ALL_NOTIFY, acpi_ac_notify);
+#ifdef CONFIG_ACPI_SYSFS_POWER
+ if (ac->charger.dev)
+ power_supply_unregister(&ac->charger);
+#endif
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_ac_remove_fs(device);
+#endif
+
+ kfree(ac);
+
+ return 0;
+}
+
+static int __init acpi_ac_init(void)
+{
+ int result;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_ac_dir = acpi_lock_ac_dir();
+ if (!acpi_ac_dir)
+ return -ENODEV;
+#endif
+
+ result = acpi_bus_register_driver(&acpi_ac_driver);
+ if (result < 0) {
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_unlock_ac_dir(acpi_ac_dir);
+#endif
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void __exit acpi_ac_exit(void)
+{
+
+ acpi_bus_unregister_driver(&acpi_ac_driver);
+
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_unlock_ac_dir(acpi_ac_dir);
+#endif
+
+ return;
+}
+
+module_init(acpi_ac_init);
+module_exit(acpi_ac_exit);
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
new file mode 100644
index 0000000..63a17b5
--- /dev/null
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -0,0 +1,575 @@
+/*
+ * Copyright (C) 2004 Intel Corporation <naveen.b.s@intel.com>
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * ACPI based HotPlug driver that supports Memory Hotplug
+ * This driver fields notifications from firmware for memory add
+ * and remove operations and alerts the VM of the affected memory
+ * ranges.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/memory_hotplug.h>
+#include <acpi/acpi_drivers.h>
+
+#define ACPI_MEMORY_DEVICE_CLASS "memory"
+#define ACPI_MEMORY_DEVICE_HID "PNP0C80"
+#define ACPI_MEMORY_DEVICE_NAME "Hotplug Mem Device"
+
+#define _COMPONENT ACPI_MEMORY_DEVICE_COMPONENT
+
+ACPI_MODULE_NAME("acpi_memhotplug");
+MODULE_AUTHOR("Naveen B S <naveen.b.s@intel.com>");
+MODULE_DESCRIPTION("Hotplug Mem Driver");
+MODULE_LICENSE("GPL");
+
+/* Memory Device States */
+#define MEMORY_INVALID_STATE 0
+#define MEMORY_POWER_ON_STATE 1
+#define MEMORY_POWER_OFF_STATE 2
+
+static int acpi_memory_device_add(struct acpi_device *device);
+static int acpi_memory_device_remove(struct acpi_device *device, int type);
+static int acpi_memory_device_start(struct acpi_device *device);
+
+static const struct acpi_device_id memory_device_ids[] = {
+ {ACPI_MEMORY_DEVICE_HID, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, memory_device_ids);
+
+static struct acpi_driver acpi_memory_device_driver = {
+ .name = "acpi_memhotplug",
+ .class = ACPI_MEMORY_DEVICE_CLASS,
+ .ids = memory_device_ids,
+ .ops = {
+ .add = acpi_memory_device_add,
+ .remove = acpi_memory_device_remove,
+ .start = acpi_memory_device_start,
+ },
+};
+
+struct acpi_memory_info {
+ struct list_head list;
+ u64 start_addr; /* Memory Range start physical addr */
+ u64 length; /* Memory Range length */
+ unsigned short caching; /* memory cache attribute */
+ unsigned short write_protect; /* memory read/write attribute */
+ unsigned int enabled:1;
+};
+
+struct acpi_memory_device {
+ struct acpi_device * device;
+ unsigned int state; /* State of the memory device */
+ struct list_head res_list;
+};
+
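+/* Set once initial probing has completed; see acpi_memory_device_init() */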
+static int acpi_hotmem_initialized;
+
+static acpi_status
+acpi_memory_get_resource(struct acpi_resource *resource, void *context)
+{
+ struct acpi_memory_device *mem_device = context;
+ struct acpi_resource_address64 address64;
+ struct acpi_memory_info *info, *new;
+ acpi_status status;
+
+ status = acpi_resource_to_address64(resource, &address64);
+ if (ACPI_FAILURE(status) ||
+ (address64.resource_type != ACPI_MEMORY_RANGE))
+ return AE_OK;
+
+ list_for_each_entry(info, &mem_device->res_list, list) {
+ /* Can we combine the resource range information? */
+ if ((info->caching == address64.info.mem.caching) &&
+ (info->write_protect == address64.info.mem.write_protect) &&
+ (info->start_addr + info->length == address64.minimum)) {
+ info->length += address64.address_length;
+ return AE_OK;
+ }
+ }
+
+ new = kzalloc(sizeof(struct acpi_memory_info), GFP_KERNEL);
+ if (!new)
+ return AE_ERROR;
+
+ INIT_LIST_HEAD(&new->list);
+ new->caching = address64.info.mem.caching;
+ new->write_protect = address64.info.mem.write_protect;
+ new->start_addr = address64.minimum;
+ new->length = address64.address_length;
+ list_add_tail(&new->list, &mem_device->res_list);
+
+ return AE_OK;
+}
+
+static int
+acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
+{
+ acpi_status status;
+ struct acpi_memory_info *info, *n;
+
+
+ if (!list_empty(&mem_device->res_list))
+ return 0;
+
+ status = acpi_walk_resources(mem_device->device->handle, METHOD_NAME__CRS,
+ acpi_memory_get_resource, mem_device);
+ if (ACPI_FAILURE(status)) {
+ list_for_each_entry_safe(info, n, &mem_device->res_list, list)
+ kfree(info);
+ INIT_LIST_HEAD(&mem_device->res_list);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+acpi_memory_get_device(acpi_handle handle,
+ struct acpi_memory_device **mem_device)
+{
+ acpi_status status;
+ acpi_handle phandle;
+ struct acpi_device *device = NULL;
+ struct acpi_device *pdevice = NULL;
+
+
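+ /* If an acpi_device already exists for this handle, just use its driver data */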
+ if (!acpi_bus_get_device(handle, &device) && device)
+ goto end;
+
+ status = acpi_get_parent(handle, &phandle);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Cannot find acpi parent"));
+ return -EINVAL;
+ }
+
+ /* Get the parent device */
+ status = acpi_bus_get_device(phandle, &pdevice);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Cannot get acpi bus device"));
+ return -EINVAL;
+ }
+
+ /*
+ * Now add the notified device. This creates the acpi_device
+ * and invokes .add function
+ */
+ status = acpi_bus_add(&device, pdevice, handle, ACPI_BUS_TYPE_DEVICE);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Cannot add acpi bus"));
+ return -EINVAL;
+ }
+
+ end:
+ *mem_device = acpi_driver_data(device);
+ if (!(*mem_device)) {
+ printk(KERN_ERR "\n driver data not found");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
+{
+ unsigned long long current_status;
+
+ /* Get device present/absent information from the _STA */
+ if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle, "_STA",
+ NULL, &current_status)))
+ return -ENODEV;
+ /*
+ * Check for device status. Device should be
+ * present/enabled/functioning.
+ */
+ if (!((current_status & ACPI_STA_DEVICE_PRESENT)
+ && (current_status & ACPI_STA_DEVICE_ENABLED)
+ && (current_status & ACPI_STA_DEVICE_FUNCTIONING)))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
+{
+ int result, num_enabled = 0;
+ struct acpi_memory_info *info;
+ int node;
+
+
+ /* Get the range from the _CRS */
+ result = acpi_memory_get_device_resources(mem_device);
+ if (result) {
+ printk(KERN_ERR PREFIX "get_device_resources failed\n");
+ mem_device->state = MEMORY_INVALID_STATE;
+ return result;
+ }
+
+ node = acpi_get_node(mem_device->device->handle);
+ /*
+ * Tell the VM there is more memory here...
+ * Note: Assume that this function returns zero on success.
+ * We don't have a memory-hot-add rollback function
+ * (i.e. a memory-hot-remove function) yet.
+ */
+ list_for_each_entry(info, &mem_device->res_list, list) {
+ if (info->enabled) { /* just sanity check...*/
+ num_enabled++;
+ continue;
+ }
+
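+ /* No node from ACPI proximity info; derive it from the physical address */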
+ if (node < 0)
+ node = memory_add_physaddr_to_nid(info->start_addr);
+
+ result = add_memory(node, info->start_addr, info->length);
+ if (result)
+ continue;
+ info->enabled = 1;
+ num_enabled++;
+ }
+ if (!num_enabled) {
+ printk(KERN_ERR PREFIX "add_memory failed\n");
+ mem_device->state = MEMORY_INVALID_STATE;
+ return -EINVAL;
+ }
+
+ return result;
+}
+
+static int acpi_memory_powerdown_device(struct acpi_memory_device *mem_device)
+{
+ acpi_status status;
+ struct acpi_object_list arg_list;
+ union acpi_object arg;
+ unsigned long long current_status;
+
+
+ /* Issue the _EJ0 command */
+ arg_list.count = 1;
+ arg_list.pointer = &arg;
+ arg.type = ACPI_TYPE_INTEGER;
+ arg.integer.value = 1;
+ status = acpi_evaluate_object(mem_device->device->handle,
+ "_EJ0", &arg_list, NULL);
+ /* Return on _EJ0 failure */
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "_EJ0 failed"));
+ return -ENODEV;
+ }
+
+ /* Evaluate _STA to check if the device is disabled */
+ status = acpi_evaluate_integer(mem_device->device->handle, "_STA",
+ NULL, &current_status);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ /* Check for device status. Device should be disabled */
+ if (current_status & ACPI_STA_DEVICE_ENABLED)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int acpi_memory_disable_device(struct acpi_memory_device *mem_device)
+{
+ int result;
+ struct acpi_memory_info *info, *n;
+
+
+ /*
+ * Ask the VM to offline this memory range.
+ * Note: Assume that this function returns zero on success
+ */
+ list_for_each_entry_safe(info, n, &mem_device->res_list, list) {
+ if (info->enabled) {
+ result = remove_memory(info->start_addr, info->length);
+ if (result)
+ return result;
+ }
+ kfree(info);
+ }
+
+ /* Power-off and eject the device */
+ result = acpi_memory_powerdown_device(mem_device);
+ if (result) {
+ /* Set the status of the device to invalid */
+ mem_device->state = MEMORY_INVALID_STATE;
+ return result;
+ }
+
+ mem_device->state = MEMORY_POWER_OFF_STATE;
+ return result;
+}
+
+static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct acpi_memory_device *mem_device;
+ struct acpi_device *device;
+
+
+ switch (event) {
+ case ACPI_NOTIFY_BUS_CHECK:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "\nReceived BUS CHECK notification for device\n"));
+ /* Fall Through */
+ case ACPI_NOTIFY_DEVICE_CHECK:
+ if (event == ACPI_NOTIFY_DEVICE_CHECK)
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "\nReceived DEVICE CHECK notification for device\n"));
+ if (acpi_memory_get_device(handle, &mem_device)) {
+ printk(KERN_ERR PREFIX "Cannot find driver data\n");
+ return;
+ }
+
+ if (!acpi_memory_check_device(mem_device)) {
+ if (acpi_memory_enable_device(mem_device))
+ printk(KERN_ERR PREFIX
+ "Cannot enable memory device\n");
+ }
+ break;
+ case ACPI_NOTIFY_EJECT_REQUEST:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "\nReceived EJECT REQUEST notification for device\n"));
+
+ if (acpi_bus_get_device(handle, &device)) {
+ printk(KERN_ERR PREFIX "Device doesn't exist\n");
+ break;
+ }
+ mem_device = acpi_driver_data(device);
+ if (!mem_device) {
+ printk(KERN_ERR PREFIX "Driver Data is NULL\n");
+ break;
+ }
+
+ /*
+ * Currently disabling memory device from kernel mode
+ * TBD: Can also be disabled from user mode scripts
+ * TBD: Can also be disabled by Callback registration
+ * with generic sysfs driver
+ */
+ if (acpi_memory_disable_device(mem_device))
+ printk(KERN_ERR PREFIX
+ "Cannot disable memory device\n");
+ /*
+ * TBD: Invoke acpi_bus_remove to cleanup data structures
+ */
+ break;
+ default:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Unsupported event [0x%x]\n", event));
+ break;
+ }
+
+ return;
+}
+
+static int acpi_memory_device_add(struct acpi_device *device)
+{
+ int result;
+ struct acpi_memory_device *mem_device = NULL;
+
+
+ if (!device)
+ return -EINVAL;
+
+ mem_device = kzalloc(sizeof(struct acpi_memory_device), GFP_KERNEL);
+ if (!mem_device)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&mem_device->res_list);
+ mem_device->device = device;
+ sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME);
+ sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS);
+ device->driver_data = mem_device;
+
+ /* Get the range from the _CRS */
+ result = acpi_memory_get_device_resources(mem_device);
+ if (result) {
+ kfree(mem_device);
+ return result;
+ }
+
+ /* Set the device state */
+ mem_device->state = MEMORY_POWER_ON_STATE;
+
+ printk(KERN_DEBUG "%s \n", acpi_device_name(device));
+
+ return result;
+}
+
+static int acpi_memory_device_remove(struct acpi_device *device, int type)
+{
+ struct acpi_memory_device *mem_device = NULL;
+
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ mem_device = acpi_driver_data(device);
+ kfree(mem_device);
+
+ return 0;
+}
+
+static int acpi_memory_device_start (struct acpi_device *device)
+{
+ struct acpi_memory_device *mem_device;
+ int result = 0;
+
+ /*
+ * Early boot code has already recognized memory areas via EFI/E820.
+ * If the DSDT shows these memory devices at boot, hotplug is not
+ * necessary for them, so just return until this driver's start-up
+ * has completed.
+ */
+ if (!acpi_hotmem_initialized)
+ return 0;
+
+ mem_device = acpi_driver_data(device);
+
+ if (!acpi_memory_check_device(mem_device)) {
+ /* call add_memory func */
+ result = acpi_memory_enable_device(mem_device);
+ if (result)
+ printk(KERN_ERR PREFIX
+ "Error in acpi_memory_enable_device\n");
+ }
+ return result;
+}
+
+/*
+ * Helper function to check for memory device
+ */
+static acpi_status is_memory_device(acpi_handle handle)
+{
+ char *hardware_id;
+ acpi_status status;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_device_info *info;
+
+
+ status = acpi_get_object_info(handle, &buffer);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ info = buffer.pointer;
+ if (!(info->valid & ACPI_VALID_HID)) {
+ kfree(buffer.pointer);
+ return AE_ERROR;
+ }
+
+ hardware_id = info->hardware_id.value;
+ if ((hardware_id == NULL) ||
+ (strcmp(hardware_id, ACPI_MEMORY_DEVICE_HID)))
+ status = AE_ERROR;
+
+ kfree(buffer.pointer);
+ return status;
+}
+
+static acpi_status
+acpi_memory_register_notify_handler(acpi_handle handle,
+ u32 level, void *ctxt, void **retv)
+{
+ acpi_status status;
+
+
+ status = is_memory_device(handle);
+ if (ACPI_FAILURE(status))
+ return AE_OK; /* continue */
+
+ status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+ acpi_memory_device_notify, NULL);
+ /* continue */
+ return AE_OK;
+}
+
+static acpi_status
+acpi_memory_deregister_notify_handler(acpi_handle handle,
+ u32 level, void *ctxt, void **retv)
+{
+ acpi_status status;
+
+
+ status = is_memory_device(handle);
+ if (ACPI_FAILURE(status))
+ return AE_OK; /* continue */
+
+ status = acpi_remove_notify_handler(handle,
+ ACPI_SYSTEM_NOTIFY,
+ acpi_memory_device_notify);
+
+ return AE_OK; /* continue */
+}
+
+static int __init acpi_memory_device_init(void)
+{
+ int result;
+ acpi_status status;
+
+
+ result = acpi_bus_register_driver(&acpi_memory_device_driver);
+
+ if (result < 0)
+ return -ENODEV;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ acpi_memory_register_notify_handler,
+ NULL, NULL);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "walk_namespace failed"));
+ acpi_bus_unregister_driver(&acpi_memory_device_driver);
+ return -ENODEV;
+ }
+
+ acpi_hotmem_initialized = 1;
+ return 0;
+}
+
+static void __exit acpi_memory_device_exit(void)
+{
+ acpi_status status;
+
+
+ /*
+ * Walk the namespace to uninstall the notification handlers from
+ * all memory device handles.
+ */
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ acpi_memory_deregister_notify_handler,
+ NULL, NULL);
+
+ if (ACPI_FAILURE(status))
+ ACPI_EXCEPTION((AE_INFO, status, "walk_namespace failed"));
+
+ acpi_bus_unregister_driver(&acpi_memory_device_driver);
+
+ return;
+}
+
+module_init(acpi_memory_device_init);
+module_exit(acpi_memory_device_exit);
diff --git a/drivers/acpi/asus_acpi.c b/drivers/acpi/asus_acpi.c
new file mode 100644
index 0000000..d63f26e
--- /dev/null
+++ b/drivers/acpi/asus_acpi.c
@@ -0,0 +1,1474 @@
+/*
+ * asus_acpi.c - Asus Laptop ACPI Extras
+ *
+ *
+ * Copyright (C) 2002-2005 Julien Lerouge, 2003-2006 Karol Kozimor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *
+ * The development page for this driver is located at
+ * http://sourceforge.net/projects/acpi4asus/
+ *
+ * Credits:
+ * Pontus Fuchs - Helper functions, cleanup
+ * Johann Wiesner - Small compile fixes
+ * John Belmonte - ACPI code for Toshiba laptop was a good starting point.
+ * Éric Burghard - LED display support for W1N
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/backlight.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/acpi_bus.h>
+#include <asm/uaccess.h>
+
+#define ASUS_ACPI_VERSION "0.30"
+
+#define PROC_ASUS "asus" /* The directory */
+#define PROC_MLED "mled"
+#define PROC_WLED "wled"
+#define PROC_TLED "tled"
+#define PROC_BT "bluetooth"
+#define PROC_LEDD "ledd"
+#define PROC_INFO "info"
+#define PROC_LCD "lcd"
+#define PROC_BRN "brn"
+#define PROC_DISP "disp"
+
+#define ACPI_HOTK_NAME "Asus Laptop ACPI Extras Driver"
+#define ACPI_HOTK_CLASS "hotkey"
+#define ACPI_HOTK_DEVICE_NAME "Hotkey"
+
+/*
+ * Some events we use, same for all Asus
+ */
+#define BR_UP 0x10
+#define BR_DOWN 0x20
+
+/*
+ * Flags for hotk status
+ */
+#define MLED_ON 0x01 /* Mail LED */
+#define WLED_ON 0x02 /* Wireless LED */
+#define TLED_ON 0x04 /* Touchpad LED */
+#define BT_ON 0x08 /* Internal Bluetooth */
+
+MODULE_AUTHOR("Julien Lerouge, Karol Kozimor");
+MODULE_DESCRIPTION(ACPI_HOTK_NAME);
+MODULE_LICENSE("GPL");
+
+static uid_t asus_uid;
+static gid_t asus_gid;
+module_param(asus_uid, uint, 0);
+MODULE_PARM_DESC(asus_uid, "UID for entries in /proc/acpi/asus");
+module_param(asus_gid, uint, 0);
+MODULE_PARM_DESC(asus_gid, "GID for entries in /proc/acpi/asus");
+
+/* For each model, all implemented features are listed below;
+ * those marked with R are relative to HOTK, those with A are absolute */
+struct model_data {
+ char *name; /* name of the laptop________________A */
+ char *mt_mled; /* method to handle mled_____________R */
+ char *mled_status; /* node to handle mled reading_______A */
+ char *mt_wled; /* method to handle wled_____________R */
+ char *wled_status; /* node to handle wled reading_______A */
+ char *mt_tled; /* method to handle tled_____________R */
+ char *tled_status; /* node to handle tled reading_______A */
+ char *mt_ledd; /* method to handle LED display______R */
+ char *mt_bt_switch; /* method to switch Bluetooth on/off_R */
+ char *bt_status; /* no model currently supports this__? */
+ char *mt_lcd_switch; /* method to turn LCD on/off_________A */
+ char *lcd_status; /* node to read LCD panel state______A */
+ char *brightness_up; /* method to set brightness up_______A */
+ char *brightness_down; /* method to set brightness down ____A */
+ char *brightness_set; /* method to set absolute brightness_R */
+ char *brightness_get; /* method to get absolute brightness_R */
+ char *brightness_status;/* node to get brightness____________A */
+ char *display_set; /* method to set video output________R */
+ char *display_get; /* method to get video output________R */
+};
+
+/*
+ * This is the main structure, we can use it to store anything interesting
+ * about the hotk device
+ */
+struct asus_hotk {
+ struct acpi_device *device; /* the device we are in */
+ acpi_handle handle; /* the handle of the hotk device */
+ char status; /* status of the hotk, for LEDs */
+ u32 ledd_status; /* status of the LED display */
+ struct model_data *methods; /* methods available on the laptop */
+ u8 brightness; /* brightness level */
+ enum {
+ A1x = 0, /* A1340D, A1300F */
+ A2x, /* A2500H */
+ A4G, /* A4700G */
+ D1x, /* D1 */
+ L2D, /* L2000D */
+ L3C, /* L3800C */
+ L3D, /* L3400D */
+ L3H, /* L3H, L2000E, L5D */
+ L4R, /* L4500R */
+ L5x, /* L5800C */
+ L8L, /* L8400L */
+ M1A, /* M1300A */
+ M2E, /* M2400E, L4400L */
+ M6N, /* M6800N, W3400N */
+ M6R, /* M6700R, A3000G */
+ P30, /* Samsung P30 */
+ S1x, /* S1300A, but also L1400B and M2400A (L84F) */
+ S2x, /* S200 (J1 reported), Victor MP-XP7210 */
+ W1N, /* W1000N */
+ W5A, /* W5A */
+ W3V, /* W3030V */
+ xxN, /* M2400N, M3700N, M5200N, M6800N,
+ S1300N, S5200N*/
+ A4S, /* Z81sp */
+ F3Sa, /* (Centrino) */
+ R1F,
+ END_MODEL
+ } model; /* Models currently supported */
+ u16 event_count[128]; /* Count for each event TODO make this better */
+};
+
+/* Here we go */
+#define A1x_PREFIX "\\_SB.PCI0.ISA.EC0."
+#define L3C_PREFIX "\\_SB.PCI0.PX40.ECD0."
+#define M1A_PREFIX "\\_SB.PCI0.PX40.EC0."
+#define P30_PREFIX "\\_SB.PCI0.LPCB.EC0."
+#define S1x_PREFIX "\\_SB.PCI0.PX40."
+#define S2x_PREFIX A1x_PREFIX
+#define xxN_PREFIX "\\_SB.PCI0.SBRG.EC0."
+
+static struct model_data model_conf[END_MODEL] = {
+ /*
+ * TODO I have seen a SWBX and AIBX method on some models, like L1400B,
+ * it seems to be a kind of switch, but what for ?
+ */
+
+ {
+ .name = "A1x",
+ .mt_mled = "MLED",
+ .mled_status = "\\MAIL",
+ .mt_lcd_switch = A1x_PREFIX "_Q10",
+ .lcd_status = "\\BKLI",
+ .brightness_up = A1x_PREFIX "_Q0E",
+ .brightness_down = A1x_PREFIX "_Q0F"},
+
+ {
+ .name = "A2x",
+ .mt_mled = "MLED",
+ .mt_wled = "WLED",
+ .wled_status = "\\SG66",
+ .mt_lcd_switch = "\\Q10",
+ .lcd_status = "\\BAOF",
+ .brightness_set = "SPLV",
+ .brightness_get = "GPLV",
+ .display_set = "SDSP",
+ .display_get = "\\INFB"},
+
+ {
+ .name = "A4G",
+ .mt_mled = "MLED",
+/* WLED present, but not controlled by ACPI */
+ .mt_lcd_switch = xxN_PREFIX "_Q10",
+ .brightness_set = "SPLV",
+ .brightness_get = "GPLV",
+ .display_set = "SDSP",
+ .display_get = "\\ADVG"},
+
+ {
+ .name = "D1x",
+ .mt_mled = "MLED",
+ .mt_lcd_switch = "\\Q0D",
+ .lcd_status = "\\GP11",
+ .brightness_up = "\\Q0C",
+ .brightness_down = "\\Q0B",
+ .brightness_status = "\\BLVL",
+ .display_set = "SDSP",
+ .display_get = "\\INFB"},
+
+ {
+ .name = "L2D",
+ .mt_mled = "MLED",
+ .mled_status = "\\SGP6",
+ .mt_wled = "WLED",
+ .wled_status = "\\RCP3",
+ .mt_lcd_switch = "\\Q10",
+ .lcd_status = "\\SGP0",
+ .brightness_up = "\\Q0E",
+ .brightness_down = "\\Q0F",
+ .display_set = "SDSP",
+ .display_get = "\\INFB"},
+
+ {
+ .name = "L3C",
+ .mt_mled = "MLED",
+ .mt_wled = "WLED",
+ .mt_lcd_switch = L3C_PREFIX "_Q10",
+ .lcd_status = "\\GL32",
+ .brightness_set = "SPLV",
+ .brightness_get = "GPLV",
+ .display_set = "SDSP",
+ .display_get = "\\_SB.PCI0.PCI1.VGAC.NMAP"},
+
+ {
+ .name = "L3D",
+ .mt_mled = "MLED",
+ .mled_status = "\\MALD",
+ .mt_wled = "WLED",
+ .mt_lcd_switch = "\\Q10",
+ .lcd_status = "\\BKLG",
+ .brightness_set = "SPLV",
+ .brightness_get = "GPLV",
+ .display_set = "SDSP",
+ .display_get = "\\INFB"},
+
+ {
+ .name = "L3H",
+ .mt_mled = "MLED",
+ .mt_wled = "WLED",
+ .mt_lcd_switch = "EHK",
+ .lcd_status = "\\_SB.PCI0.PM.PBC",
+ .brightness_set = "SPLV",
+ .brightness_get = "GPLV",
+ .display_set = "SDSP",
+ .display_get = "\\INFB"},
+
+ {
+ .name = "L4R",
+ .mt_mled = "MLED",
+ .mt_wled = "WLED",
+ .wled_status = "\\_SB.PCI0.SBRG.SG13",
+ .mt_lcd_switch = xxN_PREFIX "_Q10",
+ .lcd_status = "\\_SB.PCI0.SBSM.SEO4",
+ .brightness_set = "SPLV",
+ .brightness_get = "GPLV",
+ .display_set = "SDSP",
+ .display_get = "\\_SB.PCI0.P0P1.VGA.GETD"},
+
+ {
+ .name = "L5x",
+ .mt_mled = "MLED",
+/* WLED present, but not controlled by ACPI */
+ .mt_tled = "TLED",
+ .mt_lcd_switch = "\\Q0D",
+ .lcd_status = "\\BAOF",
+ .brightness_set = "SPLV",
+ .brightness_get = "GPLV",
+ .display_set = "SDSP",
+ .display_get = "\\INFB"},
+
+ {
+ .name = "L8L"
+/* No features, but at least support the hotkeys */
+ },
+
+ {
+ .name = "M1A",
+ .mt_mled = "MLED",
+ .mt_lcd_switch = M1A_PREFIX "Q10",
+ .lcd_status = "\\PNOF",
+ .brightness_up = M1A_PREFIX "Q0E",
+ .brightness_down = M1A_PREFIX "Q0F",
+ .brightness_status = "\\BRIT",
+ .display_set = "SDSP",
+ .display_get = "\\INFB"},
+
+ {
+ .name = "M2E",
+ .mt_mled = "MLED",
+ .mt_wled = "WLED",
+ .mt_lcd_switch = "\\Q10",
+ .lcd_status = "\\GP06",
+ .brightness_set = "SPLV",
+ .brightness_get = "GPLV",
+ .display_set = "SDSP",
+ .display_get = "\\INFB"},
+
+ {
+ .name = "M6N",
+ .mt_mled = "MLED",
+ .mt_wled = "WLED",
+ .wled_status = "\\_SB.PCI0.SBRG.SG13",
+ .mt_lcd_switch = xxN_PREFIX "_Q10",
+ .lcd_status = "\\_SB.BKLT",
+ .brightness_set = "SPLV",
+ .brightness_get = "GPLV",
+ .display_set = "SDSP",
+ .display_get = "\\SSTE"},
+
+ {
+ .name = "M6R",
+ .mt_mled = "MLED",
+ .mt_wled = "WLED",
+ .mt_lcd_switch = xxN_PREFIX "_Q10",
+ .lcd_status = "\\_SB.PCI0.SBSM.SEO4",
+ .brightness_set = "SPLV",
+ .brightness_get = "GPLV",
+ .display_set = "SDSP",
+ .display_get = "\\_SB.PCI0.P0P1.VGA.GETD"},
+
+ {
+ .name = "P30",
+ .mt_wled = "WLED",
+ .mt_lcd_switch = P30_PREFIX "_Q0E",
+ .lcd_status = "\\BKLT",
+ .brightness_up = P30_PREFIX "_Q68",
+ .brightness_down = P30_PREFIX "_Q69",
+ .brightness_get = "GPLV",
+ .display_set = "SDSP",
+ .display_get = "\\DNXT"},
+
+ {
+ .name = "S1x",
+ .mt_mled = "MLED",
+ .mled_status = "\\EMLE",
+ .mt_wled = "WLED",
+ .mt_lcd_switch = S1x_PREFIX "Q10",
+ .lcd_status = "\\PNOF",
+ .brightness_set = "SPLV",
+ .brightness_get = "GPLV"},
+
+ {
+ .name = "S2x",
+ .mt_mled = "MLED",
+ .mled_status = "\\MAIL",
+ .mt_lcd_switch = S2x_PREFIX "_Q10",
+ .lcd_status = "\\BKLI",
+ .brightness_up = S2x_PREFIX "_Q0B",
+ .brightness_down = S2x_PREFIX "_Q0A"},
+
+ {
+ .name = "W1N",
+ .mt_mled = "MLED",
+ .mt_wled = "WLED",
+ .mt_ledd = "SLCM",
+ .mt_lcd_switch = xxN_PREFIX "_Q10",
+ .lcd_status = "\\BKLT",
+ .brightness_set = "SPLV",
+ .brightness_get = "GPLV",
+ .display_set = "SDSP",
+ .display_get = "\\ADVG"},
+
+ {
+ .name = "W5A",
+ .mt_bt_switch = "BLED",
+ .mt_wled = "WLED",
+ .mt_lcd_switch = xxN_PREFIX "_Q10",
+ .brightness_set = "SPLV",
+ .brightness_get = "GPLV",
+ .display_set = "SDSP",
+ .display_get = "\\ADVG"},
+
+ {
+ .name = "W3V",
+ .mt_mled = "MLED",
+ .mt_wled = "WLED",
+ .mt_lcd_switch = xxN_PREFIX "_Q10",
+ .lcd_status = "\\BKLT",
+ .brightness_set = "SPLV",
+ .brightness_get = "GPLV",
+ .display_set = "SDSP",
+ .display_get = "\\INFB"},
+
+ {
+ .name = "xxN",
+ .mt_mled = "MLED",
+/* WLED present, but not controlled by ACPI */
+ .mt_lcd_switch = xxN_PREFIX "_Q10",
+ .lcd_status = "\\BKLT",
+ .brightness_set = "SPLV",
+ .brightness_get = "GPLV",
+ .display_set = "SDSP",
+ .display_get = "\\ADVG"},
+
+ {
+ .name = "A4S",
+ .brightness_set = "SPLV",
+ .brightness_get = "GPLV",
+ .mt_bt_switch = "BLED",
+ .mt_wled = "WLED"
+ },
+
+ {
+ .name = "F3Sa",
+ .mt_bt_switch = "BLED",
+ .mt_wled = "WLED",
+ .mt_mled = "MLED",
+ .brightness_get = "GPLV",
+ .brightness_set = "SPLV",
+ .mt_lcd_switch = "\\_SB.PCI0.SBRG.EC0._Q10",
+ .lcd_status = "\\_SB.PCI0.SBRG.EC0.RPIN",
+ .display_get = "\\ADVG",
+ .display_set = "SDSP",
+ },
+ {
+ .name = "R1F",
+ .mt_bt_switch = "BLED",
+ .mt_mled = "MLED",
+ .mt_wled = "WLED",
+ .mt_lcd_switch = "\\Q10",
+ .lcd_status = "\\GP06",
+ .brightness_set = "SPLV",
+ .brightness_get = "GPLV",
+ .display_set = "SDSP",
+ .display_get = "\\INFB"
+ }
+};
+
+/* procdir we use */
+static struct proc_dir_entry *asus_proc_dir;
+
+static struct backlight_device *asus_backlight_device;
+
+/*
+ * This header is kept around to allow proper configuration given the model,
+ * revision number, etc. This info cannot go into struct asus_hotk because it
+ * is needed before the hotk is allocated.
+ */
+static struct acpi_table_header *asus_info;
+
+/* The actual device the driver binds to */
+static struct asus_hotk *hotk;
+
+/*
+ * The hotkey driver and autoloading declaration
+ */
+static int asus_hotk_add(struct acpi_device *device);
+static int asus_hotk_remove(struct acpi_device *device, int type);
+static const struct acpi_device_id asus_device_ids[] = {
+ {"ATK0100", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, asus_device_ids);
+
+static struct acpi_driver asus_hotk_driver = {
+ .name = "asus_acpi",
+ .class = ACPI_HOTK_CLASS,
+ .ids = asus_device_ids,
+ .ops = {
+ .add = asus_hotk_add,
+ .remove = asus_hotk_remove,
+ },
+};
+
+/*
+ * This function evaluates an ACPI method, passing a single integer as
+ * parameter. The method is looked up within the scope of the handle, which
+ * may be NULL. The output of the method is written to output, which may also
+ * be NULL.
+ *
+ * Returns 1 if the evaluation succeeded, 0 otherwise.
+ */
+static int write_acpi_int(acpi_handle handle, const char *method, int val,
+ struct acpi_buffer *output)
+{
+ struct acpi_object_list params; /* list of input parameters (int) */
+ union acpi_object in_obj; /* the only param we use */
+ acpi_status status;
+
+ params.count = 1;
+ params.pointer = &in_obj;
+ in_obj.type = ACPI_TYPE_INTEGER;
+ in_obj.integer.value = val;
+
+ status = acpi_evaluate_object(handle, (char *)method, &params, output);
+ return (status == AE_OK);
+}
+
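+/*
+ * Companion helper: evaluate a method with no arguments and return its
+ * integer result in *val. Returns 1 only if the evaluation succeeded and an
+ * integer came back, so callers must check the return value before using
+ * *val.
+ */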
+static int read_acpi_int(acpi_handle handle, const char *method, int *val)
+{
+ struct acpi_buffer output;
+ union acpi_object out_obj;
+ acpi_status status;
+
+ output.length = sizeof(out_obj);
+ output.pointer = &out_obj;
+
+ status = acpi_evaluate_object(handle, (char *)method, NULL, &output);
+ *val = out_obj.integer.value;
+ return (status == AE_OK) && (out_obj.type == ACPI_TYPE_INTEGER);
+}
+
+/*
+ * We write our info into page, beginning at offset off, without writing more
+ * than count bytes. We set eof to 1 if we honour those two values, and return
+ * the number of bytes written into page.
+ */
+static int
+proc_read_info(char *page, char **start, off_t off, int count, int *eof,
+ void *data)
+{
+ int len = 0;
+ int temp;
+ char buf[16]; /* enough for all info */
+	/*
+	 * We take the easy way out: we ignore off and count,
+	 * so we don't set eof to 1.
+	 */
+
+ len += sprintf(page, ACPI_HOTK_NAME " " ASUS_ACPI_VERSION "\n");
+ len += sprintf(page + len, "Model reference : %s\n",
+ hotk->methods->name);
+ /*
+ * The SFUN method probably allows the original driver to get the list
+	 * of features supported by a given model. For now, the 0x0100 or 0x0800
+	 * bit signifies that the laptop is equipped with a Wi-Fi MiniPCI card.
+	 * The significance of the other bits is yet to be determined.
+ */
+ if (read_acpi_int(hotk->handle, "SFUN", &temp))
+ len +=
+ sprintf(page + len, "SFUN value : 0x%04x\n", temp);
+ /*
+ * Another value for userspace: the ASYM method returns 0x02 for
+	 * battery low and 0x04 for battery critical; its readings tend to be
+ * more accurate than those provided by _BST.
+ * Note: since not all the laptops provide this method, errors are
+ * silently ignored.
+ */
+ if (read_acpi_int(hotk->handle, "ASYM", &temp))
+ len +=
+ sprintf(page + len, "ASYM value : 0x%04x\n", temp);
+ if (asus_info) {
+ snprintf(buf, 16, "%d", asus_info->length);
+ len += sprintf(page + len, "DSDT length : %s\n", buf);
+ snprintf(buf, 16, "%d", asus_info->checksum);
+ len += sprintf(page + len, "DSDT checksum : %s\n", buf);
+ snprintf(buf, 16, "%d", asus_info->revision);
+ len += sprintf(page + len, "DSDT revision : %s\n", buf);
+ snprintf(buf, 7, "%s", asus_info->oem_id);
+ len += sprintf(page + len, "OEM id : %s\n", buf);
+ snprintf(buf, 9, "%s", asus_info->oem_table_id);
+ len += sprintf(page + len, "OEM table id : %s\n", buf);
+ snprintf(buf, 16, "%x", asus_info->oem_revision);
+ len += sprintf(page + len, "OEM revision : 0x%s\n", buf);
+ snprintf(buf, 5, "%s", asus_info->asl_compiler_id);
+ len += sprintf(page + len, "ASL comp vendor id : %s\n", buf);
+ snprintf(buf, 16, "%x", asus_info->asl_compiler_revision);
+ len += sprintf(page + len, "ASL comp revision : 0x%s\n", buf);
+ }
+
+ return len;
+}
+
+/*
+ * /proc handlers
+ */
+
+/* Generic LED functions */
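+/*
+ * read_led() prefers the model's ACPI status object when one is listed and
+ * falls back to the cached bit in hotk->status otherwise; write_led() keeps
+ * that cache in sync before invoking the control method.
+ */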
+static int read_led(const char *ledname, int ledmask)
+{
+ if (ledname) {
+ int led_status;
+
+ if (read_acpi_int(NULL, ledname, &led_status))
+ return led_status;
+ else
+ printk(KERN_WARNING "Asus ACPI: Error reading LED "
+ "status\n");
+ }
+ return (hotk->status & ledmask) ? 1 : 0;
+}
+
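+/*
+ * Parse the integer a user wrote to one of our proc files into *val.
+ * Returns the number of bytes consumed on success, 0 for an empty write,
+ * or a negative errno on bad input.
+ */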
+static int parse_arg(const char __user *buf, unsigned long count, int *val)
+{
+ char s[32];
+ if (!count)
+ return 0;
+ if (count > 31)
+ return -EINVAL;
+ if (copy_from_user(s, buf, count))
+ return -EFAULT;
+ s[count] = 0;
+ if (sscanf(s, "%i", val) != 1)
+ return -EINVAL;
+ return count;
+}
+
+/* FIXME: kill extraneous args so it can be called independently */
+static int
+write_led(const char __user *buffer, unsigned long count,
+ char *ledname, int ledmask, int invert)
+{
+ int rv, value;
+ int led_out = 0;
+
+ rv = parse_arg(buffer, count, &value);
+ if (rv > 0)
+ led_out = value ? 1 : 0;
+
+ hotk->status =
+ (led_out) ? (hotk->status | ledmask) : (hotk->status & ~ledmask);
+
+ if (invert) /* invert target value */
+ led_out = !led_out;
+
+ if (!write_acpi_int(hotk->handle, ledname, led_out, NULL))
+ printk(KERN_WARNING "Asus ACPI: LED (%s) write failed\n",
+ ledname);
+
+ return rv;
+}
+
+/*
+ * Proc handlers for MLED
+ */
+static int
+proc_read_mled(char *page, char **start, off_t off, int count, int *eof,
+ void *data)
+{
+ return sprintf(page, "%d\n",
+ read_led(hotk->methods->mled_status, MLED_ON));
+}
+
+static int
+proc_write_mled(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ return write_led(buffer, count, hotk->methods->mt_mled, MLED_ON, 1);
+}
+
+/*
+ * Proc handlers for LED display
+ */
+static int
+proc_read_ledd(char *page, char **start, off_t off, int count, int *eof,
+ void *data)
+{
+ return sprintf(page, "0x%08x\n", hotk->ledd_status);
+}
+
+static int
+proc_write_ledd(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ int rv, value;
+
+ rv = parse_arg(buffer, count, &value);
+ if (rv > 0) {
+ if (!write_acpi_int
+ (hotk->handle, hotk->methods->mt_ledd, value, NULL))
+ printk(KERN_WARNING
+ "Asus ACPI: LED display write failed\n");
+ else
+ hotk->ledd_status = (u32) value;
+ }
+ return rv;
+}
+
+/*
+ * Proc handlers for WLED
+ */
+static int
+proc_read_wled(char *page, char **start, off_t off, int count, int *eof,
+ void *data)
+{
+ return sprintf(page, "%d\n",
+ read_led(hotk->methods->wled_status, WLED_ON));
+}
+
+static int
+proc_write_wled(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ return write_led(buffer, count, hotk->methods->mt_wled, WLED_ON, 0);
+}
+
+/*
+ * Proc handlers for Bluetooth
+ */
+static int
+proc_read_bluetooth(char *page, char **start, off_t off, int count, int *eof,
+ void *data)
+{
+ return sprintf(page, "%d\n", read_led(hotk->methods->bt_status, BT_ON));
+}
+
+static int
+proc_write_bluetooth(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+	/* Note: mt_bt_switch controls both the internal Bluetooth adapter's
+	   presence and its LED */
+ return write_led(buffer, count, hotk->methods->mt_bt_switch, BT_ON, 0);
+}
+
+/*
+ * Proc handlers for TLED
+ */
+static int
+proc_read_tled(char *page, char **start, off_t off, int count, int *eof,
+ void *data)
+{
+ return sprintf(page, "%d\n",
+ read_led(hotk->methods->tled_status, TLED_ON));
+}
+
+static int
+proc_write_tled(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ return write_led(buffer, count, hotk->methods->mt_tled, TLED_ON, 0);
+}
+
+static int get_lcd_state(void)
+{
+ int lcd = 0;
+
+ if (hotk->model == L3H) {
+ /* L3H and the like have to be handled differently */
+ acpi_status status = 0;
+ struct acpi_object_list input;
+ union acpi_object mt_params[2];
+ struct acpi_buffer output;
+ union acpi_object out_obj;
+
+ input.count = 2;
+ input.pointer = mt_params;
+		/* Note: the following values are partly guesswork, but
+		   they seem to work in practice */
+ mt_params[0].type = ACPI_TYPE_INTEGER;
+ mt_params[0].integer.value = 0x02;
+ mt_params[1].type = ACPI_TYPE_INTEGER;
+ mt_params[1].integer.value = 0x02;
+
+ output.length = sizeof(out_obj);
+ output.pointer = &out_obj;
+
+ status =
+ acpi_evaluate_object(NULL, hotk->methods->lcd_status,
+ &input, &output);
+ if (status != AE_OK)
+ return -1;
+ if (out_obj.type == ACPI_TYPE_INTEGER)
+ /* That's what the AML code does */
+ lcd = out_obj.integer.value >> 8;
+ } else if (hotk->model == F3Sa) {
+ unsigned long long tmp;
+ union acpi_object param;
+ struct acpi_object_list input;
+ acpi_status status;
+
+ /* Read pin 11 */
+ param.type = ACPI_TYPE_INTEGER;
+ param.integer.value = 0x11;
+ input.count = 1;
+ input.pointer = &param;
+
+ status = acpi_evaluate_integer(NULL, hotk->methods->lcd_status,
+ &input, &tmp);
+ if (status != AE_OK)
+ return -1;
+
+ lcd = tmp;
+ } else {
+ /* We don't have to check anything if we are here */
+ if (!read_acpi_int(NULL, hotk->methods->lcd_status, &lcd))
+ printk(KERN_WARNING
+ "Asus ACPI: Error reading LCD status\n");
+
+ if (hotk->model == L2D)
+ lcd = ~lcd;
+ }
+
+ return (lcd & 1);
+}
+
+static int set_lcd_state(int value)
+{
+ int lcd = 0;
+ acpi_status status = 0;
+
+ lcd = value ? 1 : 0;
+ if (lcd != get_lcd_state()) {
+ /* switch */
+ if (hotk->model != L3H) {
+ status =
+ acpi_evaluate_object(NULL,
+ hotk->methods->mt_lcd_switch,
+ NULL, NULL);
+ } else {
+ /* L3H and the like must be handled differently */
+ if (!write_acpi_int
+ (hotk->handle, hotk->methods->mt_lcd_switch, 0x07,
+ NULL))
+ status = AE_ERROR;
+			/* L3H's AML executes EHK (0x07) upon an Fn+F7 keypress;
+			   that exact behaviour is simulated here */
+ }
+ if (ACPI_FAILURE(status))
+ printk(KERN_WARNING "Asus ACPI: Error switching LCD\n");
+ }
+ return 0;
+}
+
+static int
+proc_read_lcd(char *page, char **start, off_t off, int count, int *eof,
+ void *data)
+{
+ return sprintf(page, "%d\n", get_lcd_state());
+}
+
+static int
+proc_write_lcd(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ int rv, value;
+
+ rv = parse_arg(buffer, count, &value);
+ if (rv > 0)
+ set_lcd_state(value);
+ return rv;
+}
+
+static int read_brightness(struct backlight_device *bd)
+{
+ int value;
+
+ if (hotk->methods->brightness_get) { /* SPLV/GPLV laptop */
+ if (!read_acpi_int(hotk->handle, hotk->methods->brightness_get,
+ &value))
+ printk(KERN_WARNING
+ "Asus ACPI: Error reading brightness\n");
+ } else if (hotk->methods->brightness_status) { /* For D1 for example */
+ if (!read_acpi_int(NULL, hotk->methods->brightness_status,
+ &value))
+ printk(KERN_WARNING
+ "Asus ACPI: Error reading brightness\n");
+ } else /* No GPLV method */
+ value = hotk->brightness;
+ return value;
+}
+
+/*
+ * Change the brightness level
+ */
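+/*
+ * If the model provides an SPLV method, a single call sets the level
+ * directly; otherwise we step towards the requested level by repeatedly
+ * firing the brightness-up/down hotkey methods, one level per call.
+ */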
+static int set_brightness(int value)
+{
+ acpi_status status = 0;
+ int ret = 0;
+
+ /* SPLV laptop */
+ if (hotk->methods->brightness_set) {
+		if (!write_acpi_int(hotk->handle, hotk->methods->brightness_set,
+				    value, NULL)) {
+			printk(KERN_WARNING
+			       "Asus ACPI: Error changing brightness\n");
+			ret = -EIO;
+		}
+		goto out;
+ }
+
+ /* No SPLV method if we are here, act as appropriate */
+ value -= read_brightness(NULL);
+ while (value != 0) {
+ status = acpi_evaluate_object(NULL, (value > 0) ?
+ hotk->methods->brightness_up :
+ hotk->methods->brightness_down,
+ NULL, NULL);
+ (value > 0) ? value-- : value++;
+		if (ACPI_FAILURE(status)) {
+			printk(KERN_WARNING
+			       "Asus ACPI: Error changing brightness\n");
+			ret = -EIO;
+		}
+ }
+out:
+ return ret;
+}
+
+static int set_brightness_status(struct backlight_device *bd)
+{
+ return set_brightness(bd->props.brightness);
+}
+
+static int
+proc_read_brn(char *page, char **start, off_t off, int count, int *eof,
+ void *data)
+{
+ return sprintf(page, "%d\n", read_brightness(NULL));
+}
+
+static int
+proc_write_brn(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ int rv, value;
+
+ rv = parse_arg(buffer, count, &value);
+ if (rv > 0) {
+ value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
+ /* 0 <= value <= 15 */
+ set_brightness(value);
+ }
+ return rv;
+}
+
+static void set_display(int value)
+{
+ /* no sanity check needed for now */
+ if (!write_acpi_int(hotk->handle, hotk->methods->display_set,
+ value, NULL))
+ printk(KERN_WARNING "Asus ACPI: Error setting display\n");
+ return;
+}
+
+/*
+ * Now, *this* one could be more user-friendly, but so far no-one has
+ * complained. The significance of the bits is the same as in proc_write_disp().
+ */
+static int
+proc_read_disp(char *page, char **start, off_t off, int count, int *eof,
+ void *data)
+{
+ int value = 0;
+
+ if (!read_acpi_int(hotk->handle, hotk->methods->display_get, &value))
+ printk(KERN_WARNING
+ "Asus ACPI: Error reading display status\n");
+ value &= 0x07; /* needed for some models, shouldn't hurt others */
+ return sprintf(page, "%d\n", value);
+}
+
+/*
+ * Experimental support for display switching. As of now: 1 should activate
+ * the LCD output, 2 should do for CRT, and 4 for TV-Out. Any combination
+ * (bitwise) of these will suffice. I never actually tested 3 displays hooked
+ * up simultaneously, so be warned. See the acpi4asus README for more info.
+ */
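+/*
+ * For example, assuming the proc directory uses the default PROC_ASUS name,
+ * "echo 3 > /proc/acpi/asus/disp" would request LCD + CRT simultaneously
+ * (1 | 2), while "echo 1" would switch back to the LCD alone.
+ */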
+static int
+proc_write_disp(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ int rv, value;
+
+ rv = parse_arg(buffer, count, &value);
+ if (rv > 0)
+ set_display(value);
+ return rv;
+}
+
+typedef int (proc_readfunc) (char *page, char **start, off_t off, int count,
+ int *eof, void *data);
+typedef int (proc_writefunc) (struct file *file, const char __user *buffer,
+ unsigned long count, void *data);
+
+static int
+asus_proc_add(char *name, proc_writefunc *writefunc,
+ proc_readfunc *readfunc, mode_t mode,
+ struct acpi_device *device)
+{
+ struct proc_dir_entry *proc =
+ create_proc_entry(name, mode, acpi_device_dir(device));
+ if (!proc) {
+ printk(KERN_WARNING " Unable to create %s fs entry\n", name);
+ return -1;
+ }
+ proc->write_proc = writefunc;
+ proc->read_proc = readfunc;
+ proc->data = acpi_driver_data(device);
+ proc->owner = THIS_MODULE;
+ proc->uid = asus_uid;
+ proc->gid = asus_gid;
+ return 0;
+}
+
+static int asus_hotk_add_fs(struct acpi_device *device)
+{
+ struct proc_dir_entry *proc;
+ mode_t mode;
+
+ /*
+	 * If neither the uid nor the gid parameter was changed, keep the default
+	 * mode for our proc entries (-rw-rw-rw-). Otherwise the user cares about
+	 * security, so restrict the mode to -rw-rw----.
+ */
+
+ if ((asus_uid == 0) && (asus_gid == 0)) {
+ mode = S_IFREG | S_IRUGO | S_IWUGO;
+ } else {
+ mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP;
+ printk(KERN_WARNING " asus_uid and asus_gid parameters are "
+ "deprecated, use chown and chmod instead!\n");
+ }
+
+ acpi_device_dir(device) = asus_proc_dir;
+ if (!acpi_device_dir(device))
+ return -ENODEV;
+
+ proc = create_proc_entry(PROC_INFO, mode, acpi_device_dir(device));
+ if (proc) {
+ proc->read_proc = proc_read_info;
+ proc->data = acpi_driver_data(device);
+ proc->owner = THIS_MODULE;
+ proc->uid = asus_uid;
+ proc->gid = asus_gid;
+ } else {
+ printk(KERN_WARNING " Unable to create " PROC_INFO
+ " fs entry\n");
+ }
+
+ if (hotk->methods->mt_wled) {
+ asus_proc_add(PROC_WLED, &proc_write_wled, &proc_read_wled,
+ mode, device);
+ }
+
+ if (hotk->methods->mt_ledd) {
+ asus_proc_add(PROC_LEDD, &proc_write_ledd, &proc_read_ledd,
+ mode, device);
+ }
+
+ if (hotk->methods->mt_mled) {
+ asus_proc_add(PROC_MLED, &proc_write_mled, &proc_read_mled,
+ mode, device);
+ }
+
+ if (hotk->methods->mt_tled) {
+ asus_proc_add(PROC_TLED, &proc_write_tled, &proc_read_tled,
+ mode, device);
+ }
+
+ if (hotk->methods->mt_bt_switch) {
+ asus_proc_add(PROC_BT, &proc_write_bluetooth,
+ &proc_read_bluetooth, mode, device);
+ }
+
+ /*
+	 * We need both the read node and the write method, as the LCD switch is
+	 * also accessible from the keyboard
+ */
+ if (hotk->methods->mt_lcd_switch && hotk->methods->lcd_status) {
+ asus_proc_add(PROC_LCD, &proc_write_lcd, &proc_read_lcd, mode,
+ device);
+ }
+
+ if ((hotk->methods->brightness_up && hotk->methods->brightness_down) ||
+ (hotk->methods->brightness_get && hotk->methods->brightness_set)) {
+ asus_proc_add(PROC_BRN, &proc_write_brn, &proc_read_brn, mode,
+ device);
+ }
+
+ if (hotk->methods->display_set) {
+ asus_proc_add(PROC_DISP, &proc_write_disp, &proc_read_disp,
+ mode, device);
+ }
+
+ return 0;
+}
+
+static int asus_hotk_remove_fs(struct acpi_device *device)
+{
+ if (acpi_device_dir(device)) {
+ remove_proc_entry(PROC_INFO, acpi_device_dir(device));
+ if (hotk->methods->mt_wled)
+ remove_proc_entry(PROC_WLED, acpi_device_dir(device));
+ if (hotk->methods->mt_mled)
+ remove_proc_entry(PROC_MLED, acpi_device_dir(device));
+ if (hotk->methods->mt_tled)
+ remove_proc_entry(PROC_TLED, acpi_device_dir(device));
+ if (hotk->methods->mt_ledd)
+ remove_proc_entry(PROC_LEDD, acpi_device_dir(device));
+ if (hotk->methods->mt_bt_switch)
+ remove_proc_entry(PROC_BT, acpi_device_dir(device));
+ if (hotk->methods->mt_lcd_switch && hotk->methods->lcd_status)
+ remove_proc_entry(PROC_LCD, acpi_device_dir(device));
+ if ((hotk->methods->brightness_up
+ && hotk->methods->brightness_down)
+ || (hotk->methods->brightness_get
+ && hotk->methods->brightness_set))
+ remove_proc_entry(PROC_BRN, acpi_device_dir(device));
+ if (hotk->methods->display_set)
+ remove_proc_entry(PROC_DISP, acpi_device_dir(device));
+ }
+ return 0;
+}
+
+static void asus_hotk_notify(acpi_handle handle, u32 event, void *data)
+{
+	/* TODO: Find a better way to handle the event count. */
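+	/*
+	 * Brightness hotkeys report the new level in the event's low bits;
+	 * masking off BR_UP/BR_DOWN recovers a value in the 0-15 range.
+	 */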
+ if (!hotk)
+ return;
+
+ if ((event & ~((u32) BR_UP)) < 16)
+ hotk->brightness = (event & ~((u32) BR_UP));
+ else if ((event & ~((u32) BR_DOWN)) < 16)
+ hotk->brightness = (event & ~((u32) BR_DOWN));
+
+ acpi_bus_generate_proc_event(hotk->device, event,
+ hotk->event_count[event % 128]++);
+
+ return;
+}
+
+/*
+ * Match the model string to the list of supported models. Return END_MODEL if
+ * no match or model is NULL.
+ */
+static int asus_model_match(char *model)
+{
+ if (model == NULL)
+ return END_MODEL;
+
+ if (strncmp(model, "L3D", 3) == 0)
+ return L3D;
+ else if (strncmp(model, "L2E", 3) == 0 ||
+ strncmp(model, "L3H", 3) == 0 || strncmp(model, "L5D", 3) == 0)
+ return L3H;
+ else if (strncmp(model, "L3", 2) == 0 || strncmp(model, "L2B", 3) == 0)
+ return L3C;
+ else if (strncmp(model, "L8L", 3) == 0)
+ return L8L;
+ else if (strncmp(model, "L4R", 3) == 0)
+ return L4R;
+ else if (strncmp(model, "M6N", 3) == 0 || strncmp(model, "W3N", 3) == 0)
+ return M6N;
+ else if (strncmp(model, "M6R", 3) == 0 || strncmp(model, "A3G", 3) == 0)
+ return M6R;
+ else if (strncmp(model, "M2N", 3) == 0 ||
+ strncmp(model, "M3N", 3) == 0 ||
+ strncmp(model, "M5N", 3) == 0 ||
+ strncmp(model, "M6N", 3) == 0 ||
+ strncmp(model, "S1N", 3) == 0 ||
+ strncmp(model, "S5N", 3) == 0 || strncmp(model, "W1N", 3) == 0)
+ return xxN;
+ else if (strncmp(model, "M1", 2) == 0)
+ return M1A;
+ else if (strncmp(model, "M2", 2) == 0 || strncmp(model, "L4E", 3) == 0)
+ return M2E;
+ else if (strncmp(model, "L2", 2) == 0)
+ return L2D;
+ else if (strncmp(model, "L8", 2) == 0)
+ return S1x;
+ else if (strncmp(model, "D1", 2) == 0)
+ return D1x;
+ else if (strncmp(model, "A1", 2) == 0)
+ return A1x;
+ else if (strncmp(model, "A2", 2) == 0)
+ return A2x;
+ else if (strncmp(model, "J1", 2) == 0)
+ return S2x;
+ else if (strncmp(model, "L5", 2) == 0)
+ return L5x;
+ else if (strncmp(model, "A4G", 3) == 0)
+ return A4G;
+ else if (strncmp(model, "W1N", 3) == 0)
+ return W1N;
+ else if (strncmp(model, "W3V", 3) == 0)
+ return W3V;
+ else if (strncmp(model, "W5A", 3) == 0)
+ return W5A;
+ else if (strncmp(model, "R1F", 3) == 0)
+ return R1F;
+ else if (strncmp(model, "A4S", 3) == 0)
+ return A4S;
+ else if (strncmp(model, "F3Sa", 4) == 0)
+ return F3Sa;
+ else
+ return END_MODEL;
+}
+
+/*
+ * This function initializes the hotk with the right values. This is where we
+ * do all the model detection we need and adjust the hotk struct accordingly.
+ */
+static int asus_hotk_get_info(void)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *model = NULL;
+ int bsts_result;
+ char *string = NULL;
+ acpi_status status;
+
+ /*
+ * Get DSDT headers early enough to allow for differentiating between
+ * models, but late enough to allow acpi_bus_register_driver() to fail
+	 * before doing anything ACPI-specific. Should we encounter a machine that
+	 * needs special handling (i.e. its hotkey device has a different
+ * HID), this bit will be moved. A global variable asus_info contains
+ * the DSDT header.
+ */
+ status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info);
+ if (ACPI_FAILURE(status))
+ printk(KERN_WARNING " Couldn't get the DSDT table header\n");
+
+	/* So far, all ASUS models require INIT to be evaluated with 0 */
+ if (!write_acpi_int(hotk->handle, "INIT", 0, &buffer)) {
+ printk(KERN_ERR " Hotkey initialization failed\n");
+ return -ENODEV;
+ }
+
+ /* This needs to be called for some laptops to init properly */
+ if (!read_acpi_int(hotk->handle, "BSTS", &bsts_result))
+ printk(KERN_WARNING " Error calling BSTS\n");
+ else if (bsts_result)
+ printk(KERN_NOTICE " BSTS called, 0x%02x returned\n",
+ bsts_result);
+
+ /*
+ * Try to match the object returned by INIT to the specific model.
+	 * Handle every possible object (or the lack thereof) the DSDT
+ * writers might throw at us. When in trouble, we pass NULL to
+ * asus_model_match() and try something completely different.
+ */
+ if (buffer.pointer) {
+ model = buffer.pointer;
+ switch (model->type) {
+ case ACPI_TYPE_STRING:
+ string = model->string.pointer;
+ break;
+ case ACPI_TYPE_BUFFER:
+ string = model->buffer.pointer;
+ break;
+ default:
+ kfree(model);
+ model = NULL;
+ break;
+ }
+ }
+ hotk->model = asus_model_match(string);
+ if (hotk->model == END_MODEL) { /* match failed */
+ if (asus_info &&
+ strncmp(asus_info->oem_table_id, "ODEM", 4) == 0) {
+ hotk->model = P30;
+ printk(KERN_NOTICE
+ " Samsung P30 detected, supported\n");
+ } else {
+ hotk->model = M2E;
+ printk(KERN_NOTICE " unsupported model %s, trying "
+ "default values\n", string);
+ printk(KERN_NOTICE
+ " send /proc/acpi/dsdt to the developers\n");
+ kfree(model);
+ return -ENODEV;
+ }
+ hotk->methods = &model_conf[hotk->model];
+ return AE_OK;
+ }
+ hotk->methods = &model_conf[hotk->model];
+ printk(KERN_NOTICE " %s model detected, supported\n", string);
+
+ /* Sort of per-model blacklist */
+ if (strncmp(string, "L2B", 3) == 0)
+ hotk->methods->lcd_status = NULL;
+	/* L2B is similar enough to L3C to use its settings, with this one
+	   exception */
+ else if (strncmp(string, "A3G", 3) == 0)
+ hotk->methods->lcd_status = "\\BLFG";
+ /* A3G is like M6R */
+ else if (strncmp(string, "S5N", 3) == 0 ||
+ strncmp(string, "M5N", 3) == 0 ||
+ strncmp(string, "W3N", 3) == 0)
+ hotk->methods->mt_mled = NULL;
+ /* S5N, M5N and W3N have no MLED */
+ else if (strncmp(string, "L5D", 3) == 0)
+ hotk->methods->mt_wled = NULL;
+ /* L5D's WLED is not controlled by ACPI */
+ else if (strncmp(string, "M2N", 3) == 0 ||
+ strncmp(string, "W3V", 3) == 0 ||
+ strncmp(string, "S1N", 3) == 0)
+ hotk->methods->mt_wled = "WLED";
+ /* M2N, S1N and W3V have a usable WLED */
+ else if (asus_info) {
+ if (strncmp(asus_info->oem_table_id, "L1", 2) == 0)
+ hotk->methods->mled_status = NULL;
+		/* The S1300A reports L84F, but so does the L1400B; account for that */
+ }
+
+ kfree(model);
+
+ return AE_OK;
+}
+
+static int asus_hotk_check(void)
+{
+ int result = 0;
+
+ result = acpi_bus_get_status(hotk->device);
+ if (result)
+ return result;
+
+ if (hotk->device->status.present) {
+ result = asus_hotk_get_info();
+ } else {
+ printk(KERN_ERR " Hotkey device not present, aborting\n");
+ return -EINVAL;
+ }
+
+ return result;
+}
+
+static int asus_hotk_found;
+
+static int asus_hotk_add(struct acpi_device *device)
+{
+ acpi_status status = AE_OK;
+ int result;
+
+ if (!device)
+ return -EINVAL;
+
+ printk(KERN_NOTICE "Asus Laptop ACPI Extras version %s\n",
+ ASUS_ACPI_VERSION);
+
+ hotk = kzalloc(sizeof(struct asus_hotk), GFP_KERNEL);
+ if (!hotk)
+ return -ENOMEM;
+
+ hotk->handle = device->handle;
+ strcpy(acpi_device_name(device), ACPI_HOTK_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_HOTK_CLASS);
+ device->driver_data = hotk;
+ hotk->device = device;
+
+ result = asus_hotk_check();
+ if (result)
+ goto end;
+
+ result = asus_hotk_add_fs(device);
+ if (result)
+ goto end;
+
+ /*
+	 * We install the notify handler and pass it the hotk as context, so we
+	 * can hang other data off the hotk struct later on
+ */
+ status = acpi_install_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY,
+ asus_hotk_notify, hotk);
+ if (ACPI_FAILURE(status))
+ printk(KERN_ERR " Error installing notify handler\n");
+
+ /* For laptops without GPLV: init the hotk->brightness value */
+ if ((!hotk->methods->brightness_get)
+ && (!hotk->methods->brightness_status)
+ && (hotk->methods->brightness_up && hotk->methods->brightness_down)) {
+ status =
+ acpi_evaluate_object(NULL, hotk->methods->brightness_down,
+ NULL, NULL);
+ if (ACPI_FAILURE(status))
+ printk(KERN_WARNING " Error changing brightness\n");
+ else {
+ status =
+ acpi_evaluate_object(NULL,
+ hotk->methods->brightness_up,
+ NULL, NULL);
+ if (ACPI_FAILURE(status))
+ printk(KERN_WARNING " Strange, error changing"
+ " brightness\n");
+ }
+ }
+
+ asus_hotk_found = 1;
+
+ /* LED display is off by default */
+ hotk->ledd_status = 0xFFF;
+
+end:
+ if (result)
+ kfree(hotk);
+
+ return result;
+}
+
+static int asus_hotk_remove(struct acpi_device *device, int type)
+{
+ acpi_status status = 0;
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ status = acpi_remove_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY,
+ asus_hotk_notify);
+ if (ACPI_FAILURE(status))
+ printk(KERN_ERR "Asus ACPI: Error removing notify handler\n");
+
+ asus_hotk_remove_fs(device);
+
+ kfree(hotk);
+
+ return 0;
+}
+
+static struct backlight_ops asus_backlight_data = {
+ .get_brightness = read_brightness,
+ .update_status = set_brightness_status,
+};
+
+static void asus_acpi_exit(void)
+{
+ if (asus_backlight_device)
+ backlight_device_unregister(asus_backlight_device);
+
+ acpi_bus_unregister_driver(&asus_hotk_driver);
+ remove_proc_entry(PROC_ASUS, acpi_root_dir);
+
+ return;
+}
+
+static int __init asus_acpi_init(void)
+{
+ int result;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ asus_proc_dir = proc_mkdir(PROC_ASUS, acpi_root_dir);
+ if (!asus_proc_dir) {
+ printk(KERN_ERR "Asus ACPI: Unable to create /proc entry\n");
+ return -ENODEV;
+ }
+ asus_proc_dir->owner = THIS_MODULE;
+
+ result = acpi_bus_register_driver(&asus_hotk_driver);
+ if (result < 0) {
+ remove_proc_entry(PROC_ASUS, acpi_root_dir);
+ return result;
+ }
+
+ /*
+ * This is a bit of a kludge. We only want this module loaded
+ * for ASUS systems, but there's currently no way to probe the
+ * ACPI namespace for ASUS HIDs. So we just return failure if
+ * we didn't find one, which will cause the module to be
+ * unloaded.
+ */
+ if (!asus_hotk_found) {
+ acpi_bus_unregister_driver(&asus_hotk_driver);
+ remove_proc_entry(PROC_ASUS, acpi_root_dir);
+ return -ENODEV;
+ }
+
+ asus_backlight_device = backlight_device_register("asus", NULL, NULL,
+ &asus_backlight_data);
+ if (IS_ERR(asus_backlight_device)) {
+ printk(KERN_ERR "Could not register asus backlight device\n");
+ asus_backlight_device = NULL;
+ asus_acpi_exit();
+ return -ENODEV;
+ }
+ asus_backlight_device->props.max_brightness = 15;
+
+ return 0;
+}
+
+module_init(asus_acpi_init);
+module_exit(asus_acpi_exit);
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
new file mode 100644
index 0000000..65132f9
--- /dev/null
+++ b/drivers/acpi/battery.c
@@ -0,0 +1,908 @@
+/*
+ * battery.c - ACPI Battery Driver (Revision: 2.0)
+ *
+ * Copyright (C) 2007 Alexey Starikovskiy <astarikovskiy@suse.de>
+ * Copyright (C) 2004-2007 Vladimir Lebedev <vladimir.p.lebedev@intel.com>
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/jiffies.h>
+
+#ifdef CONFIG_ACPI_PROCFS_POWER
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/uaccess.h>
+#endif
+
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#ifdef CONFIG_ACPI_SYSFS_POWER
+#include <linux/power_supply.h>
+#endif
+
+#define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF
+
+#define ACPI_BATTERY_CLASS "battery"
+#define ACPI_BATTERY_DEVICE_NAME "Battery"
+#define ACPI_BATTERY_NOTIFY_STATUS 0x80
+#define ACPI_BATTERY_NOTIFY_INFO 0x81
+
+#define _COMPONENT ACPI_BATTERY_COMPONENT
+
+ACPI_MODULE_NAME("battery");
+
+MODULE_AUTHOR("Paul Diefenbaugh");
+MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>");
+MODULE_DESCRIPTION("ACPI Battery Driver");
+MODULE_LICENSE("GPL");
+
+static unsigned int cache_time = 1000;
+module_param(cache_time, uint, 0644);
+MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
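+/* _BST readings are cached for cache_time ms; see acpi_battery_get_state() */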
+
+#ifdef CONFIG_ACPI_PROCFS_POWER
+extern struct proc_dir_entry *acpi_lock_battery_dir(void);
+extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
+
+enum acpi_battery_files {
+ info_tag = 0,
+ state_tag,
+ alarm_tag,
+ ACPI_BATTERY_NUMFILES,
+};
+
+#endif
+
+static const struct acpi_device_id battery_device_ids[] = {
+ {"PNP0C0A", 0},
+ {"", 0},
+};
+
+MODULE_DEVICE_TABLE(acpi, battery_device_ids);
+
+
+struct acpi_battery {
+ struct mutex lock;
+#ifdef CONFIG_ACPI_SYSFS_POWER
+ struct power_supply bat;
+#endif
+ struct acpi_device *device;
+ unsigned long update_time;
+ int current_now;
+ int capacity_now;
+ int voltage_now;
+ int design_capacity;
+ int full_charge_capacity;
+ int technology;
+ int design_voltage;
+ int design_capacity_warning;
+ int design_capacity_low;
+ int capacity_granularity_1;
+ int capacity_granularity_2;
+ int alarm;
+ char model_number[32];
+ char serial_number[32];
+ char type[32];
+ char oem_info[32];
+ int state;
+ int power_unit;
+ u8 alarm_present;
+};
+
+#define to_acpi_battery(x) container_of(x, struct acpi_battery, bat)
+
+inline int acpi_battery_present(struct acpi_battery *battery)
+{
+ return battery->device->status.battery_present;
+}
+
+#ifdef CONFIG_ACPI_SYSFS_POWER
+static int acpi_battery_technology(struct acpi_battery *battery)
+{
+ if (!strcasecmp("NiCd", battery->type))
+ return POWER_SUPPLY_TECHNOLOGY_NiCd;
+ if (!strcasecmp("NiMH", battery->type))
+ return POWER_SUPPLY_TECHNOLOGY_NiMH;
+ if (!strcasecmp("LION", battery->type))
+ return POWER_SUPPLY_TECHNOLOGY_LION;
+ if (!strncasecmp("LI-ION", battery->type, 6))
+ return POWER_SUPPLY_TECHNOLOGY_LION;
+ if (!strcasecmp("LiP", battery->type))
+ return POWER_SUPPLY_TECHNOLOGY_LIPO;
+ return POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
+}
+
+static int acpi_battery_get_state(struct acpi_battery *battery);
+
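+/*
+ * _BST "state" bits, as used below (per the ACPI specification): bit 0 set
+ * means discharging, bit 1 charging, bit 2 capacity critically low; a value
+ * of 0 is reported as "full".
+ */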
+static int acpi_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct acpi_battery *battery = to_acpi_battery(psy);
+
+ if (acpi_battery_present(battery)) {
+ /* run battery update only if it is present */
+ acpi_battery_get_state(battery);
+ } else if (psp != POWER_SUPPLY_PROP_PRESENT)
+ return -ENODEV;
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (battery->state & 0x01)
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ else if (battery->state & 0x02)
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else if (battery->state == 0)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = acpi_battery_present(battery);
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = acpi_battery_technology(battery);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = battery->design_voltage * 1000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = battery->voltage_now * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = battery->current_now * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ val->intval = battery->design_capacity * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ case POWER_SUPPLY_PROP_ENERGY_FULL:
+ val->intval = battery->full_charge_capacity * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ case POWER_SUPPLY_PROP_ENERGY_NOW:
+ val->intval = battery->capacity_now * 1000;
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = battery->model_number;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = battery->oem_info;
+ break;
+ case POWER_SUPPLY_PROP_SERIAL_NUMBER:
+ val->strval = battery->serial_number;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static enum power_supply_property charge_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
+};
+
+static enum power_supply_property energy_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
+ POWER_SUPPLY_PROP_ENERGY_FULL,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
+};
+#endif
+
+#ifdef CONFIG_ACPI_PROCFS_POWER
+inline char *acpi_battery_units(struct acpi_battery *battery)
+{
+ return (battery->power_unit)?"mA":"mW";
+}
+#endif
+
+/* --------------------------------------------------------------------------
+ Battery Management
+ -------------------------------------------------------------------------- */
+struct acpi_offsets {
+	size_t offset;		/* offset inside struct acpi_battery */
+ u8 mode; /* int or string? */
+};
+
+static struct acpi_offsets state_offsets[] = {
+ {offsetof(struct acpi_battery, state), 0},
+ {offsetof(struct acpi_battery, current_now), 0},
+ {offsetof(struct acpi_battery, capacity_now), 0},
+ {offsetof(struct acpi_battery, voltage_now), 0},
+};
+
+static struct acpi_offsets info_offsets[] = {
+ {offsetof(struct acpi_battery, power_unit), 0},
+ {offsetof(struct acpi_battery, design_capacity), 0},
+ {offsetof(struct acpi_battery, full_charge_capacity), 0},
+ {offsetof(struct acpi_battery, technology), 0},
+ {offsetof(struct acpi_battery, design_voltage), 0},
+ {offsetof(struct acpi_battery, design_capacity_warning), 0},
+ {offsetof(struct acpi_battery, design_capacity_low), 0},
+ {offsetof(struct acpi_battery, capacity_granularity_1), 0},
+ {offsetof(struct acpi_battery, capacity_granularity_2), 0},
+ {offsetof(struct acpi_battery, model_number), 1},
+ {offsetof(struct acpi_battery, serial_number), 1},
+ {offsetof(struct acpi_battery, type), 1},
+ {offsetof(struct acpi_battery, oem_info), 1},
+};
+
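+/*
+ * Copy the elements of a _BIF/_BST package into the battery structure
+ * according to an offsets table: mode 0 entries are integers, mode 1 entries
+ * are strings (integers are rendered as raw bytes in that case).
+ */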
+static int extract_package(struct acpi_battery *battery,
+ union acpi_object *package,
+ struct acpi_offsets *offsets, int num)
+{
+ int i;
+ union acpi_object *element;
+ if (package->type != ACPI_TYPE_PACKAGE)
+ return -EFAULT;
+ for (i = 0; i < num; ++i) {
+ if (package->package.count <= i)
+ return -EFAULT;
+ element = &package->package.elements[i];
+ if (offsets[i].mode) {
+ u8 *ptr = (u8 *)battery + offsets[i].offset;
+ if (element->type == ACPI_TYPE_STRING ||
+ element->type == ACPI_TYPE_BUFFER)
+ strncpy(ptr, element->string.pointer, 32);
+ else if (element->type == ACPI_TYPE_INTEGER) {
+ strncpy(ptr, (u8 *)&element->integer.value,
+ sizeof(acpi_integer));
+ ptr[sizeof(acpi_integer)] = 0;
+ } else
+ *ptr = 0; /* don't have value */
+ } else {
+ int *x = (int *)((u8 *)battery + offsets[i].offset);
+ *x = (element->type == ACPI_TYPE_INTEGER) ?
+ element->integer.value : -1;
+ }
+ }
+ return 0;
+}
+
+static int acpi_battery_get_status(struct acpi_battery *battery)
+{
+ if (acpi_bus_get_status(battery->device)) {
+ ACPI_EXCEPTION((AE_INFO, AE_ERROR, "Evaluating _STA"));
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int acpi_battery_get_info(struct acpi_battery *battery)
+{
+ int result = -EFAULT;
+ acpi_status status = 0;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ if (!acpi_battery_present(battery))
+ return 0;
+ mutex_lock(&battery->lock);
+ status = acpi_evaluate_object(battery->device->handle, "_BIF",
+ NULL, &buffer);
+ mutex_unlock(&battery->lock);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _BIF"));
+ return -ENODEV;
+ }
+
+ result = extract_package(battery, buffer.pointer,
+ info_offsets, ARRAY_SIZE(info_offsets));
+ kfree(buffer.pointer);
+ return result;
+}
+
+static int acpi_battery_get_state(struct acpi_battery *battery)
+{
+ int result = 0;
+ acpi_status status = 0;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ if (!acpi_battery_present(battery))
+ return 0;
+
+ if (battery->update_time &&
+ time_before(jiffies, battery->update_time +
+ msecs_to_jiffies(cache_time)))
+ return 0;
+
+ mutex_lock(&battery->lock);
+ status = acpi_evaluate_object(battery->device->handle, "_BST",
+ NULL, &buffer);
+ mutex_unlock(&battery->lock);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _BST"));
+ return -ENODEV;
+ }
+
+ result = extract_package(battery, buffer.pointer,
+ state_offsets, ARRAY_SIZE(state_offsets));
+ battery->update_time = jiffies;
+ kfree(buffer.pointer);
+ return result;
+}
+
+static int acpi_battery_set_alarm(struct acpi_battery *battery)
+{
+ acpi_status status = 0;
+ union acpi_object arg0 = { .type = ACPI_TYPE_INTEGER };
+ struct acpi_object_list arg_list = { 1, &arg0 };
+
+	if (!acpi_battery_present(battery) || !battery->alarm_present)
+ return -ENODEV;
+
+ arg0.integer.value = battery->alarm;
+
+ mutex_lock(&battery->lock);
+ status = acpi_evaluate_object(battery->device->handle, "_BTP",
+ &arg_list, NULL);
+ mutex_unlock(&battery->lock);
+
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Alarm set to %d\n", battery->alarm));
+ return 0;
+}
+
+static int acpi_battery_init_alarm(struct acpi_battery *battery)
+{
+ acpi_status status = AE_OK;
+ acpi_handle handle = NULL;
+
+ /* See if alarms are supported, and if so, set default */
+ status = acpi_get_handle(battery->device->handle, "_BTP", &handle);
+ if (ACPI_FAILURE(status)) {
+ battery->alarm_present = 0;
+ return 0;
+ }
+ battery->alarm_present = 1;
+ if (!battery->alarm)
+ battery->alarm = battery->design_capacity_warning;
+ return acpi_battery_set_alarm(battery);
+}
+
+#ifdef CONFIG_ACPI_SYSFS_POWER
+static ssize_t acpi_battery_alarm_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
+ return sprintf(buf, "%d\n", battery->alarm * 1000);
+}
+
+static ssize_t acpi_battery_alarm_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long x;
+ struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
+ if (sscanf(buf, "%ld\n", &x) == 1)
+ battery->alarm = x/1000;
+ if (acpi_battery_present(battery))
+ acpi_battery_set_alarm(battery);
+ return count;
+}
+
+static struct device_attribute alarm_attr = {
+ .attr = {.name = "alarm", .mode = 0644},
+ .show = acpi_battery_alarm_show,
+ .store = acpi_battery_alarm_store,
+};
+
+static int sysfs_add_battery(struct acpi_battery *battery)
+{
+ int result;
+
+ if (battery->power_unit) {
+ battery->bat.properties = charge_battery_props;
+ battery->bat.num_properties =
+ ARRAY_SIZE(charge_battery_props);
+ } else {
+ battery->bat.properties = energy_battery_props;
+ battery->bat.num_properties =
+ ARRAY_SIZE(energy_battery_props);
+ }
+
+ battery->bat.name = acpi_device_bid(battery->device);
+ battery->bat.type = POWER_SUPPLY_TYPE_BATTERY;
+ battery->bat.get_property = acpi_battery_get_property;
+
+ result = power_supply_register(&battery->device->dev, &battery->bat);
+ if (result)
+ return result;
+ return device_create_file(battery->bat.dev, &alarm_attr);
+}
+
+static void sysfs_remove_battery(struct acpi_battery *battery)
+{
+ if (!battery->bat.dev)
+ return;
+ device_remove_file(battery->bat.dev, &alarm_attr);
+ power_supply_unregister(&battery->bat);
+ battery->bat.dev = NULL;
+}
+#endif
+
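+/*
+ * Refresh the battery: re-evaluate _STA, re-read the static _BIF data when
+ * the battery (re)appears, keep the power_supply registration in sync, and
+ * finally update the dynamic _BST state.
+ */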
+static int acpi_battery_update(struct acpi_battery *battery)
+{
+ int result, old_present = acpi_battery_present(battery);
+ result = acpi_battery_get_status(battery);
+ if (result)
+ return result;
+#ifdef CONFIG_ACPI_SYSFS_POWER
+ if (!acpi_battery_present(battery)) {
+ sysfs_remove_battery(battery);
+ battery->update_time = 0;
+ return 0;
+ }
+#endif
+ if (!battery->update_time ||
+ old_present != acpi_battery_present(battery)) {
+ result = acpi_battery_get_info(battery);
+ if (result)
+ return result;
+ acpi_battery_init_alarm(battery);
+ }
+#ifdef CONFIG_ACPI_SYSFS_POWER
+ if (!battery->bat.dev)
+ sysfs_add_battery(battery);
+#endif
+ return acpi_battery_get_state(battery);
+}
+
+/* --------------------------------------------------------------------------
+ FS Interface (/proc)
+ -------------------------------------------------------------------------- */
+
+#ifdef CONFIG_ACPI_PROCFS_POWER
+static struct proc_dir_entry *acpi_battery_dir;
+
+static int acpi_battery_print_info(struct seq_file *seq, int result)
+{
+ struct acpi_battery *battery = seq->private;
+
+ if (result)
+ goto end;
+
+ seq_printf(seq, "present: %s\n",
+ acpi_battery_present(battery)?"yes":"no");
+ if (!acpi_battery_present(battery))
+ goto end;
+ if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+ seq_printf(seq, "design capacity: unknown\n");
+ else
+ seq_printf(seq, "design capacity: %d %sh\n",
+ battery->design_capacity,
+ acpi_battery_units(battery));
+
+ if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+ seq_printf(seq, "last full capacity: unknown\n");
+ else
+ seq_printf(seq, "last full capacity: %d %sh\n",
+ battery->full_charge_capacity,
+ acpi_battery_units(battery));
+
+ seq_printf(seq, "battery technology: %srechargeable\n",
+ (!battery->technology)?"non-":"");
+
+ if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
+ seq_printf(seq, "design voltage: unknown\n");
+ else
+ seq_printf(seq, "design voltage: %d mV\n",
+ battery->design_voltage);
+ seq_printf(seq, "design capacity warning: %d %sh\n",
+ battery->design_capacity_warning,
+ acpi_battery_units(battery));
+ seq_printf(seq, "design capacity low: %d %sh\n",
+ battery->design_capacity_low,
+ acpi_battery_units(battery));
+ seq_printf(seq, "capacity granularity 1: %d %sh\n",
+ battery->capacity_granularity_1,
+ acpi_battery_units(battery));
+ seq_printf(seq, "capacity granularity 2: %d %sh\n",
+ battery->capacity_granularity_2,
+ acpi_battery_units(battery));
+ seq_printf(seq, "model number: %s\n", battery->model_number);
+ seq_printf(seq, "serial number: %s\n", battery->serial_number);
+ seq_printf(seq, "battery type: %s\n", battery->type);
+ seq_printf(seq, "OEM info: %s\n", battery->oem_info);
+ end:
+ if (result)
+ seq_printf(seq, "ERROR: Unable to read battery info\n");
+ return result;
+}
+
+static int acpi_battery_print_state(struct seq_file *seq, int result)
+{
+ struct acpi_battery *battery = seq->private;
+
+ if (result)
+ goto end;
+
+ seq_printf(seq, "present: %s\n",
+ acpi_battery_present(battery)?"yes":"no");
+ if (!acpi_battery_present(battery))
+ goto end;
+
+ seq_printf(seq, "capacity state: %s\n",
+ (battery->state & 0x04)?"critical":"ok");
+ if ((battery->state & 0x01) && (battery->state & 0x02))
+ seq_printf(seq,
+ "charging state: charging/discharging\n");
+ else if (battery->state & 0x01)
+ seq_printf(seq, "charging state: discharging\n");
+ else if (battery->state & 0x02)
+ seq_printf(seq, "charging state: charging\n");
+ else
+ seq_printf(seq, "charging state: charged\n");
+
+ if (battery->current_now == ACPI_BATTERY_VALUE_UNKNOWN)
+ seq_printf(seq, "present rate: unknown\n");
+ else
+ seq_printf(seq, "present rate: %d %s\n",
+ battery->current_now, acpi_battery_units(battery));
+
+ if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN)
+ seq_printf(seq, "remaining capacity: unknown\n");
+ else
+ seq_printf(seq, "remaining capacity: %d %sh\n",
+ battery->capacity_now, acpi_battery_units(battery));
+ if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN)
+ seq_printf(seq, "present voltage: unknown\n");
+ else
+ seq_printf(seq, "present voltage: %d mV\n",
+ battery->voltage_now);
+ end:
+ if (result)
+ seq_printf(seq, "ERROR: Unable to read battery state\n");
+
+ return result;
+}
+
+static int acpi_battery_print_alarm(struct seq_file *seq, int result)
+{
+ struct acpi_battery *battery = seq->private;
+
+ if (result)
+ goto end;
+
+ if (!acpi_battery_present(battery)) {
+ seq_printf(seq, "present: no\n");
+ goto end;
+ }
+ seq_printf(seq, "alarm: ");
+ if (!battery->alarm)
+ seq_printf(seq, "unsupported\n");
+ else
+ seq_printf(seq, "%u %sh\n", battery->alarm,
+ acpi_battery_units(battery));
+ end:
+ if (result)
+ seq_printf(seq, "ERROR: Unable to read battery alarm\n");
+ return result;
+}
+
+static ssize_t acpi_battery_write_alarm(struct file *file,
+ const char __user * buffer,
+ size_t count, loff_t * ppos)
+{
+ int result = 0;
+ char alarm_string[12] = { '\0' };
+ struct seq_file *m = file->private_data;
+ struct acpi_battery *battery = m->private;
+
+ if (!battery || (count > sizeof(alarm_string) - 1))
+ return -EINVAL;
+ if (!acpi_battery_present(battery)) {
+ result = -ENODEV;
+ goto end;
+ }
+ if (copy_from_user(alarm_string, buffer, count)) {
+ result = -EFAULT;
+ goto end;
+ }
+ alarm_string[count] = '\0';
+ battery->alarm = simple_strtol(alarm_string, NULL, 0);
+ result = acpi_battery_set_alarm(battery);
+ end:
+ if (!result)
+ return count;
+ return result;
+}
+
+typedef int(*print_func)(struct seq_file *seq, int result);
+
+static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = {
+ acpi_battery_print_info,
+ acpi_battery_print_state,
+ acpi_battery_print_alarm,
+};
+
+static int acpi_battery_read(int fid, struct seq_file *seq)
+{
+ struct acpi_battery *battery = seq->private;
+ int result = acpi_battery_update(battery);
+ return acpi_print_funcs[fid](seq, result);
+}
+
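+/*
+ * The macros below generate one seq_file open/read pair per proc file
+ * (info, state, alarm) plus the matching file_operations initializers.
+ */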
+#define DECLARE_FILE_FUNCTIONS(_name) \
+static int acpi_battery_read_##_name(struct seq_file *seq, void *offset) \
+{ \
+ return acpi_battery_read(_name##_tag, seq); \
+} \
+static int acpi_battery_##_name##_open_fs(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, acpi_battery_read_##_name, PDE(inode)->data); \
+}
+
+DECLARE_FILE_FUNCTIONS(info);
+DECLARE_FILE_FUNCTIONS(state);
+DECLARE_FILE_FUNCTIONS(alarm);
+
+#undef DECLARE_FILE_FUNCTIONS
+
+#define FILE_DESCRIPTION_RO(_name) \
+ { \
+ .name = __stringify(_name), \
+ .mode = S_IRUGO, \
+ .ops = { \
+ .open = acpi_battery_##_name##_open_fs, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
+#define FILE_DESCRIPTION_RW(_name) \
+ { \
+ .name = __stringify(_name), \
+ .mode = S_IFREG | S_IRUGO | S_IWUSR, \
+ .ops = { \
+ .open = acpi_battery_##_name##_open_fs, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .write = acpi_battery_write_##_name, \
+ .release = single_release, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
+static struct battery_file {
+ struct file_operations ops;
+ mode_t mode;
+ char *name;
+} acpi_battery_file[] = {
+ FILE_DESCRIPTION_RO(info),
+ FILE_DESCRIPTION_RO(state),
+ FILE_DESCRIPTION_RW(alarm),
+};
+
+#undef FILE_DESCRIPTION_RO
+#undef FILE_DESCRIPTION_RW
+
+static int acpi_battery_add_fs(struct acpi_device *device)
+{
+ struct proc_dir_entry *entry = NULL;
+ int i;
+
+ if (!acpi_device_dir(device)) {
+ acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
+ acpi_battery_dir);
+ if (!acpi_device_dir(device))
+ return -ENODEV;
+ acpi_device_dir(device)->owner = THIS_MODULE;
+ }
+
+ for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) {
+ entry = proc_create_data(acpi_battery_file[i].name,
+ acpi_battery_file[i].mode,
+ acpi_device_dir(device),
+ &acpi_battery_file[i].ops,
+ acpi_driver_data(device));
+ if (!entry)
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void acpi_battery_remove_fs(struct acpi_device *device)
+{
+ int i;
+ if (!acpi_device_dir(device))
+ return;
+ for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i)
+ remove_proc_entry(acpi_battery_file[i].name,
+ acpi_device_dir(device));
+
+ remove_proc_entry(acpi_device_bid(device), acpi_battery_dir);
+ acpi_device_dir(device) = NULL;
+}
+
+#endif
+
+/* --------------------------------------------------------------------------
+ Driver Interface
+ -------------------------------------------------------------------------- */
+
+static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct acpi_battery *battery = data;
+ struct acpi_device *device;
+ if (!battery)
+ return;
+ device = battery->device;
+ acpi_battery_update(battery);
+ acpi_bus_generate_proc_event(device, event,
+ acpi_battery_present(battery));
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+ dev_name(&device->dev), event,
+ acpi_battery_present(battery));
+#ifdef CONFIG_ACPI_SYSFS_POWER
+	/* acpi_battery_update() may have removed the power_supply object */
+ if (battery->bat.dev)
+ kobject_uevent(&battery->bat.dev->kobj, KOBJ_CHANGE);
+#endif
+}
+
+static int acpi_battery_add(struct acpi_device *device)
+{
+ int result = 0;
+ acpi_status status = 0;
+ struct acpi_battery *battery = NULL;
+ if (!device)
+ return -EINVAL;
+ battery = kzalloc(sizeof(struct acpi_battery), GFP_KERNEL);
+ if (!battery)
+ return -ENOMEM;
+ battery->device = device;
+ strcpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_BATTERY_CLASS);
+ device->driver_data = battery;
+ mutex_init(&battery->lock);
+ acpi_battery_update(battery);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ result = acpi_battery_add_fs(device);
+ if (result)
+ goto end;
+#endif
+ status = acpi_install_notify_handler(device->handle,
+ ACPI_ALL_NOTIFY,
+ acpi_battery_notify, battery);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Installing notify handler"));
+ result = -ENODEV;
+ goto end;
+ }
+ printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
+ ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
+ device->status.battery_present ? "present" : "absent");
+ end:
+ if (result) {
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_battery_remove_fs(device);
+#endif
+ kfree(battery);
+ }
+ return result;
+}
+
+static int acpi_battery_remove(struct acpi_device *device, int type)
+{
+ acpi_status status = 0;
+ struct acpi_battery *battery = NULL;
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+ battery = acpi_driver_data(device);
+ status = acpi_remove_notify_handler(device->handle,
+ ACPI_ALL_NOTIFY,
+ acpi_battery_notify);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_battery_remove_fs(device);
+#endif
+#ifdef CONFIG_ACPI_SYSFS_POWER
+ sysfs_remove_battery(battery);
+#endif
+ mutex_destroy(&battery->lock);
+ kfree(battery);
+ return 0;
+}
+
+/* This is needed to learn about changes made while the system was suspended */
+static int acpi_battery_resume(struct acpi_device *device)
+{
+ struct acpi_battery *battery;
+ if (!device)
+ return -EINVAL;
+ battery = acpi_driver_data(device);
+ battery->update_time = 0;
+ acpi_battery_update(battery);
+ return 0;
+}
+
+static struct acpi_driver acpi_battery_driver = {
+ .name = "battery",
+ .class = ACPI_BATTERY_CLASS,
+ .ids = battery_device_ids,
+ .ops = {
+ .add = acpi_battery_add,
+ .resume = acpi_battery_resume,
+ .remove = acpi_battery_remove,
+ },
+};
+
+static int __init acpi_battery_init(void)
+{
+ if (acpi_disabled)
+ return -ENODEV;
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_battery_dir = acpi_lock_battery_dir();
+ if (!acpi_battery_dir)
+ return -ENODEV;
+#endif
+ if (acpi_bus_register_driver(&acpi_battery_driver) < 0) {
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_unlock_battery_dir(acpi_battery_dir);
+#endif
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void __exit acpi_battery_exit(void)
+{
+ acpi_bus_unregister_driver(&acpi_battery_driver);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_unlock_battery_dir(acpi_battery_dir);
+#endif
+}
+
+module_init(acpi_battery_init);
+module_exit(acpi_battery_exit);
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
new file mode 100644
index 0000000..09c6980
--- /dev/null
+++ b/drivers/acpi/blacklist.c
@@ -0,0 +1,241 @@
+/*
+ * blacklist.c
+ *
+ * Check to see if the given machine has a known bad ACPI BIOS
+ * or if the BIOS is too old.
+ * Check given machine against acpi_osi_dmi_table[].
+ *
+ * Copyright (C) 2004 Len Brown <len.brown@intel.com>
+ * Copyright (C) 2002 Andy Grover <andrew.grover@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <linux/dmi.h>
+
+enum acpi_blacklist_predicates {
+ all_versions,
+ less_than_or_equal,
+ equal,
+ greater_than_or_equal,
+};
+
+struct acpi_blacklist_item {
+ char oem_id[7];
+ char oem_table_id[9];
+ u32 oem_revision;
+ char *table;
+ enum acpi_blacklist_predicates oem_revision_predicate;
+ char *reason;
+ u32 is_critical_error;
+};
+
+static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
+
+/*
+ * POLICY: If *anything* doesn't work, put it on the blacklist.
+ * If the problem is a critical error, mark the entry critical and
+ * driver load will be aborted.
+ */
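+/*
+ * A hypothetical new entry would look like
+ *
+ * {"OEMID ", "OEMTABLE", 0x00000100, ACPI_SIG_DSDT, less_than_or_equal,
+ * "Short description of the problem", 1},
+ *
+ * where the OEM id, table id and revision are illustrative values only,
+ * not a real machine.
+ */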
+static struct acpi_blacklist_item acpi_blacklist[] __initdata = {
+ /* Compaq Presario 1700 */
+ {"PTLTD ", " DSDT ", 0x06040000, ACPI_SIG_DSDT, less_than_or_equal,
+ "Multiple problems", 1},
+ /* Sony FX120, FX140, FX150? */
+ {"SONY ", "U0 ", 0x20010313, ACPI_SIG_DSDT, less_than_or_equal,
+ "ACPI driver problem", 1},
+ /* Compaq Presario 800, Insyde BIOS */
+ {"INT440", "SYSFexxx", 0x00001001, ACPI_SIG_DSDT, less_than_or_equal,
+ "Does not use _REG to protect EC OpRegions", 1},
+ /* IBM 600E - _ADR should return 7, but it returns 1 */
+ {"IBM ", "TP600E ", 0x00000105, ACPI_SIG_DSDT, less_than_or_equal,
+ "Incorrect _ADR", 1},
+
+ {""}
+};
+
+#if CONFIG_ACPI_BLACKLIST_YEAR
+
+static int __init blacklist_by_year(void)
+{
+ int year = dmi_get_year(DMI_BIOS_DATE);
+ /* Doesn't exist? Likely an old system */
+ if (year == -1) {
+ printk(KERN_ERR PREFIX "no DMI BIOS year, "
+ "acpi=force is required to enable ACPI\n" );
+ return 1;
+ }
+ /* 0? Likely a buggy new BIOS */
+ if (year == 0) {
+ printk(KERN_ERR PREFIX "DMI BIOS year==0, "
+ "assuming ACPI-capable machine\n" );
+ return 0;
+ }
+ if (year < CONFIG_ACPI_BLACKLIST_YEAR) {
+ printk(KERN_ERR PREFIX "BIOS age (%d) fails cutoff (%d), "
+ "acpi=force is required to enable ACPI\n",
+ year, CONFIG_ACPI_BLACKLIST_YEAR);
+ return 1;
+ }
+ return 0;
+}
+#else
+static inline int blacklist_by_year(void)
+{
+ return 0;
+}
+#endif
+
+int __init acpi_blacklisted(void)
+{
+ int i = 0;
+ int blacklisted = 0;
+ struct acpi_table_header table_header;
+
+ while (acpi_blacklist[i].oem_id[0] != '\0') {
+ if (acpi_get_table_header(acpi_blacklist[i].table, 0, &table_header)) {
+ i++;
+ continue;
+ }
+
+ if (strncmp(acpi_blacklist[i].oem_id, table_header.oem_id, 6)) {
+ i++;
+ continue;
+ }
+
+ if (strncmp(acpi_blacklist[i].oem_table_id,
+ table_header.oem_table_id, 8)) {
+ i++;
+ continue;
+ }
+
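+ /*
+ * OEM id and table id match; now apply the revision predicate
+ * (all_versions, <=, >= or ==) against the table's oem_revision.
+ */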
+ if ((acpi_blacklist[i].oem_revision_predicate == all_versions)
+ || (acpi_blacklist[i].oem_revision_predicate ==
+ less_than_or_equal
+ && table_header.oem_revision <=
+ acpi_blacklist[i].oem_revision)
+ || (acpi_blacklist[i].oem_revision_predicate ==
+ greater_than_or_equal
+ && table_header.oem_revision >=
+ acpi_blacklist[i].oem_revision)
+ || (acpi_blacklist[i].oem_revision_predicate == equal
+ && table_header.oem_revision ==
+ acpi_blacklist[i].oem_revision)) {
+
+ printk(KERN_ERR PREFIX
+ "Vendor \"%6.6s\" System \"%8.8s\" "
+ "Revision 0x%x has a known ACPI BIOS problem.\n",
+ acpi_blacklist[i].oem_id,
+ acpi_blacklist[i].oem_table_id,
+ acpi_blacklist[i].oem_revision);
+
+ printk(KERN_ERR PREFIX
+ "Reason: %s. This is a %s error\n",
+ acpi_blacklist[i].reason,
+ acpi_blacklist[i].is_critical_error ?
+ "non-recoverable" : "recoverable");
+
+ blacklisted = acpi_blacklist[i].is_critical_error;
+ break;
+ } else {
+ i++;
+ }
+ }
+
+ blacklisted += blacklist_by_year();
+
+ dmi_check_system(acpi_osi_dmi_table);
+
+ return blacklisted;
+}
+#ifdef CONFIG_DMI
+static int __init dmi_enable_osi_linux(const struct dmi_system_id *d)
+{
+ acpi_dmi_osi_linux(1, d); /* enable */
+ return 0;
+}
+static int __init dmi_disable_osi_vista(const struct dmi_system_id *d)
+{
+ printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
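+ /* "Windows 2006" is the Vista _OSI string; the leading '!' removes it */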
+ acpi_osi_setup("!Windows 2006");
+ return 0;
+}
+
+static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "Fujitsu Siemens",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile V5505"),
+ },
+ },
+
+ /*
+ * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
+ * Linux ignores it, except for the machines enumerated below.
+ */
+
+ /*
+ * Lenovo ships a mix of systems with different OSI(Linux) situations,
+ * and thus we cannot wildcard the vendor.
+ *
+ * _OSI(Linux) helps sound
+ * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad R61"),
+ * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T61"),
+ * _OSI(Linux) has Linux specific hooks
+ * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"),
+ * _OSI(Linux) is a NOP:
+ * DMI_MATCH(DMI_PRODUCT_VERSION, "3000 N100"),
+ * DMI_MATCH(DMI_PRODUCT_VERSION, "LENOVO3000 V100"),
+ */
+ {
+ .callback = dmi_enable_osi_linux,
+ .ident = "Lenovo ThinkPad R61",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad R61"),
+ },
+ },
+ {
+ .callback = dmi_enable_osi_linux,
+ .ident = "Lenovo ThinkPad T61",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T61"),
+ },
+ },
+ {
+ .callback = dmi_enable_osi_linux,
+ .ident = "Lenovo ThinkPad X61",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"),
+ },
+ },
+ {}
+};
+
+#endif /* CONFIG_DMI */
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
new file mode 100644
index 0000000..765fd1c
--- /dev/null
+++ b/drivers/acpi/bus.c
@@ -0,0 +1,878 @@
+/*
+ * acpi_bus.c - ACPI Bus Driver ($Revision: 80 $)
+ *
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/pm.h>
+#include <linux/device.h>
+#include <linux/proc_fs.h>
+#include <linux/acpi.h>
+#ifdef CONFIG_X86
+#include <asm/mpspec.h>
+#endif
+#include <linux/pci.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#define _COMPONENT ACPI_BUS_COMPONENT
+ACPI_MODULE_NAME("bus");
+
+struct acpi_device *acpi_root;
+struct proc_dir_entry *acpi_root_dir;
+EXPORT_SYMBOL(acpi_root_dir);
+
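+/* Reinterpret a bitfield struct (e.g. struct acpi_device_status) as a plain int */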
+#define STRUCT_TO_INT(s) (*((int*)&s))
+
+static int set_power_nocheck(const struct dmi_system_id *id)
+{
+ printk(KERN_NOTICE PREFIX "%s detected - "
+ "disable power check in power transistion\n", id->ident);
+ acpi_power_nocheck = 1;
+ return 0;
+}
+static struct dmi_system_id __cpuinitdata power_nocheck_dmi_table[] = {
+ {
+ set_power_nocheck, "HP Pavilion 05", {
+ DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
+ DMI_MATCH(DMI_SYS_VENDOR, "HP Pavilion 05"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "2001211RE101GLEND") }, NULL},
+ {},
+};
+
+
+/* --------------------------------------------------------------------------
+ Device Management
+ -------------------------------------------------------------------------- */
+
+int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
+{
+ acpi_status status = AE_OK;
+
+
+ if (!device)
+ return -EINVAL;
+
+ /* TBD: Support fixed-feature devices */
+
+ status = acpi_get_data(handle, acpi_bus_data_handler, (void **)device);
+ if (ACPI_FAILURE(status) || !*device) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n",
+ handle));
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(acpi_bus_get_device);
+
+int acpi_bus_get_status(struct acpi_device *device)
+{
+ acpi_status status = AE_OK;
+ unsigned long long sta = 0;
+
+
+ if (!device)
+ return -EINVAL;
+
+ /*
+ * Evaluate _STA if present.
+ */
+ if (device->flags.dynamic_status) {
+ status =
+ acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+ STRUCT_TO_INT(device->status) = (int)sta;
+ }
+
+ /*
+ * According to the ACPI spec, a device can be present and functional
+ * even if its parent is not present or functional. In such cases the
+ * child device should not inherit its status from the parent.
+ */
+ else
+ STRUCT_TO_INT(device->status) =
+ ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED |
+ ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING;
+
+ if (device->status.functional && !device->status.present) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]: "
+ "functional but not present;\n",
+ device->pnp.bus_id,
+ (u32) STRUCT_TO_INT(device->status)));
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]\n",
+ device->pnp.bus_id,
+ (u32) STRUCT_TO_INT(device->status)));
+
+ return 0;
+}
+
+EXPORT_SYMBOL(acpi_bus_get_status);
+
+void acpi_bus_private_data_handler(acpi_handle handle,
+ u32 function, void *context)
+{
+ return;
+}
+EXPORT_SYMBOL(acpi_bus_private_data_handler);
+
+int acpi_bus_get_private_data(acpi_handle handle, void **data)
+{
+ acpi_status status = AE_OK;
+
+ if (!data)
+ return -EINVAL;
+
+ status = acpi_get_data(handle, acpi_bus_private_data_handler, data);
+ if (ACPI_FAILURE(status) || !*data) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n",
+ handle));
+ return -ENODEV;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(acpi_bus_get_private_data);
+
+/* --------------------------------------------------------------------------
+ Power Management
+ -------------------------------------------------------------------------- */
+
+int acpi_bus_get_power(acpi_handle handle, int *state)
+{
+ int result = 0;
+ acpi_status status = 0;
+ struct acpi_device *device = NULL;
+ unsigned long long psc = 0;
+
+
+ result = acpi_bus_get_device(handle, &device);
+ if (result)
+ return result;
+
+ *state = ACPI_STATE_UNKNOWN;
+
+ if (!device->flags.power_manageable) {
+ /* TBD: Non-recursive algorithm for walking up hierarchy */
+ if (device->parent)
+ *state = device->parent->power.state;
+ else
+ *state = ACPI_STATE_D0;
+ } else {
+ /*
+ * Get the device's power state either directly (via _PSC) or
+ * indirectly (via power resources).
+ */
+ if (device->power.flags.explicit_get) {
+ status = acpi_evaluate_integer(device->handle, "_PSC",
+ NULL, &psc);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+ device->power.state = (int)psc;
+ } else if (device->power.flags.power_resources) {
+ result = acpi_power_get_inferred_state(device);
+ if (result)
+ return result;
+ }
+
+ *state = device->power.state;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n",
+ device->pnp.bus_id, device->power.state));
+
+ return 0;
+}
+
+EXPORT_SYMBOL(acpi_bus_get_power);
+
+int acpi_bus_set_power(acpi_handle handle, int state)
+{
+ int result = 0;
+ acpi_status status = AE_OK;
+ struct acpi_device *device = NULL;
+ char object_name[5] = { '_', 'P', 'S', '0' + state, '\0' };
+
+
+ result = acpi_bus_get_device(handle, &device);
+ if (result)
+ return result;
+
+ if ((state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
+ return -EINVAL;
+
+ /* Make sure this is a valid target state */
+
+ if (!device->flags.power_manageable) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n",
+ kobject_name(&device->dev.kobj)));
+ return -ENODEV;
+ }
+ /*
+ * Get device's current power state
+ */
+ if (!acpi_power_nocheck) {
+ /*
+ * A buggy BIOS may report a power state that differs from the
+ * device's real state. For example, the BIOS may report D0 while
+ * the device is actually in D3; if the OS trusts that value it
+ * will skip the transition and the device stays in the wrong
+ * state. When acpi_power_nocheck is set we therefore skip reading
+ * the current state via acpi_bus_get_power().
+ */
+ acpi_bus_get_power(device->handle, &device->power.state);
+ }
+ if ((state == device->power.state) && !device->flags.force_power_state) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n",
+ state));
+ return 0;
+ }
+
+ if (!device->power.states[state].flags.valid) {
+ printk(KERN_WARNING PREFIX "Device does not support D%d\n", state);
+ return -ENODEV;
+ }
+ if (device->parent && (state < device->parent->power.state)) {
+ printk(KERN_WARNING PREFIX
+ "Cannot set device to a higher-powered"
+ " state than parent\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Transition Power
+ * ----------------
+ * On transitions to a higher-powered state we first apply power (via
+ * power resources) and then evaluate _PSx. Conversely, on transitions
+ * to a lower-powered state we evaluate _PSx first and then remove power.
+ */
+ if (state < device->power.state) {
+ if (device->power.flags.power_resources) {
+ result = acpi_power_transition(device, state);
+ if (result)
+ goto end;
+ }
+ if (device->power.states[state].flags.explicit_set) {
+ status = acpi_evaluate_object(device->handle,
+ object_name, NULL, NULL);
+ if (ACPI_FAILURE(status)) {
+ result = -ENODEV;
+ goto end;
+ }
+ }
+ } else {
+ if (device->power.states[state].flags.explicit_set) {
+ status = acpi_evaluate_object(device->handle,
+ object_name, NULL, NULL);
+ if (ACPI_FAILURE(status)) {
+ result = -ENODEV;
+ goto end;
+ }
+ }
+ if (device->power.flags.power_resources) {
+ result = acpi_power_transition(device, state);
+ if (result)
+ goto end;
+ }
+ }
+
+ end:
+ if (result)
+ printk(KERN_WARNING PREFIX
+ "Transitioning device [%s] to D%d\n",
+ device->pnp.bus_id, state);
+ else {
+ device->power.state = state;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Device [%s] transitioned to D%d\n",
+ device->pnp.bus_id, state));
+ }
+
+ return result;
+}
+
+EXPORT_SYMBOL(acpi_bus_set_power);
+
+bool acpi_bus_power_manageable(acpi_handle handle)
+{
+ struct acpi_device *device;
+ int result;
+
+ result = acpi_bus_get_device(handle, &device);
+ return result ? false : device->flags.power_manageable;
+}
+
+EXPORT_SYMBOL(acpi_bus_power_manageable);
+
+bool acpi_bus_can_wakeup(acpi_handle handle)
+{
+ struct acpi_device *device;
+ int result;
+
+ result = acpi_bus_get_device(handle, &device);
+ return result ? false : device->wakeup.flags.valid;
+}
+
+EXPORT_SYMBOL(acpi_bus_can_wakeup);
+
+/* --------------------------------------------------------------------------
+ Event Management
+ -------------------------------------------------------------------------- */
+
+#ifdef CONFIG_ACPI_PROC_EVENT
+static DEFINE_SPINLOCK(acpi_bus_event_lock);
+
+LIST_HEAD(acpi_bus_event_list);
+DECLARE_WAIT_QUEUE_HEAD(acpi_bus_event_queue);
+
+extern int event_is_open;
+
+int acpi_bus_generate_proc_event4(const char *device_class, const char *bus_id, u8 type, int data)
+{
+ struct acpi_bus_event *event;
+ unsigned long flags = 0;
+
+ /* drop event on the floor if no one's listening */
+ if (!event_is_open)
+ return 0;
+
+ event = kmalloc(sizeof(struct acpi_bus_event), GFP_ATOMIC);
+ if (!event)
+ return -ENOMEM;
+
+ strcpy(event->device_class, device_class);
+ strcpy(event->bus_id, bus_id);
+ event->type = type;
+ event->data = data;
+
+ spin_lock_irqsave(&acpi_bus_event_lock, flags);
+ list_add_tail(&event->node, &acpi_bus_event_list);
+ spin_unlock_irqrestore(&acpi_bus_event_lock, flags);
+
+ wake_up_interruptible(&acpi_bus_event_queue);
+
+ return 0;
+
+}
+
+EXPORT_SYMBOL_GPL(acpi_bus_generate_proc_event4);
+
+int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data)
+{
+ if (!device)
+ return -EINVAL;
+ return acpi_bus_generate_proc_event4(device->pnp.device_class,
+ device->pnp.bus_id, type, data);
+}
+
+EXPORT_SYMBOL(acpi_bus_generate_proc_event);
+
+int acpi_bus_receive_event(struct acpi_bus_event *event)
+{
+ unsigned long flags = 0;
+ struct acpi_bus_event *entry = NULL;
+
+ DECLARE_WAITQUEUE(wait, current);
+
+
+ if (!event)
+ return -EINVAL;
+
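+ /*
+ * Block until an event is queued. The list is re-checked after the
+ * task has been added to the wait queue so that an event posted in
+ * between is not missed.
+ */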
+ if (list_empty(&acpi_bus_event_list)) {
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&acpi_bus_event_queue, &wait);
+
+ if (list_empty(&acpi_bus_event_list))
+ schedule();
+
+ remove_wait_queue(&acpi_bus_event_queue, &wait);
+ set_current_state(TASK_RUNNING);
+
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ }
+
+ spin_lock_irqsave(&acpi_bus_event_lock, flags);
+ if (!list_empty(&acpi_bus_event_list)) {
+ entry = list_entry(acpi_bus_event_list.next,
+ struct acpi_bus_event, node);
+ list_del(&entry->node);
+ }
+ spin_unlock_irqrestore(&acpi_bus_event_lock, flags);
+
+ if (!entry)
+ return -ENODEV;
+
+ memcpy(event, entry, sizeof(struct acpi_bus_event));
+
+ kfree(entry);
+
+ return 0;
+}
+
+#endif /* CONFIG_ACPI_PROC_EVENT */
+
+/* --------------------------------------------------------------------------
+ Notification Handling
+ -------------------------------------------------------------------------- */
+
+static int
+acpi_bus_check_device(struct acpi_device *device, int *status_changed)
+{
+ struct acpi_device_status old_status;
+
+
+ if (!device)
+ return -EINVAL;
+
+ if (status_changed)
+ *status_changed = 0;
+
+ old_status = device->status;
+
+ /*
+ * Make sure this device's parent is present before we go about
+ * messing with the device.
+ */
+ if (device->parent && !device->parent->status.present) {
+ device->status = device->parent->status;
+ if (STRUCT_TO_INT(old_status) != STRUCT_TO_INT(device->status)) {
+ if (status_changed)
+ *status_changed = 1;
+ }
+ return 0;
+ }
+
+ /* acpi_bus_get_status() returns a Linux errno, not an acpi_status */
+ if (acpi_bus_get_status(device))
+ return -ENODEV;
+
+ if (STRUCT_TO_INT(old_status) == STRUCT_TO_INT(device->status))
+ return 0;
+
+ if (status_changed)
+ *status_changed = 1;
+
+ /*
+ * Device Insertion/Removal
+ */
+ if ((device->status.present) && !(old_status.present)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device insertion detected\n"));
+ /* TBD: Handle device insertion */
+ } else if (!(device->status.present) && (old_status.present)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device removal detected\n"));
+ /* TBD: Handle device removal */
+ }
+
+ return 0;
+}
+
+static int acpi_bus_check_scope(struct acpi_device *device)
+{
+ int result = 0;
+ int status_changed = 0;
+
+
+ if (!device)
+ return -EINVAL;
+
+ /* Status Change? */
+ result = acpi_bus_check_device(device, &status_changed);
+ if (result)
+ return result;
+
+ if (!status_changed)
+ return 0;
+
+ /*
+ * TBD: Enumerate child devices within this device's scope and
+ * run acpi_bus_check_device()'s on them.
+ */
+
+ return 0;
+}
+
+static BLOCKING_NOTIFIER_HEAD(acpi_bus_notify_list);
+int register_acpi_bus_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&acpi_bus_notify_list, nb);
+}
+EXPORT_SYMBOL_GPL(register_acpi_bus_notifier);
+
+void unregister_acpi_bus_notifier(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&acpi_bus_notify_list, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_acpi_bus_notifier);
+
+/**
+ * acpi_bus_notify
+ * ---------------
+ * Callback for all 'system-level' device notifications (values 0x00-0x7F).
+ */
+static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
+{
+ int result = 0;
+ struct acpi_device *device = NULL;
+
+ blocking_notifier_call_chain(&acpi_bus_notify_list,
+ type, (void *)handle);
+
+ if (acpi_bus_get_device(handle, &device))
+ return;
+
+ switch (type) {
+
+ case ACPI_NOTIFY_BUS_CHECK:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Received BUS CHECK notification for device [%s]\n",
+ device->pnp.bus_id));
+ result = acpi_bus_check_scope(device);
+ /*
+ * TBD: We'll need to outsource certain events to non-ACPI
+ * drivers via the device manager (device.c).
+ */
+ break;
+
+ case ACPI_NOTIFY_DEVICE_CHECK:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Received DEVICE CHECK notification for device [%s]\n",
+ device->pnp.bus_id));
+ result = acpi_bus_check_device(device, NULL);
+ /*
+ * TBD: We'll need to outsource certain events to non-ACPI
+ * drivers via the device manager (device.c).
+ */
+ break;
+
+ case ACPI_NOTIFY_DEVICE_WAKE:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Received DEVICE WAKE notification for device [%s]\n",
+ device->pnp.bus_id));
+ /* TBD */
+ break;
+
+ case ACPI_NOTIFY_EJECT_REQUEST:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Received EJECT REQUEST notification for device [%s]\n",
+ device->pnp.bus_id));
+ /* TBD */
+ break;
+
+ case ACPI_NOTIFY_DEVICE_CHECK_LIGHT:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Received DEVICE CHECK LIGHT notification for device [%s]\n",
+ device->pnp.bus_id));
+ /* TBD: Exactly what does 'light' mean? */
+ break;
+
+ case ACPI_NOTIFY_FREQUENCY_MISMATCH:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Received FREQUENCY MISMATCH notification for device [%s]\n",
+ device->pnp.bus_id));
+ /* TBD */
+ break;
+
+ case ACPI_NOTIFY_BUS_MODE_MISMATCH:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Received BUS MODE MISMATCH notification for device [%s]\n",
+ device->pnp.bus_id));
+ /* TBD */
+ break;
+
+ case ACPI_NOTIFY_POWER_FAULT:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Received POWER FAULT notification for device [%s]\n",
+ device->pnp.bus_id));
+ /* TBD */
+ break;
+
+ default:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Received unknown/unsupported notification [%08x]\n",
+ type));
+ break;
+ }
+
+ return;
+}
+
+/* --------------------------------------------------------------------------
+ Initialization/Cleanup
+ -------------------------------------------------------------------------- */
+
+static int __init acpi_bus_init_irq(void)
+{
+ acpi_status status = AE_OK;
+ union acpi_object arg = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list arg_list = { 1, &arg };
+ char *message = NULL;
+
+
+ /*
+ * Let the system know what interrupt model we are using by
+ * evaluating the \_PIC object, if it exists.
+ */
+
+ switch (acpi_irq_model) {
+ case ACPI_IRQ_MODEL_PIC:
+ message = "PIC";
+ break;
+ case ACPI_IRQ_MODEL_IOAPIC:
+ message = "IOAPIC";
+ break;
+ case ACPI_IRQ_MODEL_IOSAPIC:
+ message = "IOSAPIC";
+ break;
+ case ACPI_IRQ_MODEL_PLATFORM:
+ message = "platform specific model";
+ break;
+ default:
+ printk(KERN_WARNING PREFIX "Unknown interrupt routing model\n");
+ return -ENODEV;
+ }
+
+ printk(KERN_INFO PREFIX "Using %s for interrupt routing\n", message);
+
+ arg.integer.value = acpi_irq_model;
+
+ status = acpi_evaluate_object(NULL, "\\_PIC", &arg_list, NULL);
+ if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PIC"));
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+u8 acpi_gbl_permanent_mmap;
+
+
+void __init acpi_early_init(void)
+{
+ acpi_status status = AE_OK;
+
+ if (acpi_disabled)
+ return;
+
+ printk(KERN_INFO PREFIX "Core revision %08x\n", ACPI_CA_VERSION);
+
+ /* enable workarounds, unless strict ACPI spec. compliance */
+ if (!acpi_strict)
+ acpi_gbl_enable_interpreter_slack = TRUE;
+
+ acpi_gbl_permanent_mmap = 1;
+
+ status = acpi_reallocate_root_table();
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX
+ "Unable to reallocate ACPI tables\n");
+ goto error0;
+ }
+
+ status = acpi_initialize_subsystem();
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX
+ "Unable to initialize the ACPI Interpreter\n");
+ goto error0;
+ }
+
+ status = acpi_load_tables();
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX
+ "Unable to load the System Description Tables\n");
+ goto error0;
+ }
+
+#ifdef CONFIG_X86
+ if (!acpi_ioapic) {
+ /* compatible (0) means level (3) */
+ if (!(acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)) {
+ acpi_sci_flags &= ~ACPI_MADT_TRIGGER_MASK;
+ acpi_sci_flags |= ACPI_MADT_TRIGGER_LEVEL;
+ }
+ /* Set PIC-mode SCI trigger type */
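+ /* ACPI_MADT_TRIGGER_MASK covers bits 2-3 of the INTI flags, hence >> 2 */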
+ acpi_pic_sci_set_trigger(acpi_gbl_FADT.sci_interrupt,
+ (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
+ } else {
+ /*
+ * now that acpi_gbl_FADT is initialized,
+ * update it with result from INT_SRC_OVR parsing
+ */
+ acpi_gbl_FADT.sci_interrupt = acpi_sci_override_gsi;
+ }
+#endif
+
+ status =
+ acpi_enable_subsystem(~
+ (ACPI_NO_HARDWARE_INIT |
+ ACPI_NO_ACPI_ENABLE));
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX "Unable to enable ACPI\n");
+ goto error0;
+ }
+
+ return;
+
+ error0:
+ disable_acpi();
+ return;
+}
+
+static int __init acpi_bus_init(void)
+{
+ int result = 0;
+ acpi_status status = AE_OK;
+ extern acpi_status acpi_os_initialize1(void);
+
+
+ status = acpi_os_initialize1();
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX
+ "Unable to initialize ACPI OS objects\n");
+ goto error1;
+ }
+
+ status =
+ acpi_enable_subsystem(ACPI_NO_HARDWARE_INIT | ACPI_NO_ACPI_ENABLE);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX
+ "Unable to start the ACPI Interpreter\n");
+ goto error1;
+ }
+
+ /*
+ * ACPI 2.0 requires the EC driver to be loaded and work before
+ * the EC device is found in the namespace (i.e. before acpi_initialize_objects()
+ * is called).
+ *
+ * This is accomplished by looking for the ECDT table, and getting
+ * the EC parameters out of that.
+ */
+ status = acpi_ec_ecdt_probe();
+ /* Ignore result. Not having an ECDT is not fatal. */
+
+ status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n");
+ goto error1;
+ }
+
+ /*
+ * The EC operation region may already be needed during bus_scan/
+ * acpi_get_devices, so the EC must be enabled as early as possible.
+ */
+ acpi_boot_ec_enable();
+
+ printk(KERN_INFO PREFIX "Interpreter enabled\n");
+
+ /* Initialize sleep structures */
+ acpi_sleep_init();
+
+ /*
+ * Get the system interrupt model and evaluate \_PIC.
+ */
+ result = acpi_bus_init_irq();
+ if (result)
+ goto error1;
+
+ /*
+ * Register for all standard device notifications.
+ */
+ status =
+ acpi_install_notify_handler(ACPI_ROOT_OBJECT, ACPI_SYSTEM_NOTIFY,
+ &acpi_bus_notify, NULL);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX
+ "Unable to register for device notifications\n");
+ goto error1;
+ }
+
+ /*
+ * Create the top ACPI proc directory
+ */
+ acpi_root_dir = proc_mkdir(ACPI_BUS_FILE_ROOT, NULL);
+
+ return 0;
+
+ /* Mimic structured exception handling */
+ error1:
+ acpi_terminate();
+ return -ENODEV;
+}
+
+struct kobject *acpi_kobj;
+
+static int __init acpi_init(void)
+{
+ int result = 0;
+
+
+ if (acpi_disabled) {
+ printk(KERN_INFO PREFIX "Interpreter disabled.\n");
+ return -ENODEV;
+ }
+
+ acpi_kobj = kobject_create_and_add("acpi", firmware_kobj);
+ if (!acpi_kobj) {
+ printk(KERN_WARNING "%s: kset create error\n", __func__);
+ acpi_kobj = NULL;
+ }
+
+ result = acpi_bus_init();
+
+ if (!result) {
+ pci_mmcfg_late_init();
+ if (!(pm_flags & PM_APM))
+ pm_flags |= PM_ACPI;
+ else {
+ printk(KERN_INFO PREFIX
+ "APM is already active, exiting\n");
+ disable_acpi();
+ result = -ENODEV;
+ }
+ } else
+ disable_acpi();
+ /*
+ * If the laptop falls into the DMI check table, the power state check
+ * will be disabled in the course of the device power transition.
+ */
+ dmi_check_system(power_nocheck_dmi_table);
+ return result;
+}
+
+subsys_initcall(acpi_init);
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
new file mode 100644
index 0000000..171fd91
--- /dev/null
+++ b/drivers/acpi/button.c
@@ -0,0 +1,549 @@
+/*
+ * acpi_button.c - ACPI Button Driver ($Revision: 30 $)
+ *
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/input.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#define ACPI_BUTTON_CLASS "button"
+#define ACPI_BUTTON_FILE_INFO "info"
+#define ACPI_BUTTON_FILE_STATE "state"
+#define ACPI_BUTTON_TYPE_UNKNOWN 0x00
+#define ACPI_BUTTON_NOTIFY_STATUS 0x80
+
+#define ACPI_BUTTON_SUBCLASS_POWER "power"
+#define ACPI_BUTTON_HID_POWER "PNP0C0C"
+#define ACPI_BUTTON_DEVICE_NAME_POWER "Power Button (CM)"
+#define ACPI_BUTTON_DEVICE_NAME_POWERF "Power Button (FF)"
+#define ACPI_BUTTON_TYPE_POWER 0x01
+#define ACPI_BUTTON_TYPE_POWERF 0x02
+
+#define ACPI_BUTTON_SUBCLASS_SLEEP "sleep"
+#define ACPI_BUTTON_HID_SLEEP "PNP0C0E"
+#define ACPI_BUTTON_DEVICE_NAME_SLEEP "Sleep Button (CM)"
+#define ACPI_BUTTON_DEVICE_NAME_SLEEPF "Sleep Button (FF)"
+#define ACPI_BUTTON_TYPE_SLEEP 0x03
+#define ACPI_BUTTON_TYPE_SLEEPF 0x04
+
+#define ACPI_BUTTON_SUBCLASS_LID "lid"
+#define ACPI_BUTTON_HID_LID "PNP0C0D"
+#define ACPI_BUTTON_DEVICE_NAME_LID "Lid Switch"
+#define ACPI_BUTTON_TYPE_LID 0x05
+
+#define _COMPONENT ACPI_BUTTON_COMPONENT
+ACPI_MODULE_NAME("button");
+
+MODULE_AUTHOR("Paul Diefenbaugh");
+MODULE_DESCRIPTION("ACPI Button Driver");
+MODULE_LICENSE("GPL");
+
+static const struct acpi_device_id button_device_ids[] = {
+ {ACPI_BUTTON_HID_LID, 0},
+ {ACPI_BUTTON_HID_SLEEP, 0},
+ {ACPI_BUTTON_HID_SLEEPF, 0},
+ {ACPI_BUTTON_HID_POWER, 0},
+ {ACPI_BUTTON_HID_POWERF, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, button_device_ids);
+
+static int acpi_button_add(struct acpi_device *device);
+static int acpi_button_remove(struct acpi_device *device, int type);
+static int acpi_button_resume(struct acpi_device *device);
+static int acpi_button_info_open_fs(struct inode *inode, struct file *file);
+static int acpi_button_state_open_fs(struct inode *inode, struct file *file);
+
+static struct acpi_driver acpi_button_driver = {
+ .name = "button",
+ .class = ACPI_BUTTON_CLASS,
+ .ids = button_device_ids,
+ .ops = {
+ .add = acpi_button_add,
+ .resume = acpi_button_resume,
+ .remove = acpi_button_remove,
+ },
+};
+
+struct acpi_button {
+ struct acpi_device *device; /* Fixed button kludge */
+ unsigned int type;
+ struct input_dev *input;
+ char phys[32]; /* for input device */
+ unsigned long pushed;
+};
+
+static const struct file_operations acpi_button_info_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_button_info_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations acpi_button_state_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_button_state_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* --------------------------------------------------------------------------
+ FS Interface (/proc)
+ -------------------------------------------------------------------------- */
+
+static struct proc_dir_entry *acpi_button_dir;
+
+static int acpi_button_info_seq_show(struct seq_file *seq, void *offset)
+{
+ struct acpi_button *button = seq->private;
+
+ if (!button || !button->device)
+ return 0;
+
+ seq_printf(seq, "type: %s\n",
+ acpi_device_name(button->device));
+
+ return 0;
+}
+
+static int acpi_button_info_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_button_info_seq_show, PDE(inode)->data);
+}
+
+static int acpi_button_state_seq_show(struct seq_file *seq, void *offset)
+{
+ struct acpi_button *button = seq->private;
+ acpi_status status;
+ unsigned long long state;
+
+ if (!button || !button->device)
+ return 0;
+
+ status = acpi_evaluate_integer(button->device->handle, "_LID", NULL, &state);
+ seq_printf(seq, "state: %s\n",
+ ACPI_FAILURE(status) ? "unsupported" :
+ (state ? "open" : "closed"));
+ return 0;
+}
+
+static int acpi_button_state_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_button_state_seq_show, PDE(inode)->data);
+}
+
+static struct proc_dir_entry *acpi_power_dir;
+static struct proc_dir_entry *acpi_sleep_dir;
+static struct proc_dir_entry *acpi_lid_dir;
+
+static int acpi_button_add_fs(struct acpi_device *device)
+{
+ struct proc_dir_entry *entry = NULL;
+ struct acpi_button *button;
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ button = acpi_driver_data(device);
+
+ switch (button->type) {
+ case ACPI_BUTTON_TYPE_POWER:
+ case ACPI_BUTTON_TYPE_POWERF:
+ if (!acpi_power_dir)
+ acpi_power_dir = proc_mkdir(ACPI_BUTTON_SUBCLASS_POWER,
+ acpi_button_dir);
+ entry = acpi_power_dir;
+ break;
+ case ACPI_BUTTON_TYPE_SLEEP:
+ case ACPI_BUTTON_TYPE_SLEEPF:
+ if (!acpi_sleep_dir)
+ acpi_sleep_dir = proc_mkdir(ACPI_BUTTON_SUBCLASS_SLEEP,
+ acpi_button_dir);
+ entry = acpi_sleep_dir;
+ break;
+ case ACPI_BUTTON_TYPE_LID:
+ if (!acpi_lid_dir)
+ acpi_lid_dir = proc_mkdir(ACPI_BUTTON_SUBCLASS_LID,
+ acpi_button_dir);
+ entry = acpi_lid_dir;
+ break;
+ }
+
+ if (!entry)
+ return -ENODEV;
+ entry->owner = THIS_MODULE;
+
+ acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), entry);
+ if (!acpi_device_dir(device))
+ return -ENODEV;
+ acpi_device_dir(device)->owner = THIS_MODULE;
+
+ /* 'info' [R] */
+ entry = proc_create_data(ACPI_BUTTON_FILE_INFO,
+ S_IRUGO, acpi_device_dir(device),
+ &acpi_button_info_fops,
+ acpi_driver_data(device));
+ if (!entry)
+ return -ENODEV;
+
+ /* show lid state [R] */
+ if (button->type == ACPI_BUTTON_TYPE_LID) {
+ entry = proc_create_data(ACPI_BUTTON_FILE_STATE,
+ S_IRUGO, acpi_device_dir(device),
+ &acpi_button_state_fops,
+ acpi_driver_data(device));
+ if (!entry)
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int acpi_button_remove_fs(struct acpi_device *device)
+{
+ struct acpi_button *button = acpi_driver_data(device);
+
+ if (acpi_device_dir(device)) {
+ if (button->type == ACPI_BUTTON_TYPE_LID)
+ remove_proc_entry(ACPI_BUTTON_FILE_STATE,
+ acpi_device_dir(device));
+ remove_proc_entry(ACPI_BUTTON_FILE_INFO,
+ acpi_device_dir(device));
+
+ remove_proc_entry(acpi_device_bid(device),
+ acpi_device_dir(device)->parent);
+ acpi_device_dir(device) = NULL;
+ }
+
+ return 0;
+}
+
+/* --------------------------------------------------------------------------
+ Driver Interface
+ -------------------------------------------------------------------------- */
+static int acpi_lid_send_state(struct acpi_button *button)
+{
+ unsigned long long state;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(button->device->handle, "_LID", NULL,
+ &state);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+ /* input layer checks if event is redundant */
+ input_report_switch(button->input, SW_LID, !state);
+ input_sync(button->input);
+ return 0;
+}
+
+static void acpi_button_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct acpi_button *button = data;
+ struct input_dev *input;
+
+ if (!button || !button->device)
+ return;
+
+ switch (event) {
+ case ACPI_BUTTON_NOTIFY_STATUS:
+ input = button->input;
+ if (button->type == ACPI_BUTTON_TYPE_LID) {
+ acpi_lid_send_state(button);
+ } else {
+ int keycode = test_bit(KEY_SLEEP, input->keybit) ?
+ KEY_SLEEP : KEY_POWER;
+
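+ /* Report a full key press/release pair to the input layer */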
+ input_report_key(input, keycode, 1);
+ input_sync(input);
+ input_report_key(input, keycode, 0);
+ input_sync(input);
+ }
+
+ acpi_bus_generate_proc_event(button->device, event,
+ ++button->pushed);
+ break;
+ default:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Unsupported event [0x%x]\n", event));
+ break;
+ }
+
+ return;
+}
+
+static acpi_status acpi_button_notify_fixed(void *data)
+{
+ struct acpi_button *button = data;
+
+ if (!button)
+ return AE_BAD_PARAMETER;
+
+ acpi_button_notify(button->device->handle, ACPI_BUTTON_NOTIFY_STATUS, button);
+
+ return AE_OK;
+}
+
+static int acpi_button_install_notify_handlers(struct acpi_button *button)
+{
+ acpi_status status;
+
+ switch (button->type) {
+ case ACPI_BUTTON_TYPE_POWERF:
+ status =
+ acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
+ acpi_button_notify_fixed,
+ button);
+ break;
+ case ACPI_BUTTON_TYPE_SLEEPF:
+ status =
+ acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
+ acpi_button_notify_fixed,
+ button);
+ break;
+ default:
+ status = acpi_install_notify_handler(button->device->handle,
+ ACPI_DEVICE_NOTIFY,
+ acpi_button_notify,
+ button);
+ break;
+ }
+
+ return ACPI_FAILURE(status) ? -ENODEV : 0;
+}
+
+static int acpi_button_resume(struct acpi_device *device)
+{
+ struct acpi_button *button;
+ if (!device)
+ return -EINVAL;
+ button = acpi_driver_data(device);
+ if (button && button->type == ACPI_BUTTON_TYPE_LID)
+ return acpi_lid_send_state(button);
+ return 0;
+}
+
+static void acpi_button_remove_notify_handlers(struct acpi_button *button)
+{
+ switch (button->type) {
+ case ACPI_BUTTON_TYPE_POWERF:
+ acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
+ acpi_button_notify_fixed);
+ break;
+ case ACPI_BUTTON_TYPE_SLEEPF:
+ acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
+ acpi_button_notify_fixed);
+ break;
+ default:
+ acpi_remove_notify_handler(button->device->handle,
+ ACPI_DEVICE_NOTIFY,
+ acpi_button_notify);
+ break;
+ }
+}
+
+static int acpi_button_add(struct acpi_device *device)
+{
+ int error;
+ struct acpi_button *button;
+ struct input_dev *input;
+
+ if (!device)
+ return -EINVAL;
+
+ button = kzalloc(sizeof(struct acpi_button), GFP_KERNEL);
+ if (!button)
+ return -ENOMEM;
+
+ button->device = device;
+ device->driver_data = button;
+
+ button->input = input = input_allocate_device();
+ if (!input) {
+ error = -ENOMEM;
+ goto err_free_button;
+ }
+
+ /*
+ * Determine the button type (via HID), as fixed-feature buttons
+ * need to be handled a bit differently than generic-space
+ * (control-method) buttons.
+ */
+ if (!strcmp(acpi_device_hid(device), ACPI_BUTTON_HID_POWER)) {
+ button->type = ACPI_BUTTON_TYPE_POWER;
+ strcpy(acpi_device_name(device), ACPI_BUTTON_DEVICE_NAME_POWER);
+ sprintf(acpi_device_class(device), "%s/%s",
+ ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_POWER);
+ } else if (!strcmp(acpi_device_hid(device), ACPI_BUTTON_HID_POWERF)) {
+ button->type = ACPI_BUTTON_TYPE_POWERF;
+ strcpy(acpi_device_name(device),
+ ACPI_BUTTON_DEVICE_NAME_POWERF);
+ sprintf(acpi_device_class(device), "%s/%s",
+ ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_POWER);
+ } else if (!strcmp(acpi_device_hid(device), ACPI_BUTTON_HID_SLEEP)) {
+ button->type = ACPI_BUTTON_TYPE_SLEEP;
+ strcpy(acpi_device_name(device), ACPI_BUTTON_DEVICE_NAME_SLEEP);
+ sprintf(acpi_device_class(device), "%s/%s",
+ ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_SLEEP);
+ } else if (!strcmp(acpi_device_hid(device), ACPI_BUTTON_HID_SLEEPF)) {
+ button->type = ACPI_BUTTON_TYPE_SLEEPF;
+ strcpy(acpi_device_name(device),
+ ACPI_BUTTON_DEVICE_NAME_SLEEPF);
+ sprintf(acpi_device_class(device), "%s/%s",
+ ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_SLEEP);
+ } else if (!strcmp(acpi_device_hid(device), ACPI_BUTTON_HID_LID)) {
+ button->type = ACPI_BUTTON_TYPE_LID;
+ strcpy(acpi_device_name(device), ACPI_BUTTON_DEVICE_NAME_LID);
+ sprintf(acpi_device_class(device), "%s/%s",
+ ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_LID);
+ } else {
+ printk(KERN_ERR PREFIX "Unsupported hid [%s]\n",
+ acpi_device_hid(device));
+ error = -ENODEV;
+ goto err_free_input;
+ }
+
+ error = acpi_button_add_fs(device);
+ if (error)
+ goto err_free_input;
+
+ error = acpi_button_install_notify_handlers(button);
+ if (error)
+ goto err_remove_fs;
+
+ snprintf(button->phys, sizeof(button->phys),
+ "%s/button/input0", acpi_device_hid(device));
+
+ input->name = acpi_device_name(device);
+ input->phys = button->phys;
+ input->id.bustype = BUS_HOST;
+ input->id.product = button->type;
+ input->dev.parent = &device->dev;
+
+ switch (button->type) {
+ case ACPI_BUTTON_TYPE_POWER:
+ case ACPI_BUTTON_TYPE_POWERF:
+ input->evbit[0] = BIT_MASK(EV_KEY);
+ set_bit(KEY_POWER, input->keybit);
+ break;
+
+ case ACPI_BUTTON_TYPE_SLEEP:
+ case ACPI_BUTTON_TYPE_SLEEPF:
+ input->evbit[0] = BIT_MASK(EV_KEY);
+ set_bit(KEY_SLEEP, input->keybit);
+ break;
+
+ case ACPI_BUTTON_TYPE_LID:
+ input->evbit[0] = BIT_MASK(EV_SW);
+ set_bit(SW_LID, input->swbit);
+ break;
+ }
+
+ error = input_register_device(input);
+ if (error)
+ goto err_remove_handlers;
+ if (button->type == ACPI_BUTTON_TYPE_LID)
+ acpi_lid_send_state(button);
+
+ if (device->wakeup.flags.valid) {
+ /* Button's GPE is run-wake GPE */
+ acpi_set_gpe_type(device->wakeup.gpe_device,
+ device->wakeup.gpe_number,
+ ACPI_GPE_TYPE_WAKE_RUN);
+ acpi_enable_gpe(device->wakeup.gpe_device,
+ device->wakeup.gpe_number);
+ device->wakeup.state.enabled = 1;
+ }
+
+ printk(KERN_INFO PREFIX "%s [%s]\n",
+ acpi_device_name(device), acpi_device_bid(device));
+
+ return 0;
+
+ err_remove_handlers:
+ acpi_button_remove_notify_handlers(button);
+ err_remove_fs:
+ acpi_button_remove_fs(device);
+ err_free_input:
+ input_free_device(input);
+ err_free_button:
+ kfree(button);
+ return error;
+}
+
+static int acpi_button_remove(struct acpi_device *device, int type)
+{
+ struct acpi_button *button;
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ button = acpi_driver_data(device);
+
+ acpi_button_remove_notify_handlers(button);
+ acpi_button_remove_fs(device);
+ input_unregister_device(button->input);
+ kfree(button);
+
+ return 0;
+}
+
+static int __init acpi_button_init(void)
+{
+ int result;
+
+ acpi_button_dir = proc_mkdir(ACPI_BUTTON_CLASS, acpi_root_dir);
+ if (!acpi_button_dir)
+ return -ENODEV;
+ acpi_button_dir->owner = THIS_MODULE;
+ result = acpi_bus_register_driver(&acpi_button_driver);
+ if (result < 0) {
+ remove_proc_entry(ACPI_BUTTON_CLASS, acpi_root_dir);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void __exit acpi_button_exit(void)
+{
+ acpi_bus_unregister_driver(&acpi_button_driver);
+
+ if (acpi_power_dir)
+ remove_proc_entry(ACPI_BUTTON_SUBCLASS_POWER, acpi_button_dir);
+ if (acpi_sleep_dir)
+ remove_proc_entry(ACPI_BUTTON_SUBCLASS_SLEEP, acpi_button_dir);
+ if (acpi_lid_dir)
+ remove_proc_entry(ACPI_BUTTON_SUBCLASS_LID, acpi_button_dir);
+ remove_proc_entry(ACPI_BUTTON_CLASS, acpi_root_dir);
+}
+
+module_init(acpi_button_init);
+module_exit(acpi_button_exit);
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
new file mode 100644
index 0000000..307963b
--- /dev/null
+++ b/drivers/acpi/cm_sbs.c
@@ -0,0 +1,106 @@
+/*
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/acmacros.h>
+#include <acpi/actypes.h>
+#include <acpi/acutils.h>
+
+ACPI_MODULE_NAME("cm_sbs");
+#define ACPI_AC_CLASS "ac_adapter"
+#define ACPI_BATTERY_CLASS "battery"
+#define _COMPONENT ACPI_SBS_COMPONENT
+static struct proc_dir_entry *acpi_ac_dir;
+static struct proc_dir_entry *acpi_battery_dir;
+
+static DEFINE_MUTEX(cm_sbs_mutex);
+
+static int lock_ac_dir_cnt;
+static int lock_battery_dir_cnt;
+
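+/*
+ * The /proc ac_adapter and battery directories are shared between the
+ * ACPI AC/battery drivers and the SBS driver: they are created on the
+ * first "lock" and removed again when the last user "unlocks" them.
+ */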
+struct proc_dir_entry *acpi_lock_ac_dir(void)
+{
+ mutex_lock(&cm_sbs_mutex);
+ if (!acpi_ac_dir)
+ acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir);
+ if (acpi_ac_dir) {
+ lock_ac_dir_cnt++;
+ } else {
+ printk(KERN_ERR PREFIX
+ "Cannot create %s\n", ACPI_AC_CLASS);
+ }
+ mutex_unlock(&cm_sbs_mutex);
+ return acpi_ac_dir;
+}
+EXPORT_SYMBOL(acpi_lock_ac_dir);
+
+void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param)
+{
+ mutex_lock(&cm_sbs_mutex);
+ if (acpi_ac_dir_param)
+ lock_ac_dir_cnt--;
+ if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) {
+ remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir);
+ acpi_ac_dir = NULL;
+ }
+ mutex_unlock(&cm_sbs_mutex);
+}
+EXPORT_SYMBOL(acpi_unlock_ac_dir);
+
+struct proc_dir_entry *acpi_lock_battery_dir(void)
+{
+ mutex_lock(&cm_sbs_mutex);
+ if (!acpi_battery_dir) {
+ acpi_battery_dir =
+ proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir);
+ }
+ if (acpi_battery_dir) {
+ lock_battery_dir_cnt++;
+ } else {
+ printk(KERN_ERR PREFIX
+ "Cannot create %s\n", ACPI_BATTERY_CLASS);
+ }
+ mutex_unlock(&cm_sbs_mutex);
+ return acpi_battery_dir;
+}
+EXPORT_SYMBOL(acpi_lock_battery_dir);
+
+void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param)
+{
+ mutex_lock(&cm_sbs_mutex);
+ if (acpi_battery_dir_param)
+ lock_battery_dir_cnt--;
+ if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param
+ && acpi_battery_dir) {
+ remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir);
+ acpi_battery_dir = NULL;
+ }
+ mutex_unlock(&cm_sbs_mutex);
+ return;
+}
+EXPORT_SYMBOL(acpi_unlock_battery_dir);
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
new file mode 100644
index 0000000..17020c1
--- /dev/null
+++ b/drivers/acpi/container.c
@@ -0,0 +1,282 @@
+/*
+ * acpi_container.c - ACPI Generic Container Driver
+ * ($Revision: )
+ *
+ * Copyright (C) 2004 Anil S Keshavamurthy (anil.s.keshavamurthy@intel.com)
+ * Copyright (C) 2004 Keiichiro Tokunaga (tokunaga.keiich@jp.fujitsu.com)
+ * Copyright (C) 2004 Motoyuki Ito (motoyuki@soft.fujitsu.com)
+ * Copyright (C) 2004 Intel Corp.
+ * Copyright (C) 2004 FUJITSU LIMITED
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/container.h>
+
+#define ACPI_CONTAINER_DEVICE_NAME "ACPI container device"
+#define ACPI_CONTAINER_CLASS "container"
+
+#define INSTALL_NOTIFY_HANDLER 1
+#define UNINSTALL_NOTIFY_HANDLER 2
+
+#define _COMPONENT ACPI_CONTAINER_COMPONENT
+ACPI_MODULE_NAME("container");
+
+MODULE_AUTHOR("Anil S Keshavamurthy");
+MODULE_DESCRIPTION("ACPI container driver");
+MODULE_LICENSE("GPL");
+
+static int acpi_container_add(struct acpi_device *device);
+static int acpi_container_remove(struct acpi_device *device, int type);
+
+static const struct acpi_device_id container_device_ids[] = {
+ {"ACPI0004", 0},
+ {"PNP0A05", 0},
+ {"PNP0A06", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, container_device_ids);
+
+static struct acpi_driver acpi_container_driver = {
+ .name = "container",
+ .class = ACPI_CONTAINER_CLASS,
+ .ids = container_device_ids,
+ .ops = {
+ .add = acpi_container_add,
+ .remove = acpi_container_remove,
+ },
+};
+
+/*******************************************************************/
+
+static int is_device_present(acpi_handle handle)
+{
+ acpi_handle temp;
+ acpi_status status;
+ unsigned long long sta;
+
+
+ status = acpi_get_handle(handle, "_STA", &temp);
+ if (ACPI_FAILURE(status))
+ return 1; /* _STA not found, assume device present */
+
+ status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
+ if (ACPI_FAILURE(status))
+ return 0; /* Firmware error */
+
+ return ((sta & ACPI_STA_DEVICE_PRESENT) == ACPI_STA_DEVICE_PRESENT);
+}
+
+/*******************************************************************/
+static int acpi_container_add(struct acpi_device *device)
+{
+ struct acpi_container *container;
+
+
+ if (!device) {
+ printk(KERN_ERR PREFIX "device is NULL\n");
+ return -EINVAL;
+ }
+
+ container = kzalloc(sizeof(struct acpi_container), GFP_KERNEL);
+ if (!container)
+ return -ENOMEM;
+
+ container->handle = device->handle;
+ strcpy(acpi_device_name(device), ACPI_CONTAINER_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_CONTAINER_CLASS);
+ device->driver_data = container;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device <%s> bid <%s>\n",
+ acpi_device_name(device), acpi_device_bid(device)));
+
+ return 0;
+}
+
+static int acpi_container_remove(struct acpi_device *device, int type)
+{
+ acpi_status status = AE_OK;
+ struct acpi_container *pc = NULL;
+
+ pc = acpi_driver_data(device);
+ kfree(pc);
+ return status;
+}
+
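+/*
+ * Hot-added container: create an acpi_device for it under its parent and
+ * start it so that devices below the container get enumerated.
+ */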
+static int container_device_add(struct acpi_device **device, acpi_handle handle)
+{
+ acpi_handle phandle;
+ struct acpi_device *pdev;
+ int result;
+
+
+ if (acpi_get_parent(handle, &phandle)) {
+ return -ENODEV;
+ }
+
+ if (acpi_bus_get_device(phandle, &pdev)) {
+ return -ENODEV;
+ }
+
+ if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_DEVICE)) {
+ return -ENODEV;
+ }
+
+ result = acpi_bus_start(*device);
+
+ return result;
+}
+
+static void container_notify_cb(acpi_handle handle, u32 type, void *context)
+{
+ struct acpi_device *device = NULL;
+ int result;
+ int present;
+ acpi_status status;
+
+
+ present = is_device_present(handle);
+
+ switch (type) {
+ case ACPI_NOTIFY_BUS_CHECK:
+ /* Fall through */
+ case ACPI_NOTIFY_DEVICE_CHECK:
+ printk("Container driver received %s event\n",
+ (type == ACPI_NOTIFY_BUS_CHECK) ?
+ "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK");
+ status = acpi_bus_get_device(handle, &device);
+ if (present) {
+ if (ACPI_FAILURE(status) || !device) {
+ result = container_device_add(&device, handle);
+ if (!result)
+ kobject_uevent(&device->dev.kobj,
+ KOBJ_ONLINE);
+ else
+ printk("Failed to add container\n");
+ }
+ } else {
+ if (ACPI_SUCCESS(status)) {
+ /* device exists and this is a remove request */
+ kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
+ }
+ }
+ break;
+ case ACPI_NOTIFY_EJECT_REQUEST:
+ if (!acpi_bus_get_device(handle, &device) && device) {
+ kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
+ }
+ break;
+ default:
+ break;
+ }
+ return;
+}
+
+static acpi_status
+container_walk_namespace_cb(acpi_handle handle,
+ u32 lvl, void *context, void **rv)
+{
+ char *hid = NULL;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_device_info *info;
+ acpi_status status;
+ int *action = context;
+
+
+ status = acpi_get_object_info(handle, &buffer);
+ if (ACPI_FAILURE(status) || !buffer.pointer) {
+ return AE_OK;
+ }
+
+ info = buffer.pointer;
+ if (info->valid & ACPI_VALID_HID)
+ hid = info->hardware_id.value;
+
+ if (hid == NULL) {
+ goto end;
+ }
+
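+ /* Only container objects (the same HIDs as container_device_ids[]) qualify */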
+ if (strcmp(hid, "ACPI0004") && strcmp(hid, "PNP0A05") &&
+ strcmp(hid, "PNP0A06")) {
+ goto end;
+ }
+
+ switch (*action) {
+ case INSTALL_NOTIFY_HANDLER:
+ acpi_install_notify_handler(handle,
+ ACPI_SYSTEM_NOTIFY,
+ container_notify_cb, NULL);
+ break;
+ case UNINSTALL_NOTIFY_HANDLER:
+ acpi_remove_notify_handler(handle,
+ ACPI_SYSTEM_NOTIFY,
+ container_notify_cb);
+ break;
+ default:
+ break;
+ }
+
+ end:
+ kfree(buffer.pointer);
+
+ return AE_OK;
+}
+
+static int __init acpi_container_init(void)
+{
+ int result = 0;
+ int action = INSTALL_NOTIFY_HANDLER;
+
+ result = acpi_bus_register_driver(&acpi_container_driver);
+ if (result < 0) {
+ return result;
+ }
+
+ /* register notify handler to every container device */
+ acpi_walk_namespace(ACPI_TYPE_DEVICE,
+ ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ container_walk_namespace_cb, &action, NULL);
+
+ return 0;
+}
+
+static void __exit acpi_container_exit(void)
+{
+ int action = UNINSTALL_NOTIFY_HANDLER;
+
+
+ acpi_walk_namespace(ACPI_TYPE_DEVICE,
+ ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ container_walk_namespace_cb, &action, NULL);
+
+ acpi_bus_unregister_driver(&acpi_container_driver);
+
+ return;
+}
+
+module_init(acpi_container_init);
+module_exit(acpi_container_exit);
diff --git a/drivers/acpi/debug.c b/drivers/acpi/debug.c
new file mode 100644
index 0000000..c483968
--- /dev/null
+++ b/drivers/acpi/debug.c
@@ -0,0 +1,345 @@
+/*
+ * debug.c - ACPI debug interface to userspace.
+ */
+
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <asm/uaccess.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/acglobal.h>
+
+#define _COMPONENT ACPI_SYSTEM_COMPONENT
+ACPI_MODULE_NAME("debug");
+
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "acpi."
+
+struct acpi_dlayer {
+ const char *name;
+ unsigned long value;
+};
+struct acpi_dlevel {
+ const char *name;
+ unsigned long value;
+};
+#define ACPI_DEBUG_INIT(v) { .name = #v, .value = v }
+
+static const struct acpi_dlayer acpi_debug_layers[] = {
+ ACPI_DEBUG_INIT(ACPI_UTILITIES),
+ ACPI_DEBUG_INIT(ACPI_HARDWARE),
+ ACPI_DEBUG_INIT(ACPI_EVENTS),
+ ACPI_DEBUG_INIT(ACPI_TABLES),
+ ACPI_DEBUG_INIT(ACPI_NAMESPACE),
+ ACPI_DEBUG_INIT(ACPI_PARSER),
+ ACPI_DEBUG_INIT(ACPI_DISPATCHER),
+ ACPI_DEBUG_INIT(ACPI_EXECUTER),
+ ACPI_DEBUG_INIT(ACPI_RESOURCES),
+ ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER),
+ ACPI_DEBUG_INIT(ACPI_OS_SERVICES),
+ ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER),
+ ACPI_DEBUG_INIT(ACPI_COMPILER),
+ ACPI_DEBUG_INIT(ACPI_TOOLS),
+
+ ACPI_DEBUG_INIT(ACPI_BUS_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_AC_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_BATTERY_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_BUTTON_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_SBS_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_FAN_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_PCI_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_POWER_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_CONTAINER_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_SYSTEM_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_THERMAL_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_MEMORY_DEVICE_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_VIDEO_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_PROCESSOR_COMPONENT),
+};
+
+static const struct acpi_dlevel acpi_debug_levels[] = {
+ ACPI_DEBUG_INIT(ACPI_LV_INIT),
+ ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
+ ACPI_DEBUG_INIT(ACPI_LV_INFO),
+
+ ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
+ ACPI_DEBUG_INIT(ACPI_LV_PARSE),
+ ACPI_DEBUG_INIT(ACPI_LV_LOAD),
+ ACPI_DEBUG_INIT(ACPI_LV_DISPATCH),
+ ACPI_DEBUG_INIT(ACPI_LV_EXEC),
+ ACPI_DEBUG_INIT(ACPI_LV_NAMES),
+ ACPI_DEBUG_INIT(ACPI_LV_OPREGION),
+ ACPI_DEBUG_INIT(ACPI_LV_BFIELD),
+ ACPI_DEBUG_INIT(ACPI_LV_TABLES),
+ ACPI_DEBUG_INIT(ACPI_LV_VALUES),
+ ACPI_DEBUG_INIT(ACPI_LV_OBJECTS),
+ ACPI_DEBUG_INIT(ACPI_LV_RESOURCES),
+ ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS),
+ ACPI_DEBUG_INIT(ACPI_LV_PACKAGE),
+
+ ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS),
+ ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS),
+ ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS),
+
+ ACPI_DEBUG_INIT(ACPI_LV_MUTEX),
+ ACPI_DEBUG_INIT(ACPI_LV_THREADS),
+ ACPI_DEBUG_INIT(ACPI_LV_IO),
+ ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS),
+
+ ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE),
+ ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO),
+ ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES),
+ ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
+};
+
+/* --------------------------------------------------------------------------
+ FS Interface (/sys)
+ -------------------------------------------------------------------------- */
+static int param_get_debug_layer(char *buffer, struct kernel_param *kp)
+{
+	int result = 0;
+	int i;
+
+	result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
+
+	for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
+		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
+				  acpi_debug_layers[i].name,
+				  acpi_debug_layers[i].value,
+				  (acpi_dbg_layer & acpi_debug_layers[i].
+				   value) ? '*' : ' ');
+	}
+	result += sprintf(buffer + result, "%-25s\t0x%08X [%c]\n",
+			  "ACPI_ALL_DRIVERS", ACPI_ALL_DRIVERS,
+			  (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
+			  ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer &
+						    ACPI_ALL_DRIVERS) ==
+			  0 ? ' ' : '-');
+	result += sprintf(buffer + result,
+			  "--\ndebug_layer = 0x%08X ( * = enabled)\n",
+			  acpi_dbg_layer);
+
+	return result;
+}
+
+static int param_get_debug_level(char *buffer, struct kernel_param *kp)
+{
+ int result = 0;
+ int i;
+
+ result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
+
+ for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
+ result += sprintf(buffer+result, "%-25s\t0x%08lX [%c]\n",
+ acpi_debug_levels[i].name,
+ acpi_debug_levels[i].value,
+ (acpi_dbg_level & acpi_debug_levels[i].
+ value) ? '*' : ' ');
+ }
+ result += sprintf(buffer+result, "--\ndebug_level = 0x%08X (* = enabled)\n",
+ acpi_dbg_level);
+
+ return result;
+}
+
+module_param_call(debug_layer, param_set_uint, param_get_debug_layer, &acpi_dbg_layer, 0644);
+module_param_call(debug_level, param_set_uint, param_get_debug_level, &acpi_dbg_level, 0644);
+
+static char trace_method_name[6];
+module_param_string(trace_method_name, trace_method_name, 6, 0644);
+static unsigned int trace_debug_layer;
+module_param(trace_debug_layer, uint, 0644);
+static unsigned int trace_debug_level;
+module_param(trace_debug_level, uint, 0644);
+
+static int param_set_trace_state(const char *val, struct kernel_param *kp)
+{
+ int result = 0;
+
+ if (!strncmp(val, "enable", strlen("enable") - 1)) {
+ result = acpi_debug_trace(trace_method_name, trace_debug_level,
+ trace_debug_layer, 0);
+ if (result)
+ result = -EBUSY;
+ goto exit;
+ }
+
+ if (!strncmp(val, "disable", strlen("disable") - 1)) {
+ int name = 0;
+ result = acpi_debug_trace((char *)&name, trace_debug_level,
+ trace_debug_layer, 0);
+ if (result)
+ result = -EBUSY;
+ goto exit;
+ }
+
+ if (!strncmp(val, "1", 1)) {
+ result = acpi_debug_trace(trace_method_name, trace_debug_level,
+ trace_debug_layer, 1);
+ if (result)
+ result = -EBUSY;
+ goto exit;
+ }
+
+ result = -EINVAL;
+exit:
+ return result;
+}
+
+static int param_get_trace_state(char *buffer, struct kernel_param *kp)
+{
+ if (!acpi_gbl_trace_method_name)
+ return sprintf(buffer, "disable");
+ else {
+ if (acpi_gbl_trace_flags & 1)
+ return sprintf(buffer, "1");
+ else
+ return sprintf(buffer, "enable");
+ }
+ return 0;
+}
+
+module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
+ NULL, 0644);
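+
+/*
+ * With MODULE_PARAM_PREFIX set to "acpi." above, these parameters are
+ * normally available on the kernel command line and, at run time, under
+ * /sys/module/acpi/parameters/.  The values below are examples only:
+ *
+ *	acpi.debug_layer=0x2 acpi.debug_level=0x4        (boot parameters)
+ *	echo 0x2 > /sys/module/acpi/parameters/debug_layer
+ *	echo enable > /sys/module/acpi/parameters/trace_state
+ */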
+
+/* --------------------------------------------------------------------------
+ FS Interface (/proc)
+ -------------------------------------------------------------------------- */
+#ifdef CONFIG_ACPI_PROCFS
+#define ACPI_SYSTEM_FILE_DEBUG_LAYER "debug_layer"
+#define ACPI_SYSTEM_FILE_DEBUG_LEVEL "debug_level"
+
+static int
+acpi_system_read_debug(char *page,
+ char **start, off_t off, int count, int *eof, void *data)
+{
+ char *p = page;
+ int size = 0;
+ unsigned int i;
+
+ if (off != 0)
+ goto end;
+
+ p += sprintf(p, "%-25s\tHex SET\n", "Description");
+
+ switch ((unsigned long)data) {
+ case 0:
+ for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
+ p += sprintf(p, "%-25s\t0x%08lX [%c]\n",
+ acpi_debug_layers[i].name,
+ acpi_debug_layers[i].value,
+ (acpi_dbg_layer & acpi_debug_layers[i].
+ value) ? '*' : ' ');
+ }
+ p += sprintf(p, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
+ ACPI_ALL_DRIVERS,
+ (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
+ ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer &
+ ACPI_ALL_DRIVERS) ==
+ 0 ? ' ' : '-');
+ p += sprintf(p,
+ "--\ndebug_layer = 0x%08X (* = enabled, - = partial)\n",
+ acpi_dbg_layer);
+ break;
+ case 1:
+ for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
+ p += sprintf(p, "%-25s\t0x%08lX [%c]\n",
+ acpi_debug_levels[i].name,
+ acpi_debug_levels[i].value,
+ (acpi_dbg_level & acpi_debug_levels[i].
+ value) ? '*' : ' ');
+ }
+ p += sprintf(p, "--\ndebug_level = 0x%08X (* = enabled)\n",
+ acpi_dbg_level);
+ break;
+ default:
+ p += sprintf(p, "Invalid debug option\n");
+ break;
+ }
+
+ end:
+ size = (p - page);
+ if (size <= off + count)
+ *eof = 1;
+ *start = page + off;
+ size -= off;
+ if (size > count)
+ size = count;
+ if (size < 0)
+ size = 0;
+
+ return size;
+}
+
+static int
+acpi_system_write_debug(struct file *file,
+ const char __user * buffer,
+ unsigned long count, void *data)
+{
+ char debug_string[12] = { '\0' };
+
+
+ if (count > sizeof(debug_string) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(debug_string, buffer, count))
+ return -EFAULT;
+
+ debug_string[count] = '\0';
+
+ switch ((unsigned long)data) {
+ case 0:
+ acpi_dbg_layer = simple_strtoul(debug_string, NULL, 0);
+ break;
+ case 1:
+ acpi_dbg_level = simple_strtoul(debug_string, NULL, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return count;
+}
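+
+/*
+ * The /proc files created below accept a single numeric mask per write
+ * (hex with a 0x prefix, or decimal) and mirror the module parameters
+ * above.  The mask values are examples only:
+ *
+ *	echo 0x00000004 > /proc/acpi/debug_level
+ *	cat /proc/acpi/debug_layer
+ */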
+
+static int __init acpi_debug_init(void)
+{
+ struct proc_dir_entry *entry;
+ int error = 0;
+ char *name;
+
+
+ if (acpi_disabled)
+ return 0;
+
+ /* 'debug_layer' [R/W] */
+ name = ACPI_SYSTEM_FILE_DEBUG_LAYER;
+ entry =
+ create_proc_read_entry(name, S_IFREG | S_IRUGO | S_IWUSR,
+ acpi_root_dir, acpi_system_read_debug,
+ (void *)0);
+ if (entry)
+ entry->write_proc = acpi_system_write_debug;
+ else
+ goto Error;
+
+ /* 'debug_level' [R/W] */
+ name = ACPI_SYSTEM_FILE_DEBUG_LEVEL;
+ entry =
+ create_proc_read_entry(name, S_IFREG | S_IRUGO | S_IWUSR,
+ acpi_root_dir, acpi_system_read_debug,
+ (void *)1);
+ if (entry)
+ entry->write_proc = acpi_system_write_debug;
+ else
+ goto Error;
+
+ Done:
+ return error;
+
+ Error:
+ remove_proc_entry(ACPI_SYSTEM_FILE_DEBUG_LEVEL, acpi_root_dir);
+ remove_proc_entry(ACPI_SYSTEM_FILE_DEBUG_LAYER, acpi_root_dir);
+ error = -ENODEV;
+ goto Done;
+}
+
+subsys_initcall(acpi_debug_init);
+#endif
diff --git a/drivers/acpi/dispatcher/Makefile b/drivers/acpi/dispatcher/Makefile
new file mode 100644
index 0000000..eb7e602
--- /dev/null
+++ b/drivers/acpi/dispatcher/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for all Linux ACPI interpreter subdirectories
+#
+
+obj-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
+ dsmethod.o dsobject.o dsutils.o dswload.o dswstate.o \
+ dsinit.o
+
+EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/dispatcher/dsfield.c b/drivers/acpi/dispatcher/dsfield.c
new file mode 100644
index 0000000..f988a5e
--- /dev/null
+++ b/drivers/acpi/dispatcher/dsfield.c
@@ -0,0 +1,649 @@
+/******************************************************************************
+ *
+ * Module Name: dsfield - Dispatcher field routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/amlcode.h>
+#include <acpi/acdispat.h>
+#include <acpi/acinterp.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acparser.h>
+
+#define _COMPONENT ACPI_DISPATCHER
+ACPI_MODULE_NAME("dsfield")
+
+/* Local prototypes */
+static acpi_status
+acpi_ds_get_field_names(struct acpi_create_field_info *info,
+ struct acpi_walk_state *walk_state,
+ union acpi_parse_object *arg);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_create_buffer_field
+ *
+ * PARAMETERS: Op - Current parse op (create_xXField)
+ * walk_state - Current state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Execute the create_field operators:
+ * create_bit_field_op,
+ * create_byte_field_op,
+ * create_word_field_op,
+ * create_dword_field_op,
+ * create_qword_field_op,
+ * create_field_op (all of which define a field in a buffer)
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_create_buffer_field(union acpi_parse_object *op,
+ struct acpi_walk_state *walk_state)
+{
+ union acpi_parse_object *arg;
+ struct acpi_namespace_node *node;
+ acpi_status status;
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object *second_desc = NULL;
+ u32 flags;
+
+ ACPI_FUNCTION_TRACE(ds_create_buffer_field);
+
+ /*
+ * Get the name_string argument (name of the new buffer_field)
+ */
+ if (op->common.aml_opcode == AML_CREATE_FIELD_OP) {
+
+ /* For create_field, name is the 4th argument */
+
+ arg = acpi_ps_get_arg(op, 3);
+ } else {
+ /* For all other create_xXXField operators, name is the 3rd argument */
+
+ arg = acpi_ps_get_arg(op, 2);
+ }
+
+ if (!arg) {
+ return_ACPI_STATUS(AE_AML_NO_OPERAND);
+ }
+
+ if (walk_state->deferred_node) {
+ node = walk_state->deferred_node;
+ status = AE_OK;
+ } else {
+ /* Execute flag should always be set when this function is entered */
+
+ if (!(walk_state->parse_flags & ACPI_PARSE_EXECUTE)) {
+ return_ACPI_STATUS(AE_AML_INTERNAL);
+ }
+
+ /* Creating new namespace node, should not already exist */
+
+ flags = ACPI_NS_NO_UPSEARCH | ACPI_NS_DONT_OPEN_SCOPE |
+ ACPI_NS_ERROR_IF_FOUND;
+
+ /* Mark node temporary if we are executing a method */
+
+ if (walk_state->method_node) {
+ flags |= ACPI_NS_TEMPORARY;
+ }
+
+ /* Enter the name_string into the namespace */
+
+ status =
+ acpi_ns_lookup(walk_state->scope_info,
+ arg->common.value.string, ACPI_TYPE_ANY,
+ ACPI_IMODE_LOAD_PASS1, flags, walk_state,
+ &node);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /*
+ * We could put the returned object (Node) on the object stack for later,
+ * but for now, we will put it in the "op" object that the parser uses,
+ * so we can get it again at the end of this scope.
+ */
+ op->common.node = node;
+
+ /*
+ * If there is no object attached to the node, this node was just created
+ * and we need to create the field object. Otherwise, this was a lookup
+ * of an existing node and we don't want to create the field object again.
+ */
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (obj_desc) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * The Field definition is not fully parsed at this time.
+ * (We must save the address of the AML for the buffer and index operands)
+ */
+
+ /* Create the buffer field object */
+
+ obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER_FIELD);
+ if (!obj_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ /*
+ * Remember location in AML stream of the field unit opcode and operands --
+ * since the buffer and index operands must be evaluated.
+ */
+ second_desc = obj_desc->common.next_object;
+ second_desc->extra.aml_start = op->named.data;
+ second_desc->extra.aml_length = op->named.length;
+ obj_desc->buffer_field.node = node;
+
+ /* Attach constructed field descriptors to parent node */
+
+ status = acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_BUFFER_FIELD);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ cleanup:
+
+ /* Remove local reference to the object */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
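+
+/*
+ * For reference, ASL source that typically ends up in this path looks
+ * like the following (buffer name, offsets, and field names are made-up
+ * examples):
+ *
+ *	Name (BUF0, Buffer (16) {})
+ *	CreateDWordField (BUF0, 0x04, FLD0)	// fixed-width operator
+ *	CreateField (BUF0, 64, 24, FLD1)	// arbitrary bit offset/length
+ */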
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_get_field_names
+ *
+ * PARAMETERS: Info - create_field info structure
+ * walk_state - Current method state
+ * Arg - First parser arg for the field name list
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Process all named fields in a field declaration. Names are
+ * entered into the namespace.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ds_get_field_names(struct acpi_create_field_info *info,
+ struct acpi_walk_state *walk_state,
+ union acpi_parse_object *arg)
+{
+ acpi_status status;
+ acpi_integer position;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_get_field_names, info);
+
+ /* First field starts at bit zero */
+
+ info->field_bit_position = 0;
+
+ /* Process all elements in the field list (of parse nodes) */
+
+ while (arg) {
+ /*
+ * Three types of field elements are handled:
+ * 1) Offset - specifies a bit offset
+ * 2) access_as - changes the access mode
+ * 3) Name - Enters a new named field into the namespace
+ */
+ switch (arg->common.aml_opcode) {
+ case AML_INT_RESERVEDFIELD_OP:
+
+ position = (acpi_integer) info->field_bit_position
+ + (acpi_integer) arg->common.value.size;
+
+ if (position > ACPI_UINT32_MAX) {
+ ACPI_ERROR((AE_INFO,
+ "Bit offset within field too large (> 0xFFFFFFFF)"));
+ return_ACPI_STATUS(AE_SUPPORT);
+ }
+
+ info->field_bit_position = (u32) position;
+ break;
+
+ case AML_INT_ACCESSFIELD_OP:
+
+ /*
+ * Get a new access_type and access_attribute -- to be used for all
+ * field units that follow, until field end or another access_as
+ * keyword.
+ *
+ * In field_flags, preserve the flag bits other than the
+ * ACCESS_TYPE bits
+ */
+ info->field_flags = (u8)
+ ((info->
+ field_flags & ~(AML_FIELD_ACCESS_TYPE_MASK)) |
+ ((u8) ((u32) arg->common.value.integer >> 8)));
+
+ info->attribute = (u8) (arg->common.value.integer);
+ break;
+
+ case AML_INT_NAMEDFIELD_OP:
+
+ /* Lookup the name, it should already exist */
+
+ status = acpi_ns_lookup(walk_state->scope_info,
+ (char *)&arg->named.name,
+ info->field_type,
+ ACPI_IMODE_EXECUTE,
+ ACPI_NS_DONT_OPEN_SCOPE,
+ walk_state, &info->field_node);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
+ status);
+ return_ACPI_STATUS(status);
+ } else {
+ arg->common.node = info->field_node;
+ info->field_bit_length = arg->common.value.size;
+
+ /*
+ * If there is no object attached to the node, this node was
+ * just created and we need to create the field object.
+ * Otherwise, this was a lookup of an existing node and we
+ * don't want to create the field object again.
+ */
+ if (!acpi_ns_get_attached_object
+ (info->field_node)) {
+ status = acpi_ex_prep_field_value(info);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+ }
+
+ /* Keep track of bit position for the next field */
+
+ position = (acpi_integer) info->field_bit_position
+ + (acpi_integer) arg->common.value.size;
+
+ if (position > ACPI_UINT32_MAX) {
+ ACPI_ERROR((AE_INFO,
+ "Field [%4.4s] bit offset too large (> 0xFFFFFFFF)",
+ ACPI_CAST_PTR(char,
+ &info->field_node->
+ name)));
+ return_ACPI_STATUS(AE_SUPPORT);
+ }
+
+ info->field_bit_position += info->field_bit_length;
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO,
+ "Invalid opcode in field list: %X",
+ arg->common.aml_opcode));
+ return_ACPI_STATUS(AE_AML_BAD_OPCODE);
+ }
+
+ arg = arg->common.next;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
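+
+/*
+ * For reference, a field list containing all three element types handled
+ * above (the operation region and field names are made-up examples):
+ *
+ *	Field (GPIO, AnyAcc, NoLock, Preserve)
+ *	{
+ *		Offset (0x10),		// AML_INT_RESERVEDFIELD_OP
+ *		AccessAs (ByteAcc, 0),	// AML_INT_ACCESSFIELD_OP
+ *		CTRL, 8,		// AML_INT_NAMEDFIELD_OP
+ *		STAT, 8
+ *	}
+ */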
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_create_field
+ *
+ * PARAMETERS: Op - Op containing the Field definition and args
+ * region_node - Object for the containing Operation Region
+ * walk_state - Current method state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create a new field in the specified operation region
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_create_field(union acpi_parse_object *op,
+ struct acpi_namespace_node *region_node,
+ struct acpi_walk_state *walk_state)
+{
+ acpi_status status;
+ union acpi_parse_object *arg;
+ struct acpi_create_field_info info;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_create_field, op);
+
+ /* First arg is the name of the parent op_region (must already exist) */
+
+ arg = op->common.value.arg;
+ if (!region_node) {
+ status =
+ acpi_ns_lookup(walk_state->scope_info,
+ arg->common.value.name, ACPI_TYPE_REGION,
+ ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT,
+ walk_state, &region_node);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /* Second arg is the field flags */
+
+ arg = arg->common.next;
+ info.field_flags = (u8) arg->common.value.integer;
+ info.attribute = 0;
+
+ /* Each remaining arg is a Named Field */
+
+ info.field_type = ACPI_TYPE_LOCAL_REGION_FIELD;
+ info.region_node = region_node;
+
+ status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_init_field_objects
+ *
+ * PARAMETERS: Op - Op containing the Field definition and args
+ * walk_state - Current method state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: For each "Field Unit" name in the argument list that is
+ * part of the field declaration, enter the name into the
+ * namespace.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_init_field_objects(union acpi_parse_object *op,
+ struct acpi_walk_state *walk_state)
+{
+ acpi_status status;
+ union acpi_parse_object *arg = NULL;
+ struct acpi_namespace_node *node;
+ u8 type = 0;
+ u32 flags;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_init_field_objects, op);
+
+ /* Execute flag should always be set when this function is entered */
+
+ if (!(walk_state->parse_flags & ACPI_PARSE_EXECUTE)) {
+ if (walk_state->parse_flags & ACPI_PARSE_DEFERRED_OP) {
+
+ /* bank_field Op is deferred, just return OK */
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ return_ACPI_STATUS(AE_AML_INTERNAL);
+ }
+
+ /*
+ * Get the field_list argument for this opcode. This is the start of the
+ * list of field elements.
+ */
+ switch (walk_state->opcode) {
+ case AML_FIELD_OP:
+ arg = acpi_ps_get_arg(op, 2);
+ type = ACPI_TYPE_LOCAL_REGION_FIELD;
+ break;
+
+ case AML_BANK_FIELD_OP:
+ arg = acpi_ps_get_arg(op, 4);
+ type = ACPI_TYPE_LOCAL_BANK_FIELD;
+ break;
+
+ case AML_INDEX_FIELD_OP:
+ arg = acpi_ps_get_arg(op, 3);
+ type = ACPI_TYPE_LOCAL_INDEX_FIELD;
+ break;
+
+ default:
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Creating new namespace node(s), should not already exist */
+
+ flags = ACPI_NS_NO_UPSEARCH | ACPI_NS_DONT_OPEN_SCOPE |
+ ACPI_NS_ERROR_IF_FOUND;
+
+ /* Mark node(s) temporary if we are executing a method */
+
+ if (walk_state->method_node) {
+ flags |= ACPI_NS_TEMPORARY;
+ }
+
+ /*
+ * Walk the list of entries in the field_list
+ * Note: field_list can be of zero length. In this case, Arg will be NULL.
+ */
+ while (arg) {
+ /*
+ * Ignore OFFSET and ACCESSAS terms here; we are only interested in the
+ * field names in order to enter them into the namespace.
+ */
+ if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) {
+ status = acpi_ns_lookup(walk_state->scope_info,
+ (char *)&arg->named.name, type,
+ ACPI_IMODE_LOAD_PASS1, flags,
+ walk_state, &node);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
+ status);
+ if (status != AE_ALREADY_EXISTS) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Name already exists, just ignore this error */
+
+ status = AE_OK;
+ }
+
+ arg->common.node = node;
+ }
+
+ /* Get the next field element in the list */
+
+ arg = arg->common.next;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_create_bank_field
+ *
+ * PARAMETERS: Op - Op containing the Field definition and args
+ * region_node - Object for the containing Operation Region
+ * walk_state - Current method state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create a new bank field in the specified operation region
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_create_bank_field(union acpi_parse_object *op,
+ struct acpi_namespace_node *region_node,
+ struct acpi_walk_state *walk_state)
+{
+ acpi_status status;
+ union acpi_parse_object *arg;
+ struct acpi_create_field_info info;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_create_bank_field, op);
+
+ /* First arg is the name of the parent op_region (must already exist) */
+
+ arg = op->common.value.arg;
+ if (!region_node) {
+ status =
+ acpi_ns_lookup(walk_state->scope_info,
+ arg->common.value.name, ACPI_TYPE_REGION,
+ ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT,
+ walk_state, &region_node);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /* Second arg is the Bank Register (Field) (must already exist) */
+
+ arg = arg->common.next;
+ status =
+ acpi_ns_lookup(walk_state->scope_info, arg->common.value.string,
+ ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
+ ACPI_NS_SEARCH_PARENT, walk_state,
+ &info.register_node);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Third arg is the bank_value
+ * This arg is a term_arg, not a constant
+ * It will be evaluated later, by acpi_ds_eval_bank_field_operands
+ */
+ arg = arg->common.next;
+
+ /* Fourth arg is the field flags */
+
+ arg = arg->common.next;
+ info.field_flags = (u8) arg->common.value.integer;
+
+ /* Each remaining arg is a Named Field */
+
+ info.field_type = ACPI_TYPE_LOCAL_BANK_FIELD;
+ info.region_node = region_node;
+
+	/*
+	 * Use Info.data_register_node to store the bank_field Op. This is
+	 * safe because data_register_node is never used while creating a
+	 * bank field. We store aml_start and aml_length in the bank_field
+	 * Op for later evaluation by acpi_ex_prep_field_value(Info).
+	 *
+	 * TBD: Or, should we add a field in struct acpi_create_field_info, like "void *ParentOp"?
+	 */
+ info.data_register_node = (struct acpi_namespace_node *)op;
+
+ status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_create_index_field
+ *
+ * PARAMETERS: Op - Op containing the Field definition and args
+ * region_node - Object for the containing Operation Region
+ * walk_state - Current method state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create a new index field in the specified operation region
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_create_index_field(union acpi_parse_object *op,
+ struct acpi_namespace_node *region_node,
+ struct acpi_walk_state *walk_state)
+{
+ acpi_status status;
+ union acpi_parse_object *arg;
+ struct acpi_create_field_info info;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_create_index_field, op);
+
+ /* First arg is the name of the Index register (must already exist) */
+
+ arg = op->common.value.arg;
+ status =
+ acpi_ns_lookup(walk_state->scope_info, arg->common.value.string,
+ ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
+ ACPI_NS_SEARCH_PARENT, walk_state,
+ &info.register_node);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ return_ACPI_STATUS(status);
+ }
+
+ /* Second arg is the data register (must already exist) */
+
+ arg = arg->common.next;
+ status =
+ acpi_ns_lookup(walk_state->scope_info, arg->common.value.string,
+ ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
+ ACPI_NS_SEARCH_PARENT, walk_state,
+ &info.data_register_node);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ return_ACPI_STATUS(status);
+ }
+
+ /* Next arg is the field flags */
+
+ arg = arg->common.next;
+ info.field_flags = (u8) arg->common.value.integer;
+
+ /* Each remaining arg is a Named Field */
+
+ info.field_type = ACPI_TYPE_LOCAL_INDEX_FIELD;
+ info.region_node = region_node;
+
+ status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
+
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/dispatcher/dsinit.c b/drivers/acpi/dispatcher/dsinit.c
new file mode 100644
index 0000000..949f7c7
--- /dev/null
+++ b/drivers/acpi/dispatcher/dsinit.c
@@ -0,0 +1,204 @@
+/******************************************************************************
+ *
+ * Module Name: dsinit - Object initialization namespace walk
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acdispat.h>
+#include <acpi/acnamesp.h>
+#include <acpi/actables.h>
+
+#define _COMPONENT ACPI_DISPATCHER
+ACPI_MODULE_NAME("dsinit")
+
+/* Local prototypes */
+static acpi_status
+acpi_ds_init_one_object(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_init_one_object
+ *
+ * PARAMETERS: obj_handle - Node for the object
+ * Level - Current nesting level
+ * Context - Points to a init info struct
+ * return_value - Not used
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Callback from acpi_walk_namespace. Invoked for every object
+ * within the namespace.
+ *
+ * Currently, the only objects that require initialization are:
+ * 1) Methods
+ * 2) Operation Regions
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ds_init_one_object(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ struct acpi_init_walk_info *info =
+ (struct acpi_init_walk_info *)context;
+ struct acpi_namespace_node *node =
+ (struct acpi_namespace_node *)obj_handle;
+ acpi_object_type type;
+ acpi_status status;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /*
+ * We are only interested in NS nodes owned by the table that
+ * was just loaded
+ */
+ if (node->owner_id != info->owner_id) {
+ return (AE_OK);
+ }
+
+ info->object_count++;
+
+ /* And even then, we are only interested in a few object types */
+
+ type = acpi_ns_get_type(obj_handle);
+
+ switch (type) {
+ case ACPI_TYPE_REGION:
+
+ status = acpi_ds_initialize_region(obj_handle);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During Region initialization %p [%4.4s]",
+ obj_handle,
+ acpi_ut_get_node_name(obj_handle)));
+ }
+
+ info->op_region_count++;
+ break;
+
+ case ACPI_TYPE_METHOD:
+
+ info->method_count++;
+ break;
+
+ case ACPI_TYPE_DEVICE:
+
+ info->device_count++;
+ break;
+
+ default:
+ break;
+ }
+
+ /*
+ * We ignore errors from above, and always return OK, since
+ * we don't want to abort the walk on a single error.
+ */
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_initialize_objects
+ *
+ * PARAMETERS: table_desc - Descriptor for parent ACPI table
+ * start_node - Root of subtree to be initialized.
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Walk the namespace starting at "StartNode" and perform any
+ * necessary initialization on the objects found therein
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_initialize_objects(u32 table_index,
+ struct acpi_namespace_node * start_node)
+{
+ acpi_status status;
+ struct acpi_init_walk_info info;
+ struct acpi_table_header *table;
+ acpi_owner_id owner_id;
+
+ ACPI_FUNCTION_TRACE(ds_initialize_objects);
+
+ status = acpi_tb_get_owner_id(table_index, &owner_id);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "**** Starting initialization of namespace objects ****\n"));
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "Parsing all Control Methods:"));
+
+ info.method_count = 0;
+ info.op_region_count = 0;
+ info.object_count = 0;
+ info.device_count = 0;
+ info.table_index = table_index;
+ info.owner_id = owner_id;
+
+ /* Walk entire namespace from the supplied root */
+
+ status = acpi_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
+ acpi_ds_init_one_object, &info, NULL);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
+ }
+
+ status = acpi_get_table_by_index(table_index, &table);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+ "\nTable [%4.4s](id %4.4X) - %hd Objects with %hd Devices %hd Methods %hd Regions\n",
+ table->signature, owner_id, info.object_count,
+ info.device_count, info.method_count,
+ info.op_region_count));
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "%hd Methods, %hd Regions\n", info.method_count,
+ info.op_region_count));
+
+ return_ACPI_STATUS(AE_OK);
+}
diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/dispatcher/dsmethod.c
new file mode 100644
index 0000000..279a5a6
--- /dev/null
+++ b/drivers/acpi/dispatcher/dsmethod.c
@@ -0,0 +1,623 @@
+/******************************************************************************
+ *
+ * Module Name: dsmethod - Parser/Interpreter interface - control method parsing
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/amlcode.h>
+#include <acpi/acdispat.h>
+#include <acpi/acinterp.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acdisasm.h>
+
+#define _COMPONENT ACPI_DISPATCHER
+ACPI_MODULE_NAME("dsmethod")
+
+/* Local prototypes */
+static acpi_status
+acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_method_error
+ *
+ * PARAMETERS: Status - Execution status
+ * walk_state - Current state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Called on method error. Invoke the global exception handler if
+ * present, dump the method data if the disassembler is configured
+ *
+ * Note: Allows the exception handler to change the status code
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
+{
+ ACPI_FUNCTION_ENTRY();
+
+ /* Ignore AE_OK and control exception codes */
+
+ if (ACPI_SUCCESS(status) || (status & AE_CODE_CONTROL)) {
+ return (status);
+ }
+
+ /* Invoke the global exception handler */
+
+ if (acpi_gbl_exception_handler) {
+
+ /* Exit the interpreter, allow handler to execute methods */
+
+ acpi_ex_exit_interpreter();
+
+ /*
+ * Handler can map the exception code to anything it wants, including
+ * AE_OK, in which case the executing method will not be aborted.
+ */
+ status = acpi_gbl_exception_handler(status,
+ walk_state->method_node ?
+ walk_state->method_node->
+ name.integer : 0,
+ walk_state->opcode,
+ walk_state->aml_offset,
+ NULL);
+ acpi_ex_enter_interpreter();
+ }
+
+ acpi_ds_clear_implicit_return(walk_state);
+
+#ifdef ACPI_DISASSEMBLER
+ if (ACPI_FAILURE(status)) {
+
+ /* Display method locals/args if disassembler is present */
+
+ acpi_dm_dump_method_info(status, walk_state, walk_state->op);
+ }
+#endif
+
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_create_method_mutex
+ *
+ * PARAMETERS: obj_desc - The method object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create a mutex object for a serialized control method
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ds_create_method_mutex(union acpi_operand_object *method_desc)
+{
+ union acpi_operand_object *mutex_desc;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ds_create_method_mutex);
+
+ /* Create the new mutex object */
+
+ mutex_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX);
+ if (!mutex_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Create the actual OS Mutex */
+
+ status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ mutex_desc->mutex.sync_level = method_desc->method.sync_level;
+ method_desc->method.mutex = mutex_desc;
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_begin_method_execution
+ *
+ * PARAMETERS: method_node - Node of the method
+ * obj_desc - The method object
+ * walk_state - current state, NULL if not yet executing
+ * a method.
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Prepare a method for execution. Parses the method if necessary,
+ * increments the thread count, and waits at the method semaphore
+ * for clearance to execute.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
+ union acpi_operand_object *obj_desc,
+ struct acpi_walk_state *walk_state)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node);
+
+ if (!method_node) {
+ return_ACPI_STATUS(AE_NULL_ENTRY);
+ }
+
+ /* Prevent wraparound of thread count */
+
+ if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
+ ACPI_ERROR((AE_INFO,
+ "Method reached maximum reentrancy limit (255)"));
+ return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
+ }
+
+ /*
+ * If this method is serialized, we need to acquire the method mutex.
+ */
+ if (obj_desc->method.method_flags & AML_METHOD_SERIALIZED) {
+ /*
+ * Create a mutex for the method if it is defined to be Serialized
+ * and a mutex has not already been created. We defer the mutex creation
+ * until a method is actually executed, to minimize the object count
+ */
+ if (!obj_desc->method.mutex) {
+ status = acpi_ds_create_method_mutex(obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /*
+ * The current_sync_level (per-thread) must be less than or equal to
+ * the sync level of the method. This mechanism provides some
+ * deadlock prevention
+ *
+ * Top-level method invocation has no walk state at this point
+ */
+ if (walk_state &&
+ (walk_state->thread->current_sync_level >
+ obj_desc->method.mutex->mutex.sync_level)) {
+ ACPI_ERROR((AE_INFO,
+ "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%d)",
+ acpi_ut_get_node_name(method_node),
+ walk_state->thread->current_sync_level));
+
+ return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
+ }
+
+ /*
+ * Obtain the method mutex if necessary. Do not acquire mutex for a
+ * recursive call.
+ */
+ if (!walk_state ||
+ !obj_desc->method.mutex->mutex.thread_id ||
+ (walk_state->thread->thread_id !=
+ obj_desc->method.mutex->mutex.thread_id)) {
+ /*
+ * Acquire the method mutex. This releases the interpreter if we
+ * block (and reacquires it before it returns)
+ */
+ status =
+ acpi_ex_system_wait_mutex(obj_desc->method.mutex->
+ mutex.os_mutex,
+ ACPI_WAIT_FOREVER);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Update the mutex and walk info and save the original sync_level */
+
+ if (walk_state) {
+ obj_desc->method.mutex->mutex.
+ original_sync_level =
+ walk_state->thread->current_sync_level;
+
+ obj_desc->method.mutex->mutex.thread_id =
+ walk_state->thread->thread_id;
+ walk_state->thread->current_sync_level =
+ obj_desc->method.sync_level;
+ } else {
+ obj_desc->method.mutex->mutex.
+ original_sync_level =
+ obj_desc->method.mutex->mutex.sync_level;
+ }
+ }
+
+ /* Always increase acquisition depth */
+
+ obj_desc->method.mutex->mutex.acquisition_depth++;
+ }
+
+ /*
+ * Allocate an Owner ID for this method, only if this is the first thread
+ * to begin concurrent execution. We only need one owner_id, even if the
+ * method is invoked recursively.
+ */
+ if (!obj_desc->method.owner_id) {
+ status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+ }
+
+ /*
+ * Increment the method parse tree thread count since it has been
+ * reentered one more time (even if it is the same thread)
+ */
+ obj_desc->method.thread_count++;
+ return_ACPI_STATUS(status);
+
+ cleanup:
+ /* On error, must release the method mutex (if present) */
+
+ if (obj_desc->method.mutex) {
+ acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex);
+ }
+ return_ACPI_STATUS(status);
+}
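+
+/*
+ * The mutex and sync-level handling above only comes into play for
+ * methods declared Serialized in ASL, for example (method name and
+ * SyncLevel are arbitrary examples):
+ *
+ *	Method (MTH0, 0, Serialized, 4)	// SyncLevel 4
+ *	{
+ *		...
+ *	}
+ */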
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_call_control_method
+ *
+ * PARAMETERS: Thread - Info for this thread
+ * this_walk_state - Current walk state
+ * Op - Current Op to be walked
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Transfer execution to a called control method
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_call_control_method(struct acpi_thread_state *thread,
+ struct acpi_walk_state *this_walk_state,
+ union acpi_parse_object *op)
+{
+ acpi_status status;
+ struct acpi_namespace_node *method_node;
+ struct acpi_walk_state *next_walk_state = NULL;
+ union acpi_operand_object *obj_desc;
+ struct acpi_evaluate_info *info;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "Calling method %p, currentstate=%p\n",
+ this_walk_state->prev_op, this_walk_state));
+
+ /*
+ * Get the namespace entry for the control method we are about to call
+ */
+ method_node = this_walk_state->method_call_node;
+ if (!method_node) {
+ return_ACPI_STATUS(AE_NULL_ENTRY);
+ }
+
+ obj_desc = acpi_ns_get_attached_object(method_node);
+ if (!obj_desc) {
+ return_ACPI_STATUS(AE_NULL_OBJECT);
+ }
+
+ /* Init for new method, possibly wait on method mutex */
+
+ status = acpi_ds_begin_method_execution(method_node, obj_desc,
+ this_walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Begin method parse/execution. Create a new walk state */
+
+ next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id,
+ NULL, obj_desc, thread);
+ if (!next_walk_state) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ /*
+ * The resolved arguments were put on the previous walk state's operand
+ * stack. Operands on the previous walk state stack always
+ * start at index 0. Also, null terminate the list of arguments
+ */
+ this_walk_state->operands[this_walk_state->num_operands] = NULL;
+
+ /*
+ * Allocate and initialize the evaluation information block
+ * TBD: this is somewhat inefficient, should change interface to
+ * ds_init_aml_walk. For now, keeps this struct off the CPU stack
+ */
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ info->parameters = &this_walk_state->operands[0];
+
+ status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
+ obj_desc->method.aml_start,
+ obj_desc->method.aml_length, info,
+ ACPI_IMODE_EXECUTE);
+
+ ACPI_FREE(info);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ /*
+ * Delete the operands on the previous walkstate operand stack
+ * (they were copied to new objects)
+ */
+ for (i = 0; i < obj_desc->method.param_count; i++) {
+ acpi_ut_remove_reference(this_walk_state->operands[i]);
+ this_walk_state->operands[i] = NULL;
+ }
+
+ /* Clear the operand stack */
+
+ this_walk_state->num_operands = 0;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
+ method_node->name.ascii, next_walk_state));
+
+ /* Invoke an internal method if necessary */
+
+ if (obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
+ status = obj_desc->method.implementation(next_walk_state);
+ }
+
+ return_ACPI_STATUS(status);
+
+ cleanup:
+
+ /* On error, we must terminate the method properly */
+
+ acpi_ds_terminate_control_method(obj_desc, next_walk_state);
+ if (next_walk_state) {
+ acpi_ds_delete_walk_state(next_walk_state);
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_restart_control_method
+ *
+ * PARAMETERS: walk_state - State for preempted method (caller)
+ * return_desc - Return value from the called method
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Restart a method that was preempted by another (nested) method
+ * invocation. Handle the return value (if any) from the callee.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
+ union acpi_operand_object *return_desc)
+{
+ acpi_status status;
+ int same_as_implicit_return;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_restart_control_method, walk_state);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "****Restart [%4.4s] Op %p ReturnValueFromCallee %p\n",
+ acpi_ut_get_node_name(walk_state->method_node),
+ walk_state->method_call_op, return_desc));
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ " ReturnFromThisMethodUsed?=%X ResStack %p Walk %p\n",
+ walk_state->return_used,
+ walk_state->results, walk_state));
+
+ /* Did the called method return a value? */
+
+ if (return_desc) {
+
+ /* Is the implicit return object the same as the return desc? */
+
+ same_as_implicit_return =
+ (walk_state->implicit_return_obj == return_desc);
+
+ /* Are we actually going to use the return value? */
+
+ if (walk_state->return_used) {
+
+ /* Save the return value from the previous method */
+
+ status = acpi_ds_result_push(return_desc, walk_state);
+ if (ACPI_FAILURE(status)) {
+ acpi_ut_remove_reference(return_desc);
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Save as THIS method's return value in case it is returned
+ * immediately to yet another method
+ */
+ walk_state->return_desc = return_desc;
+ }
+
+ /*
+ * The following code is the optional support for the so-called
+ * "implicit return". Some AML code assumes that the last value of the
+ * method is "implicitly" returned to the caller, in the absence of an
+ * explicit return value.
+ *
+ * Just save the last result of the method as the return value.
+ *
+ * NOTE: this is optional because the ASL language does not actually
+ * support this behavior.
+ */
+ else if (!acpi_ds_do_implicit_return
+ (return_desc, walk_state, FALSE)
+ || same_as_implicit_return) {
+ /*
+ * Delete the return value if it will not be used by the
+ * calling method or remove one reference if the explicit return
+ * is the same as the implicit return value.
+ */
+ acpi_ut_remove_reference(return_desc);
+ }
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_terminate_control_method
+ *
+ * PARAMETERS: method_desc - Method object
+ * walk_state - State associated with the method
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Terminate a control method. Delete everything that the method
+ * created, delete all locals and arguments, and delete the parse
+ * tree if requested.
+ *
+ * MUTEX: Interpreter is locked
+ *
+ ******************************************************************************/
+
+void
+acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
+ struct acpi_walk_state *walk_state)
+{
+
+ ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);
+
+ /* method_desc is required, walk_state is optional */
+
+ if (!method_desc) {
+ return_VOID;
+ }
+
+ if (walk_state) {
+
+ /* Delete all arguments and locals */
+
+ acpi_ds_method_data_delete_all(walk_state);
+
+ /*
+ * If method is serialized, release the mutex and restore the
+ * current sync level for this thread
+ */
+ if (method_desc->method.mutex) {
+
+ /* Acquisition Depth handles recursive calls */
+
+ method_desc->method.mutex->mutex.acquisition_depth--;
+ if (!method_desc->method.mutex->mutex.acquisition_depth) {
+ walk_state->thread->current_sync_level =
+ method_desc->method.mutex->mutex.
+ original_sync_level;
+
+ acpi_os_release_mutex(method_desc->method.
+ mutex->mutex.os_mutex);
+ method_desc->method.mutex->mutex.thread_id = NULL;
+ }
+ }
+
+ /*
+ * Delete any namespace objects created anywhere within
+ * the namespace by the execution of this method
+ */
+ acpi_ns_delete_namespace_by_owner(method_desc->method.owner_id);
+ }
+
+ /* Decrement the thread count on the method */
+
+ if (method_desc->method.thread_count) {
+ method_desc->method.thread_count--;
+ } else {
+ ACPI_ERROR((AE_INFO, "Invalid zero thread count in method"));
+ }
+
+ /* Are there any other threads currently executing this method? */
+
+ if (method_desc->method.thread_count) {
+ /*
+ * Additional threads. Do not release the owner_id in this case,
+ * we immediately reuse it for the next thread executing this method
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "*** Completed execution of one thread, %d threads remaining\n",
+ method_desc->method.thread_count));
+ } else {
+ /* This is the only executing thread for this method */
+
+ /*
+ * Support to dynamically change a method from not_serialized to
+ * Serialized if it appears that the method is incorrectly written and
+ * does not support multiple thread execution. The best example of this
+ * is if such a method creates namespace objects and blocks. A second
+ * thread will fail with an AE_ALREADY_EXISTS exception
+ *
+ * This code is here because we must wait until the last thread exits
+ * before creating the synchronization semaphore.
+ */
+ if ((method_desc->method.method_flags & AML_METHOD_SERIALIZED)
+ && (!method_desc->method.mutex)) {
+ (void)acpi_ds_create_method_mutex(method_desc);
+ }
+
+ /* No more threads, we can free the owner_id */
+
+ acpi_ut_release_owner_id(&method_desc->method.owner_id);
+ }
+
+ return_VOID;
+}
diff --git a/drivers/acpi/dispatcher/dsmthdat.c b/drivers/acpi/dispatcher/dsmthdat.c
new file mode 100644
index 0000000..d03f81b
--- /dev/null
+++ b/drivers/acpi/dispatcher/dsmthdat.c
@@ -0,0 +1,717 @@
+/*******************************************************************************
+ *
+ * Module Name: dsmthdat - control method arguments and local variables
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acdispat.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acinterp.h>
+
+#define _COMPONENT ACPI_DISPATCHER
+ACPI_MODULE_NAME("dsmthdat")
+
+/* Local prototypes */
+static void
+acpi_ds_method_data_delete_value(u8 type,
+ u32 index, struct acpi_walk_state *walk_state);
+
+static acpi_status
+acpi_ds_method_data_set_value(u8 type,
+ u32 index,
+ union acpi_operand_object *object,
+ struct acpi_walk_state *walk_state);
+
+#ifdef ACPI_OBSOLETE_FUNCTIONS
+acpi_object_type
+acpi_ds_method_data_get_type(u16 opcode,
+ u32 index, struct acpi_walk_state *walk_state);
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_method_data_init
+ *
+ * PARAMETERS: walk_state - Current walk state object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initialize the data structures that hold the method's arguments
+ * and locals. The data struct is an array of namespace nodes for
+ * each - this allows ref_of and de_ref_of to work properly for these
+ * special data types.
+ *
+ * NOTES: walk_state fields are initialized to zero by the
+ * ACPI_ALLOCATE_ZEROED().
+ *
+ * A pseudo-Namespace Node is assigned to each argument and local
+ * so that ref_of() can return a pointer to the Node.
+ *
+ ******************************************************************************/
+
+void acpi_ds_method_data_init(struct acpi_walk_state *walk_state)
+{
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(ds_method_data_init);
+
+ /* Init the method arguments */
+
+ for (i = 0; i < ACPI_METHOD_NUM_ARGS; i++) {
+ ACPI_MOVE_32_TO_32(&walk_state->arguments[i].name,
+ NAMEOF_ARG_NTE);
+ walk_state->arguments[i].name.integer |= (i << 24);
+ walk_state->arguments[i].descriptor_type = ACPI_DESC_TYPE_NAMED;
+ walk_state->arguments[i].type = ACPI_TYPE_ANY;
+ walk_state->arguments[i].flags =
+ ANOBJ_END_OF_PEER_LIST | ANOBJ_METHOD_ARG;
+ }
+
+ /* Init the method locals */
+
+ for (i = 0; i < ACPI_METHOD_NUM_LOCALS; i++) {
+ ACPI_MOVE_32_TO_32(&walk_state->local_variables[i].name,
+ NAMEOF_LOCAL_NTE);
+
+ walk_state->local_variables[i].name.integer |= (i << 24);
+ walk_state->local_variables[i].descriptor_type =
+ ACPI_DESC_TYPE_NAMED;
+ walk_state->local_variables[i].type = ACPI_TYPE_ANY;
+ walk_state->local_variables[i].flags =
+ ANOBJ_END_OF_PEER_LIST | ANOBJ_METHOD_LOCAL;
+ }
+
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_method_data_delete_all
+ *
+ * PARAMETERS: walk_state - Current walk state object
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Delete method locals and arguments. Arguments are only
+ * deleted if this method was called from another method.
+ *
+ ******************************************************************************/
+
+void acpi_ds_method_data_delete_all(struct acpi_walk_state *walk_state)
+{
+ u32 index;
+
+ ACPI_FUNCTION_TRACE(ds_method_data_delete_all);
+
+ /* Detach the locals */
+
+ for (index = 0; index < ACPI_METHOD_NUM_LOCALS; index++) {
+ if (walk_state->local_variables[index].object) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Deleting Local%d=%p\n",
+ index,
+ walk_state->local_variables[index].
+ object));
+
+ /* Detach object (if present) and remove a reference */
+
+ acpi_ns_detach_object(&walk_state->
+ local_variables[index]);
+ }
+ }
+
+ /* Detach the arguments */
+
+ for (index = 0; index < ACPI_METHOD_NUM_ARGS; index++) {
+ if (walk_state->arguments[index].object) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Deleting Arg%d=%p\n",
+ index,
+ walk_state->arguments[index].object));
+
+ /* Detach object (if present) and remove a reference */
+
+ acpi_ns_detach_object(&walk_state->arguments[index]);
+ }
+ }
+
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_method_data_init_args
+ *
+ * PARAMETERS: *Params - Pointer to a parameter list for the method
+ * max_param_count - The arg count for this method
+ * walk_state - Current walk state object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initialize arguments for a method. The parameter list is a list
+ * of ACPI operand objects, either null terminated or whose length
+ * is defined by max_param_count.
+ *
+ ******************************************************************************/
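+/*
+ * Usage sketch (editorial addition; "method_args" and the two integer objects
+ * are hypothetical). A caller builds a NULL-terminated array of operand
+ * objects and passes it in:
+ *
+ *      union acpi_operand_object *method_args[3];
+ *
+ *      method_args[0] = arg0_integer_obj;      (becomes Arg0)
+ *      method_args[1] = arg1_integer_obj;      (becomes Arg1)
+ *      method_args[2] = NULL;                  (terminates the list)
+ *
+ *      status = acpi_ds_method_data_init_args(method_args,
+ *                                             ACPI_METHOD_NUM_ARGS, walk_state);
+ *
+ * The objects are installed by reference (no copy), preserving the ACPI
+ * call-by-reference semantics noted in acpi_ds_method_data_set_value.
+ */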
+
+acpi_status
+acpi_ds_method_data_init_args(union acpi_operand_object **params,
+ u32 max_param_count,
+ struct acpi_walk_state *walk_state)
+{
+ acpi_status status;
+ u32 index = 0;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_method_data_init_args, params);
+
+ if (!params) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "No param list passed to method\n"));
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Copy passed parameters into the new method stack frame */
+
+ while ((index < ACPI_METHOD_NUM_ARGS) &&
+ (index < max_param_count) && params[index]) {
+ /*
+ * A valid parameter.
+ * Store the argument in the method/walk descriptor.
+ * Do not copy the arg in order to implement call by reference
+ */
+ status = acpi_ds_method_data_set_value(ACPI_REFCLASS_ARG, index,
+ params[index],
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ index++;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%d args passed to method\n", index));
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_method_data_get_node
+ *
+ * PARAMETERS: Type - Either ACPI_REFCLASS_LOCAL or
+ * ACPI_REFCLASS_ARG
+ * Index - Which Local or Arg to get the Node for
+ * walk_state - Current walk state object
+ * Node - Where the node is returned.
+ *
+ * RETURN: Status and node
+ *
+ * DESCRIPTION: Get the Node associated with a local or arg.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_method_data_get_node(u8 type,
+ u32 index,
+ struct acpi_walk_state *walk_state,
+ struct acpi_namespace_node **node)
+{
+ ACPI_FUNCTION_TRACE(ds_method_data_get_node);
+
+ /*
+ * Method Locals and Arguments are supported
+ */
+ switch (type) {
+ case ACPI_REFCLASS_LOCAL:
+
+ if (index > ACPI_METHOD_MAX_LOCAL) {
+ ACPI_ERROR((AE_INFO,
+ "Local index %d is invalid (max %d)",
+ index, ACPI_METHOD_MAX_LOCAL));
+ return_ACPI_STATUS(AE_AML_INVALID_INDEX);
+ }
+
+ /* Return a pointer to the pseudo-node */
+
+ *node = &walk_state->local_variables[index];
+ break;
+
+ case ACPI_REFCLASS_ARG:
+
+ if (index > ACPI_METHOD_MAX_ARG) {
+ ACPI_ERROR((AE_INFO,
+ "Arg index %d is invalid (max %d)",
+ index, ACPI_METHOD_MAX_ARG));
+ return_ACPI_STATUS(AE_AML_INVALID_INDEX);
+ }
+
+ /* Return a pointer to the pseudo-node */
+
+ *node = &walk_state->arguments[index];
+ break;
+
+ default:
+ ACPI_ERROR((AE_INFO, "Type %d is invalid", type));
+ return_ACPI_STATUS(AE_TYPE);
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_method_data_set_value
+ *
+ * PARAMETERS: Type - Either ACPI_REFCLASS_LOCAL or
+ * ACPI_REFCLASS_ARG
+ * Index - Which Local or Arg to set
+ * Object - Object to be inserted into the stack entry
+ * walk_state - Current walk state object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Insert an object onto the method stack at entry Type:Index.
+ * Note: There is no "implicit conversion" for locals.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ds_method_data_set_value(u8 type,
+ u32 index,
+ union acpi_operand_object *object,
+ struct acpi_walk_state *walk_state)
+{
+ acpi_status status;
+ struct acpi_namespace_node *node;
+
+ ACPI_FUNCTION_TRACE(ds_method_data_set_value);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "NewObj %p Type %2.2X, Refs=%d [%s]\n", object,
+ type, object->common.reference_count,
+ acpi_ut_get_type_name(object->common.type)));
+
+ /* Get the namespace node for the arg/local */
+
+ status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Increment ref count so object can't be deleted while installed.
+ * NOTE: We do not copy the object in order to preserve the call by
+ * reference semantics of ACPI Control Method invocation.
+ * (See ACPI Specification 2.0_c)
+ */
+ acpi_ut_add_reference(object);
+
+ /* Install the object */
+
+ node->object = object;
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_method_data_get_value
+ *
+ * PARAMETERS: Type - Either ACPI_REFCLASS_LOCAL or
+ * ACPI_REFCLASS_ARG
+ * Index - Which local_var or argument to get
+ * walk_state - Current walk state object
+ * dest_desc - Where Arg or Local value is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Retrieve value of selected Arg or Local for this method
+ * Used only in acpi_ex_resolve_to_value().
+ *
+ ******************************************************************************/
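+/*
+ * Behavior sketch (editorial addition; BAD0 is a hypothetical method): reading
+ * a Local or Arg that was never stored to normally fails. For example, the ASL
+ *
+ *      Method (BAD0)
+ *      {
+ *          Return (Add (Local0, 1))            (Local0 never initialized)
+ *      }
+ *
+ * produces AE_AML_UNINITIALIZED_LOCAL from the code below. When
+ * acpi_gbl_enable_interpreter_slack is set, the Local/Arg is instead created
+ * on the fly as an Integer of value zero and execution continues.
+ */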
+
+acpi_status
+acpi_ds_method_data_get_value(u8 type,
+ u32 index,
+ struct acpi_walk_state *walk_state,
+ union acpi_operand_object **dest_desc)
+{
+ acpi_status status;
+ struct acpi_namespace_node *node;
+ union acpi_operand_object *object;
+
+ ACPI_FUNCTION_TRACE(ds_method_data_get_value);
+
+ /* Validate the object descriptor */
+
+ if (!dest_desc) {
+ ACPI_ERROR((AE_INFO, "Null object descriptor pointer"));
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Get the namespace node for the arg/local */
+
+ status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Get the object from the node */
+
+ object = node->object;
+
+ /* Examine the returned object; it must be valid. */
+
+ if (!object) {
+ /*
+ * Index points to uninitialized object.
+ * This means that either 1) The expected argument was
+ * not passed to the method, or 2) A local variable
+ * was referenced by the method (via the ASL)
+ * before it was initialized. Either case is an error.
+ */
+
+ /* If slack enabled, init the local_x/arg_x to an Integer of value zero */
+
+ if (acpi_gbl_enable_interpreter_slack) {
+ object =
+ acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!object) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ object->integer.value = 0;
+ node->object = object;
+ }
+
+ /* Otherwise, return the error */
+
+ else
+ switch (type) {
+ case ACPI_REFCLASS_ARG:
+
+ ACPI_ERROR((AE_INFO,
+ "Uninitialized Arg[%d] at node %p",
+ index, node));
+
+ return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG);
+
+ case ACPI_REFCLASS_LOCAL:
+
+ ACPI_ERROR((AE_INFO,
+ "Uninitialized Local[%d] at node %p",
+ index, node));
+
+ return_ACPI_STATUS(AE_AML_UNINITIALIZED_LOCAL);
+
+ default:
+
+ ACPI_ERROR((AE_INFO,
+ "Not a Arg/Local opcode: %X",
+ type));
+ return_ACPI_STATUS(AE_AML_INTERNAL);
+ }
+ }
+
+ /*
+ * The Index points to an initialized and valid object.
+ * Return an additional reference to the object
+ */
+ *dest_desc = object;
+ acpi_ut_add_reference(object);
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_method_data_delete_value
+ *
+ * PARAMETERS: Type - Either ACPI_REFCLASS_LOCAL or
+ * ACPI_REFCLASS_ARG
+ * Index - Which local_var or argument to delete
+ * walk_state - Current walk state object
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Delete the entry at Type:Index. Inserts
+ * a null into the stack slot after the object is deleted.
+ *
+ ******************************************************************************/
+
+static void
+acpi_ds_method_data_delete_value(u8 type,
+ u32 index, struct acpi_walk_state *walk_state)
+{
+ acpi_status status;
+ struct acpi_namespace_node *node;
+ union acpi_operand_object *object;
+
+ ACPI_FUNCTION_TRACE(ds_method_data_delete_value);
+
+ /* Get the namespace node for the arg/local */
+
+ status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
+
+ /* Get the associated object */
+
+ object = acpi_ns_get_attached_object(node);
+
+ /*
+ * Undefine the Arg or Local by setting its descriptor
+ * pointer to NULL. Locals/Args can contain both
+ * ACPI_OPERAND_OBJECTS and ACPI_NAMESPACE_NODEs
+ */
+ node->object = NULL;
+
+ if ((object) &&
+ (ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_OPERAND)) {
+ /*
+ * There is a valid object.
+ * Decrement the reference count by one to balance the
+ * increment when the object was stored.
+ */
+ acpi_ut_remove_reference(object);
+ }
+
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_store_object_to_local
+ *
+ * PARAMETERS: Type - Either ACPI_REFCLASS_LOCAL or
+ * ACPI_REFCLASS_ARG
+ * Index - Which Local or Arg to set
+ * obj_desc - Value to be stored
+ * walk_state - Current walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Store a value in an Arg or Local. The obj_desc is installed
+ * as the new value for the Arg or Local and the reference count
+ * for obj_desc is incremented.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_store_object_to_local(u8 type,
+ u32 index,
+ union acpi_operand_object *obj_desc,
+ struct acpi_walk_state *walk_state)
+{
+ acpi_status status;
+ struct acpi_namespace_node *node;
+ union acpi_operand_object *current_obj_desc;
+ union acpi_operand_object *new_obj_desc;
+
+ ACPI_FUNCTION_TRACE(ds_store_object_to_local);
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Type=%2.2X Index=%d Obj=%p\n",
+ type, index, obj_desc));
+
+ /* Parameter validation */
+
+ if (!obj_desc) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Get the namespace node for the arg/local */
+
+ status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ current_obj_desc = acpi_ns_get_attached_object(node);
+ if (current_obj_desc == obj_desc) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Obj=%p already installed!\n",
+ obj_desc));
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * If the reference count on the object is more than one, we must
+ * take a copy of the object before we store. A reference count
+ * of exactly 1 means that the object was just created during the
+ * evaluation of an expression, and we can safely use it since it
+ * is not used anywhere else.
+ */
+ new_obj_desc = obj_desc;
+ if (obj_desc->common.reference_count > 1) {
+ status =
+ acpi_ut_copy_iobject_to_iobject(obj_desc, &new_obj_desc,
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /*
+ * If there is an object already in this slot, we either
+ * have to delete it, or if this is an argument and there
+ * is an object reference stored there, we have to do
+ * an indirect store!
+ */
+ if (current_obj_desc) {
+ /*
+ * Check for an indirect store if an argument
+ * contains an object reference (stored as a Node).
+ * We don't allow this automatic dereferencing for
+ * locals, since a store to a local should overwrite
+ * anything there, including an object reference.
+ *
+ * If both Arg0 and Local0 contain ref_of (Local4):
+ *
+ * Store (1, Arg0) - Causes indirect store to local4
+ * Store (1, Local0) - Stores 1 in local0, overwriting
+ * the reference to local4
+ * Store (1, de_refof (Local0)) - Causes indirect store to local4
+ *
+ * Weird, but true.
+ */
+ if (type == ACPI_REFCLASS_ARG) {
+ /*
+ * If we have a valid reference object that came from ref_of(),
+ * do the indirect store
+ */
+ if ((ACPI_GET_DESCRIPTOR_TYPE(current_obj_desc) ==
+ ACPI_DESC_TYPE_OPERAND)
+ && (current_obj_desc->common.type ==
+ ACPI_TYPE_LOCAL_REFERENCE)
+ && (current_obj_desc->reference.class ==
+ ACPI_REFCLASS_REFOF)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Arg (%p) is an ObjRef(Node), storing in node %p\n",
+ new_obj_desc,
+ current_obj_desc));
+
+ /*
+ * Store this object to the Node (perform the indirect store)
+ * NOTE: No implicit conversion is performed, as per the ACPI
+ * specification rules on storing to Locals/Args.
+ */
+ status =
+ acpi_ex_store_object_to_node(new_obj_desc,
+ current_obj_desc->
+ reference.
+ object,
+ walk_state,
+ ACPI_NO_IMPLICIT_CONVERSION);
+
+ /* Remove local reference if we copied the object above */
+
+ if (new_obj_desc != obj_desc) {
+ acpi_ut_remove_reference(new_obj_desc);
+ }
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /* Delete the existing object before storing the new one */
+
+ acpi_ds_method_data_delete_value(type, index, walk_state);
+ }
+
+ /*
+ * Install the Obj descriptor (*new_obj_desc) into
+ * the descriptor for the Arg or Local.
+ * (increments the object reference count by one)
+ */
+ status =
+ acpi_ds_method_data_set_value(type, index, new_obj_desc,
+ walk_state);
+
+ /* Remove local reference if we copied the object above */
+
+ if (new_obj_desc != obj_desc) {
+ acpi_ut_remove_reference(new_obj_desc);
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+#ifdef ACPI_OBSOLETE_FUNCTIONS
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_method_data_get_type
+ *
+ * PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP
+ * Index - Which Local or Arg whose type to get
+ * walk_state - Current walk state object
+ *
+ * RETURN: Data type of current value of the selected Arg or Local
+ *
+ * DESCRIPTION: Get the type of the object stored in the Local or Arg
+ *
+ ******************************************************************************/
+
+acpi_object_type
+acpi_ds_method_data_get_type(u16 opcode,
+ u32 index, struct acpi_walk_state *walk_state)
+{
+ acpi_status status;
+ struct acpi_namespace_node *node;
+ union acpi_operand_object *object;
+
+ ACPI_FUNCTION_TRACE(ds_method_data_get_type);
+
+ /* Get the namespace node for the arg/local */
+
+ status = acpi_ds_method_data_get_node(opcode, index, walk_state, &node);
+ if (ACPI_FAILURE(status)) {
+ return_VALUE((ACPI_TYPE_NOT_FOUND));
+ }
+
+ /* Get the object */
+
+ object = acpi_ns_get_attached_object(node);
+ if (!object) {
+
+ /* Uninitialized local/arg, return TYPE_ANY */
+
+ return_VALUE(ACPI_TYPE_ANY);
+ }
+
+ /* Get the object type */
+
+ return_VALUE(ACPI_GET_OBJECT_TYPE(object));
+}
+#endif
diff --git a/drivers/acpi/dispatcher/dsobject.c b/drivers/acpi/dispatcher/dsobject.c
new file mode 100644
index 0000000..4f08e59
--- /dev/null
+++ b/drivers/acpi/dispatcher/dsobject.c
@@ -0,0 +1,812 @@
+/******************************************************************************
+ *
+ * Module Name: dsobject - Dispatcher object management routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acparser.h>
+#include <acpi/amlcode.h>
+#include <acpi/acdispat.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acinterp.h>
+
+#define _COMPONENT ACPI_DISPATCHER
+ACPI_MODULE_NAME("dsobject")
+
+/* Local prototypes */
+static acpi_status
+acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op,
+ union acpi_operand_object **obj_desc_ptr);
+
+#ifndef ACPI_NO_METHOD_EXECUTION
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_build_internal_object
+ *
+ * PARAMETERS: walk_state - Current walk state
+ * Op - Parser object to be translated
+ * obj_desc_ptr - Where the ACPI internal object is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Translate a parser Op object to the equivalent namespace object
+ * Simple objects are any objects other than a package object!
+ *
+ ******************************************************************************/
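+/*
+ * Illustrative ASL (editorial addition; the names are hypothetical): when the
+ * elements of a package such as
+ *
+ *      Name (PKG0, Package () { 0x1234, BUF0, \_SB.DEV0, MTH1 })
+ *
+ * are built, the Integer is created directly and a named Buffer (BUF0) is
+ * resolved to its value, while references to a Device or Method remain named
+ * references, as the package-element comments below describe.
+ */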
+
+static acpi_status
+acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op,
+ union acpi_operand_object **obj_desc_ptr)
+{
+ union acpi_operand_object *obj_desc;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ds_build_internal_object);
+
+ *obj_desc_ptr = NULL;
+ if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) {
+ /*
+ * This is a named object reference. If this name was
+ * previously looked up in the namespace, it was stored in this op.
+ * Otherwise, go ahead and look it up now
+ */
+ if (!op->common.node) {
+ status = acpi_ns_lookup(walk_state->scope_info,
+ op->common.value.string,
+ ACPI_TYPE_ANY,
+ ACPI_IMODE_EXECUTE,
+ ACPI_NS_SEARCH_PARENT |
+ ACPI_NS_DONT_OPEN_SCOPE, NULL,
+ ACPI_CAST_INDIRECT_PTR(struct
+ acpi_namespace_node,
+ &(op->
+ common.
+ node)));
+ if (ACPI_FAILURE(status)) {
+
+ /* Check if we are resolving a named reference within a package */
+
+ if ((status == AE_NOT_FOUND)
+ && (acpi_gbl_enable_interpreter_slack)
+ &&
+ ((op->common.parent->common.aml_opcode ==
+ AML_PACKAGE_OP)
+ || (op->common.parent->common.aml_opcode ==
+ AML_VAR_PACKAGE_OP))) {
+ /*
+ * We didn't find the target and we are populating elements
+ * of a package - ignore if slack enabled. Some ASL code
+ * contains dangling invalid references in packages and
+ * expects that no exception will be issued. Leave the
+ * element as a null element. It cannot be used, but it
+ * can be overwritten by subsequent ASL code - this is
+ * typically the case.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Ignoring unresolved reference in package [%4.4s]\n",
+ walk_state->
+ scope_info->scope.
+ node->name.ascii));
+
+ return_ACPI_STATUS(AE_OK);
+ } else {
+ ACPI_ERROR_NAMESPACE(op->common.value.
+ string, status);
+ }
+
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /* Special object resolution for elements of a package */
+
+ if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
+ (op->common.parent->common.aml_opcode ==
+ AML_VAR_PACKAGE_OP)) {
+ /*
+ * Attempt to resolve the node to a value before we insert it into
+ * the package. If this is a reference to a common data type,
+ * resolve it immediately. According to the ACPI spec, package
+ * elements can only be "data objects" or method references.
+ * Attempt to resolve to an Integer, Buffer, String or Package.
+ * If it cannot be resolved, return the named reference (for things like
+ * Devices, Methods, etc.). Buffer Fields and Fields will resolve to simple
+ * objects (int/buf/str/pkg).
+ *
+ * NOTE: References to things like Devices, Methods, Mutexes, etc.
+ * will remain as named references. This behavior is not described
+ * in the ACPI spec, but it appears to be an oversight.
+ */
+ obj_desc =
+ ACPI_CAST_PTR(union acpi_operand_object,
+ op->common.node);
+
+ status =
+ acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR
+ (struct
+ acpi_namespace_node,
+ &obj_desc),
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ switch (op->common.node->type) {
+ /*
+ * For these types, we need the actual node, not the subobject.
+ * However, the subobject did not get an extra reference count above.
+ *
+ * TBD: should ex_resolve_node_to_value be changed to fix this?
+ */
+ case ACPI_TYPE_DEVICE:
+ case ACPI_TYPE_THERMAL:
+
+ acpi_ut_add_reference(op->common.node->object);
+
+ /*lint -fallthrough */
+ /*
+ * For these types, we need the actual node, not the subobject.
+ * The subobject got an extra reference count in ex_resolve_node_to_value.
+ */
+ case ACPI_TYPE_MUTEX:
+ case ACPI_TYPE_METHOD:
+ case ACPI_TYPE_POWER:
+ case ACPI_TYPE_PROCESSOR:
+ case ACPI_TYPE_EVENT:
+ case ACPI_TYPE_REGION:
+
+ /* We will create a reference object for these types below */
+ break;
+
+ default:
+ /*
+ * All other types - the node was resolved to an actual
+ * object, we are done.
+ */
+ goto exit;
+ }
+ }
+ }
+
+ /* Create and init a new internal ACPI object */
+
+ obj_desc = acpi_ut_create_internal_object((acpi_ps_get_opcode_info
+ (op->common.aml_opcode))->
+ object_type);
+ if (!obj_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ status =
+ acpi_ds_init_object_from_op(walk_state, op, op->common.aml_opcode,
+ &obj_desc);
+ if (ACPI_FAILURE(status)) {
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+ }
+
+ exit:
+ *obj_desc_ptr = obj_desc;
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_build_internal_buffer_obj
+ *
+ * PARAMETERS: walk_state - Current walk state
+ * Op - Parser object to be translated
+ * buffer_length - Length of the buffer
+ * obj_desc_ptr - Where the ACPI internal object is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Translate a parser Op package object to the equivalent
+ * namespace object
+ *
+ ******************************************************************************/
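+/*
+ * Example (editorial addition; BUF1 is hypothetical): for the ASL
+ *
+ *      Name (BUF1, Buffer (8) { 0x11, 0x22 })
+ *
+ * buffer_length is 8 and the initializer byte_list holds 2 bytes, so the code
+ * below allocates an 8-byte zeroed buffer and copies the two bytes into it.
+ * A byte_list longer than the declared length would instead grow the buffer
+ * to the byte_list length.
+ */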
+
+acpi_status
+acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op,
+ u32 buffer_length,
+ union acpi_operand_object **obj_desc_ptr)
+{
+ union acpi_parse_object *arg;
+ union acpi_operand_object *obj_desc;
+ union acpi_parse_object *byte_list;
+ u32 byte_list_length = 0;
+
+ ACPI_FUNCTION_TRACE(ds_build_internal_buffer_obj);
+
+ /*
+ * If we are evaluating a Named buffer object "Name (xxxx, Buffer)",
+ * the buffer object already exists (from the NS node); otherwise it must
+ * be created.
+ */
+ obj_desc = *obj_desc_ptr;
+ if (!obj_desc) {
+
+ /* Create a new buffer object */
+
+ obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER);
+ *obj_desc_ptr = obj_desc;
+ if (!obj_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+ }
+
+ /*
+ * Second arg is the buffer data (optional) byte_list can be either
+ * individual bytes or a string initializer. In either case, a
+ * byte_list appears in the AML.
+ */
+ arg = op->common.value.arg; /* skip first arg */
+
+ byte_list = arg->named.next;
+ if (byte_list) {
+ if (byte_list->common.aml_opcode != AML_INT_BYTELIST_OP) {
+ ACPI_ERROR((AE_INFO,
+ "Expecting bytelist, got AML opcode %X in op %p",
+ byte_list->common.aml_opcode, byte_list));
+
+ acpi_ut_remove_reference(obj_desc);
+ return (AE_TYPE);
+ }
+
+ byte_list_length = (u32) byte_list->common.value.integer;
+ }
+
+ /*
+ * The buffer length (number of bytes) will be the larger of:
+ * 1) The specified buffer length and
+ * 2) The length of the initializer byte list
+ */
+ obj_desc->buffer.length = buffer_length;
+ if (byte_list_length > buffer_length) {
+ obj_desc->buffer.length = byte_list_length;
+ }
+
+ /* Allocate the buffer */
+
+ if (obj_desc->buffer.length == 0) {
+ obj_desc->buffer.pointer = NULL;
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Buffer defined with zero length in AML, creating\n"));
+ } else {
+ obj_desc->buffer.pointer =
+ ACPI_ALLOCATE_ZEROED(obj_desc->buffer.length);
+ if (!obj_desc->buffer.pointer) {
+ acpi_ut_delete_object_desc(obj_desc);
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Initialize buffer from the byte_list (if present) */
+
+ if (byte_list) {
+ ACPI_MEMCPY(obj_desc->buffer.pointer,
+ byte_list->named.data, byte_list_length);
+ }
+ }
+
+ obj_desc->buffer.flags |= AOPOBJ_DATA_VALID;
+ op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_build_internal_package_obj
+ *
+ * PARAMETERS: walk_state - Current walk state
+ * Op - Parser object to be translated
+ * element_count - Number of elements in the package - this is
+ * the num_elements argument to Package()
+ * obj_desc_ptr - Where the ACPI internal object is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Translate a parser Op package object to the equivalent
+ * namespace object
+ *
+ * NOTE: The number of elements in the package will always be the num_elements
+ * count, regardless of the number of elements in the package list. If
+ * num_elements is smaller, only that many package list elements are used.
+ * If num_elements is larger, the Package object is padded out with
+ * objects of type Uninitialized (as per ACPI spec.)
+ *
+ * Even though the ASL compilers do not allow num_elements to be smaller
+ * than the Package list length (for the fixed length package opcode), some
+ * BIOS code modifies the AML on the fly to adjust the num_elements, and
+ * this code compensates for that. This also provides compatibility with
+ * other AML interpreters.
+ *
+ ******************************************************************************/
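+/*
+ * Example (editorial addition) of the padding/truncation described in the
+ * NOTE above:
+ *
+ *      Name (PKG1, Package (6) { 1, 2, 3 })    (elements 3-5 are Uninitialized)
+ *
+ * Conversely, AML that carries NumElements = 2 with four initializer elements
+ * (something iASL rejects but patched BIOS AML can contain) uses only the
+ * first two initializers, and the warning below is logged.
+ */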
+
+acpi_status
+acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op,
+ u32 element_count,
+ union acpi_operand_object **obj_desc_ptr)
+{
+ union acpi_parse_object *arg;
+ union acpi_parse_object *parent;
+ union acpi_operand_object *obj_desc = NULL;
+ acpi_status status = AE_OK;
+ unsigned i;
+ u16 index;
+ u16 reference_count;
+
+ ACPI_FUNCTION_TRACE(ds_build_internal_package_obj);
+
+ /* Find the parent of a possibly nested package */
+
+ parent = op->common.parent;
+ while ((parent->common.aml_opcode == AML_PACKAGE_OP) ||
+ (parent->common.aml_opcode == AML_VAR_PACKAGE_OP)) {
+ parent = parent->common.parent;
+ }
+
+ /*
+ * If we are evaluating a Named package object "Name (xxxx, Package)",
+ * the package object already exists, otherwise it must be created.
+ */
+ obj_desc = *obj_desc_ptr;
+ if (!obj_desc) {
+ obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_PACKAGE);
+ *obj_desc_ptr = obj_desc;
+ if (!obj_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ obj_desc->package.node = parent->common.node;
+ }
+
+ /*
+ * Allocate the element array (array of pointers to the individual
+ * objects) based on the num_elements parameter. Add an extra pointer slot
+ * so that the list is always null terminated.
+ */
+ obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
+ element_count +
+ 1) * sizeof(void *));
+
+ if (!obj_desc->package.elements) {
+ acpi_ut_delete_object_desc(obj_desc);
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ obj_desc->package.count = element_count;
+
+ /*
+ * Initialize the elements of the package, up to the num_elements count.
+ * Package is automatically padded with uninitialized (NULL) elements
+ * if num_elements is greater than the package list length. Likewise,
+ * the initializer list is truncated if num_elements is less than the list length.
+ */
+ arg = op->common.value.arg;
+ arg = arg->common.next;
+ for (i = 0; arg && (i < element_count); i++) {
+ if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
+ if (arg->common.node->type == ACPI_TYPE_METHOD) {
+ /*
+ * A method reference "looks" to the parser to be a method
+ * invocation, so we special case it here
+ */
+ arg->common.aml_opcode = AML_INT_NAMEPATH_OP;
+ status =
+ acpi_ds_build_internal_object(walk_state,
+ arg,
+ &obj_desc->
+ package.
+ elements[i]);
+ } else {
+ /* This package element is already built, just get it */
+
+ obj_desc->package.elements[i] =
+ ACPI_CAST_PTR(union acpi_operand_object,
+ arg->common.node);
+ }
+ } else {
+ status = acpi_ds_build_internal_object(walk_state, arg,
+ &obj_desc->
+ package.
+ elements[i]);
+ }
+
+ if (*obj_desc_ptr) {
+
+ /* Existing package, get existing reference count */
+
+ reference_count =
+ (*obj_desc_ptr)->common.reference_count;
+ if (reference_count > 1) {
+
+ /* Make new element ref count match original ref count */
+
+ for (index = 0; index < (reference_count - 1);
+ index++) {
+ acpi_ut_add_reference((obj_desc->
+ package.
+ elements[i]));
+ }
+ }
+ }
+
+ arg = arg->common.next;
+ }
+
+ /* Check for match between num_elements and actual length of package_list */
+
+ if (arg) {
+ /*
+ * num_elements was exhausted, but there are remaining elements in the
+ * package_list.
+ *
+ * Note: technically, this is an error, from ACPI spec: "It is an error
+ * for NumElements to be less than the number of elements in the
+ * PackageList". However, for now, we just print an error message and
+ * no exception is returned.
+ */
+ while (arg) {
+
+ /* Find out how many elements there really are */
+
+ i++;
+ arg = arg->common.next;
+ }
+
+ ACPI_WARNING((AE_INFO,
+ "Package List length (%X) larger than NumElements count (%X), truncated\n",
+ i, element_count));
+ } else if (i < element_count) {
+ /*
+ * Arg list (elements) was exhausted, but we did not reach num_elements count.
+ * Note: this is not an error, the package is padded out with NULLs.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Package List length (%X) smaller than NumElements count (%X), padded with null elements\n",
+ i, element_count));
+ }
+
+ obj_desc->package.flags |= AOPOBJ_DATA_VALID;
+ op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_create_node
+ *
+ * PARAMETERS: walk_state - Current walk state
+ * Node - NS Node to be initialized
+ * Op - Parser object to be translated
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create the object to be associated with a namespace node
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_create_node(struct acpi_walk_state *walk_state,
+ struct acpi_namespace_node *node,
+ union acpi_parse_object *op)
+{
+ acpi_status status;
+ union acpi_operand_object *obj_desc;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_create_node, op);
+
+ /*
+ * Because of the execution pass through the non-control-method
+ * parts of the table, we can arrive here twice. Only init
+ * the named object node the first time through
+ */
+ if (acpi_ns_get_attached_object(node)) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ if (!op->common.value.arg) {
+
+ /* No arguments, there is nothing to do */
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Build an internal object for the argument(s) */
+
+ status = acpi_ds_build_internal_object(walk_state, op->common.value.arg,
+ &obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Re-type the object according to its argument */
+
+ node->type = ACPI_GET_OBJECT_TYPE(obj_desc);
+
+ /* Attach obj to node */
+
+ status = acpi_ns_attach_object(node, obj_desc, node->type);
+
+ /* Remove local reference to the object */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
+
+#endif /* ACPI_NO_METHOD_EXECUTION */
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_init_object_from_op
+ *
+ * PARAMETERS: walk_state - Current walk state
+ * Op - Parser op used to init the internal object
+ * Opcode - AML opcode associated with the object
+ * ret_obj_desc - Namespace object to be initialized
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initialize a namespace object from a parser Op and its
+ * associated arguments. The namespace object is a more compact
+ * representation of the Op and its arguments.
+ *
+ ******************************************************************************/
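+/*
+ * Constant-resolution example (editorial addition): the one-byte AML opcodes
+ * Zero, One, Ones and Revision become Integer objects right here. Ones is set
+ * to ACPI_INTEGER_MAX (0xFFFFFFFFFFFFFFFF with 64-bit integers) and the
+ * truncate-for-32-bit-table call below reduces it to 0xFFFFFFFF when the
+ * table uses 32-bit integers (DSDT revision < 2). The AOPOBJ_AML_CONSTANT
+ * flag makes later stores to these objects behave as no-ops.
+ */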
+
+acpi_status
+acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op,
+ u16 opcode,
+ union acpi_operand_object **ret_obj_desc)
+{
+ const struct acpi_opcode_info *op_info;
+ union acpi_operand_object *obj_desc;
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(ds_init_object_from_op);
+
+ obj_desc = *ret_obj_desc;
+ op_info = acpi_ps_get_opcode_info(opcode);
+ if (op_info->class == AML_CLASS_UNKNOWN) {
+
+ /* Unknown opcode */
+
+ return_ACPI_STATUS(AE_TYPE);
+ }
+
+ /* Perform per-object initialization */
+
+ switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+ case ACPI_TYPE_BUFFER:
+
+ /*
+ * Defer evaluation of Buffer term_arg operand
+ */
+ obj_desc->buffer.node =
+ ACPI_CAST_PTR(struct acpi_namespace_node,
+ walk_state->operands[0]);
+ obj_desc->buffer.aml_start = op->named.data;
+ obj_desc->buffer.aml_length = op->named.length;
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+
+ /*
+ * Defer evaluation of Package term_arg operand
+ */
+ obj_desc->package.node =
+ ACPI_CAST_PTR(struct acpi_namespace_node,
+ walk_state->operands[0]);
+ obj_desc->package.aml_start = op->named.data;
+ obj_desc->package.aml_length = op->named.length;
+ break;
+
+ case ACPI_TYPE_INTEGER:
+
+ switch (op_info->type) {
+ case AML_TYPE_CONSTANT:
+ /*
+ * Resolve AML Constants here - AND ONLY HERE!
+ * All constants are integers.
+ * We mark the integer with a flag that indicates that it started
+ * life as a constant -- so that stores to constants will perform
+ * as expected (noop). zero_op is used as a placeholder for optional
+ * target operands.
+ */
+ obj_desc->common.flags = AOPOBJ_AML_CONSTANT;
+
+ switch (opcode) {
+ case AML_ZERO_OP:
+
+ obj_desc->integer.value = 0;
+ break;
+
+ case AML_ONE_OP:
+
+ obj_desc->integer.value = 1;
+ break;
+
+ case AML_ONES_OP:
+
+ obj_desc->integer.value = ACPI_INTEGER_MAX;
+
+ /* Truncate value if we are executing from a 32-bit ACPI table */
+
+#ifndef ACPI_NO_METHOD_EXECUTION
+ acpi_ex_truncate_for32bit_table(obj_desc);
+#endif
+ break;
+
+ case AML_REVISION_OP:
+
+ obj_desc->integer.value = ACPI_CA_VERSION;
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO,
+ "Unknown constant opcode %X",
+ opcode));
+ status = AE_AML_OPERAND_TYPE;
+ break;
+ }
+ break;
+
+ case AML_TYPE_LITERAL:
+
+ obj_desc->integer.value = op->common.value.integer;
+#ifndef ACPI_NO_METHOD_EXECUTION
+ acpi_ex_truncate_for32bit_table(obj_desc);
+#endif
+ break;
+
+ default:
+ ACPI_ERROR((AE_INFO, "Unknown Integer type %X",
+ op_info->type));
+ status = AE_AML_OPERAND_TYPE;
+ break;
+ }
+ break;
+
+ case ACPI_TYPE_STRING:
+
+ obj_desc->string.pointer = op->common.value.string;
+ obj_desc->string.length =
+ (u32) ACPI_STRLEN(op->common.value.string);
+
+ /*
+ * The string is contained in the ACPI table, don't ever try
+ * to delete it
+ */
+ obj_desc->common.flags |= AOPOBJ_STATIC_POINTER;
+ break;
+
+ case ACPI_TYPE_METHOD:
+ break;
+
+ case ACPI_TYPE_LOCAL_REFERENCE:
+
+ switch (op_info->type) {
+ case AML_TYPE_LOCAL_VARIABLE:
+
+ /* Local ID (0-7) is (AML opcode - base AML_LOCAL_OP) */
+
+ obj_desc->reference.value = opcode - AML_LOCAL_OP;
+ obj_desc->reference.class = ACPI_REFCLASS_LOCAL;
+
+#ifndef ACPI_NO_METHOD_EXECUTION
+ status =
+ acpi_ds_method_data_get_node(ACPI_REFCLASS_LOCAL,
+ obj_desc->reference.
+ value, walk_state,
+ ACPI_CAST_INDIRECT_PTR
+ (struct
+ acpi_namespace_node,
+ &obj_desc->reference.
+ object));
+#endif
+ break;
+
+ case AML_TYPE_METHOD_ARGUMENT:
+
+ /* Arg ID (0-6) is (AML opcode - base AML_ARG_OP) */
+
+ obj_desc->reference.value = opcode - AML_ARG_OP;
+ obj_desc->reference.class = ACPI_REFCLASS_ARG;
+
+#ifndef ACPI_NO_METHOD_EXECUTION
+ status = acpi_ds_method_data_get_node(ACPI_REFCLASS_ARG,
+ obj_desc->
+ reference.value,
+ walk_state,
+ ACPI_CAST_INDIRECT_PTR
+ (struct
+ acpi_namespace_node,
+ &obj_desc->
+ reference.
+ object));
+#endif
+ break;
+
+ default: /* Object name or Debug object */
+
+ switch (op->common.aml_opcode) {
+ case AML_INT_NAMEPATH_OP:
+
+ /* Node was saved in Op */
+
+ obj_desc->reference.node = op->common.node;
+ obj_desc->reference.object =
+ op->common.node->object;
+ obj_desc->reference.class = ACPI_REFCLASS_NAME;
+ break;
+
+ case AML_DEBUG_OP:
+
+ obj_desc->reference.class = ACPI_REFCLASS_DEBUG;
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO,
+ "Unimplemented reference type for AML opcode: %4.4X",
+ opcode));
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+ break;
+ }
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO, "Unimplemented data type: %X",
+ ACPI_GET_OBJECT_TYPE(obj_desc)));
+
+ status = AE_AML_OPERAND_TYPE;
+ break;
+ }
+
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/dispatcher/dsopcode.c b/drivers/acpi/dispatcher/dsopcode.c
new file mode 100644
index 0000000..69fae59
--- /dev/null
+++ b/drivers/acpi/dispatcher/dsopcode.c
@@ -0,0 +1,1429 @@
+/******************************************************************************
+ *
+ * Module Name: dsopcode - Dispatcher Op Region support and handling of
+ * "control" opcodes
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acparser.h>
+#include <acpi/amlcode.h>
+#include <acpi/acdispat.h>
+#include <acpi/acinterp.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acevents.h>
+#include <acpi/actables.h>
+
+#define _COMPONENT ACPI_DISPATCHER
+ACPI_MODULE_NAME("dsopcode")
+
+/* Local prototypes */
+static acpi_status
+acpi_ds_execute_arguments(struct acpi_namespace_node *node,
+ struct acpi_namespace_node *scope_node,
+ u32 aml_length, u8 * aml_start);
+
+static acpi_status
+acpi_ds_init_buffer_field(u16 aml_opcode,
+ union acpi_operand_object *obj_desc,
+ union acpi_operand_object *buffer_desc,
+ union acpi_operand_object *offset_desc,
+ union acpi_operand_object *length_desc,
+ union acpi_operand_object *result_desc);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_execute_arguments
+ *
+ * PARAMETERS: Node - Object NS node
+ * scope_node - Parent NS node
+ * aml_length - Length of executable AML
+ * aml_start - Pointer to the AML
+ *
+ * RETURN: Status.
+ *
+ * DESCRIPTION: Late (deferred) execution of region or field arguments
+ *
+ ******************************************************************************/
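+/*
+ * What "deferred" means here (editorial sketch; ERAM and BAS0 are hypothetical
+ * names): operands such as a region address may be full TermArgs, e.g.
+ *
+ *      OperationRegion (ERAM, SystemMemory, Add (BAS0, 0x100), 0x80)
+ *
+ * The Add() is not evaluated when the table is loaded; the raw AML for the
+ * arguments is saved and later handed to this routine, which first parses it
+ * (ACPI_IMODE_LOAD_PASS1) and then executes it (ACPI_IMODE_EXECUTE), creating
+ * a fresh walk state for each pass.
+ */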
+
+static acpi_status
+acpi_ds_execute_arguments(struct acpi_namespace_node *node,
+ struct acpi_namespace_node *scope_node,
+ u32 aml_length, u8 * aml_start)
+{
+ acpi_status status;
+ union acpi_parse_object *op;
+ struct acpi_walk_state *walk_state;
+
+ ACPI_FUNCTION_TRACE(ds_execute_arguments);
+
+ /*
+ * Allocate a new parser op to be the root of the parsed tree
+ */
+ op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
+ if (!op) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Save the Node for use in acpi_ps_parse_aml */
+
+ op->common.node = scope_node;
+
+ /* Create and initialize a new parser state */
+
+ walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL);
+ if (!walk_state) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
+ aml_length, NULL, ACPI_IMODE_LOAD_PASS1);
+ if (ACPI_FAILURE(status)) {
+ acpi_ds_delete_walk_state(walk_state);
+ goto cleanup;
+ }
+
+ /* Mark this parse as a deferred opcode */
+
+ walk_state->parse_flags = ACPI_PARSE_DEFERRED_OP;
+ walk_state->deferred_node = node;
+
+ /* Pass1: Parse the entire declaration */
+
+ status = acpi_ps_parse_aml(walk_state);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ /* Get and init the Op created above */
+
+ op->common.node = node;
+ acpi_ps_delete_parse_tree(op);
+
+ /* Evaluate the deferred arguments */
+
+ op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
+ if (!op) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ op->common.node = scope_node;
+
+ /* Create and initialize a new parser state */
+
+ walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL);
+ if (!walk_state) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ /* Execute the opcode and arguments */
+
+ status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
+ aml_length, NULL, ACPI_IMODE_EXECUTE);
+ if (ACPI_FAILURE(status)) {
+ acpi_ds_delete_walk_state(walk_state);
+ goto cleanup;
+ }
+
+ /* Mark this execution as a deferred opcode */
+
+ walk_state->deferred_node = node;
+ status = acpi_ps_parse_aml(walk_state);
+
+ cleanup:
+ acpi_ps_delete_parse_tree(op);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_get_buffer_field_arguments
+ *
+ * PARAMETERS: obj_desc - A valid buffer_field object
+ *
+ * RETURN: Status.
+ *
+ * DESCRIPTION: Get buffer_field Buffer and Index. This implements the late
+ * evaluation of these field attributes.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc)
+{
+ union acpi_operand_object *extra_desc;
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_field_arguments, obj_desc);
+
+ if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Get the AML pointer (method object) and buffer_field node */
+
+ extra_desc = acpi_ns_get_secondary_object(obj_desc);
+ node = obj_desc->buffer_field.node;
+
+ ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+ (ACPI_TYPE_BUFFER_FIELD, node, NULL));
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BufferField Arg Init\n",
+ acpi_ut_get_node_name(node)));
+
+ /* Execute the AML code for the term_arg arguments */
+
+ status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node),
+ extra_desc->extra.aml_length,
+ extra_desc->extra.aml_start);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_get_bank_field_arguments
+ *
+ * PARAMETERS: obj_desc - A valid bank_field object
+ *
+ * RETURN: Status.
+ *
+ * DESCRIPTION: Get bank_field bank_value. This implements the late
+ * evaluation of these field attributes.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc)
+{
+ union acpi_operand_object *extra_desc;
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_get_bank_field_arguments, obj_desc);
+
+ if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Get the AML pointer (method object) and bank_field node */
+
+ extra_desc = acpi_ns_get_secondary_object(obj_desc);
+ node = obj_desc->bank_field.node;
+
+ ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+ (ACPI_TYPE_LOCAL_BANK_FIELD, node, NULL));
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BankField Arg Init\n",
+ acpi_ut_get_node_name(node)));
+
+ /* Execute the AML code for the term_arg arguments */
+
+ status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node),
+ extra_desc->extra.aml_length,
+ extra_desc->extra.aml_start);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_get_buffer_arguments
+ *
+ * PARAMETERS: obj_desc - A valid Buffer object
+ *
+ * RETURN: Status.
+ *
+ * DESCRIPTION: Get Buffer length and initializer byte list. This implements
+ * the late evaluation of these attributes.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc)
+{
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_arguments, obj_desc);
+
+ if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Get the Buffer node */
+
+ node = obj_desc->buffer.node;
+ if (!node) {
+ ACPI_ERROR((AE_INFO,
+ "No pointer back to NS node in buffer obj %p",
+ obj_desc));
+ return_ACPI_STATUS(AE_AML_INTERNAL);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Buffer Arg Init\n"));
+
+ /* Execute the AML code for the term_arg arguments */
+
+ status = acpi_ds_execute_arguments(node, node,
+ obj_desc->buffer.aml_length,
+ obj_desc->buffer.aml_start);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_get_package_arguments
+ *
+ * PARAMETERS: obj_desc - A valid Package object
+ *
+ * RETURN: Status.
+ *
+ * DESCRIPTION: Get Package length and initializer byte list. This implements
+ * the late evaluation of these attributes.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc)
+{
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_get_package_arguments, obj_desc);
+
+ if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Get the Package node */
+
+ node = obj_desc->package.node;
+ if (!node) {
+ ACPI_ERROR((AE_INFO,
+ "No pointer back to NS node in package %p",
+ obj_desc));
+ return_ACPI_STATUS(AE_AML_INTERNAL);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Package Arg Init\n"));
+
+ /* Execute the AML code for the term_arg arguments */
+
+ status = acpi_ds_execute_arguments(node, node,
+ obj_desc->package.aml_length,
+ obj_desc->package.aml_start);
+ return_ACPI_STATUS(status);
+}
+
+/*****************************************************************************
+ *
+ * FUNCTION: acpi_ds_get_region_arguments
+ *
+ * PARAMETERS: obj_desc - A valid region object
+ *
+ * RETURN: Status.
+ *
+ * DESCRIPTION: Get region address and length. This implements the late
+ * evaluation of these region attributes.
+ *
+ ****************************************************************************/
+
+acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
+{
+ struct acpi_namespace_node *node;
+ acpi_status status;
+ union acpi_operand_object *extra_desc;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_get_region_arguments, obj_desc);
+
+ if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ extra_desc = acpi_ns_get_secondary_object(obj_desc);
+ if (!extra_desc) {
+ return_ACPI_STATUS(AE_NOT_EXIST);
+ }
+
+ /* Get the Region node */
+
+ node = obj_desc->region.node;
+
+ ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+ (ACPI_TYPE_REGION, node, NULL));
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] OpRegion Arg Init at AML %p\n",
+ acpi_ut_get_node_name(node),
+ extra_desc->extra.aml_start));
+
+ /* Execute the argument AML */
+
+ status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node),
+ extra_desc->extra.aml_length,
+ extra_desc->extra.aml_start);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Validate the region address/length via the host OS */
+
+ status = acpi_os_validate_address(obj_desc->region.space_id,
+ obj_desc->region.address,
+ (acpi_size) obj_desc->region.length,
+ acpi_ut_get_node_name(node));
+
+ if (ACPI_FAILURE(status)) {
+ /*
+ * Invalid address/length. We will emit an error message and mark
+ * the region as invalid, so that it will cause an additional error if
+ * it is ever used. Then return AE_OK.
+ */
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During address validation of OpRegion [%4.4s]",
+ node->name.ascii));
+ obj_desc->common.flags |= AOPOBJ_INVALID;
+ status = AE_OK;
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_initialize_region
+ *
+ * PARAMETERS: obj_handle - Region namespace node
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Front end to ev_initialize_region
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ds_initialize_region(acpi_handle obj_handle)
+{
+ union acpi_operand_object *obj_desc;
+ acpi_status status;
+
+ obj_desc = acpi_ns_get_attached_object(obj_handle);
+
+ /* Namespace is NOT locked */
+
+ status = acpi_ev_initialize_region(obj_desc, FALSE);
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_init_buffer_field
+ *
+ * PARAMETERS: aml_opcode - create_xxx_field
+ * obj_desc - buffer_field object
+ * buffer_desc - Host Buffer
+ * offset_desc - Offset into buffer
+ * length_desc - Length of field (CREATE_FIELD_OP only)
+ * result_desc - Where to store the result
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Perform actual initialization of a buffer field
+ *
+ ******************************************************************************/
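+/*
+ * Offset/width examples (editorial addition) for the opcode switch below,
+ * using a hypothetical buffer BUF0:
+ *
+ *      CreateField (BUF0, 3, 13, FLD0)         (bit_offset = 3,  bit_count = 13)
+ *      CreateBitField (BUF0, 3, BIT0)          (bit_offset = 3,  bit_count = 1)
+ *      CreateByteField (BUF0, 4, BYT0)         (bit_offset = 32, bit_count = 8)
+ *      CreateDWordField (BUF0, 4, DWD0)        (bit_offset = 32, bit_count = 32)
+ *
+ * CreateField and CreateBitField take bit offsets; the fixed-width variants
+ * take byte offsets that are converted to bits (8 * offset) below.
+ */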
+
+static acpi_status
+acpi_ds_init_buffer_field(u16 aml_opcode,
+ union acpi_operand_object *obj_desc,
+ union acpi_operand_object *buffer_desc,
+ union acpi_operand_object *offset_desc,
+ union acpi_operand_object *length_desc,
+ union acpi_operand_object *result_desc)
+{
+ u32 offset;
+ u32 bit_offset;
+ u32 bit_count;
+ u8 field_flags;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_init_buffer_field, obj_desc);
+
+ /* Host object must be a Buffer */
+
+ if (ACPI_GET_OBJECT_TYPE(buffer_desc) != ACPI_TYPE_BUFFER) {
+ ACPI_ERROR((AE_INFO,
+ "Target of Create Field is not a Buffer object - %s",
+ acpi_ut_get_object_type_name(buffer_desc)));
+
+ status = AE_AML_OPERAND_TYPE;
+ goto cleanup;
+ }
+
+ /*
+ * The last parameter to all of these opcodes (result_desc) started
+ * out as a name_string, and should therefore now be a NS node
+ * after resolution in acpi_ex_resolve_operands().
+ */
+ if (ACPI_GET_DESCRIPTOR_TYPE(result_desc) != ACPI_DESC_TYPE_NAMED) {
+ ACPI_ERROR((AE_INFO,
+ "(%s) destination not a NS Node [%s]",
+ acpi_ps_get_opcode_name(aml_opcode),
+ acpi_ut_get_descriptor_name(result_desc)));
+
+ status = AE_AML_OPERAND_TYPE;
+ goto cleanup;
+ }
+
+ offset = (u32) offset_desc->integer.value;
+
+ /*
+ * Set up the bit offsets and counts according to the opcode
+ */
+ switch (aml_opcode) {
+ case AML_CREATE_FIELD_OP:
+
+ /* Offset is in bits, count is in bits */
+
+ field_flags = AML_FIELD_ACCESS_BYTE;
+ bit_offset = offset;
+ bit_count = (u32) length_desc->integer.value;
+
+ /* Must have a valid (>0) bit count */
+
+ if (bit_count == 0) {
+ ACPI_ERROR((AE_INFO,
+ "Attempt to CreateField of length zero"));
+ status = AE_AML_OPERAND_VALUE;
+ goto cleanup;
+ }
+ break;
+
+ case AML_CREATE_BIT_FIELD_OP:
+
+ /* Offset is in bits, Field is one bit */
+
+ bit_offset = offset;
+ bit_count = 1;
+ field_flags = AML_FIELD_ACCESS_BYTE;
+ break;
+
+ case AML_CREATE_BYTE_FIELD_OP:
+
+ /* Offset is in bytes, field is one byte */
+
+ bit_offset = 8 * offset;
+ bit_count = 8;
+ field_flags = AML_FIELD_ACCESS_BYTE;
+ break;
+
+ case AML_CREATE_WORD_FIELD_OP:
+
+ /* Offset is in bytes, field is one word */
+
+ bit_offset = 8 * offset;
+ bit_count = 16;
+ field_flags = AML_FIELD_ACCESS_WORD;
+ break;
+
+ case AML_CREATE_DWORD_FIELD_OP:
+
+ /* Offset is in bytes, field is one dword */
+
+ bit_offset = 8 * offset;
+ bit_count = 32;
+ field_flags = AML_FIELD_ACCESS_DWORD;
+ break;
+
+ case AML_CREATE_QWORD_FIELD_OP:
+
+ /* Offset is in bytes, field is one qword */
+
+ bit_offset = 8 * offset;
+ bit_count = 64;
+ field_flags = AML_FIELD_ACCESS_QWORD;
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO,
+ "Unknown field creation opcode %02x", aml_opcode));
+ status = AE_AML_BAD_OPCODE;
+ goto cleanup;
+ }
+
+ /* Entire field must fit within the current length of the buffer */
+
+ if ((bit_offset + bit_count) > (8 * (u32) buffer_desc->buffer.length)) {
+ ACPI_ERROR((AE_INFO,
+ "Field [%4.4s] at %d exceeds Buffer [%4.4s] size %d (bits)",
+ acpi_ut_get_node_name(result_desc),
+ bit_offset + bit_count,
+ acpi_ut_get_node_name(buffer_desc->buffer.node),
+ 8 * (u32) buffer_desc->buffer.length));
+ status = AE_AML_BUFFER_LIMIT;
+ goto cleanup;
+ }
+
+ /*
+ * Initialize areas of the field object that are common to all fields
+ * For field_flags, use LOCK_RULE = 0 (NO_LOCK),
+ * UPDATE_RULE = 0 (UPDATE_PRESERVE)
+ */
+ status = acpi_ex_prep_common_field_object(obj_desc, field_flags, 0,
+ bit_offset, bit_count);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ obj_desc->buffer_field.buffer_obj = buffer_desc;
+
+ /* Reference count for buffer_desc inherits obj_desc count */
+
+ buffer_desc->common.reference_count = (u16)
+ (buffer_desc->common.reference_count +
+ obj_desc->common.reference_count);
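+
+ /*
+ * (This keeps the host Buffer alive for as long as there are
+ * outstanding references to the new buffer_field object.)
+ */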
+
+ cleanup:
+
+ /* Always delete the operands */
+
+ acpi_ut_remove_reference(offset_desc);
+ acpi_ut_remove_reference(buffer_desc);
+
+ if (aml_opcode == AML_CREATE_FIELD_OP) {
+ acpi_ut_remove_reference(length_desc);
+ }
+
+ /* On failure, delete the result descriptor */
+
+ if (ACPI_FAILURE(status)) {
+ acpi_ut_remove_reference(result_desc); /* Result descriptor */
+ } else {
+ /* Now the address and length are valid for this buffer_field */
+
+ obj_desc->buffer_field.flags |= AOPOBJ_DATA_VALID;
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_eval_buffer_field_operands
+ *
+ * PARAMETERS: walk_state - Current walk
+ * Op - A valid buffer_field Op object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Get buffer_field Buffer and Index
+ * Called from acpi_ds_exec_end_op during buffer_field parse tree walk
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op)
+{
+ acpi_status status;
+ union acpi_operand_object *obj_desc;
+ struct acpi_namespace_node *node;
+ union acpi_parse_object *next_op;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_eval_buffer_field_operands, op);
+
+ /*
+ * This is where we evaluate the address and length fields of the
+ * create_xxx_field declaration
+ */
+ node = op->common.node;
+
+ /* next_op points to the op that holds the Buffer */
+
+ next_op = op->common.value.arg;
+
+ /* Evaluate/create the address and length operands */
+
+ status = acpi_ds_create_operands(walk_state, next_op);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+ return_ACPI_STATUS(AE_NOT_EXIST);
+ }
+
+ /* Resolve the operands */
+
+ status = acpi_ex_resolve_operands(op->common.aml_opcode,
+ ACPI_WALK_OPERANDS, walk_state);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO, "(%s) bad operand(s) (%X)",
+ acpi_ps_get_opcode_name(op->common.aml_opcode),
+ status));
+
+ return_ACPI_STATUS(status);
+ }
+
+ /* Initialize the Buffer Field */
+
+ if (op->common.aml_opcode == AML_CREATE_FIELD_OP) {
+
+ /* NOTE: Slightly different operands for this opcode */
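+ /* (Operands 0-3: source Buffer, bit Index, number of Bits, result Name) */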
+
+ status =
+ acpi_ds_init_buffer_field(op->common.aml_opcode, obj_desc,
+ walk_state->operands[0],
+ walk_state->operands[1],
+ walk_state->operands[2],
+ walk_state->operands[3]);
+ } else {
+ /* All other, create_xxx_field opcodes */
+
+ status =
+ acpi_ds_init_buffer_field(op->common.aml_opcode, obj_desc,
+ walk_state->operands[0],
+ walk_state->operands[1], NULL,
+ walk_state->operands[2]);
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_eval_region_operands
+ *
+ * PARAMETERS: walk_state - Current walk
+ * Op - A valid region Op object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Get region address and length
+ * Called from acpi_ds_exec_end_op during op_region parse tree walk
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op)
+{
+ acpi_status status;
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object *operand_desc;
+ struct acpi_namespace_node *node;
+ union acpi_parse_object *next_op;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_eval_region_operands, op);
+
+ /*
+ * This is where we evaluate the address and length fields of the
+ * op_region declaration
+ */
+ node = op->common.node;
+
+ /* next_op points to the op that holds the space_id */
+
+ next_op = op->common.value.arg;
+
+ /* next_op points to address op */
+
+ next_op = next_op->common.next;
+
+ /* Evaluate/create the address and length operands */
+
+ status = acpi_ds_create_operands(walk_state, next_op);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Resolve the length and address operands to numbers */
+
+ status = acpi_ex_resolve_operands(op->common.aml_opcode,
+ ACPI_WALK_OPERANDS, walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+ return_ACPI_STATUS(AE_NOT_EXIST);
+ }
+
+ /*
+ * Get the length operand and save it
+ * (at Top of stack)
+ */
+ operand_desc = walk_state->operands[walk_state->num_operands - 1];
+
+ obj_desc->region.length = (u32) operand_desc->integer.value;
+ acpi_ut_remove_reference(operand_desc);
+
+ /*
+ * Get the address and save it
+ * (at top of stack - 1)
+ */
+ operand_desc = walk_state->operands[walk_state->num_operands - 2];
+
+ obj_desc->region.address = (acpi_physical_address)
+ operand_desc->integer.value;
+ acpi_ut_remove_reference(operand_desc);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
+ obj_desc,
+ ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
+ obj_desc->region.length));
+
+ /* Now the address and length are valid for this opregion */
+
+ obj_desc->region.flags |= AOPOBJ_DATA_VALID;
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_eval_table_region_operands
+ *
+ * PARAMETERS: walk_state - Current walk
+ * Op - A valid region Op object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Get region address and length
+ * Called from acpi_ds_exec_end_op during data_table_region parse tree walk
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op)
+{
+ acpi_status status;
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object **operand;
+ struct acpi_namespace_node *node;
+ union acpi_parse_object *next_op;
+ u32 table_index;
+ struct acpi_table_header *table;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_eval_table_region_operands, op);
+
+ /*
+ * This is where we evaluate the signature_string, oem_id_string,
+ * and oem_table_id_string of the data_table_region declaration
+ */
+ node = op->common.node;
+
+ /* next_op points to signature_string op */
+
+ next_op = op->common.value.arg;
+
+ /*
+ * Evaluate/create the signature_string, oem_id_string,
+ * and oem_table_id_string operands
+ */
+ status = acpi_ds_create_operands(walk_state, next_op);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Resolve the signature_string, oem_id_string,
+ * and oem_table_id_string operands
+ */
+ status = acpi_ex_resolve_operands(op->common.aml_opcode,
+ ACPI_WALK_OPERANDS, walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ operand = &walk_state->operands[0];
+
+ /* Find the ACPI table */
+
+ status = acpi_tb_find_table(operand[0]->string.pointer,
+ operand[1]->string.pointer,
+ operand[2]->string.pointer, &table_index);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ acpi_ut_remove_reference(operand[0]);
+ acpi_ut_remove_reference(operand[1]);
+ acpi_ut_remove_reference(operand[2]);
+
+ status = acpi_get_table_by_index(table_index, &table);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+ return_ACPI_STATUS(AE_NOT_EXIST);
+ }
+
+ obj_desc->region.address =
+ (acpi_physical_address) ACPI_TO_INTEGER(table);
+ obj_desc->region.length = table->length;
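+
+ /* The region therefore covers the table image as it is installed in memory */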
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
+ obj_desc,
+ ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
+ obj_desc->region.length));
+
+ /* Now the address and length are valid for this opregion */
+
+ obj_desc->region.flags |= AOPOBJ_DATA_VALID;
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_eval_data_object_operands
+ *
+ * PARAMETERS: walk_state - Current walk
+ * Op - A valid data_object Op object
+ * obj_desc - data_object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Get the operands and complete the following data object types:
+ * Buffer, Package.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op,
+ union acpi_operand_object *obj_desc)
+{
+ acpi_status status;
+ union acpi_operand_object *arg_desc;
+ u32 length;
+
+ ACPI_FUNCTION_TRACE(ds_eval_data_object_operands);
+
+ /* The first operand (for all of these data objects) is the length */
+
+ /*
+ * Set proper index into operand stack for acpi_ds_obj_stack_push
+ * invoked inside acpi_ds_create_operand.
+ */
+ walk_state->operand_index = walk_state->num_operands;
+
+ status = acpi_ds_create_operand(walk_state, op->common.value.arg, 1);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_ex_resolve_operands(walk_state->opcode,
+ &(walk_state->
+ operands[walk_state->num_operands -
+ 1]), walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Extract length operand */
+
+ arg_desc = walk_state->operands[walk_state->num_operands - 1];
+ length = (u32) arg_desc->integer.value;
+
+ /* Cleanup for length operand */
+
+ status = acpi_ds_obj_stack_pop(1, walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ acpi_ut_remove_reference(arg_desc);
+
+ /*
+ * Create the actual data object
+ */
+ switch (op->common.aml_opcode) {
+ case AML_BUFFER_OP:
+
+ status =
+ acpi_ds_build_internal_buffer_obj(walk_state, op, length,
+ &obj_desc);
+ break;
+
+ case AML_PACKAGE_OP:
+ case AML_VAR_PACKAGE_OP:
+
+ status =
+ acpi_ds_build_internal_package_obj(walk_state, op, length,
+ &obj_desc);
+ break;
+
+ default:
+ return_ACPI_STATUS(AE_AML_BAD_OPCODE);
+ }
+
+ if (ACPI_SUCCESS(status)) {
+ /*
+ * Return the object in the walk_state, unless the parent is a package -
+ * in this case, the return object will be stored in the parse tree
+ * for the package.
+ */
+ if ((!op->common.parent) ||
+ ((op->common.parent->common.aml_opcode != AML_PACKAGE_OP) &&
+ (op->common.parent->common.aml_opcode !=
+ AML_VAR_PACKAGE_OP)
+ && (op->common.parent->common.aml_opcode != AML_NAME_OP))) {
+ walk_state->result_obj = obj_desc;
+ }
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_eval_bank_field_operands
+ *
+ * PARAMETERS: walk_state - Current walk
+ * Op - A valid bank_field Op object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Get bank_field bank_value
+ * Called from acpi_ds_exec_end_op during bank_field parse tree walk
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_eval_bank_field_operands(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op)
+{
+ acpi_status status;
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object *operand_desc;
+ struct acpi_namespace_node *node;
+ union acpi_parse_object *next_op;
+ union acpi_parse_object *arg;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_eval_bank_field_operands, op);
+
+ /*
+ * This is where we evaluate the bank_value field of the
+ * bank_field declaration
+ */
+
+ /* next_op points to the op that holds the Region */
+
+ next_op = op->common.value.arg;
+
+ /* next_op points to the op that holds the Bank Register */
+
+ next_op = next_op->common.next;
+
+ /* next_op points to the op that holds the Bank Value */
+
+ next_op = next_op->common.next;
+
+ /*
+ * Set proper index into operand stack for acpi_ds_obj_stack_push
+ * invoked inside acpi_ds_create_operand.
+ *
+ * We use walk_state->operands[0] to store the evaluated bank_value
+ */
+ walk_state->operand_index = 0;
+
+ status = acpi_ds_create_operand(walk_state, next_op, 0);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_ex_resolve_to_value(&walk_state->operands[0], walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS,
+ acpi_ps_get_opcode_name(op->common.aml_opcode), 1);
+ /*
+ * Get the bank_value operand and save it
+ * (at Top of stack)
+ */
+ operand_desc = walk_state->operands[0];
+
+ /* Arg points to the start of the Bank Field unit list */
+
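+ /*
+ * (Field units start at argument 4; arguments 0-3 of the bank_field
+ * op hold the Region name, Bank register name, Bank value, and the
+ * field flags.)
+ */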
+ arg = acpi_ps_get_arg(op, 4);
+ while (arg) {
+
+ /* Ignore OFFSET and ACCESSAS terms here */
+
+ if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) {
+ node = arg->common.node;
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+ return_ACPI_STATUS(AE_NOT_EXIST);
+ }
+
+ obj_desc->bank_field.value =
+ (u32) operand_desc->integer.value;
+ }
+
+ /* Move to next field in the list */
+
+ arg = arg->common.next;
+ }
+
+ acpi_ut_remove_reference(operand_desc);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_exec_begin_control_op
+ *
+ * PARAMETERS: walk_list - The list that owns the walk stack
+ * Op - The control Op
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Handles all control ops encountered during control method
+ * execution.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op)
+{
+ acpi_status status = AE_OK;
+ union acpi_generic_state *control_state;
+
+ ACPI_FUNCTION_NAME(ds_exec_begin_control_op);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p Opcode=%2.2X State=%p\n", op,
+ op->common.aml_opcode, walk_state));
+
+ switch (op->common.aml_opcode) {
+ case AML_IF_OP:
+ case AML_WHILE_OP:
+
+ /*
+ * IF/WHILE: Create a new control state to manage these
+ * constructs. We need to manage these as a stack, in order
+ * to handle nesting.
+ */
+ control_state = acpi_ut_create_control_state();
+ if (!control_state) {
+ status = AE_NO_MEMORY;
+ break;
+ }
+ /*
+ * Save a pointer to the predicate for multiple executions
+ * of a loop
+ */
+ control_state->control.aml_predicate_start =
+ walk_state->parser_state.aml - 1;
+ control_state->control.package_end =
+ walk_state->parser_state.pkg_end;
+ control_state->control.opcode = op->common.aml_opcode;
+
+ /* Push the control state on this walk's control stack */
+
+ acpi_ut_push_generic_state(&walk_state->control_state,
+ control_state);
+ break;
+
+ case AML_ELSE_OP:
+
+ /* Predicate is in the state object */
+ /* If predicate is true, the IF was executed, ignore ELSE part */
+
+ if (walk_state->last_predicate) {
+ status = AE_CTRL_TRUE;
+ }
+
+ break;
+
+ case AML_RETURN_OP:
+
+ break;
+
+ default:
+ break;
+ }
+
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_exec_end_control_op
+ *
+ * PARAMETERS: walk_list - The list that owns the walk stack
+ * Op - The control Op
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Handles all control ops encountered during control method
+ * execution.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
+ union acpi_parse_object * op)
+{
+ acpi_status status = AE_OK;
+ union acpi_generic_state *control_state;
+
+ ACPI_FUNCTION_NAME(ds_exec_end_control_op);
+
+ switch (op->common.aml_opcode) {
+ case AML_IF_OP:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[IF_OP] Op=%p\n", op));
+
+ /*
+ * Save the result of the predicate in case there is an
+ * ELSE to come
+ */
+ walk_state->last_predicate =
+ (u8) walk_state->control_state->common.value;
+
+ /*
+ * Pop the control state that was created at the start
+ * of the IF and free it
+ */
+ control_state =
+ acpi_ut_pop_generic_state(&walk_state->control_state);
+ acpi_ut_delete_generic_state(control_state);
+ break;
+
+ case AML_ELSE_OP:
+
+ break;
+
+ case AML_WHILE_OP:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[WHILE_OP] Op=%p\n", op));
+
+ if (walk_state->control_state->common.value) {
+
+ /* Predicate was true, go back and evaluate it again! */
+
+ status = AE_CTRL_PENDING;
+ }
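+
+ /*
+ * The control state is popped below in either case; aml_last_while
+ * remembers the start of the predicate so the parser can branch
+ * back there when AE_CTRL_PENDING re-executes the loop.
+ */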
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "[WHILE_OP] termination! Op=%p\n", op));
+
+ /* Pop this control state and free it */
+
+ control_state =
+ acpi_ut_pop_generic_state(&walk_state->control_state);
+
+ walk_state->aml_last_while =
+ control_state->control.aml_predicate_start;
+ acpi_ut_delete_generic_state(control_state);
+ break;
+
+ case AML_RETURN_OP:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "[RETURN_OP] Op=%p Arg=%p\n", op,
+ op->common.value.arg));
+
+ /*
+ * One optional operand -- the return value
+ * It can be either an immediate operand or a result that
+ * has been bubbled up the tree
+ */
+ if (op->common.value.arg) {
+
+ /* Since we have a real Return(), delete any implicit return */
+
+ acpi_ds_clear_implicit_return(walk_state);
+
+ /* Return statement has an immediate operand */
+
+ status =
+ acpi_ds_create_operands(walk_state,
+ op->common.value.arg);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /*
+ * If value being returned is a Reference (such as
+ * an arg or local), resolve it now because it may
+ * cease to exist at the end of the method.
+ */
+ status =
+ acpi_ex_resolve_to_value(&walk_state->operands[0],
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /*
+ * Get the return value and save as the last result
+ * value. This is the only place where walk_state->return_desc
+ * is set to anything other than zero!
+ */
+ walk_state->return_desc = walk_state->operands[0];
+ } else if (walk_state->result_count) {
+
+ /* Since we have a real Return(), delete any implicit return */
+
+ acpi_ds_clear_implicit_return(walk_state);
+
+ /*
+ * The return value has come from a previous calculation.
+ *
+ * If value being returned is a Reference (such as
+ * an arg or local), resolve it now because it may
+ * cease to exist at the end of the method.
+ *
+ * Allow references created by the Index operator to return unchanged.
+ */
+ if ((ACPI_GET_DESCRIPTOR_TYPE
+ (walk_state->results->results.obj_desc[0]) ==
+ ACPI_DESC_TYPE_OPERAND)
+ &&
+ (ACPI_GET_OBJECT_TYPE
+ (walk_state->results->results.obj_desc[0]) ==
+ ACPI_TYPE_LOCAL_REFERENCE)
+ && ((walk_state->results->results.obj_desc[0])->
+ reference.class != ACPI_REFCLASS_INDEX)) {
+ status =
+ acpi_ex_resolve_to_value(&walk_state->
+ results->results.
+ obj_desc[0],
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+
+ walk_state->return_desc =
+ walk_state->results->results.obj_desc[0];
+ } else {
+ /* No return operand */
+
+ if (walk_state->num_operands) {
+ acpi_ut_remove_reference(walk_state->
+ operands[0]);
+ }
+
+ walk_state->operands[0] = NULL;
+ walk_state->num_operands = 0;
+ walk_state->return_desc = NULL;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "Completed RETURN_OP State=%p, RetVal=%p\n",
+ walk_state, walk_state->return_desc));
+
+ /* End the control method execution right now */
+
+ status = AE_CTRL_TERMINATE;
+ break;
+
+ case AML_NOOP_OP:
+
+ /* Just do nothing! */
+ break;
+
+ case AML_BREAK_POINT_OP:
+
+ /* Call up to the OS service layer to handle this */
+
+ status =
+ acpi_os_signal(ACPI_SIGNAL_BREAKPOINT,
+ "Executed AML Breakpoint opcode");
+
+ /* If and when it returns, all done. */
+
+ break;
+
+ case AML_BREAK_OP:
+ case AML_CONTINUE_OP: /* ACPI 2.0 */
+
+ /* Pop and delete control states until we find a while */
+
+ while (walk_state->control_state &&
+ (walk_state->control_state->control.opcode !=
+ AML_WHILE_OP)) {
+ control_state =
+ acpi_ut_pop_generic_state(&walk_state->
+ control_state);
+ acpi_ut_delete_generic_state(control_state);
+ }
+
+ /* No while found? */
+
+ if (!walk_state->control_state) {
+ return (AE_AML_NO_WHILE);
+ }
+
+ /* Was: walk_state->aml_last_while = walk_state->control_state->Control.aml_predicate_start; */
+
+ walk_state->aml_last_while =
+ walk_state->control_state->control.package_end;
+
+ /* Return status depending on opcode */
+
+ if (op->common.aml_opcode == AML_BREAK_OP) {
+ status = AE_CTRL_BREAK;
+ } else {
+ status = AE_CTRL_CONTINUE;
+ }
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO, "Unknown control opcode=%X Op=%p",
+ op->common.aml_opcode, op));
+
+ status = AE_AML_BAD_OPCODE;
+ break;
+ }
+
+ return (status);
+}
diff --git a/drivers/acpi/dispatcher/dsutils.c b/drivers/acpi/dispatcher/dsutils.c
new file mode 100644
index 0000000..b398982
--- /dev/null
+++ b/drivers/acpi/dispatcher/dsutils.c
@@ -0,0 +1,868 @@
+/*******************************************************************************
+ *
+ * Module Name: dsutils - Dispatcher utilities
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acparser.h>
+#include <acpi/amlcode.h>
+#include <acpi/acdispat.h>
+#include <acpi/acinterp.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acdebug.h>
+
+#define _COMPONENT ACPI_DISPATCHER
+ACPI_MODULE_NAME("dsutils")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_clear_implicit_return
+ *
+ * PARAMETERS: walk_state - Current State
+ *
+ * RETURN: None.
+ *
+ * DESCRIPTION: Clear and remove a reference on an implicit return value. Used
+ * to delete "stale" return values (if enabled, the return value
+ * from every operator is saved at least momentarily, in case the
+ * parent method exits.)
+ *
+ ******************************************************************************/
+void acpi_ds_clear_implicit_return(struct acpi_walk_state *walk_state)
+{
+ ACPI_FUNCTION_NAME(ds_clear_implicit_return);
+
+ /*
+ * Slack must be enabled for this feature
+ */
+ if (!acpi_gbl_enable_interpreter_slack) {
+ return;
+ }
+
+ if (walk_state->implicit_return_obj) {
+ /*
+ * Delete any "stale" implicit return. However, in
+ * complex statements, the implicit return value can be
+ * bubbled up several levels.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "Removing reference on stale implicit return obj %p\n",
+ walk_state->implicit_return_obj));
+
+ acpi_ut_remove_reference(walk_state->implicit_return_obj);
+ walk_state->implicit_return_obj = NULL;
+ }
+}
+
+#ifndef ACPI_NO_METHOD_EXECUTION
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_do_implicit_return
+ *
+ * PARAMETERS: return_desc - The return value
+ * walk_state - Current State
+ * add_reference - True if a reference should be added to the
+ * return object
+ *
+ * RETURN: TRUE if implicit return enabled, FALSE otherwise
+ *
+ * DESCRIPTION: Implements the optional "implicit return". We save the result
+ * of every ASL operator and control method invocation in case the
+ * parent method exits. Before storing a new return value, we
+ * delete the previous return value.
+ *
+ ******************************************************************************/
+
+u8
+acpi_ds_do_implicit_return(union acpi_operand_object *return_desc,
+ struct acpi_walk_state *walk_state, u8 add_reference)
+{
+ ACPI_FUNCTION_NAME(ds_do_implicit_return);
+
+ /*
+ * Slack must be enabled for this feature, and we must
+ * have a valid return object
+ */
+ if ((!acpi_gbl_enable_interpreter_slack) || (!return_desc)) {
+ return (FALSE);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "Result %p will be implicitly returned; Prev=%p\n",
+ return_desc, walk_state->implicit_return_obj));
+
+ /*
+ * Delete any "stale" implicit return value first. However, in
+ * complex statements, the implicit return value can be
+ * bubbled up several levels, so we don't clear the value if it
+ * is the same as the return_desc.
+ */
+ if (walk_state->implicit_return_obj) {
+ if (walk_state->implicit_return_obj == return_desc) {
+ return (TRUE);
+ }
+ acpi_ds_clear_implicit_return(walk_state);
+ }
+
+ /* Save the implicit return value, add a reference if requested */
+
+ walk_state->implicit_return_obj = return_desc;
+ if (add_reference) {
+ acpi_ut_add_reference(return_desc);
+ }
+
+ return (TRUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_is_result_used
+ *
+ * PARAMETERS: Op - Current Op
+ * walk_state - Current State
+ *
+ * RETURN: TRUE if result is used, FALSE otherwise
+ *
+ * DESCRIPTION: Check if a result object will be used by the parent
+ *
+ ******************************************************************************/
+
+u8
+acpi_ds_is_result_used(union acpi_parse_object * op,
+ struct acpi_walk_state * walk_state)
+{
+ const struct acpi_opcode_info *parent_info;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_is_result_used, op);
+
+ /* Must have both an Op and a Result Object */
+
+ if (!op) {
+ ACPI_ERROR((AE_INFO, "Null Op"));
+ return_UINT8(TRUE);
+ }
+
+ /*
+ * We know that this operator is not a
+ * Return() operator (we would not be here otherwise). The following code is the
+ * optional support for a so-called "implicit return". Some AML code
+ * assumes that the last value of the method is "implicitly" returned
+ * to the caller. Just save the last result as the return value.
+ * NOTE: this is optional because the ASL language does not actually
+ * support this behavior.
+ */
+ (void)acpi_ds_do_implicit_return(walk_state->result_obj, walk_state,
+ TRUE);
+
+ /*
+ * Now determine if the parent will use the result
+ *
+ * If there is no parent, or the parent is a scope_op, we are executing
+ * at the method level. An executing method typically has no parent,
+ * since each method is parsed separately. A method invoked externally
+ * via execute_control_method has a scope_op as the parent.
+ */
+ if ((!op->common.parent) ||
+ (op->common.parent->common.aml_opcode == AML_SCOPE_OP)) {
+
+ /* No parent, the return value cannot possibly be used */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "At Method level, result of [%s] not used\n",
+ acpi_ps_get_opcode_name(op->common.
+ aml_opcode)));
+ return_UINT8(FALSE);
+ }
+
+ /* Get info on the parent. The root_op is AML_SCOPE */
+
+ parent_info =
+ acpi_ps_get_opcode_info(op->common.parent->common.aml_opcode);
+ if (parent_info->class == AML_CLASS_UNKNOWN) {
+ ACPI_ERROR((AE_INFO, "Unknown parent opcode Op=%p", op));
+ return_UINT8(FALSE);
+ }
+
+ /*
+ * Decide what to do with the result based on the parent. If
+ * the parent opcode will not use the result, delete the object.
+ * Otherwise leave it as is, it will be deleted when it is used
+ * as an operand later.
+ */
+ switch (parent_info->class) {
+ case AML_CLASS_CONTROL:
+
+ switch (op->common.parent->common.aml_opcode) {
+ case AML_RETURN_OP:
+
+ /* Never delete the return value associated with a return opcode */
+
+ goto result_used;
+
+ case AML_IF_OP:
+ case AML_WHILE_OP:
+
+ /*
+ * If we are executing the predicate AND this is the predicate op,
+ * we will use the return value
+ */
+ if ((walk_state->control_state->common.state ==
+ ACPI_CONTROL_PREDICATE_EXECUTING)
+ && (walk_state->control_state->control.
+ predicate_op == op)) {
+ goto result_used;
+ }
+ break;
+
+ default:
+ /* Ignore other control opcodes */
+ break;
+ }
+
+ /* The general control opcode returns no result */
+
+ goto result_not_used;
+
+ case AML_CLASS_CREATE:
+
+ /*
+ * These opcodes allow term_arg(s) as operands and therefore
+ * the operands can be method calls. The result is used.
+ */
+ goto result_used;
+
+ case AML_CLASS_NAMED_OBJECT:
+
+ if ((op->common.parent->common.aml_opcode == AML_REGION_OP) ||
+ (op->common.parent->common.aml_opcode == AML_DATA_REGION_OP)
+ || (op->common.parent->common.aml_opcode == AML_PACKAGE_OP)
+ || (op->common.parent->common.aml_opcode ==
+ AML_VAR_PACKAGE_OP)
+ || (op->common.parent->common.aml_opcode == AML_BUFFER_OP)
+ || (op->common.parent->common.aml_opcode ==
+ AML_INT_EVAL_SUBTREE_OP)
+ || (op->common.parent->common.aml_opcode ==
+ AML_BANK_FIELD_OP)) {
+ /*
+ * These opcodes allow term_arg(s) as operands and therefore
+ * the operands can be method calls. The result is used.
+ */
+ goto result_used;
+ }
+
+ goto result_not_used;
+
+ default:
+
+ /*
+ * In all other cases, the parent will actually use the return
+ * object, so keep it.
+ */
+ goto result_used;
+ }
+
+ result_used:
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "Result of [%s] used by Parent [%s] Op=%p\n",
+ acpi_ps_get_opcode_name(op->common.aml_opcode),
+ acpi_ps_get_opcode_name(op->common.parent->common.
+ aml_opcode), op));
+
+ return_UINT8(TRUE);
+
+ result_not_used:
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "Result of [%s] not used by Parent [%s] Op=%p\n",
+ acpi_ps_get_opcode_name(op->common.aml_opcode),
+ acpi_ps_get_opcode_name(op->common.parent->common.
+ aml_opcode), op));
+
+ return_UINT8(FALSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_delete_result_if_not_used
+ *
+ * PARAMETERS: Op - Current parse Op
+ * result_obj - Result of the operation
+ * walk_state - Current state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Used after interpretation of an opcode. If there is an internal
+ * result descriptor, check if the parent opcode will actually use
+ * this result. If not, delete the result now so that it will
+ * not become orphaned.
+ *
+ ******************************************************************************/
+
+void
+acpi_ds_delete_result_if_not_used(union acpi_parse_object *op,
+ union acpi_operand_object *result_obj,
+ struct acpi_walk_state *walk_state)
+{
+ union acpi_operand_object *obj_desc;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_delete_result_if_not_used, result_obj);
+
+ if (!op) {
+ ACPI_ERROR((AE_INFO, "Null Op"));
+ return_VOID;
+ }
+
+ if (!result_obj) {
+ return_VOID;
+ }
+
+ if (!acpi_ds_is_result_used(op, walk_state)) {
+
+ /* Must pop the result stack (obj_desc should be equal to result_obj) */
+
+ status = acpi_ds_result_pop(&obj_desc, walk_state);
+ if (ACPI_SUCCESS(status)) {
+ acpi_ut_remove_reference(result_obj);
+ }
+ }
+
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_resolve_operands
+ *
+ * PARAMETERS: walk_state - Current walk state with operands on stack
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Resolve all operands to their values. Used to prepare
+ * arguments to a control method invocation (a call from one
+ * method to another.)
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ds_resolve_operands(struct acpi_walk_state *walk_state)
+{
+ u32 i;
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_resolve_operands, walk_state);
+
+ /*
+ * Attempt to resolve each of the valid operands
+ * Method arguments are passed by reference, not by value. This means
+ * that the actual objects are passed, not copies of the objects.
+ */
+ for (i = 0; i < walk_state->num_operands; i++) {
+ status =
+ acpi_ex_resolve_to_value(&walk_state->operands[i],
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ break;
+ }
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_clear_operands
+ *
+ * PARAMETERS: walk_state - Current walk state with operands on stack
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Clear all operands on the current walk state operand stack.
+ *
+ ******************************************************************************/
+
+void acpi_ds_clear_operands(struct acpi_walk_state *walk_state)
+{
+ u32 i;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_clear_operands, walk_state);
+
+ /* Remove a reference on each operand on the stack */
+
+ for (i = 0; i < walk_state->num_operands; i++) {
+ /*
+ * Remove a reference to all operands, including both
+ * "Arguments" and "Targets".
+ */
+ acpi_ut_remove_reference(walk_state->operands[i]);
+ walk_state->operands[i] = NULL;
+ }
+
+ walk_state->num_operands = 0;
+ return_VOID;
+}
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_create_operand
+ *
+ * PARAMETERS: walk_state - Current walk state
+ * Arg - Parse object for the argument
+ * arg_index - Which argument (zero based)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Translate a parse tree object that is an argument to an AML
+ * opcode to the equivalent interpreter object. This may include
+ * looking up a name or entering a new name into the internal
+ * namespace.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_create_operand(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *arg, u32 arg_index)
+{
+ acpi_status status = AE_OK;
+ char *name_string;
+ u32 name_length;
+ union acpi_operand_object *obj_desc;
+ union acpi_parse_object *parent_op;
+ u16 opcode;
+ acpi_interpreter_mode interpreter_mode;
+ const struct acpi_opcode_info *op_info;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_create_operand, arg);
+
+ /* A valid name must be looked up in the namespace */
+
+ if ((arg->common.aml_opcode == AML_INT_NAMEPATH_OP) &&
+ (arg->common.value.string) &&
+ !(arg->common.flags & ACPI_PARSEOP_IN_STACK)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Getting a name: Arg=%p\n",
+ arg));
+
+ /* Get the entire name string from the AML stream */
+
+ status =
+ acpi_ex_get_name_string(ACPI_TYPE_ANY,
+ arg->common.value.buffer,
+ &name_string, &name_length);
+
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* All prefixes have been handled, and the name is in name_string */
+
+ /*
+ * Special handling for buffer_field declarations. This is a deferred
+ * opcode that unfortunately defines the field name as the last
+ * parameter instead of the first. We get here when we are performing
+ * the deferred execution, so the actual name of the field is already
+ * in the namespace. We don't want to attempt to look it up again
+ * because we may be executing in a different scope than where the
+ * actual opcode exists.
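+ * (The name is argument 3 for CreateField, which has an extra Length
+ * operand, and argument 2 for the other create_xxx_field opcodes.)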
+ */
+ if ((walk_state->deferred_node) &&
+ (walk_state->deferred_node->type == ACPI_TYPE_BUFFER_FIELD)
+ && (arg_index ==
+ (u32) ((walk_state->opcode ==
+ AML_CREATE_FIELD_OP) ? 3 : 2))) {
+ obj_desc =
+ ACPI_CAST_PTR(union acpi_operand_object,
+ walk_state->deferred_node);
+ status = AE_OK;
+ } else { /* All other opcodes */
+
+ /*
+ * Differentiate between a namespace "create" operation
+ * versus a "lookup" operation (IMODE_LOAD_PASS2 vs.
+ * IMODE_EXECUTE) in order to support the creation of
+ * namespace objects during the execution of control methods.
+ */
+ parent_op = arg->common.parent;
+ op_info =
+ acpi_ps_get_opcode_info(parent_op->common.
+ aml_opcode);
+ if ((op_info->flags & AML_NSNODE)
+ && (parent_op->common.aml_opcode !=
+ AML_INT_METHODCALL_OP)
+ && (parent_op->common.aml_opcode != AML_REGION_OP)
+ && (parent_op->common.aml_opcode !=
+ AML_INT_NAMEPATH_OP)) {
+
+ /* Enter name into namespace if not found */
+
+ interpreter_mode = ACPI_IMODE_LOAD_PASS2;
+ } else {
+ /* Return a failure if name not found */
+
+ interpreter_mode = ACPI_IMODE_EXECUTE;
+ }
+
+ status =
+ acpi_ns_lookup(walk_state->scope_info, name_string,
+ ACPI_TYPE_ANY, interpreter_mode,
+ ACPI_NS_SEARCH_PARENT |
+ ACPI_NS_DONT_OPEN_SCOPE, walk_state,
+ ACPI_CAST_INDIRECT_PTR(struct
+ acpi_namespace_node,
+ &obj_desc));
+ /*
+ * The only case where we pass through (ignore) a NOT_FOUND
+ * error is for the cond_ref_of opcode.
+ */
+ if (status == AE_NOT_FOUND) {
+ if (parent_op->common.aml_opcode ==
+ AML_COND_REF_OF_OP) {
+ /*
+ * For the Conditional Reference op, it's OK if
+ * the name is not found; we just need a way to
+ * indicate this to the interpreter, so set the
+ * object to the root node
+ */
+ obj_desc = ACPI_CAST_PTR(union
+ acpi_operand_object,
+ acpi_gbl_root_node);
+ status = AE_OK;
+ } else {
+ /*
+ * We just plain didn't find it -- which is a
+ * very serious error at this point
+ */
+ status = AE_AML_NAME_NOT_FOUND;
+ }
+ }
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR_NAMESPACE(name_string, status);
+ }
+ }
+
+ /* Free the namestring created above */
+
+ ACPI_FREE(name_string);
+
+ /* Check status from the lookup */
+
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Put the resulting object onto the current object stack */
+
+ status = acpi_ds_obj_stack_push(obj_desc, walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ ACPI_DEBUGGER_EXEC(acpi_db_display_argument_object
+ (obj_desc, walk_state));
+ } else {
+ /* Check for null name case */
+
+ if ((arg->common.aml_opcode == AML_INT_NAMEPATH_OP) &&
+ !(arg->common.flags & ACPI_PARSEOP_IN_STACK)) {
+ /*
+ * If the name is null, this means that this is an
+ * optional result parameter that was not specified
+ * in the original ASL. Create a Zero Constant for a
+ * placeholder. (Store to a constant is a Noop.)
+ */
+ opcode = AML_ZERO_OP; /* Has no arguments! */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "Null namepath: Arg=%p\n", arg));
+ } else {
+ opcode = arg->common.aml_opcode;
+ }
+
+ /* Get the object type of the argument */
+
+ op_info = acpi_ps_get_opcode_info(opcode);
+ if (op_info->object_type == ACPI_TYPE_INVALID) {
+ return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
+ }
+
+ if ((op_info->flags & AML_HAS_RETVAL)
+ || (arg->common.flags & ACPI_PARSEOP_IN_STACK)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "Argument previously created, already stacked\n"));
+
+ ACPI_DEBUGGER_EXEC(acpi_db_display_argument_object
+ (walk_state->
+ operands[walk_state->num_operands -
+ 1], walk_state));
+
+ /*
+ * Use the value that was previously returned
+ * by the evaluation of this argument
+ */
+ status = acpi_ds_result_pop(&obj_desc, walk_state);
+ if (ACPI_FAILURE(status)) {
+ /*
+ * Only error is underflow, and this indicates
+ * a missing or null operand!
+ */
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Missing or null operand"));
+ return_ACPI_STATUS(status);
+ }
+ } else {
+ /* Create an ACPI_INTERNAL_OBJECT for the argument */
+
+ obj_desc =
+ acpi_ut_create_internal_object(op_info->
+ object_type);
+ if (!obj_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Initialize the new object */
+
+ status =
+ acpi_ds_init_object_from_op(walk_state, arg, opcode,
+ &obj_desc);
+ if (ACPI_FAILURE(status)) {
+ acpi_ut_delete_object_desc(obj_desc);
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /* Put the operand object on the object stack */
+
+ status = acpi_ds_obj_stack_push(obj_desc, walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ ACPI_DEBUGGER_EXEC(acpi_db_display_argument_object
+ (obj_desc, walk_state));
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_create_operands
+ *
+ * PARAMETERS: walk_state - Current state
+ * first_arg - First argument of a parser argument tree
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Convert an operator's arguments from a parse tree format to
+ * namespace objects and place those argument object on the object
+ * stack in preparation for evaluation by the interpreter.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_create_operands(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *first_arg)
+{
+ acpi_status status = AE_OK;
+ union acpi_parse_object *arg;
+ union acpi_parse_object *arguments[ACPI_OBJ_NUM_OPERANDS];
+ u32 arg_count = 0;
+ u32 index = walk_state->num_operands;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_create_operands, first_arg);
+
+ /* Get all arguments in the list */
+
+ arg = first_arg;
+ while (arg) {
+ if (index >= ACPI_OBJ_NUM_OPERANDS) {
+ return_ACPI_STATUS(AE_BAD_DATA);
+ }
+
+ arguments[index] = arg;
+ walk_state->operands[index] = NULL;
+
+ /* Move on to next argument, if any */
+
+ arg = arg->common.next;
+ arg_count++;
+ index++;
+ }
+
+ index--;
+
+ /* Walk the arguments in reverse order, which matches the order in which objects come off the Result stack */
+
+ for (i = 0; i < arg_count; i++) {
+ arg = arguments[index];
+
+ /* Force the filling of the operand stack in inverse order */
+
+ walk_state->operand_index = (u8) index;
+
+ status = acpi_ds_create_operand(walk_state, arg, index);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ index--;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "Arg #%d (%p) done, Arg1=%p\n", index, arg,
+ first_arg));
+ }
+
+ return_ACPI_STATUS(status);
+
+ cleanup:
+ /*
+ * We must undo everything done above; meaning that we must
+ * pop everything off of the operand stack and delete those
+ * objects
+ */
+ acpi_ds_obj_stack_pop_and_delete(arg_count, walk_state);
+
+ ACPI_EXCEPTION((AE_INFO, status, "While creating Arg %d", index));
+ return_ACPI_STATUS(status);
+}
+
+/*****************************************************************************
+ *
+ * FUNCTION: acpi_ds_evaluate_name_path
+ *
+ * PARAMETERS: walk_state - Current state of the parse tree walk,
+ * the opcode of current operation should be
+ * AML_INT_NAMEPATH_OP
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Translate the name_path parse tree object to the equivalent
+ * interpreter object, convert it to a value if needed, duplicate
+ * it if needed, and push it onto the current result stack.
+ *
+ ****************************************************************************/
+
+acpi_status acpi_ds_evaluate_name_path(struct acpi_walk_state *walk_state)
+{
+ acpi_status status = AE_OK;
+ union acpi_parse_object *op = walk_state->op;
+ union acpi_operand_object **operand = &walk_state->operands[0];
+ union acpi_operand_object *new_obj_desc;
+ u8 type;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_evaluate_name_path, walk_state);
+
+ if (!op->common.parent) {
+
+ /* This happens after certain exception processing */
+
+ goto exit;
+ }
+
+ if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
+ (op->common.parent->common.aml_opcode == AML_VAR_PACKAGE_OP) ||
+ (op->common.parent->common.aml_opcode == AML_REF_OF_OP)) {
+
+ /* TBD: Should we specify this feature as a bit of op_info->Flags of these opcodes? */
+
+ goto exit;
+ }
+
+ status = acpi_ds_create_operand(walk_state, op, 0);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ if (op->common.flags & ACPI_PARSEOP_TARGET) {
+ new_obj_desc = *operand;
+ goto push_result;
+ }
+
+ type = ACPI_GET_OBJECT_TYPE(*operand);
+
+ status = acpi_ex_resolve_to_value(operand, walk_state);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ if (type == ACPI_TYPE_INTEGER) {
+
+ /* It was incremented by acpi_ex_resolve_to_value */
+
+ acpi_ut_remove_reference(*operand);
+
+ status =
+ acpi_ut_copy_iobject_to_iobject(*operand, &new_obj_desc,
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+ } else {
+ /*
+ * The object was either newly created or is
+ * a Namespace node - don't decrement its reference count.
+ */
+ new_obj_desc = *operand;
+ }
+
+ /* Cleanup for name-path operand */
+
+ status = acpi_ds_obj_stack_pop(1, walk_state);
+ if (ACPI_FAILURE(status)) {
+ walk_state->result_obj = new_obj_desc;
+ goto exit;
+ }
+
+ push_result:
+
+ walk_state->result_obj = new_obj_desc;
+
+ status = acpi_ds_result_push(walk_state->result_obj, walk_state);
+ if (ACPI_SUCCESS(status)) {
+
+ /* Force the result to be taken from the result stack */
+
+ op->common.flags |= ACPI_PARSEOP_IN_STACK;
+ }
+
+ exit:
+
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/dispatcher/dswexec.c b/drivers/acpi/dispatcher/dswexec.c
new file mode 100644
index 0000000..396fe12
--- /dev/null
+++ b/drivers/acpi/dispatcher/dswexec.c
@@ -0,0 +1,745 @@
+/******************************************************************************
+ *
+ * Module Name: dswexec - Dispatcher method execution callbacks;
+ * dispatch to interpreter.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acparser.h>
+#include <acpi/amlcode.h>
+#include <acpi/acdispat.h>
+#include <acpi/acinterp.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acdebug.h>
+
+#define _COMPONENT ACPI_DISPATCHER
+ACPI_MODULE_NAME("dswexec")
+
+/*
+ * Dispatch table for opcode classes
+ */
+static ACPI_EXECUTE_OP acpi_gbl_op_type_dispatch[] = {
+ acpi_ex_opcode_0A_0T_1R,
+ acpi_ex_opcode_1A_0T_0R,
+ acpi_ex_opcode_1A_0T_1R,
+ acpi_ex_opcode_1A_1T_0R,
+ acpi_ex_opcode_1A_1T_1R,
+ acpi_ex_opcode_2A_0T_0R,
+ acpi_ex_opcode_2A_0T_1R,
+ acpi_ex_opcode_2A_1T_1R,
+ acpi_ex_opcode_2A_2T_1R,
+ acpi_ex_opcode_3A_0T_0R,
+ acpi_ex_opcode_3A_1T_1R,
+ acpi_ex_opcode_6A_0T_1R
+};
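+
+/*
+ * Note: the handler names encode the operand signature; for example,
+ * acpi_ex_opcode_2A_1T_1R takes 2 arguments and 1 target and produces
+ * 1 return value. The table is indexed by the opcode type from
+ * walk_state->op_info->type (see acpi_ds_exec_end_op).
+ */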
+
+/*****************************************************************************
+ *
+ * FUNCTION: acpi_ds_get_predicate_value
+ *
+ * PARAMETERS: walk_state - Current state of the parse tree walk
+ * result_obj - if non-zero, pop result from result stack
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Get the result of a predicate evaluation
+ *
+ ****************************************************************************/
+
+acpi_status
+acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
+ union acpi_operand_object *result_obj)
+{
+ acpi_status status = AE_OK;
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object *local_obj_desc = NULL;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_get_predicate_value, walk_state);
+
+ walk_state->control_state->common.state = 0;
+
+ if (result_obj) {
+ status = acpi_ds_result_pop(&obj_desc, walk_state);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not get result from predicate evaluation"));
+
+ return_ACPI_STATUS(status);
+ }
+ } else {
+ status = acpi_ds_create_operand(walk_state, walk_state->op, 0);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status =
+ acpi_ex_resolve_to_value(&walk_state->operands[0],
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ obj_desc = walk_state->operands[0];
+ }
+
+ if (!obj_desc) {
+ ACPI_ERROR((AE_INFO,
+ "No predicate ObjDesc=%p State=%p",
+ obj_desc, walk_state));
+
+ return_ACPI_STATUS(AE_AML_NO_OPERAND);
+ }
+
+ /*
+ * Result of predicate evaluation must be an Integer
+ * object. Implicitly convert the argument if necessary.
+ */
+ status = acpi_ex_convert_to_integer(obj_desc, &local_obj_desc, 16);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ if (ACPI_GET_OBJECT_TYPE(local_obj_desc) != ACPI_TYPE_INTEGER) {
+ ACPI_ERROR((AE_INFO,
+ "Bad predicate (not an integer) ObjDesc=%p State=%p Type=%X",
+ obj_desc, walk_state,
+ ACPI_GET_OBJECT_TYPE(obj_desc)));
+
+ status = AE_AML_OPERAND_TYPE;
+ goto cleanup;
+ }
+
+ /* Truncate the predicate to 32-bits if necessary */
+
+ acpi_ex_truncate_for32bit_table(local_obj_desc);
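+ /* (32-bit integer mode is used when the DSDT revision is less than 2) */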
+
+ /*
+ * Save the result of the predicate evaluation on
+ * the control stack
+ */
+ if (local_obj_desc->integer.value) {
+ walk_state->control_state->common.value = TRUE;
+ } else {
+ /*
+ * Predicate is FALSE, we will just toss the
+ * rest of the package
+ */
+ walk_state->control_state->common.value = FALSE;
+ status = AE_CTRL_FALSE;
+ }
+
+ /* Predicate can be used for an implicit return value */
+
+ (void)acpi_ds_do_implicit_return(local_obj_desc, walk_state, TRUE);
+
+ cleanup:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Completed a predicate eval=%X Op=%p\n",
+ walk_state->control_state->common.value,
+ walk_state->op));
+
+ /* Break to debugger to display result */
+
+ ACPI_DEBUGGER_EXEC(acpi_db_display_result_object
+ (local_obj_desc, walk_state));
+
+ /*
+ * Delete the predicate result object (we know that
+ * we don't need it anymore)
+ */
+ if (local_obj_desc != obj_desc) {
+ acpi_ut_remove_reference(local_obj_desc);
+ }
+ acpi_ut_remove_reference(obj_desc);
+
+ walk_state->control_state->common.state = ACPI_CONTROL_NORMAL;
+ return_ACPI_STATUS(status);
+}
+
+/*****************************************************************************
+ *
+ * FUNCTION: acpi_ds_exec_begin_op
+ *
+ * PARAMETERS: walk_state - Current state of the parse tree walk
+ * out_op - Where to return op if a new one is created
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Descending callback used during the execution of control
+ * methods. This is where most operators and operands are
+ * dispatched to the interpreter.
+ *
+ ****************************************************************************/
+
+acpi_status
+acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object **out_op)
+{
+ union acpi_parse_object *op;
+ acpi_status status = AE_OK;
+ u32 opcode_class;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_exec_begin_op, walk_state);
+
+ op = walk_state->op;
+ if (!op) {
+ status = acpi_ds_load2_begin_op(walk_state, out_op);
+ if (ACPI_FAILURE(status)) {
+ goto error_exit;
+ }
+
+ op = *out_op;
+ walk_state->op = op;
+ walk_state->opcode = op->common.aml_opcode;
+ walk_state->op_info =
+ acpi_ps_get_opcode_info(op->common.aml_opcode);
+
+ if (acpi_ns_opens_scope(walk_state->op_info->object_type)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "(%s) Popping scope for Op %p\n",
+ acpi_ut_get_type_name(walk_state->
+ op_info->
+ object_type),
+ op));
+
+ status = acpi_ds_scope_stack_pop(walk_state);
+ if (ACPI_FAILURE(status)) {
+ goto error_exit;
+ }
+ }
+ }
+
+ if (op == walk_state->origin) {
+ if (out_op) {
+ *out_op = op;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * If the previous opcode was a conditional, this opcode
+ * must be the beginning of the associated predicate.
+ * Save this knowledge in the current scope descriptor
+ */
+ if ((walk_state->control_state) &&
+ (walk_state->control_state->common.state ==
+ ACPI_CONTROL_CONDITIONAL_EXECUTING)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Exec predicate Op=%p State=%p\n", op,
+ walk_state));
+
+ walk_state->control_state->common.state =
+ ACPI_CONTROL_PREDICATE_EXECUTING;
+
+ /* Save start of predicate */
+
+ walk_state->control_state->control.predicate_op = op;
+ }
+
+ opcode_class = walk_state->op_info->class;
+
+ /* We want to send namepaths to the load code */
+
+ if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) {
+ opcode_class = AML_CLASS_NAMED_OBJECT;
+ }
+
+ /*
+ * Handle the opcode based upon the opcode type
+ */
+ switch (opcode_class) {
+ case AML_CLASS_CONTROL:
+
+ status = acpi_ds_exec_begin_control_op(walk_state, op);
+ break;
+
+ case AML_CLASS_NAMED_OBJECT:
+
+ if (walk_state->walk_type & ACPI_WALK_METHOD) {
+ /*
+ * Found a named object declaration during method execution;
+ * we must enter this object into the namespace. The created
+ * object is temporary and will be deleted upon completion of
+ * the execution of this method.
+ */
+ status = acpi_ds_load2_begin_op(walk_state, NULL);
+ }
+
+ break;
+
+ case AML_CLASS_EXECUTE:
+ case AML_CLASS_CREATE:
+
+ break;
+
+ default:
+ break;
+ }
+
+ /* Nothing to do here during method execution */
+
+ return_ACPI_STATUS(status);
+
+ error_exit:
+ status = acpi_ds_method_error(status, walk_state);
+ return_ACPI_STATUS(status);
+}
+
+/*****************************************************************************
+ *
+ * FUNCTION: acpi_ds_exec_end_op
+ *
+ * PARAMETERS: walk_state - Current state of the parse tree walk
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Ascending callback used during the execution of control
+ * methods. The only thing we really need to do here is to
+ * notice the beginning of IF, ELSE, and WHILE blocks.
+ *
+ ****************************************************************************/
+
+acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
+{
+ union acpi_parse_object *op;
+ acpi_status status = AE_OK;
+ u32 op_type;
+ u32 op_class;
+ union acpi_parse_object *next_op;
+ union acpi_parse_object *first_arg;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_exec_end_op, walk_state);
+
+ op = walk_state->op;
+ op_type = walk_state->op_info->type;
+ op_class = walk_state->op_info->class;
+
+ if (op_class == AML_CLASS_UNKNOWN) {
+ ACPI_ERROR((AE_INFO, "Unknown opcode %X",
+ op->common.aml_opcode));
+ return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
+ }
+
+ first_arg = op->common.value.arg;
+
+ /* Init the walk state */
+
+ walk_state->num_operands = 0;
+ walk_state->operand_index = 0;
+ walk_state->return_desc = NULL;
+ walk_state->result_obj = NULL;
+
+ /* Call debugger for single step support (DEBUG build only) */
+
+ ACPI_DEBUGGER_EXEC(status =
+ acpi_db_single_step(walk_state, op, op_class));
+ ACPI_DEBUGGER_EXEC(if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ });
+
+ /* Decode the Opcode Class */
+
+ switch (op_class) {
+ case AML_CLASS_ARGUMENT: /* Constants, literals, etc. */
+
+ if (walk_state->opcode == AML_INT_NAMEPATH_OP) {
+ status = acpi_ds_evaluate_name_path(walk_state);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+ }
+ break;
+
+ case AML_CLASS_EXECUTE: /* Most operators with arguments */
+
+ /* Build resolved operand stack */
+
+ status = acpi_ds_create_operands(walk_state, first_arg);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ /*
+ * All opcodes require operand resolution, with the only exceptions
+ * being the object_type and size_of operators.
+ */
+ if (!(walk_state->op_info->flags & AML_NO_OPERAND_RESOLVE)) {
+
+ /* Resolve all operands */
+
+ status = acpi_ex_resolve_operands(walk_state->opcode,
+ &(walk_state->
+ operands
+ [walk_state->
+ num_operands - 1]),
+ walk_state);
+ }
+
+ if (ACPI_SUCCESS(status)) {
+ /*
+ * Dispatch the request to the appropriate interpreter handler
+ * routine. There is one routine per opcode "type" based upon the
+ * number of opcode arguments and return type.
+ */
+ status =
+ acpi_gbl_op_type_dispatch[op_type] (walk_state);
+ } else {
+ /*
+ * Treat constructs of the form "Store(LocalX,LocalX)" as noops when the
+ * Local is uninitialized.
+ */
+ if ((status == AE_AML_UNINITIALIZED_LOCAL) &&
+ (walk_state->opcode == AML_STORE_OP) &&
+ (walk_state->operands[0]->common.type ==
+ ACPI_TYPE_LOCAL_REFERENCE)
+ && (walk_state->operands[1]->common.type ==
+ ACPI_TYPE_LOCAL_REFERENCE)
+ && (walk_state->operands[0]->reference.class ==
+ walk_state->operands[1]->reference.class)
+ && (walk_state->operands[0]->reference.value ==
+ walk_state->operands[1]->reference.value)) {
+ status = AE_OK;
+ } else {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "While resolving operands for [%s]",
+ acpi_ps_get_opcode_name
+ (walk_state->opcode)));
+ }
+ }
+
+ /* Always delete the argument objects and clear the operand stack */
+
+ acpi_ds_clear_operands(walk_state);
+
+ /*
+ * If a result object was returned from above, push it on the
+ * current result stack
+ */
+ if (ACPI_SUCCESS(status) && walk_state->result_obj) {
+ status =
+ acpi_ds_result_push(walk_state->result_obj,
+ walk_state);
+ }
+ break;
+
+ default:
+
+ switch (op_type) {
+ case AML_TYPE_CONTROL: /* Type 1 opcode, IF/ELSE/WHILE/NOOP */
+
+ /* 1 Operand, 0 external_result, 0 internal_result */
+
+ status = acpi_ds_exec_end_control_op(walk_state, op);
+
+ break;
+
+ case AML_TYPE_METHOD_CALL:
+
+ /*
+ * If the method is referenced from within a package
+ * declaration, it is not a invocation of the method, just
+			 * declaration, it is not an invocation of the method, just
+ */
+ if ((op->asl.parent) &&
+ ((op->asl.parent->asl.aml_opcode == AML_PACKAGE_OP)
+ || (op->asl.parent->asl.aml_opcode ==
+ AML_VAR_PACKAGE_OP))) {
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "Method Reference in a Package, Op=%p\n",
+ op));
+
+ op->common.node =
+ (struct acpi_namespace_node *)op->asl.value.
+ arg->asl.node;
+ acpi_ut_add_reference(op->asl.value.arg->asl.
+ node->object);
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "Method invocation, Op=%p\n", op));
+
+ /*
+ * (AML_METHODCALL) Op->Asl.Value.Arg->Asl.Node contains
+ * the method Node pointer
+ */
+ /* next_op points to the op that holds the method name */
+
+ next_op = first_arg;
+
+ /* next_op points to first argument op */
+
+ next_op = next_op->common.next;
+
+ /*
+ * Get the method's arguments and put them on the operand stack
+ */
+ status = acpi_ds_create_operands(walk_state, next_op);
+ if (ACPI_FAILURE(status)) {
+ break;
+ }
+
+ /*
+ * Since the operands will be passed to another control method,
+ * we must resolve all local references here (Local variables,
+ * arguments to *this* method, etc.)
+ */
+ status = acpi_ds_resolve_operands(walk_state);
+ if (ACPI_FAILURE(status)) {
+
+ /* On error, clear all resolved operands */
+
+ acpi_ds_clear_operands(walk_state);
+ break;
+ }
+
+ /*
+ * Tell the walk loop to preempt this running method and
+ * execute the new method
+ */
+ status = AE_CTRL_TRANSFER;
+
+ /*
+ * Return now; we don't want to disturb anything,
+ * especially the operand count!
+ */
+ return_ACPI_STATUS(status);
+
+ case AML_TYPE_CREATE_FIELD:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Executing CreateField Buffer/Index Op=%p\n",
+ op));
+
+ status = acpi_ds_load2_end_op(walk_state);
+ if (ACPI_FAILURE(status)) {
+ break;
+ }
+
+ status =
+ acpi_ds_eval_buffer_field_operands(walk_state, op);
+ break;
+
+ case AML_TYPE_CREATE_OBJECT:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Executing CreateObject (Buffer/Package) Op=%p\n",
+ op));
+
+ switch (op->common.parent->common.aml_opcode) {
+ case AML_NAME_OP:
+
+ /*
+ * Put the Node on the object stack (Contains the ACPI Name
+ * of this object)
+ */
+ walk_state->operands[0] =
+ (void *)op->common.parent->common.node;
+ walk_state->num_operands = 1;
+
+ status = acpi_ds_create_node(walk_state,
+ op->common.parent->
+ common.node,
+ op->common.parent);
+ if (ACPI_FAILURE(status)) {
+ break;
+ }
+
+ /* Fall through */
+ /*lint -fallthrough */
+
+ case AML_INT_EVAL_SUBTREE_OP:
+
+ status =
+ acpi_ds_eval_data_object_operands
+ (walk_state, op,
+ acpi_ns_get_attached_object(op->common.
+ parent->common.
+ node));
+ break;
+
+ default:
+
+ status =
+ acpi_ds_eval_data_object_operands
+ (walk_state, op, NULL);
+ break;
+ }
+
+ /*
+ * If a result object was returned from above, push it on the
+ * current result stack
+ */
+ if (walk_state->result_obj) {
+ status =
+ acpi_ds_result_push(walk_state->result_obj,
+ walk_state);
+ }
+ break;
+
+ case AML_TYPE_NAMED_FIELD:
+ case AML_TYPE_NAMED_COMPLEX:
+ case AML_TYPE_NAMED_SIMPLE:
+ case AML_TYPE_NAMED_NO_OBJ:
+
+ status = acpi_ds_load2_end_op(walk_state);
+ if (ACPI_FAILURE(status)) {
+ break;
+ }
+
+ if (op->common.aml_opcode == AML_REGION_OP) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Executing OpRegion Address/Length Op=%p\n",
+ op));
+
+ status =
+ acpi_ds_eval_region_operands(walk_state,
+ op);
+ if (ACPI_FAILURE(status)) {
+ break;
+ }
+ } else if (op->common.aml_opcode == AML_DATA_REGION_OP) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Executing DataTableRegion Strings Op=%p\n",
+ op));
+
+ status =
+ acpi_ds_eval_table_region_operands
+ (walk_state, op);
+ if (ACPI_FAILURE(status)) {
+ break;
+ }
+ } else if (op->common.aml_opcode == AML_BANK_FIELD_OP) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Executing BankField Op=%p\n",
+ op));
+
+ status =
+ acpi_ds_eval_bank_field_operands(walk_state,
+ op);
+ if (ACPI_FAILURE(status)) {
+ break;
+ }
+ }
+ break;
+
+ case AML_TYPE_UNDEFINED:
+
+ ACPI_ERROR((AE_INFO,
+ "Undefined opcode type Op=%p", op));
+ return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
+
+ case AML_TYPE_BOGUS:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "Internal opcode=%X type Op=%p\n",
+ walk_state->opcode, op));
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO,
+ "Unimplemented opcode, class=%X type=%X Opcode=%X Op=%p",
+ op_class, op_type, op->common.aml_opcode,
+ op));
+
+ status = AE_NOT_IMPLEMENTED;
+ break;
+ }
+ }
+
+ /*
+ * ACPI 2.0 support for 64-bit integers: Truncate numeric
+ * result value if we are executing from a 32-bit ACPI table
+ */
+ acpi_ex_truncate_for32bit_table(walk_state->result_obj);
+
+ /*
+ * Check if we just completed the evaluation of a
+ * conditional predicate
+ */
+ if ((ACPI_SUCCESS(status)) &&
+ (walk_state->control_state) &&
+ (walk_state->control_state->common.state ==
+ ACPI_CONTROL_PREDICATE_EXECUTING) &&
+ (walk_state->control_state->control.predicate_op == op)) {
+ status =
+ acpi_ds_get_predicate_value(walk_state,
+ walk_state->result_obj);
+ walk_state->result_obj = NULL;
+ }
+
+ cleanup:
+
+ if (walk_state->result_obj) {
+
+ /* Break to debugger to display result */
+
+ ACPI_DEBUGGER_EXEC(acpi_db_display_result_object
+ (walk_state->result_obj, walk_state));
+
+ /*
+ * Delete the result op if and only if:
+ * Parent will not use the result -- such as any
+ * non-nested type2 op in a method (parent will be method)
+ */
+ acpi_ds_delete_result_if_not_used(op, walk_state->result_obj,
+ walk_state);
+ }
+#ifdef _UNDER_DEVELOPMENT
+
+ if (walk_state->parser_state.aml == walk_state->parser_state.aml_end) {
+ acpi_db_method_end(walk_state);
+ }
+#endif
+
+ /* Invoke exception handler on error */
+
+ if (ACPI_FAILURE(status)) {
+ status = acpi_ds_method_error(status, walk_state);
+ }
+
+ /* Always clear the object stack */
+
+ walk_state->num_operands = 0;
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/dispatcher/dswload.c b/drivers/acpi/dispatcher/dswload.c
new file mode 100644
index 0000000..dff7a3e
--- /dev/null
+++ b/drivers/acpi/dispatcher/dswload.c
@@ -0,0 +1,1202 @@
+/******************************************************************************
+ *
+ * Module Name: dswload - Dispatcher namespace load callbacks
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acparser.h>
+#include <acpi/amlcode.h>
+#include <acpi/acdispat.h>
+#include <acpi/acinterp.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acevents.h>
+
+#ifdef ACPI_ASL_COMPILER
+#include <acpi/acdisasm.h>
+#endif
+
+#define _COMPONENT ACPI_DISPATCHER
+ACPI_MODULE_NAME("dswload")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_init_callbacks
+ *
+ * PARAMETERS: walk_state - Current state of the parse tree walk
+ * pass_number - 1, 2, or 3
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Init walk state callbacks
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number)
+{
+
+ switch (pass_number) {
+ case 1:
+ walk_state->parse_flags = ACPI_PARSE_LOAD_PASS1 |
+ ACPI_PARSE_DELETE_TREE;
+ walk_state->descending_callback = acpi_ds_load1_begin_op;
+ walk_state->ascending_callback = acpi_ds_load1_end_op;
+ break;
+
+ case 2:
+ walk_state->parse_flags = ACPI_PARSE_LOAD_PASS1 |
+ ACPI_PARSE_DELETE_TREE;
+ walk_state->descending_callback = acpi_ds_load2_begin_op;
+ walk_state->ascending_callback = acpi_ds_load2_end_op;
+ break;
+
+ case 3:
+#ifndef ACPI_NO_METHOD_EXECUTION
+ walk_state->parse_flags |= ACPI_PARSE_EXECUTE |
+ ACPI_PARSE_DELETE_TREE;
+ walk_state->descending_callback = acpi_ds_exec_begin_op;
+ walk_state->ascending_callback = acpi_ds_exec_end_op;
+#endif
+ break;
+
+ default:
+ return (AE_BAD_PARAMETER);
+ }
+
+ return (AE_OK);
+}
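
As a hedged illustration (not part of this patch), the usual pairing is acpi_ds_create_walk_state() followed by acpi_ds_init_callbacks() with the desired pass number; the owner_id parameter, the NULL arguments, the example_* function name, and the abbreviated error handling below are assumptions made for the sketch only.

/* Illustrative sketch: wire a fresh walk state for load pass 2 */
static acpi_status example_setup_load_pass2(acpi_owner_id owner_id)
{
	struct acpi_walk_state *walk_state;
	acpi_status status;

	walk_state = acpi_ds_create_walk_state(owner_id, NULL, NULL, NULL);
	if (!walk_state) {
		return (AE_NO_MEMORY);
	}

	/* Pass 2 selects acpi_ds_load2_begin_op / acpi_ds_load2_end_op */

	status = acpi_ds_init_callbacks(walk_state, 2);
	if (ACPI_FAILURE(status)) {
		acpi_ds_delete_walk_state(walk_state);
		return (status);
	}

	/* ...the caller would now run the parse loop with this walk state... */

	acpi_ds_delete_walk_state(walk_state);
	return (AE_OK);
}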
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_load1_begin_op
+ *
+ * PARAMETERS: walk_state - Current state of the parse tree walk
+ * out_op - Where to return op if a new one is created
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Descending callback used during the loading of ACPI tables.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
+ union acpi_parse_object ** out_op)
+{
+ union acpi_parse_object *op;
+ struct acpi_namespace_node *node;
+ acpi_status status;
+ acpi_object_type object_type;
+ char *path;
+ u32 flags;
+
+ ACPI_FUNCTION_TRACE(ds_load1_begin_op);
+
+ op = walk_state->op;
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
+ walk_state));
+
+ /* We are only interested in opcodes that have an associated name */
+
+ if (op) {
+ if (!(walk_state->op_info->flags & AML_NAMED)) {
+ *out_op = op;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Check if this object has already been installed in the namespace */
+
+ if (op->common.node) {
+ *out_op = op;
+ return_ACPI_STATUS(AE_OK);
+ }
+ }
+
+ path = acpi_ps_get_next_namestring(&walk_state->parser_state);
+
+ /* Map the raw opcode into an internal object type */
+
+ object_type = walk_state->op_info->object_type;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "State=%p Op=%p [%s]\n", walk_state, op,
+ acpi_ut_get_type_name(object_type)));
+
+ switch (walk_state->opcode) {
+ case AML_SCOPE_OP:
+
+ /*
+ * The target name of the Scope() operator must exist at this point so
+ * that we can actually open the scope to enter new names underneath it.
+ * Allow search-to-root for single namesegs.
+ */
+ status =
+ acpi_ns_lookup(walk_state->scope_info, path, object_type,
+ ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT,
+ walk_state, &(node));
+#ifdef ACPI_ASL_COMPILER
+ if (status == AE_NOT_FOUND) {
+ /*
+ * Table disassembly:
+ * Target of Scope() not found. Generate an External for it, and
+ * insert the name into the namespace.
+ */
+ acpi_dm_add_to_external_list(path, ACPI_TYPE_DEVICE, 0);
+ status =
+ acpi_ns_lookup(walk_state->scope_info, path,
+ object_type, ACPI_IMODE_LOAD_PASS1,
+ ACPI_NS_SEARCH_PARENT, walk_state,
+ &node);
+ }
+#endif
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR_NAMESPACE(path, status);
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Check to make sure that the target is
+ * one of the opcodes that actually opens a scope
+ */
+ switch (node->type) {
+ case ACPI_TYPE_ANY:
+ case ACPI_TYPE_LOCAL_SCOPE: /* Scope */
+ case ACPI_TYPE_DEVICE:
+ case ACPI_TYPE_POWER:
+ case ACPI_TYPE_PROCESSOR:
+ case ACPI_TYPE_THERMAL:
+
+ /* These are acceptable types */
+ break;
+
+ case ACPI_TYPE_INTEGER:
+ case ACPI_TYPE_STRING:
+ case ACPI_TYPE_BUFFER:
+
+ /*
+ * These types we will allow, but we will change the type. This
+ * enables some existing code of the form:
+ *
+ * Name (DEB, 0)
+ * Scope (DEB) { ... }
+ *
+ * Note: silently change the type here. On the second pass, we will report
+ * a warning
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)\n",
+ path,
+ acpi_ut_get_type_name(node->type)));
+
+ node->type = ACPI_TYPE_ANY;
+ walk_state->scope_info->common.value = ACPI_TYPE_ANY;
+ break;
+
+ default:
+
+ /* All other types are an error */
+
+ ACPI_ERROR((AE_INFO,
+ "Invalid type (%s) for target of Scope operator [%4.4s] (Cannot override)",
+ acpi_ut_get_type_name(node->type), path));
+
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+ break;
+
+ default:
+ /*
+ * For all other named opcodes, we will enter the name into
+ * the namespace.
+ *
+ * Setup the search flags.
+ * Since we are entering a name into the namespace, we do not want to
+ * enable the search-to-root upsearch.
+ *
+ * There are only two conditions where it is acceptable that the name
+ * already exists:
+ * 1) the Scope() operator can reopen a scoping object that was
+ * previously defined (Scope, Method, Device, etc.)
+ * 2) Whenever we are parsing a deferred opcode (op_region, Buffer,
+ * buffer_field, or Package), the name of the object is already
+ * in the namespace.
+ */
+ if (walk_state->deferred_node) {
+
+ /* This name is already in the namespace, get the node */
+
+ node = walk_state->deferred_node;
+ status = AE_OK;
+ break;
+ }
+
+ /*
+ * If we are executing a method, do not create any namespace objects
+ * during the load phase, only during execution.
+ */
+ if (walk_state->method_node) {
+ node = NULL;
+ status = AE_OK;
+ break;
+ }
+
+ flags = ACPI_NS_NO_UPSEARCH;
+ if ((walk_state->opcode != AML_SCOPE_OP) &&
+ (!(walk_state->parse_flags & ACPI_PARSE_DEFERRED_OP))) {
+ flags |= ACPI_NS_ERROR_IF_FOUND;
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "[%s] Cannot already exist\n",
+ acpi_ut_get_type_name(object_type)));
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "[%s] Both Find or Create allowed\n",
+ acpi_ut_get_type_name(object_type)));
+ }
+
+ /*
+ * Enter the named type into the internal namespace. We enter the name
+ * as we go downward in the parse tree. Any necessary subobjects that
+ * involve arguments to the opcode must be created as we go back up the
+ * parse tree later.
+ */
+ status =
+ acpi_ns_lookup(walk_state->scope_info, path, object_type,
+ ACPI_IMODE_LOAD_PASS1, flags, walk_state,
+ &node);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_ALREADY_EXISTS) {
+
+ /* The name already exists in this scope */
+
+ if (node->flags & ANOBJ_IS_EXTERNAL) {
+ /*
+ * Allow one create on an object or segment that was
+ * previously declared External
+ */
+ node->flags &= ~ANOBJ_IS_EXTERNAL;
+ node->type = (u8) object_type;
+
+ /* Just retyped a node, probably will need to open a scope */
+
+ if (acpi_ns_opens_scope(object_type)) {
+ status =
+ acpi_ds_scope_stack_push
+ (node, object_type,
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS
+ (status);
+ }
+ }
+
+ status = AE_OK;
+ }
+ }
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR_NAMESPACE(path, status);
+ return_ACPI_STATUS(status);
+ }
+ }
+ break;
+ }
+
+ /* Common exit */
+
+ if (!op) {
+
+ /* Create a new op */
+
+ op = acpi_ps_alloc_op(walk_state->opcode);
+ if (!op) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+ }
+
+ /* Initialize the op */
+
+#if (defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY))
+ op->named.path = ACPI_CAST_PTR(u8, path);
+#endif
+
+ if (node) {
+ /*
+ * Put the Node in the "op" object that the parser uses, so we
+ * can get it again quickly when this scope is closed
+ */
+ op->common.node = node;
+ op->named.name = node->name.integer;
+ }
+
+ acpi_ps_append_arg(acpi_ps_get_parent_scope(&walk_state->parser_state),
+ op);
+ *out_op = op;
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_load1_end_op
+ *
+ * PARAMETERS: walk_state - Current state of the parse tree walk
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Ascending callback used during the loading of the namespace,
+ * both control methods and everything else.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
+{
+ union acpi_parse_object *op;
+ acpi_object_type object_type;
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(ds_load1_end_op);
+
+ op = walk_state->op;
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
+ walk_state));
+
+ /* We are only interested in opcodes that have an associated name */
+
+ if (!(walk_state->op_info->flags & (AML_NAMED | AML_FIELD))) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Get the object type to determine if we should pop the scope */
+
+ object_type = walk_state->op_info->object_type;
+
+#ifndef ACPI_NO_METHOD_EXECUTION
+ if (walk_state->op_info->flags & AML_FIELD) {
+ /*
+ * If we are executing a method, do not create any namespace objects
+ * during the load phase, only during execution.
+ */
+ if (!walk_state->method_node) {
+ if (walk_state->opcode == AML_FIELD_OP ||
+ walk_state->opcode == AML_BANK_FIELD_OP ||
+ walk_state->opcode == AML_INDEX_FIELD_OP) {
+ status =
+ acpi_ds_init_field_objects(op, walk_state);
+ }
+ }
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * If we are executing a method, do not create any namespace objects
+ * during the load phase, only during execution.
+ */
+ if (!walk_state->method_node) {
+ if (op->common.aml_opcode == AML_REGION_OP) {
+ status =
+ acpi_ex_create_region(op->named.data,
+ op->named.length,
+ (acpi_adr_space_type) ((op->
+ common.
+ value.
+ arg)->
+ common.
+ value.
+ integer),
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ } else if (op->common.aml_opcode == AML_DATA_REGION_OP) {
+ status =
+ acpi_ex_create_region(op->named.data,
+ op->named.length,
+ REGION_DATA_TABLE,
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+ }
+#endif
+
+ if (op->common.aml_opcode == AML_NAME_OP) {
+
+ /* For Name opcode, get the object type from the argument */
+
+ if (op->common.value.arg) {
+ object_type = (acpi_ps_get_opcode_info((op->common.
+ value.arg)->
+ common.
+ aml_opcode))->
+ object_type;
+
+ /* Set node type if we have a namespace node */
+
+ if (op->common.node) {
+ op->common.node->type = (u8) object_type;
+ }
+ }
+ }
+
+ /*
+ * If we are executing a method, do not create any namespace objects
+ * during the load phase, only during execution.
+ */
+ if (!walk_state->method_node) {
+ if (op->common.aml_opcode == AML_METHOD_OP) {
+ /*
+ * method_op pkg_length name_string method_flags term_list
+ *
+ * Note: We must create the method node/object pair as soon as we
+ * see the method declaration. This allows later pass1 parsing
+ * of invocations of the method (need to know the number of
+ * arguments.)
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "LOADING-Method: State=%p Op=%p NamedObj=%p\n",
+ walk_state, op, op->named.node));
+
+ if (!acpi_ns_get_attached_object(op->named.node)) {
+ walk_state->operands[0] =
+ ACPI_CAST_PTR(void, op->named.node);
+ walk_state->num_operands = 1;
+
+ status =
+ acpi_ds_create_operands(walk_state,
+ op->common.value.
+ arg);
+ if (ACPI_SUCCESS(status)) {
+ status =
+ acpi_ex_create_method(op->named.
+ data,
+ op->named.
+ length,
+ walk_state);
+ }
+
+ walk_state->operands[0] = NULL;
+ walk_state->num_operands = 0;
+
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+ }
+ }
+
+ /* Pop the scope stack (only if loading a table) */
+
+ if (!walk_state->method_node && acpi_ns_opens_scope(object_type)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "(%s): Popping scope for Op %p\n",
+ acpi_ut_get_type_name(object_type), op));
+
+ status = acpi_ds_scope_stack_pop(walk_state);
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_load2_begin_op
+ *
+ * PARAMETERS: walk_state - Current state of the parse tree walk
+ *              out_op - Where to return op if a new one is created
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Descending callback used during the loading of ACPI tables.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object **out_op)
+{
+ union acpi_parse_object *op;
+ struct acpi_namespace_node *node;
+ acpi_status status;
+ acpi_object_type object_type;
+ char *buffer_ptr;
+ u32 flags;
+
+ ACPI_FUNCTION_TRACE(ds_load2_begin_op);
+
+ op = walk_state->op;
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
+ walk_state));
+
+ if (op) {
+ if ((walk_state->control_state) &&
+ (walk_state->control_state->common.state ==
+ ACPI_CONTROL_CONDITIONAL_EXECUTING)) {
+
+ /* We are executing a while loop outside of a method */
+
+ status = acpi_ds_exec_begin_op(walk_state, out_op);
+ return_ACPI_STATUS(status);
+ }
+
+ /* We only care about Namespace opcodes here */
+
+ if ((!(walk_state->op_info->flags & AML_NSOPCODE) &&
+ (walk_state->opcode != AML_INT_NAMEPATH_OP)) ||
+ (!(walk_state->op_info->flags & AML_NAMED))) {
+#ifdef ACPI_ENABLE_MODULE_LEVEL_CODE
+ if ((walk_state->op_info->class == AML_CLASS_EXECUTE) ||
+ (walk_state->op_info->class == AML_CLASS_CONTROL)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "Begin/EXEC: %s (fl %8.8X)\n",
+ walk_state->op_info->name,
+ walk_state->op_info->flags));
+
+ /* Executing a type1 or type2 opcode outside of a method */
+
+ status =
+ acpi_ds_exec_begin_op(walk_state, out_op);
+ return_ACPI_STATUS(status);
+ }
+#endif
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Get the name we are going to enter or lookup in the namespace */
+
+ if (walk_state->opcode == AML_INT_NAMEPATH_OP) {
+
+ /* For Namepath op, get the path string */
+
+ buffer_ptr = op->common.value.string;
+ if (!buffer_ptr) {
+
+ /* No name, just exit */
+
+ return_ACPI_STATUS(AE_OK);
+ }
+ } else {
+ /* Get name from the op */
+
+ buffer_ptr = ACPI_CAST_PTR(char, &op->named.name);
+ }
+ } else {
+ /* Get the namestring from the raw AML */
+
+ buffer_ptr =
+ acpi_ps_get_next_namestring(&walk_state->parser_state);
+ }
+
+ /* Map the opcode into an internal object type */
+
+ object_type = walk_state->op_info->object_type;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "State=%p Op=%p Type=%X\n", walk_state, op,
+ object_type));
+
+ switch (walk_state->opcode) {
+ case AML_FIELD_OP:
+ case AML_BANK_FIELD_OP:
+ case AML_INDEX_FIELD_OP:
+
+ node = NULL;
+ status = AE_OK;
+ break;
+
+ case AML_INT_NAMEPATH_OP:
+ /*
+ * The name_path is an object reference to an existing object.
+ * Don't enter the name into the namespace, but look it up
+ * for use later.
+ */
+ status =
+ acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
+ object_type, ACPI_IMODE_EXECUTE,
+ ACPI_NS_SEARCH_PARENT, walk_state, &(node));
+ break;
+
+ case AML_SCOPE_OP:
+ /*
+ * The Path is an object reference to an existing object.
+ * Don't enter the name into the namespace, but look it up
+ * for use later.
+ */
+ status =
+ acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
+ object_type, ACPI_IMODE_EXECUTE,
+ ACPI_NS_SEARCH_PARENT, walk_state, &(node));
+ if (ACPI_FAILURE(status)) {
+#ifdef ACPI_ASL_COMPILER
+ if (status == AE_NOT_FOUND) {
+ status = AE_OK;
+ } else {
+ ACPI_ERROR_NAMESPACE(buffer_ptr, status);
+ }
+#else
+ ACPI_ERROR_NAMESPACE(buffer_ptr, status);
+#endif
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * We must check to make sure that the target is
+ * one of the opcodes that actually opens a scope
+ */
+ switch (node->type) {
+ case ACPI_TYPE_ANY:
+ case ACPI_TYPE_LOCAL_SCOPE: /* Scope */
+ case ACPI_TYPE_DEVICE:
+ case ACPI_TYPE_POWER:
+ case ACPI_TYPE_PROCESSOR:
+ case ACPI_TYPE_THERMAL:
+
+ /* These are acceptable types */
+ break;
+
+ case ACPI_TYPE_INTEGER:
+ case ACPI_TYPE_STRING:
+ case ACPI_TYPE_BUFFER:
+
+ /*
+ * These types we will allow, but we will change the type. This
+ * enables some existing code of the form:
+ *
+ * Name (DEB, 0)
+ * Scope (DEB) { ... }
+ */
+ ACPI_WARNING((AE_INFO,
+ "Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)",
+ buffer_ptr,
+ acpi_ut_get_type_name(node->type)));
+
+ node->type = ACPI_TYPE_ANY;
+ walk_state->scope_info->common.value = ACPI_TYPE_ANY;
+ break;
+
+ default:
+
+ /* All other types are an error */
+
+ ACPI_ERROR((AE_INFO,
+ "Invalid type (%s) for target of Scope operator [%4.4s]",
+ acpi_ut_get_type_name(node->type),
+ buffer_ptr));
+
+ return (AE_AML_OPERAND_TYPE);
+ }
+ break;
+
+ default:
+
+ /* All other opcodes */
+
+ if (op && op->common.node) {
+
+ /* This op/node was previously entered into the namespace */
+
+ node = op->common.node;
+
+ if (acpi_ns_opens_scope(object_type)) {
+ status =
+ acpi_ds_scope_stack_push(node, object_type,
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Enter the named type into the internal namespace. We enter the name
+ * as we go downward in the parse tree. Any necessary subobjects that
+ * involve arguments to the opcode must be created as we go back up the
+ * parse tree later.
+ *
+ * Note: Name may already exist if we are executing a deferred opcode.
+ */
+ if (walk_state->deferred_node) {
+
+ /* This name is already in the namespace, get the node */
+
+ node = walk_state->deferred_node;
+ status = AE_OK;
+ break;
+ }
+
+ flags = ACPI_NS_NO_UPSEARCH;
+ if (walk_state->pass_number == ACPI_IMODE_EXECUTE) {
+
+ /* Execution mode, node cannot already exist, node is temporary */
+
+ flags |= (ACPI_NS_ERROR_IF_FOUND | ACPI_NS_TEMPORARY);
+ }
+
+ /* Add new entry or lookup existing entry */
+
+ status =
+ acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
+ object_type, ACPI_IMODE_LOAD_PASS2, flags,
+ walk_state, &node);
+
+ if (ACPI_SUCCESS(status) && (flags & ACPI_NS_TEMPORARY)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "***New Node [%4.4s] %p is temporary\n",
+ acpi_ut_get_node_name(node), node));
+ }
+ break;
+ }
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR_NAMESPACE(buffer_ptr, status);
+ return_ACPI_STATUS(status);
+ }
+
+ if (!op) {
+
+ /* Create a new op */
+
+ op = acpi_ps_alloc_op(walk_state->opcode);
+ if (!op) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Initialize the new op */
+
+ if (node) {
+ op->named.name = node->name.integer;
+ }
+ *out_op = op;
+ }
+
+ /*
+ * Put the Node in the "op" object that the parser uses, so we
+ * can get it again quickly when this scope is closed
+ */
+ op->common.node = node;
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_load2_end_op
+ *
+ * PARAMETERS: walk_state - Current state of the parse tree walk
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Ascending callback used during the loading of the namespace,
+ * both control methods and everything else.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
+{
+ union acpi_parse_object *op;
+ acpi_status status = AE_OK;
+ acpi_object_type object_type;
+ struct acpi_namespace_node *node;
+ union acpi_parse_object *arg;
+ struct acpi_namespace_node *new_node;
+#ifndef ACPI_NO_METHOD_EXECUTION
+ u32 i;
+ u8 region_space;
+#endif
+
+ ACPI_FUNCTION_TRACE(ds_load2_end_op);
+
+ op = walk_state->op;
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Opcode [%s] Op %p State %p\n",
+ walk_state->op_info->name, op, walk_state));
+
+ /* Check if opcode had an associated namespace object */
+
+ if (!(walk_state->op_info->flags & AML_NSOBJECT)) {
+#ifndef ACPI_NO_METHOD_EXECUTION
+#ifdef ACPI_ENABLE_MODULE_LEVEL_CODE
+ /* No namespace object. Executable opcode? */
+
+ if ((walk_state->op_info->class == AML_CLASS_EXECUTE) ||
+ (walk_state->op_info->class == AML_CLASS_CONTROL)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "End/EXEC: %s (fl %8.8X)\n",
+ walk_state->op_info->name,
+ walk_state->op_info->flags));
+
+ /* Executing a type1 or type2 opcode outside of a method */
+
+ status = acpi_ds_exec_end_op(walk_state);
+ return_ACPI_STATUS(status);
+ }
+#endif
+#endif
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ if (op->common.aml_opcode == AML_SCOPE_OP) {
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "Ending scope Op=%p State=%p\n", op,
+ walk_state));
+ }
+
+ object_type = walk_state->op_info->object_type;
+
+ /*
+ * Get the Node/name from the earlier lookup
+ * (It was saved in the *op structure)
+ */
+ node = op->common.node;
+
+ /*
+ * Put the Node on the object stack (Contains the ACPI Name of
+ * this object)
+ */
+ walk_state->operands[0] = (void *)node;
+ walk_state->num_operands = 1;
+
+ /* Pop the scope stack */
+
+ if (acpi_ns_opens_scope(object_type) &&
+ (op->common.aml_opcode != AML_INT_METHODCALL_OP)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "(%s) Popping scope for Op %p\n",
+ acpi_ut_get_type_name(object_type), op));
+
+ status = acpi_ds_scope_stack_pop(walk_state);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+ }
+
+ /*
+ * Named operations are as follows:
+ *
+ * AML_ALIAS
+ * AML_BANKFIELD
+ * AML_CREATEBITFIELD
+ * AML_CREATEBYTEFIELD
+ * AML_CREATEDWORDFIELD
+ * AML_CREATEFIELD
+ * AML_CREATEQWORDFIELD
+ * AML_CREATEWORDFIELD
+ * AML_DATA_REGION
+ * AML_DEVICE
+ * AML_EVENT
+ * AML_FIELD
+ * AML_INDEXFIELD
+ * AML_METHOD
+ * AML_METHODCALL
+ * AML_MUTEX
+ * AML_NAME
+ * AML_NAMEDFIELD
+ * AML_OPREGION
+ * AML_POWERRES
+ * AML_PROCESSOR
+ * AML_SCOPE
+ * AML_THERMALZONE
+ */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "Create-Load [%s] State=%p Op=%p NamedObj=%p\n",
+ acpi_ps_get_opcode_name(op->common.aml_opcode),
+ walk_state, op, node));
+
+ /* Decode the opcode */
+
+ arg = op->common.value.arg;
+
+ switch (walk_state->op_info->type) {
+#ifndef ACPI_NO_METHOD_EXECUTION
+
+ case AML_TYPE_CREATE_FIELD:
+ /*
+ * Create the field object, but the field buffer and index must
+ * be evaluated later during the execution phase
+ */
+ status = acpi_ds_create_buffer_field(op, walk_state);
+ break;
+
+ case AML_TYPE_NAMED_FIELD:
+ /*
+ * If we are executing a method, initialize the field
+ */
+ if (walk_state->method_node) {
+ status = acpi_ds_init_field_objects(op, walk_state);
+ }
+
+ switch (op->common.aml_opcode) {
+ case AML_INDEX_FIELD_OP:
+
+ status =
+ acpi_ds_create_index_field(op,
+ (acpi_handle) arg->
+ common.node, walk_state);
+ break;
+
+ case AML_BANK_FIELD_OP:
+
+ status =
+ acpi_ds_create_bank_field(op, arg->common.node,
+ walk_state);
+ break;
+
+ case AML_FIELD_OP:
+
+ status =
+ acpi_ds_create_field(op, arg->common.node,
+ walk_state);
+ break;
+
+ default:
+ /* All NAMED_FIELD opcodes must be handled above */
+ break;
+ }
+ break;
+
+ case AML_TYPE_NAMED_SIMPLE:
+
+ status = acpi_ds_create_operands(walk_state, arg);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ switch (op->common.aml_opcode) {
+ case AML_PROCESSOR_OP:
+
+ status = acpi_ex_create_processor(walk_state);
+ break;
+
+ case AML_POWER_RES_OP:
+
+ status = acpi_ex_create_power_resource(walk_state);
+ break;
+
+ case AML_MUTEX_OP:
+
+ status = acpi_ex_create_mutex(walk_state);
+ break;
+
+ case AML_EVENT_OP:
+
+ status = acpi_ex_create_event(walk_state);
+ break;
+
+ case AML_ALIAS_OP:
+
+ status = acpi_ex_create_alias(walk_state);
+ break;
+
+ default:
+ /* Unknown opcode */
+
+ status = AE_OK;
+ goto cleanup;
+ }
+
+ /* Delete operands */
+
+ for (i = 1; i < walk_state->num_operands; i++) {
+ acpi_ut_remove_reference(walk_state->operands[i]);
+ walk_state->operands[i] = NULL;
+ }
+
+ break;
+#endif /* ACPI_NO_METHOD_EXECUTION */
+
+ case AML_TYPE_NAMED_COMPLEX:
+
+ switch (op->common.aml_opcode) {
+#ifndef ACPI_NO_METHOD_EXECUTION
+ case AML_REGION_OP:
+ case AML_DATA_REGION_OP:
+
+ if (op->common.aml_opcode == AML_REGION_OP) {
+ region_space = (acpi_adr_space_type)
+ ((op->common.value.arg)->common.value.
+ integer);
+ } else {
+ region_space = REGION_DATA_TABLE;
+ }
+
+ /*
+ * If we are executing a method, initialize the region
+ */
+ if (walk_state->method_node) {
+ status =
+ acpi_ex_create_region(op->named.data,
+ op->named.length,
+ region_space,
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+
+ /*
+ * The op_region is not fully parsed at this time. Only valid
+ * argument is the space_id. (We must save the address of the
+ * AML of the address and length operands)
+ */
+
+ /*
+ * If we have a valid region, initialize it
+ * Namespace is NOT locked at this point.
+ */
+ status =
+ acpi_ev_initialize_region
+ (acpi_ns_get_attached_object(node), FALSE);
+ if (ACPI_FAILURE(status)) {
+ /*
+ * If AE_NOT_EXIST is returned, it is not fatal
+ * because many regions get created before a handler
+ * is installed for said region.
+ */
+ if (AE_NOT_EXIST == status) {
+ status = AE_OK;
+ }
+ }
+ break;
+
+ case AML_NAME_OP:
+
+ status = acpi_ds_create_node(walk_state, node, op);
+ break;
+
+ case AML_METHOD_OP:
+ /*
+ * method_op pkg_length name_string method_flags term_list
+ *
+ * Note: We must create the method node/object pair as soon as we
+ * see the method declaration. This allows later pass1 parsing
+ * of invocations of the method (need to know the number of
+ * arguments.)
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "LOADING-Method: State=%p Op=%p NamedObj=%p\n",
+ walk_state, op, op->named.node));
+
+ if (!acpi_ns_get_attached_object(op->named.node)) {
+ walk_state->operands[0] =
+ ACPI_CAST_PTR(void, op->named.node);
+ walk_state->num_operands = 1;
+
+ status =
+ acpi_ds_create_operands(walk_state,
+ op->common.value.
+ arg);
+ if (ACPI_SUCCESS(status)) {
+ status =
+ acpi_ex_create_method(op->named.
+ data,
+ op->named.
+ length,
+ walk_state);
+ }
+ walk_state->operands[0] = NULL;
+ walk_state->num_operands = 0;
+
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+ break;
+
+#endif /* ACPI_NO_METHOD_EXECUTION */
+
+ default:
+ /* All NAMED_COMPLEX opcodes must be handled above */
+ break;
+ }
+ break;
+
+ case AML_CLASS_INTERNAL:
+
+ /* case AML_INT_NAMEPATH_OP: */
+ break;
+
+ case AML_CLASS_METHOD_CALL:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "RESOLVING-MethodCall: State=%p Op=%p NamedObj=%p\n",
+ walk_state, op, node));
+
+ /*
+ * Lookup the method name and save the Node
+ */
+ status =
+ acpi_ns_lookup(walk_state->scope_info,
+ arg->common.value.string, ACPI_TYPE_ANY,
+ ACPI_IMODE_LOAD_PASS2,
+ ACPI_NS_SEARCH_PARENT |
+ ACPI_NS_DONT_OPEN_SCOPE, walk_state,
+ &(new_node));
+ if (ACPI_SUCCESS(status)) {
+ /*
+ * Make sure that what we found is indeed a method
+ * We didn't search for a method on purpose, to see if the name
+ * would resolve
+ */
+ if (new_node->type != ACPI_TYPE_METHOD) {
+ status = AE_AML_OPERAND_TYPE;
+ }
+
+ /* We could put the returned object (Node) on the object stack for
+ * later, but for now, we will put it in the "op" object that the
+ * parser uses, so we can get it again at the end of this scope
+ */
+ op->common.node = new_node;
+ } else {
+ ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ cleanup:
+
+ /* Remove the Node pushed at the very beginning */
+
+ walk_state->operands[0] = NULL;
+ walk_state->num_operands = 0;
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/dispatcher/dswscope.c b/drivers/acpi/dispatcher/dswscope.c
new file mode 100644
index 0000000..9e60732
--- /dev/null
+++ b/drivers/acpi/dispatcher/dswscope.c
@@ -0,0 +1,213 @@
+/******************************************************************************
+ *
+ * Module Name: dswscope - Scope stack manipulation
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acdispat.h>
+
+#define _COMPONENT ACPI_DISPATCHER
+ACPI_MODULE_NAME("dswscope")
+
+/****************************************************************************
+ *
+ * FUNCTION: acpi_ds_scope_stack_clear
+ *
+ * PARAMETERS: walk_state - Current state
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Pop (and free) everything on the scope stack except the
+ * root scope object (which remains at the stack top.)
+ *
+ ***************************************************************************/
+void acpi_ds_scope_stack_clear(struct acpi_walk_state *walk_state)
+{
+ union acpi_generic_state *scope_info;
+
+ ACPI_FUNCTION_NAME(ds_scope_stack_clear);
+
+ while (walk_state->scope_info) {
+
+ /* Pop a scope off the stack */
+
+ scope_info = walk_state->scope_info;
+ walk_state->scope_info = scope_info->scope.next;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Popped object type (%s)\n",
+ acpi_ut_get_type_name(scope_info->common.
+ value)));
+ acpi_ut_delete_generic_state(scope_info);
+ }
+}
+
+/****************************************************************************
+ *
+ * FUNCTION: acpi_ds_scope_stack_push
+ *
+ * PARAMETERS: Node - Name to be made current
+ * Type - Type of frame being pushed
+ * walk_state - Current state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Push the current scope on the scope stack, and make the
+ * passed Node current.
+ *
+ ***************************************************************************/
+
+acpi_status
+acpi_ds_scope_stack_push(struct acpi_namespace_node *node,
+ acpi_object_type type,
+ struct acpi_walk_state *walk_state)
+{
+ union acpi_generic_state *scope_info;
+ union acpi_generic_state *old_scope_info;
+
+ ACPI_FUNCTION_TRACE(ds_scope_stack_push);
+
+ if (!node) {
+
+ /* Invalid scope */
+
+ ACPI_ERROR((AE_INFO, "Null scope parameter"));
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Make sure object type is valid */
+
+ if (!acpi_ut_valid_object_type(type)) {
+ ACPI_WARNING((AE_INFO, "Invalid object type: 0x%X", type));
+ }
+
+ /* Allocate a new scope object */
+
+ scope_info = acpi_ut_create_generic_state();
+ if (!scope_info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Init new scope object */
+
+ scope_info->common.descriptor_type = ACPI_DESC_TYPE_STATE_WSCOPE;
+ scope_info->scope.node = node;
+ scope_info->common.value = (u16) type;
+
+ walk_state->scope_depth++;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[%.2d] Pushed scope ",
+ (u32) walk_state->scope_depth));
+
+ old_scope_info = walk_state->scope_info;
+ if (old_scope_info) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC,
+ "[%4.4s] (%s)",
+ acpi_ut_get_node_name(old_scope_info->
+ scope.node),
+ acpi_ut_get_type_name(old_scope_info->
+ common.value)));
+ } else {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "[\\___] (%s)", "ROOT"));
+ }
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC,
+ ", New scope -> [%4.4s] (%s)\n",
+ acpi_ut_get_node_name(scope_info->scope.node),
+ acpi_ut_get_type_name(scope_info->common.value)));
+
+ /* Push new scope object onto stack */
+
+ acpi_ut_push_generic_state(&walk_state->scope_info, scope_info);
+ return_ACPI_STATUS(AE_OK);
+}
+
+/****************************************************************************
+ *
+ * FUNCTION: acpi_ds_scope_stack_pop
+ *
+ * PARAMETERS: walk_state - Current state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Pop the scope stack once.
+ *
+ ***************************************************************************/
+
+acpi_status acpi_ds_scope_stack_pop(struct acpi_walk_state *walk_state)
+{
+ union acpi_generic_state *scope_info;
+ union acpi_generic_state *new_scope_info;
+
+ ACPI_FUNCTION_TRACE(ds_scope_stack_pop);
+
+ /*
+ * Pop scope info object off the stack.
+ */
+ scope_info = acpi_ut_pop_generic_state(&walk_state->scope_info);
+ if (!scope_info) {
+ return_ACPI_STATUS(AE_STACK_UNDERFLOW);
+ }
+
+ walk_state->scope_depth--;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[%.2d] Popped scope [%4.4s] (%s), New scope -> ",
+ (u32) walk_state->scope_depth,
+ acpi_ut_get_node_name(scope_info->scope.node),
+ acpi_ut_get_type_name(scope_info->common.value)));
+
+ new_scope_info = walk_state->scope_info;
+ if (new_scope_info) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC,
+ "[%4.4s] (%s)\n",
+ acpi_ut_get_node_name(new_scope_info->
+ scope.node),
+ acpi_ut_get_type_name(new_scope_info->
+ common.value)));
+ } else {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "[\\___] (ROOT)\n"));
+ }
+
+ acpi_ut_delete_generic_state(scope_info);
+ return_ACPI_STATUS(AE_OK);
+}
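
The push and pop routines above are intended to be used in balanced pairs around any scope-opening object. A minimal sketch of that pairing (illustrative only; the example_* function name and the Device type are assumptions, not taken from the patch):

/* Illustrative sketch: enter a Device scope, work under it, then restore */
static acpi_status example_visit_device_scope(struct acpi_namespace_node *node,
					      struct acpi_walk_state *walk_state)
{
	acpi_status status;

	status = acpi_ds_scope_stack_push(node, ACPI_TYPE_DEVICE, walk_state);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* Names created here are resolved relative to "node" */

	return (acpi_ds_scope_stack_pop(walk_state));
}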
diff --git a/drivers/acpi/dispatcher/dswstate.c b/drivers/acpi/dispatcher/dswstate.c
new file mode 100644
index 0000000..b00d4af
--- /dev/null
+++ b/drivers/acpi/dispatcher/dswstate.c
@@ -0,0 +1,752 @@
+/******************************************************************************
+ *
+ * Module Name: dswstate - Dispatcher parse tree walk management routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acparser.h>
+#include <acpi/acdispat.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_DISPATCHER
+ACPI_MODULE_NAME("dswstate")
+
+ /* Local prototypes */
+static acpi_status acpi_ds_result_stack_push(struct acpi_walk_state *ws);
+static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *ws);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_result_pop
+ *
+ * PARAMETERS: Object - Where to return the popped object
+ * walk_state - Current Walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Pop an object off the top of this walk's result stack
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_result_pop(union acpi_operand_object **object,
+ struct acpi_walk_state *walk_state)
+{
+ u32 index;
+ union acpi_generic_state *state;
+ acpi_status status;
+
+ ACPI_FUNCTION_NAME(ds_result_pop);
+
+ state = walk_state->results;
+
+ /* Incorrect state of result stack */
+
+ if (state && !walk_state->result_count) {
+ ACPI_ERROR((AE_INFO, "No results on result stack"));
+ return (AE_AML_INTERNAL);
+ }
+
+ if (!state && walk_state->result_count) {
+ ACPI_ERROR((AE_INFO, "No result state for result stack"));
+ return (AE_AML_INTERNAL);
+ }
+
+ /* Empty result stack */
+
+ if (!state) {
+ ACPI_ERROR((AE_INFO, "Result stack is empty! State=%p",
+ walk_state));
+ return (AE_AML_NO_RETURN_VALUE);
+ }
+
+	/* Return the object from the top element and clear that result stack entry */
+
+ walk_state->result_count--;
+ index = walk_state->result_count % ACPI_RESULTS_FRAME_OBJ_NUM;
+
+ *object = state->results.obj_desc[index];
+ if (!*object) {
+ ACPI_ERROR((AE_INFO,
+ "No result objects on result stack, State=%p",
+ walk_state));
+ return (AE_AML_NO_RETURN_VALUE);
+ }
+
+ state->results.obj_desc[index] = NULL;
+ if (index == 0) {
+ status = acpi_ds_result_stack_pop(walk_state);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Obj=%p [%s] Index=%X State=%p Num=%X\n", *object,
+ acpi_ut_get_object_type_name(*object),
+ index, walk_state, walk_state->result_count));
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_result_push
+ *
+ * PARAMETERS:  Object              - Object to push
+ * walk_state - Current Walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Push an object onto the current result stack
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_result_push(union acpi_operand_object * object,
+ struct acpi_walk_state * walk_state)
+{
+ union acpi_generic_state *state;
+ acpi_status status;
+ u32 index;
+
+ ACPI_FUNCTION_NAME(ds_result_push);
+
+ if (walk_state->result_count > walk_state->result_size) {
+ ACPI_ERROR((AE_INFO, "Result stack is full"));
+ return (AE_AML_INTERNAL);
+ } else if (walk_state->result_count == walk_state->result_size) {
+
+ /* Extend the result stack */
+
+ status = acpi_ds_result_stack_push(walk_state);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Failed to extend the result stack"));
+ return (status);
+ }
+ }
+
+ if (!(walk_state->result_count < walk_state->result_size)) {
+ ACPI_ERROR((AE_INFO, "No free elements in result stack"));
+ return (AE_AML_INTERNAL);
+ }
+
+ state = walk_state->results;
+ if (!state) {
+ ACPI_ERROR((AE_INFO, "No result stack frame during push"));
+ return (AE_AML_INTERNAL);
+ }
+
+ if (!object) {
+ ACPI_ERROR((AE_INFO,
+ "Null Object! Obj=%p State=%p Num=%X",
+ object, walk_state, walk_state->result_count));
+ return (AE_BAD_PARAMETER);
+ }
+
+ /* Assign the address of object to the top free element of result stack */
+
+ index = walk_state->result_count % ACPI_RESULTS_FRAME_OBJ_NUM;
+ state->results.obj_desc[index] = object;
+ walk_state->result_count++;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Obj=%p [%s] State=%p Num=%X Cur=%X\n",
+ object,
+ acpi_ut_get_object_type_name((union
+ acpi_operand_object *)
+ object), walk_state,
+ walk_state->result_count,
+ walk_state->current_result));
+
+ return (AE_OK);
+}
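
To make the contract of the result stack concrete, here is a hedged round-trip sketch (the example_* helper is hypothetical and not part of the patch): an object pushed with acpi_ds_result_push() is returned unchanged by the next acpi_ds_result_pop().

/* Illustrative sketch: result stack round trip; "obj" and "ws" assumed valid */
static acpi_status example_result_round_trip(union acpi_operand_object *obj,
					     struct acpi_walk_state *ws)
{
	union acpi_operand_object *out = NULL;
	acpi_status status;

	status = acpi_ds_result_push(obj, ws);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	status = acpi_ds_result_pop(&out, ws);	/* "out" == "obj" on success */
	return (status);
}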
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_result_stack_push
+ *
+ * PARAMETERS: walk_state - Current Walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Push a new result frame onto the walk_state result stack
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_ds_result_stack_push(struct acpi_walk_state *walk_state)
+{
+ union acpi_generic_state *state;
+
+ ACPI_FUNCTION_NAME(ds_result_stack_push);
+
+ /* Check for stack overflow */
+
+ if (((u32) walk_state->result_size + ACPI_RESULTS_FRAME_OBJ_NUM) >
+ ACPI_RESULTS_OBJ_NUM_MAX) {
+ ACPI_ERROR((AE_INFO, "Result stack overflow: State=%p Num=%X",
+ walk_state, walk_state->result_size));
+ return (AE_STACK_OVERFLOW);
+ }
+
+ state = acpi_ut_create_generic_state();
+ if (!state) {
+ return (AE_NO_MEMORY);
+ }
+
+ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_RESULT;
+ acpi_ut_push_generic_state(&walk_state->results, state);
+
+ /* Increase the length of the result stack by the length of frame */
+
+ walk_state->result_size += ACPI_RESULTS_FRAME_OBJ_NUM;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Results=%p State=%p\n",
+ state, walk_state));
+
+ return (AE_OK);
+}
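
The frame arithmetic is easier to see with numbers. Assuming ACPI_RESULTS_FRAME_OBJ_NUM is 8 (an assumption for this note; the code only relies on the macro), results 0..7 fill the first frame, and pushing result number 8 first allocates a second frame and then stores into slot 8 % 8 == 0 of that new frame. A tiny hypothetical helper shows the slot computation:

/* Illustrative only: the in-frame slot used for the Nth result */
static u32 example_result_slot(u32 result_count)
{
	return (result_count % ACPI_RESULTS_FRAME_OBJ_NUM);	/* e.g. 8 -> 0 */
}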
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_result_stack_pop
+ *
+ * PARAMETERS: walk_state - Current Walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Pop a result frame off of the walk_state result stack
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *walk_state)
+{
+ union acpi_generic_state *state;
+
+ ACPI_FUNCTION_NAME(ds_result_stack_pop);
+
+ /* Check for stack underflow */
+
+ if (walk_state->results == NULL) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Result stack underflow - State=%p\n",
+ walk_state));
+ return (AE_AML_NO_OPERAND);
+ }
+
+ if (walk_state->result_size < ACPI_RESULTS_FRAME_OBJ_NUM) {
+ ACPI_ERROR((AE_INFO, "Insufficient result stack size"));
+ return (AE_AML_INTERNAL);
+ }
+
+ state = acpi_ut_pop_generic_state(&walk_state->results);
+ acpi_ut_delete_generic_state(state);
+
+ /* Decrease the length of result stack by the length of frame */
+
+ walk_state->result_size -= ACPI_RESULTS_FRAME_OBJ_NUM;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Result=%p RemainingResults=%X State=%p\n",
+ state, walk_state->result_count, walk_state));
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_obj_stack_push
+ *
+ * PARAMETERS: Object - Object to push
+ * walk_state - Current Walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Push an object onto this walk's object/operand stack
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)
+{
+ ACPI_FUNCTION_NAME(ds_obj_stack_push);
+
+ /* Check for stack overflow */
+
+ if (walk_state->num_operands >= ACPI_OBJ_NUM_OPERANDS) {
+ ACPI_ERROR((AE_INFO,
+ "Object stack overflow! Obj=%p State=%p #Ops=%X",
+ object, walk_state, walk_state->num_operands));
+ return (AE_STACK_OVERFLOW);
+ }
+
+ /* Put the object onto the stack */
+
+ walk_state->operands[walk_state->operand_index] = object;
+ walk_state->num_operands++;
+
+ /* For the usual order of filling the operand stack */
+
+ walk_state->operand_index++;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Obj=%p [%s] State=%p #Ops=%X\n",
+ object,
+ acpi_ut_get_object_type_name((union
+ acpi_operand_object *)
+ object), walk_state,
+ walk_state->num_operands));
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_obj_stack_pop
+ *
+ * PARAMETERS: pop_count - Number of objects/entries to pop
+ * walk_state - Current Walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Pop this walk's object stack. Objects on the stack are NOT
+ * deleted by this routine.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
+{
+ u32 i;
+
+ ACPI_FUNCTION_NAME(ds_obj_stack_pop);
+
+ for (i = 0; i < pop_count; i++) {
+
+ /* Check for stack underflow */
+
+ if (walk_state->num_operands == 0) {
+ ACPI_ERROR((AE_INFO,
+ "Object stack underflow! Count=%X State=%p #Ops=%X",
+ pop_count, walk_state,
+ walk_state->num_operands));
+ return (AE_STACK_UNDERFLOW);
+ }
+
+ /* Just set the stack entry to null */
+
+ walk_state->num_operands--;
+ walk_state->operands[walk_state->num_operands] = NULL;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Count=%X State=%p #Ops=%X\n",
+ pop_count, walk_state, walk_state->num_operands));
+
+ return (AE_OK);
+}
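
A short sketch of the usual operand-stack discipline (illustrative only; the example_* helper is hypothetical): push the operands for an opcode, use them, then pop them with acpi_ds_obj_stack_pop(), which deliberately does not delete the objects.

/* Illustrative sketch: push two operands, then pop both without deleting them */
static acpi_status example_operand_stack(union acpi_operand_object *op0,
					 union acpi_operand_object *op1,
					 struct acpi_walk_state *walk_state)
{
	acpi_status status;

	status = acpi_ds_obj_stack_push(op0, walk_state);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	status = acpi_ds_obj_stack_push(op1, walk_state);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* operands[0] == op0, operands[1] == op1, num_operands == 2 */

	return (acpi_ds_obj_stack_pop(2, walk_state));
}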
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_obj_stack_pop_and_delete
+ *
+ * PARAMETERS: pop_count - Number of objects/entries to pop
+ * walk_state - Current Walk state
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Pop this walk's object stack and delete each object that is
+ * popped off.
+ *
+ ******************************************************************************/
+
+void
+acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
+ struct acpi_walk_state *walk_state)
+{
+ s32 i;
+ union acpi_operand_object *obj_desc;
+
+ ACPI_FUNCTION_NAME(ds_obj_stack_pop_and_delete);
+
+ if (pop_count == 0) {
+ return;
+ }
+
+ for (i = (s32) pop_count - 1; i >= 0; i--) {
+ if (walk_state->num_operands == 0) {
+ return;
+ }
+
+ /* Pop the stack and delete an object if present in this stack entry */
+
+ walk_state->num_operands--;
+ obj_desc = walk_state->operands[i];
+ if (obj_desc) {
+ acpi_ut_remove_reference(walk_state->operands[i]);
+ walk_state->operands[i] = NULL;
+ }
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Count=%X State=%p #Ops=%X\n",
+ pop_count, walk_state, walk_state->num_operands));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_get_current_walk_state
+ *
+ * PARAMETERS: Thread - Get current active state for this Thread
+ *
+ * RETURN: Pointer to the current walk state
+ *
+ * DESCRIPTION: Get the walk state that is at the head of the list (the "current"
+ * walk state.)
+ *
+ ******************************************************************************/
+
+struct acpi_walk_state *acpi_ds_get_current_walk_state(struct acpi_thread_state
+ *thread)
+{
+ ACPI_FUNCTION_NAME(ds_get_current_walk_state);
+
+ if (!thread) {
+ return (NULL);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Current WalkState %p\n",
+ thread->walk_state_list));
+
+ return (thread->walk_state_list);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_push_walk_state
+ *
+ * PARAMETERS: walk_state - State to push
+ * Thread - Thread state object
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Place the walk state at the head of the thread's walk state list
+ *
+ ******************************************************************************/
+
+void
+acpi_ds_push_walk_state(struct acpi_walk_state *walk_state,
+ struct acpi_thread_state *thread)
+{
+ ACPI_FUNCTION_TRACE(ds_push_walk_state);
+
+ walk_state->next = thread->walk_state_list;
+ thread->walk_state_list = walk_state;
+
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_pop_walk_state
+ *
+ * PARAMETERS: Thread - Current thread state
+ *
+ * RETURN: A walk_state object popped from the thread's stack
+ *
+ * DESCRIPTION: Remove and return the walk state object that is at the head
+ *              of the walk state list for the given thread. NULL indicates
+ *              that the list is empty.
+ *
+ ******************************************************************************/
+
+struct acpi_walk_state *acpi_ds_pop_walk_state(struct acpi_thread_state *thread)
+{
+ struct acpi_walk_state *walk_state;
+
+ ACPI_FUNCTION_TRACE(ds_pop_walk_state);
+
+ walk_state = thread->walk_state_list;
+
+ if (walk_state) {
+
+ /* Next walk state becomes the current walk state */
+
+ thread->walk_state_list = walk_state->next;
+
+ /*
+ * Don't clear the NEXT field, this serves as an indicator
+ * that there is a parent WALK STATE
+ * Do Not: walk_state->Next = NULL;
+ */
+ }
+
+ return_PTR(walk_state);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_create_walk_state
+ *
+ * PARAMETERS: owner_id - ID for object creation
+ * Origin - Starting point for this walk
+ * method_desc - Method object
+ * Thread - Current thread state
+ *
+ * RETURN: Pointer to the new walk state.
+ *
+ * DESCRIPTION: Allocate and initialize a new walk state. The current walk
+ * state is set to this new state.
+ *
+ ******************************************************************************/
+
+struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, union acpi_parse_object
+ *origin, union acpi_operand_object
+ *method_desc, struct acpi_thread_state
+ *thread)
+{
+ struct acpi_walk_state *walk_state;
+
+ ACPI_FUNCTION_TRACE(ds_create_walk_state);
+
+ walk_state = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_walk_state));
+ if (!walk_state) {
+ return_PTR(NULL);
+ }
+
+ walk_state->descriptor_type = ACPI_DESC_TYPE_WALK;
+ walk_state->method_desc = method_desc;
+ walk_state->owner_id = owner_id;
+ walk_state->origin = origin;
+ walk_state->thread = thread;
+
+ walk_state->parser_state.start_op = origin;
+
+ /* Init the method args/local */
+
+#if (!defined (ACPI_NO_METHOD_EXECUTION) && !defined (ACPI_CONSTANT_EVAL_ONLY))
+ acpi_ds_method_data_init(walk_state);
+#endif
+
+ /* Put the new state at the head of the walk list */
+
+ if (thread) {
+ acpi_ds_push_walk_state(walk_state, thread);
+ }
+
+ return_PTR(walk_state);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_init_aml_walk
+ *
+ * PARAMETERS: walk_state - New state to be initialized
+ * Op - Current parse op
+ * method_node - Control method NS node, if any
+ * aml_start - Start of AML
+ * aml_length - Length of AML
+ * Info - Method info block (params, etc.)
+ * pass_number - 1, 2, or 3
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initialize a walk state for a pass 1, 2, or 3 parse tree walk
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op,
+ struct acpi_namespace_node *method_node,
+ u8 * aml_start,
+ u32 aml_length,
+ struct acpi_evaluate_info *info, u8 pass_number)
+{
+ acpi_status status;
+ struct acpi_parse_state *parser_state = &walk_state->parser_state;
+ union acpi_parse_object *extra_op;
+
+ ACPI_FUNCTION_TRACE(ds_init_aml_walk);
+
+ walk_state->parser_state.aml =
+ walk_state->parser_state.aml_start = aml_start;
+ walk_state->parser_state.aml_end =
+ walk_state->parser_state.pkg_end = aml_start + aml_length;
+
+ /* The next_op of the next_walk will be the beginning of the method */
+
+ walk_state->next_op = NULL;
+ walk_state->pass_number = pass_number;
+
+ if (info) {
+ walk_state->params = info->parameters;
+ walk_state->caller_return_desc = &info->return_object;
+ }
+
+ status = acpi_ps_init_scope(&walk_state->parser_state, op);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ if (method_node) {
+ walk_state->parser_state.start_node = method_node;
+ walk_state->walk_type = ACPI_WALK_METHOD;
+ walk_state->method_node = method_node;
+ walk_state->method_desc =
+ acpi_ns_get_attached_object(method_node);
+
+ /* Push start scope on scope stack and make it current */
+
+ status =
+ acpi_ds_scope_stack_push(method_node, ACPI_TYPE_METHOD,
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Init the method arguments */
+
+ status = acpi_ds_method_data_init_args(walk_state->params,
+ ACPI_METHOD_NUM_ARGS,
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ } else {
+ /*
+		 * Setup the current scope: find a Named Op that has a namespace
+		 * node associated with it by searching upward from this Op.
+		 * The current scope is the first Op with a namespace node.
+ */
+ extra_op = parser_state->start_op;
+ while (extra_op && !extra_op->common.node) {
+ extra_op = extra_op->common.parent;
+ }
+
+ if (!extra_op) {
+ parser_state->start_node = NULL;
+ } else {
+ parser_state->start_node = extra_op->common.node;
+ }
+
+ if (parser_state->start_node) {
+
+ /* Push start scope on scope stack and make it current */
+
+ status =
+ acpi_ds_scope_stack_push(parser_state->start_node,
+ parser_state->start_node->
+ type, walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+ }
+
+ status = acpi_ds_init_callbacks(walk_state, pass_number);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_delete_walk_state
+ *
+ * PARAMETERS: walk_state - State to delete
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Delete a walk state including all internal data structures
+ *
+ ******************************************************************************/
+
+void acpi_ds_delete_walk_state(struct acpi_walk_state *walk_state)
+{
+ union acpi_generic_state *state;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_delete_walk_state, walk_state);
+
+ if (!walk_state) {
+ return;
+ }
+
+ if (walk_state->descriptor_type != ACPI_DESC_TYPE_WALK) {
+ ACPI_ERROR((AE_INFO, "%p is not a valid walk state",
+ walk_state));
+ return;
+ }
+
+ /* There should not be any open scopes */
+
+ if (walk_state->parser_state.scope) {
+ ACPI_ERROR((AE_INFO, "%p walk still has a scope list",
+ walk_state));
+ acpi_ps_cleanup_scope(&walk_state->parser_state);
+ }
+
+	/* Always free any linked control states */
+
+ while (walk_state->control_state) {
+ state = walk_state->control_state;
+ walk_state->control_state = state->common.next;
+
+ acpi_ut_delete_generic_state(state);
+ }
+
+	/* Always free any linked parse states */
+
+ while (walk_state->scope_info) {
+ state = walk_state->scope_info;
+ walk_state->scope_info = state->common.next;
+
+ acpi_ut_delete_generic_state(state);
+ }
+
+	/* Always free any stacked result states */
+
+ while (walk_state->results) {
+ state = walk_state->results;
+ walk_state->results = state->common.next;
+
+ acpi_ut_delete_generic_state(state);
+ }
+
+ ACPI_FREE(walk_state);
+ return_VOID;
+}
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
new file mode 100644
index 0000000..afd5db3
--- /dev/null
+++ b/drivers/acpi/dock.c
@@ -0,0 +1,1160 @@
+/*
+ * dock.c - ACPI dock station driver
+ *
+ * Copyright (C) 2006 Kristen Carlson Accardi <kristen.c.accardi@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/jiffies.h>
+#include <linux/stddef.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#define ACPI_DOCK_DRIVER_DESCRIPTION "ACPI Dock Station Driver"
+
+ACPI_MODULE_NAME("dock");
+MODULE_AUTHOR("Kristen Carlson Accardi");
+MODULE_DESCRIPTION(ACPI_DOCK_DRIVER_DESCRIPTION);
+MODULE_LICENSE("GPL");
+
+static int immediate_undock = 1;
+module_param(immediate_undock, bool, 0644);
+MODULE_PARM_DESC(immediate_undock, "1 (default) will cause the driver to "
+	"undock immediately when the undock button is pressed, 0 will cause "
+	"the driver to wait for userspace to write the undock sysfs file "
+	"before undocking");
+
+static struct atomic_notifier_head dock_notifier_list;
+static char dock_device_name[] = "dock";
+
+static const struct acpi_device_id dock_device_ids[] = {
+ {"LNXDOCK", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, dock_device_ids);
+
+struct dock_station {
+ acpi_handle handle;
+ unsigned long last_dock_time;
+ u32 flags;
+ spinlock_t dd_lock;
+ struct mutex hp_lock;
+ struct list_head dependent_devices;
+ struct list_head hotplug_devices;
+
+ struct list_head sibiling;
+ struct platform_device *dock_device;
+};
+static LIST_HEAD(dock_stations);
+static int dock_station_count;
+
+struct dock_dependent_device {
+ struct list_head list;
+ struct list_head hotplug_list;
+ acpi_handle handle;
+ struct acpi_dock_ops *ops;
+ void *context;
+};
+
+#define DOCK_DOCKING 0x00000001
+#define DOCK_UNDOCKING 0x00000002
+#define DOCK_IS_DOCK 0x00000010
+#define DOCK_IS_ATA 0x00000020
+#define DOCK_IS_BAT 0x00000040
+#define DOCK_EVENT 3
+#define UNDOCK_EVENT 2
+
+/*****************************************************************************
+ * Dock Dependent device functions *
+ *****************************************************************************/
+/**
+ * alloc_dock_dependent_device - allocate and init a dependent device
+ * @handle: the acpi_handle of the dependent device
+ *
+ * Allocate memory for a dependent device structure for a device referenced
+ * by the acpi handle
+ */
+static struct dock_dependent_device *
+alloc_dock_dependent_device(acpi_handle handle)
+{
+ struct dock_dependent_device *dd;
+
+ dd = kzalloc(sizeof(*dd), GFP_KERNEL);
+ if (dd) {
+ dd->handle = handle;
+ INIT_LIST_HEAD(&dd->list);
+ INIT_LIST_HEAD(&dd->hotplug_list);
+ }
+ return dd;
+}
+
+/**
+ * add_dock_dependent_device - associate a device with the dock station
+ * @ds: The dock station
+ * @dd: The dependent device
+ *
+ * Add the dependent device to the dock's dependent device list.
+ */
+static void
+add_dock_dependent_device(struct dock_station *ds,
+ struct dock_dependent_device *dd)
+{
+ spin_lock(&ds->dd_lock);
+ list_add_tail(&dd->list, &ds->dependent_devices);
+ spin_unlock(&ds->dd_lock);
+}
+
+/**
+ * dock_add_hotplug_device - associate a hotplug handler with the dock station
+ * @ds: The dock station
+ * @dd: The dependent device struct
+ *
+ * Add the dependent device to the dock's hotplug device list
+ */
+static void
+dock_add_hotplug_device(struct dock_station *ds,
+ struct dock_dependent_device *dd)
+{
+ mutex_lock(&ds->hp_lock);
+ list_add_tail(&dd->hotplug_list, &ds->hotplug_devices);
+ mutex_unlock(&ds->hp_lock);
+}
+
+/**
+ * dock_del_hotplug_device - remove a hotplug handler from the dock station
+ * @ds: The dock station
+ * @dd: the dependent device struct
+ *
+ * Delete the dependent device from the dock's hotplug device list
+ */
+static void
+dock_del_hotplug_device(struct dock_station *ds,
+ struct dock_dependent_device *dd)
+{
+ mutex_lock(&ds->hp_lock);
+ list_del(&dd->hotplug_list);
+ mutex_unlock(&ds->hp_lock);
+}
+
+/**
+ * find_dock_dependent_device - get a device dependent on this dock
+ * @ds: the dock station
+ * @handle: the acpi_handle of the device we want
+ *
+ * iterate over the dependent device list for this dock. If the
+ * dependent device matches the handle, return.
+ */
+static struct dock_dependent_device *
+find_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
+{
+ struct dock_dependent_device *dd;
+
+ spin_lock(&ds->dd_lock);
+ list_for_each_entry(dd, &ds->dependent_devices, list) {
+ if (handle == dd->handle) {
+ spin_unlock(&ds->dd_lock);
+ return dd;
+ }
+ }
+ spin_unlock(&ds->dd_lock);
+ return NULL;
+}
+
+/*****************************************************************************
+ * Dock functions *
+ *****************************************************************************/
+/**
+ * is_dock - see if a device is a dock station
+ * @handle: acpi handle of the device
+ *
+ * If an acpi object has a _DCK method, then it is by definition a dock
+ * station, so return true.
+ */
+static int is_dock(acpi_handle handle)
+{
+ acpi_status status;
+ acpi_handle tmp;
+
+ status = acpi_get_handle(handle, "_DCK", &tmp);
+ if (ACPI_FAILURE(status))
+ return 0;
+ return 1;
+}
+
+static int is_ejectable(acpi_handle handle)
+{
+ acpi_status status;
+ acpi_handle tmp;
+
+ status = acpi_get_handle(handle, "_EJ0", &tmp);
+ if (ACPI_FAILURE(status))
+ return 0;
+ return 1;
+}
+
+static int is_ata(acpi_handle handle)
+{
+ acpi_handle tmp;
+
+ if ((ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp))) ||
+ (ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp))) ||
+ (ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp))) ||
+ (ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp))))
+ return 1;
+
+ return 0;
+}
+
+static int is_battery(acpi_handle handle)
+{
+ struct acpi_device_info *info;
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ int ret = 1;
+
+ if (!ACPI_SUCCESS(acpi_get_object_info(handle, &buffer)))
+ return 0;
+ info = buffer.pointer;
+ if (!(info->valid & ACPI_VALID_HID))
+ ret = 0;
+ else
+ ret = !strcmp("PNP0C0A", info->hardware_id.value);
+
+ kfree(buffer.pointer);
+ return ret;
+}
+
+static int is_ejectable_bay(acpi_handle handle)
+{
+ acpi_handle phandle;
+ if (!is_ejectable(handle))
+ return 0;
+ if (is_battery(handle) || is_ata(handle))
+ return 1;
+ if (!acpi_get_parent(handle, &phandle) && is_ata(phandle))
+ return 1;
+ return 0;
+}
+
+/**
+ * is_dock_device - see if a device is on a dock station
+ * @handle: acpi handle of the device
+ *
+ * If this device is either the dock station itself,
+ * or is a device dependent on the dock station, then it
+ * is a dock device
+ */
+int is_dock_device(acpi_handle handle)
+{
+ struct dock_station *dock_station;
+
+ if (!dock_station_count)
+ return 0;
+
+ if (is_dock(handle))
+ return 1;
+ list_for_each_entry(dock_station, &dock_stations, sibiling) {
+ if (find_dock_dependent_device(dock_station, handle))
+ return 1;
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(is_dock_device);
+
+/**
+ * dock_present - see if the dock station is present.
+ * @ds: the dock station
+ *
+ * execute the _STA method. note that present does not
+ * imply that we are docked.
+ */
+static int dock_present(struct dock_station *ds)
+{
+ unsigned long long sta;
+ acpi_status status;
+
+ if (ds) {
+ status = acpi_evaluate_integer(ds->handle, "_STA", NULL, &sta);
+ if (ACPI_SUCCESS(status) && sta)
+ return 1;
+ }
+ return 0;
+}
+
+
+
+/**
+ * dock_create_acpi_device - add new devices to acpi
+ * @handle: handle of the device to add
+ *
+ * This function will create a new acpi_device for the given
+ * handle if one does not exist already. This should cause
+ * acpi to scan for drivers for the given device, and call the
+ * matching driver's add routine.
+ *
+ * Returns a pointer to the acpi_device corresponding to the handle.
+ */
+static struct acpi_device * dock_create_acpi_device(acpi_handle handle)
+{
+ struct acpi_device *device = NULL;
+ struct acpi_device *parent_device;
+ acpi_handle parent;
+ int ret;
+
+ if (acpi_bus_get_device(handle, &device)) {
+ /*
+ * no device created for this object,
+ * so we should create one.
+ */
+ acpi_get_parent(handle, &parent);
+ if (acpi_bus_get_device(parent, &parent_device))
+ parent_device = NULL;
+
+ ret = acpi_bus_add(&device, parent_device, handle,
+ ACPI_BUS_TYPE_DEVICE);
+ if (ret) {
+ pr_debug("error adding bus, %x\n",
+ -ret);
+ return NULL;
+ }
+ }
+ return device;
+}
+
+/**
+ * dock_remove_acpi_device - remove the acpi_device struct from acpi
+ * @handle: the handle of the device to remove
+ *
+ * Tell acpi to remove the acpi_device. This should cause any loaded
+ * driver to have its remove routine called.
+ */
+static void dock_remove_acpi_device(acpi_handle handle)
+{
+ struct acpi_device *device;
+ int ret;
+
+ if (!acpi_bus_get_device(handle, &device)) {
+ ret = acpi_bus_trim(device, 1);
+ if (ret)
+ pr_debug("error removing bus, %x\n", -ret);
+ }
+}
+
+
+/**
+ * hotplug_dock_devices - insert or remove devices on the dock station
+ * @ds: the dock station
+ * @event: either bus check or eject request
+ *
+ * Some devices on the dock station need to have drivers called
+ * to perform hotplug operations after a dock event has occurred.
+ * Traverse the list of dock devices that have registered a
+ * hotplug handler, and call the handler.
+ */
+static void hotplug_dock_devices(struct dock_station *ds, u32 event)
+{
+ struct dock_dependent_device *dd;
+
+ mutex_lock(&ds->hp_lock);
+
+ /*
+ * First call driver specific hotplug functions
+ */
+ list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list) {
+ if (dd->ops && dd->ops->handler)
+ dd->ops->handler(dd->handle, event, dd->context);
+ }
+
+ /*
+ * Now make sure that an acpi_device is created for each
+ * dependent device, or removed if this is an eject request.
+ * This will cause acpi_drivers to be stopped/started if they
+ * exist
+ */
+ list_for_each_entry(dd, &ds->dependent_devices, list) {
+ if (event == ACPI_NOTIFY_EJECT_REQUEST)
+ dock_remove_acpi_device(dd->handle);
+ else
+ dock_create_acpi_device(dd->handle);
+ }
+ mutex_unlock(&ds->hp_lock);
+}
+
+static void dock_event(struct dock_station *ds, u32 event, int num)
+{
+ struct device *dev = &ds->dock_device->dev;
+ char event_string[13];
+ char *envp[] = { event_string, NULL };
+ struct dock_dependent_device *dd;
+
+ if (num == UNDOCK_EVENT)
+ sprintf(event_string, "EVENT=undock");
+ else
+ sprintf(event_string, "EVENT=dock");
+
+ /*
+ * Indicate that the status of the dock station has
+ * changed.
+ */
+ if (num == DOCK_EVENT)
+ kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
+
+ list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list)
+ if (dd->ops && dd->ops->uevent)
+ dd->ops->uevent(dd->handle, event, dd->context);
+ if (num != DOCK_EVENT)
+ kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
+}
+
+/**
+ * eject_dock - respond to a dock eject request
+ * @ds: the dock station
+ *
+ * This is called after _DCK is called, to execute the dock station's
+ * _EJ0 method.
+ */
+static void eject_dock(struct dock_station *ds)
+{
+ struct acpi_object_list arg_list;
+ union acpi_object arg;
+ acpi_status status;
+ acpi_handle tmp;
+
+ /* all dock devices should have _EJ0, but check anyway */
+ status = acpi_get_handle(ds->handle, "_EJ0", &tmp);
+ if (ACPI_FAILURE(status)) {
+ pr_debug("No _EJ0 support for dock device\n");
+ return;
+ }
+
+ arg_list.count = 1;
+ arg_list.pointer = &arg;
+ arg.type = ACPI_TYPE_INTEGER;
+ arg.integer.value = 1;
+
+ if (ACPI_FAILURE(acpi_evaluate_object(ds->handle, "_EJ0",
+ &arg_list, NULL)))
+ pr_debug("Failed to evaluate _EJ0!\n");
+}
+
+/**
+ * handle_dock - handle a dock event
+ * @ds: the dock station
+ * @dock: to dock, or undock - that is the question
+ *
+ * Execute the _DCK method in response to an acpi event
+ */
+static void handle_dock(struct dock_station *ds, int dock)
+{
+ acpi_status status;
+ struct acpi_object_list arg_list;
+ union acpi_object arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ acpi_get_name(ds->handle, ACPI_FULL_PATHNAME, &name_buffer);
+
+ printk(KERN_INFO PREFIX "%s - %s\n",
+ (char *)name_buffer.pointer, dock ? "docking" : "undocking");
+
+ /* _DCK method has one argument */
+ arg_list.count = 1;
+ arg_list.pointer = &arg;
+ arg.type = ACPI_TYPE_INTEGER;
+ arg.integer.value = dock;
+ status = acpi_evaluate_object(ds->handle, "_DCK", &arg_list, &buffer);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
+ ACPI_EXCEPTION((AE_INFO, status, "%s - failed to execute"
+ " _DCK\n", (char *)name_buffer.pointer));
+
+ kfree(buffer.pointer);
+ kfree(name_buffer.pointer);
+}
+
+static inline void dock(struct dock_station *ds)
+{
+ handle_dock(ds, 1);
+}
+
+static inline void undock(struct dock_station *ds)
+{
+ handle_dock(ds, 0);
+}
+
+static inline void begin_dock(struct dock_station *ds)
+{
+ ds->flags |= DOCK_DOCKING;
+}
+
+static inline void complete_dock(struct dock_station *ds)
+{
+ ds->flags &= ~(DOCK_DOCKING);
+ ds->last_dock_time = jiffies;
+}
+
+static inline void begin_undock(struct dock_station *ds)
+{
+ ds->flags |= DOCK_UNDOCKING;
+}
+
+static inline void complete_undock(struct dock_station *ds)
+{
+ ds->flags &= ~(DOCK_UNDOCKING);
+}
+
+static void dock_lock(struct dock_station *ds, int lock)
+{
+ struct acpi_object_list arg_list;
+ union acpi_object arg;
+ acpi_status status;
+
+ arg_list.count = 1;
+ arg_list.pointer = &arg;
+ arg.type = ACPI_TYPE_INTEGER;
+ arg.integer.value = !!lock;
+ status = acpi_evaluate_object(ds->handle, "_LCK", &arg_list, NULL);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ if (lock)
+ printk(KERN_WARNING PREFIX "Locking device failed\n");
+ else
+ printk(KERN_WARNING PREFIX "Unlocking device failed\n");
+ }
+}
+
+/**
+ * dock_in_progress - see if we are in the middle of handling a dock event
+ * @ds: the dock station
+ *
+ * Sometimes while docking, false dock events can be sent to the driver
+ * because connections aren't fully made, or for some other reason. Ignore
+ * these events if we are in the middle of handling a dock operation.
+ */
+static int dock_in_progress(struct dock_station *ds)
+{
+ if ((ds->flags & DOCK_DOCKING) ||
+ time_before(jiffies, (ds->last_dock_time + HZ)))
+ return 1;
+ return 0;
+}
+
+/**
+ * register_dock_notifier - add yourself to the dock notifier list
+ * @nb: the caller's notifier block
+ *
+ * If a driver wishes to be notified about dock events, it can
+ * use this function to put a notifier block on the dock notifier list.
+ * This notifier call chain will be called after a dock event, but
+ * before hotplugging any new devices.
+ */
+int register_dock_notifier(struct notifier_block *nb)
+{
+ if (!dock_station_count)
+ return -ENODEV;
+
+ return atomic_notifier_chain_register(&dock_notifier_list, nb);
+}
+
+EXPORT_SYMBOL_GPL(register_dock_notifier);
+
+/**
+ * unregister_dock_notifier - remove yourself from the dock notifier list
+ * @nb: the caller's notifier block
+ */
+void unregister_dock_notifier(struct notifier_block *nb)
+{
+ if (!dock_station_count)
+ return;
+
+ atomic_notifier_chain_unregister(&dock_notifier_list, nb);
+}
+
+EXPORT_SYMBOL_GPL(unregister_dock_notifier);
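As an illustrative sketch only (not part of the patch): a driver that merely wants to hear about dock events before any hotplug work runs could hang a notifier block on this chain. The callback and variable names below are assumptions.

static int example_dock_event(struct notifier_block *nb,
			      unsigned long event, void *data)
{
	/* 'event' is the ACPI notify value that triggered the dock */
	return NOTIFY_OK;
}

static struct notifier_block example_dock_nb = {
	.notifier_call = example_dock_event,
};

static int __init example_init(void)
{
	/* returns -ENODEV when no dock stations were found */
	return register_dock_notifier(&example_dock_nb);
}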
+
+/**
+ * register_hotplug_dock_device - register a hotplug function
+ * @handle: the handle of the device
+ * @ops: handlers to call after docking
+ * @context: device specific data
+ *
+ * If a driver would like to perform a hotplug operation after a dock
+ * event, it can register a set of acpi_dock_ops callbacks to be called
+ * by the dock driver after _DCK is executed.
+ */
+int
+register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
+ void *context)
+{
+ struct dock_dependent_device *dd;
+ struct dock_station *dock_station;
+ int ret = -EINVAL;
+
+ if (!dock_station_count)
+ return -ENODEV;
+
+ /*
+ * make sure this handle is for a device dependent on the dock,
+ * this would include the dock station itself
+ */
+ list_for_each_entry(dock_station, &dock_stations, sibiling) {
+ /*
+ * An ATA bay can be in a dock and itself can be ejected
+		 * separately, so there are two 'dock stations' which need
+		 * the ops
+ */
+ dd = find_dock_dependent_device(dock_station, handle);
+ if (dd) {
+ dd->ops = ops;
+ dd->context = context;
+ dock_add_hotplug_device(dock_station, dd);
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(register_hotplug_dock_device);
+
+/**
+ * unregister_hotplug_dock_device - remove yourself from the hotplug list
+ * @handle: the acpi handle of the device
+ */
+void unregister_hotplug_dock_device(acpi_handle handle)
+{
+ struct dock_dependent_device *dd;
+ struct dock_station *dock_station;
+
+ if (!dock_station_count)
+ return;
+
+ list_for_each_entry(dock_station, &dock_stations, sibiling) {
+ dd = find_dock_dependent_device(dock_station, handle);
+ if (dd)
+ dock_del_hotplug_device(dock_station, dd);
+ }
+}
+
+EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device);
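Again purely as a hedged sketch (not from the patch itself): a driver for a device that lives in a dock or bay would register acpi_dock_ops so its handler runs after _DCK. The handler body and all names here are hypothetical; the handler signature is inferred from how dd->ops->handler() is invoked above.

static void example_dock_handler(acpi_handle handle, u32 event, void *context)
{
	/* re-enumerate or tear down the device behind 'handle' here */
}

static struct acpi_dock_ops example_dock_ops = {
	.handler = example_dock_handler,
};

static int example_probe(acpi_handle handle)
{
	if (!is_dock_device(handle))
		return 0;

	/* the context pointer is handed back to the handler unchanged */
	return register_hotplug_dock_device(handle, &example_dock_ops, NULL);
}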
+
+/**
+ * handle_eject_request - handle an undock request, checking for error conditions
+ * @ds: the dock station
+ * @event: the acpi event that triggered the undock request
+ *
+ * Check to make sure the dock device is still present, then undock and
+ * hotremove all the devices that may need removing.
+ */
+static int handle_eject_request(struct dock_station *ds, u32 event)
+{
+ if (dock_in_progress(ds))
+ return -EBUSY;
+
+ /*
+	 * Here we need to generate the undock
+	 * event prior to actually doing the undock,
+	 * so that the device struct still exists.
+	 * Also, send the undock event even if the
+	 * device is not present anymore.
+ */
+ dock_event(ds, event, UNDOCK_EVENT);
+
+ hotplug_dock_devices(ds, ACPI_NOTIFY_EJECT_REQUEST);
+ undock(ds);
+ dock_lock(ds, 0);
+ eject_dock(ds);
+ if (dock_present(ds)) {
+ printk(KERN_ERR PREFIX "Unable to undock!\n");
+ return -EBUSY;
+ }
+ complete_undock(ds);
+ return 0;
+}
+
+/**
+ * dock_notify - act upon an acpi dock notification
+ * @handle: the dock station handle
+ * @event: the acpi event
+ * @data: our driver data struct
+ *
+ * If we are notified to dock, then check to see if the dock is
+ * present and then dock. Notify all drivers of the dock event,
+ * and then hotplug any devices that may need hotplugging.
+ */
+static void dock_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct dock_station *ds = data;
+ struct acpi_device *tmp;
+ int surprise_removal = 0;
+
+ /*
+ * According to acpi spec 3.0a, if a DEVICE_CHECK notification
+ * is sent and _DCK is present, it is assumed to mean an undock
+ * request.
+ */
+ if ((ds->flags & DOCK_IS_DOCK) && event == ACPI_NOTIFY_DEVICE_CHECK)
+ event = ACPI_NOTIFY_EJECT_REQUEST;
+
+ /*
+ * dock station: BUS_CHECK - docked or surprise removal
+ * DEVICE_CHECK - undocked
+ * other device: BUS_CHECK/DEVICE_CHECK - added or surprise removal
+ *
+	 * To simplify event handling, dock dependent device handlers always
+ * get ACPI_NOTIFY_BUS_CHECK/ACPI_NOTIFY_DEVICE_CHECK for add and
+ * ACPI_NOTIFY_EJECT_REQUEST for removal
+ */
+ switch (event) {
+ case ACPI_NOTIFY_BUS_CHECK:
+ case ACPI_NOTIFY_DEVICE_CHECK:
+ if (!dock_in_progress(ds) && acpi_bus_get_device(ds->handle,
+ &tmp)) {
+ begin_dock(ds);
+ dock(ds);
+ if (!dock_present(ds)) {
+ printk(KERN_ERR PREFIX "Unable to dock!\n");
+ complete_dock(ds);
+ break;
+ }
+ atomic_notifier_call_chain(&dock_notifier_list,
+ event, NULL);
+ hotplug_dock_devices(ds, event);
+ complete_dock(ds);
+ dock_event(ds, event, DOCK_EVENT);
+ dock_lock(ds, 1);
+ break;
+ }
+ if (dock_present(ds) || dock_in_progress(ds))
+ break;
+ /* This is a surprise removal */
+ surprise_removal = 1;
+ event = ACPI_NOTIFY_EJECT_REQUEST;
+		/* Fall through */
+ case ACPI_NOTIFY_EJECT_REQUEST:
+ begin_undock(ds);
+ if ((immediate_undock && !(ds->flags & DOCK_IS_ATA))
+ || surprise_removal)
+ handle_eject_request(ds, event);
+ else
+ dock_event(ds, event, UNDOCK_EVENT);
+ break;
+ default:
+ printk(KERN_ERR PREFIX "Unknown dock event %d\n", event);
+ }
+}
+
+struct dock_data {
+ acpi_handle handle;
+ unsigned long event;
+ struct dock_station *ds;
+};
+
+static void acpi_dock_deferred_cb(void *context)
+{
+ struct dock_data *data = (struct dock_data *)context;
+
+ dock_notify(data->handle, data->event, data->ds);
+ kfree(data);
+}
+
+static int acpi_dock_notifier_call(struct notifier_block *this,
+ unsigned long event, void *data)
+{
+ struct dock_station *dock_station;
+ acpi_handle handle = (acpi_handle)data;
+
+ if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK
+ && event != ACPI_NOTIFY_EJECT_REQUEST)
+ return 0;
+ list_for_each_entry(dock_station, &dock_stations, sibiling) {
+ if (dock_station->handle == handle) {
+ struct dock_data *dock_data;
+
+ dock_data = kmalloc(sizeof(*dock_data), GFP_KERNEL);
+ if (!dock_data)
+ return 0;
+ dock_data->handle = handle;
+ dock_data->event = event;
+ dock_data->ds = dock_station;
+ acpi_os_hotplug_execute(acpi_dock_deferred_cb,
+ dock_data);
+			return 0;
+ }
+ }
+ return 0;
+}
+
+static struct notifier_block dock_acpi_notifier = {
+ .notifier_call = acpi_dock_notifier_call,
+};
+
+/**
+ * find_dock_devices - find devices on the dock station
+ * @handle: the handle of the device we are examining
+ * @lvl: unused
+ * @context: the dock station private data
+ * @rv: unused
+ *
+ * This function is called by acpi_walk_namespace. It will
+ * check to see if an object has an _EJD method. If it does, then it
+ * will see if it is dependent on the dock station.
+ */
+static acpi_status
+find_dock_devices(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+ acpi_status status;
+ acpi_handle tmp, parent;
+ struct dock_station *ds = context;
+ struct dock_dependent_device *dd;
+
+ status = acpi_bus_get_ejd(handle, &tmp);
+ if (ACPI_FAILURE(status)) {
+ /* try the parent device as well */
+ status = acpi_get_parent(handle, &parent);
+ if (ACPI_FAILURE(status))
+ goto fdd_out;
+ /* see if parent is dependent on dock */
+ status = acpi_bus_get_ejd(parent, &tmp);
+ if (ACPI_FAILURE(status))
+ goto fdd_out;
+ }
+
+ if (tmp == ds->handle) {
+ dd = alloc_dock_dependent_device(handle);
+ if (dd)
+ add_dock_dependent_device(ds, dd);
+ }
+fdd_out:
+ return AE_OK;
+}
+
+/*
+ * show_docked - read method for "docked" file in sysfs
+ */
+static ssize_t show_docked(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_device *tmp;
+
+ struct dock_station *dock_station = *((struct dock_station **)
+ dev->platform_data);
+
+ if (ACPI_SUCCESS(acpi_bus_get_device(dock_station->handle, &tmp)))
+ return snprintf(buf, PAGE_SIZE, "1\n");
+ return snprintf(buf, PAGE_SIZE, "0\n");
+}
+static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL);
+
+/*
+ * show_flags - read method for flags file in sysfs
+ */
+static ssize_t show_flags(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dock_station *dock_station = *((struct dock_station **)
+ dev->platform_data);
+ return snprintf(buf, PAGE_SIZE, "%d\n", dock_station->flags);
+}
+static DEVICE_ATTR(flags, S_IRUGO, show_flags, NULL);
+
+/*
+ * write_undock - write method for "undock" file in sysfs
+ */
+static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ struct dock_station *dock_station = *((struct dock_station **)
+ dev->platform_data);
+
+ if (!count)
+ return -EINVAL;
+
+ begin_undock(dock_station);
+ ret = handle_eject_request(dock_station, ACPI_NOTIFY_EJECT_REQUEST);
+	return ret ? ret : count;
+}
+static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock);
+
+/*
+ * show_dock_uid - read method for "uid" file in sysfs
+ */
+static ssize_t show_dock_uid(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long long lbuf;
+ struct dock_station *dock_station = *((struct dock_station **)
+ dev->platform_data);
+ acpi_status status = acpi_evaluate_integer(dock_station->handle,
+ "_UID", NULL, &lbuf);
+ if (ACPI_FAILURE(status))
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%llx\n", lbuf);
+}
+static DEVICE_ATTR(uid, S_IRUGO, show_dock_uid, NULL);
+
+static ssize_t show_dock_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dock_station *dock_station = *((struct dock_station **)
+ dev->platform_data);
+ char *type;
+
+ if (dock_station->flags & DOCK_IS_DOCK)
+ type = "dock_station";
+ else if (dock_station->flags & DOCK_IS_ATA)
+ type = "ata_bay";
+ else if (dock_station->flags & DOCK_IS_BAT)
+ type = "battery_bay";
+ else
+ type = "unknown";
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", type);
+}
+static DEVICE_ATTR(type, S_IRUGO, show_dock_type, NULL);
+
+/**
+ * dock_add - add a new dock station
+ * @handle: the dock station handle
+ *
+ * Allocate and initialize a new dock station device. Find all devices
+ * that are on the dock station, and register for dock event notifications.
+ */
+static int dock_add(acpi_handle handle)
+{
+ int ret;
+ struct dock_dependent_device *dd;
+ struct dock_station *dock_station;
+ struct platform_device *dock_device;
+
+ /* allocate & initialize the dock_station private data */
+ dock_station = kzalloc(sizeof(*dock_station), GFP_KERNEL);
+ if (!dock_station)
+ return -ENOMEM;
+ dock_station->handle = handle;
+ dock_station->last_dock_time = jiffies - HZ;
+ INIT_LIST_HEAD(&dock_station->dependent_devices);
+ INIT_LIST_HEAD(&dock_station->hotplug_devices);
+ INIT_LIST_HEAD(&dock_station->sibiling);
+ spin_lock_init(&dock_station->dd_lock);
+ mutex_init(&dock_station->hp_lock);
+ ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list);
+
+ /* initialize platform device stuff */
+ dock_station->dock_device =
+ platform_device_register_simple(dock_device_name,
+ dock_station_count, NULL, 0);
+ dock_device = dock_station->dock_device;
+ if (IS_ERR(dock_device)) {
+ kfree(dock_station);
+ dock_station = NULL;
+ return PTR_ERR(dock_device);
+ }
+ platform_device_add_data(dock_device, &dock_station,
+ sizeof(struct dock_station *));
+
+ /* we want the dock device to send uevents */
+ dock_device->dev.uevent_suppress = 0;
+
+ if (is_dock(handle))
+ dock_station->flags |= DOCK_IS_DOCK;
+ if (is_ata(handle))
+ dock_station->flags |= DOCK_IS_ATA;
+ if (is_battery(handle))
+ dock_station->flags |= DOCK_IS_BAT;
+
+ ret = device_create_file(&dock_device->dev, &dev_attr_docked);
+ if (ret) {
+		printk(KERN_ERR "Error %d adding sysfs file\n", ret);
+ platform_device_unregister(dock_device);
+ kfree(dock_station);
+ dock_station = NULL;
+ return ret;
+ }
+ ret = device_create_file(&dock_device->dev, &dev_attr_undock);
+ if (ret) {
+		printk(KERN_ERR "Error %d adding sysfs file\n", ret);
+ device_remove_file(&dock_device->dev, &dev_attr_docked);
+ platform_device_unregister(dock_device);
+ kfree(dock_station);
+ dock_station = NULL;
+ return ret;
+ }
+ ret = device_create_file(&dock_device->dev, &dev_attr_uid);
+ if (ret) {
+		printk(KERN_ERR "Error %d adding sysfs file\n", ret);
+ device_remove_file(&dock_device->dev, &dev_attr_docked);
+ device_remove_file(&dock_device->dev, &dev_attr_undock);
+ platform_device_unregister(dock_device);
+ kfree(dock_station);
+ dock_station = NULL;
+ return ret;
+ }
+ ret = device_create_file(&dock_device->dev, &dev_attr_flags);
+ if (ret) {
+		printk(KERN_ERR "Error %d adding sysfs file\n", ret);
+ device_remove_file(&dock_device->dev, &dev_attr_docked);
+ device_remove_file(&dock_device->dev, &dev_attr_undock);
+ device_remove_file(&dock_device->dev, &dev_attr_uid);
+ platform_device_unregister(dock_device);
+ kfree(dock_station);
+ dock_station = NULL;
+ return ret;
+ }
+ ret = device_create_file(&dock_device->dev, &dev_attr_type);
+ if (ret)
+		printk(KERN_ERR "Error %d adding sysfs file\n", ret);
+
+ /* Find dependent devices */
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, find_dock_devices, dock_station,
+ NULL);
+
+ /* add the dock station as a device dependent on itself */
+ dd = alloc_dock_dependent_device(handle);
+ if (!dd) {
+ kfree(dock_station);
+ dock_station = NULL;
+ ret = -ENOMEM;
+ goto dock_add_err_unregister;
+ }
+ add_dock_dependent_device(dock_station, dd);
+
+ dock_station_count++;
+ list_add(&dock_station->sibiling, &dock_stations);
+ return 0;
+
+dock_add_err_unregister:
+ device_remove_file(&dock_device->dev, &dev_attr_type);
+ device_remove_file(&dock_device->dev, &dev_attr_docked);
+ device_remove_file(&dock_device->dev, &dev_attr_undock);
+ device_remove_file(&dock_device->dev, &dev_attr_uid);
+ device_remove_file(&dock_device->dev, &dev_attr_flags);
+ platform_device_unregister(dock_device);
+ kfree(dock_station);
+ dock_station = NULL;
+ return ret;
+}
+
+/**
+ * dock_remove - free up resources related to the dock station
+ */
+static int dock_remove(struct dock_station *dock_station)
+{
+ struct dock_dependent_device *dd, *tmp;
+ struct platform_device *dock_device = dock_station->dock_device;
+
+ if (!dock_station_count)
+ return 0;
+
+ /* remove dependent devices */
+ list_for_each_entry_safe(dd, tmp, &dock_station->dependent_devices,
+ list)
+ kfree(dd);
+
+ /* cleanup sysfs */
+ device_remove_file(&dock_device->dev, &dev_attr_type);
+ device_remove_file(&dock_device->dev, &dev_attr_docked);
+ device_remove_file(&dock_device->dev, &dev_attr_undock);
+ device_remove_file(&dock_device->dev, &dev_attr_uid);
+ device_remove_file(&dock_device->dev, &dev_attr_flags);
+ platform_device_unregister(dock_device);
+
+ /* free dock station memory */
+ kfree(dock_station);
+ dock_station = NULL;
+ return 0;
+}
+
+/**
+ * find_dock - look for a dock station
+ * @handle: acpi handle of a device
+ * @lvl: unused
+ * @context: counter of dock stations found
+ * @rv: unused
+ *
+ * This is called by acpi_walk_namespace to look for dock stations.
+ */
+static acpi_status
+find_dock(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+ acpi_status status = AE_OK;
+
+ if (is_dock(handle)) {
+ if (dock_add(handle) >= 0) {
+ status = AE_CTRL_TERMINATE;
+ }
+ }
+ return status;
+}
+
+static acpi_status
+find_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+ /* If bay is a dock, it's already handled */
+ if (is_ejectable_bay(handle) && !is_dock(handle))
+ dock_add(handle);
+ return AE_OK;
+}
+
+static int __init dock_init(void)
+{
+ if (acpi_disabled)
+ return 0;
+
+ /* look for a dock station */
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, find_dock, NULL, NULL);
+
+ /* look for bay */
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, find_bay, NULL, NULL);
+ if (!dock_station_count) {
+ printk(KERN_INFO PREFIX "No dock devices found.\n");
+ return 0;
+ }
+
+ register_acpi_bus_notifier(&dock_acpi_notifier);
+ printk(KERN_INFO PREFIX "%s: %d docks/bays found\n",
+ ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count);
+ return 0;
+}
+
+static void __exit dock_exit(void)
+{
+ struct dock_station *dock_station;
+
+ unregister_acpi_bus_notifier(&dock_acpi_notifier);
+ list_for_each_entry(dock_station, &dock_stations, sibiling)
+ dock_remove(dock_station);
+}
+
+/*
+ * Must be called before the drivers of devices in the dock, otherwise we
+ * can't know which devices are in a dock.
+ */
+subsys_initcall(dock_init);
+module_exit(dock_exit);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
new file mode 100644
index 0000000..4aa9477
--- /dev/null
+++ b/drivers/acpi/ec.c
@@ -0,0 +1,1106 @@
+/*
+ * ec.c - ACPI Embedded Controller Driver (v2.1)
+ *
+ * Copyright (C) 2006-2008 Alexey Starikovskiy <astarikovskiy@suse.de>
+ * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
+ * Copyright (C) 2004 Luming Yu <luming.yu@intel.com>
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+/* Uncomment next line to get verbose printout */
+/* #define DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <asm/io.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/actypes.h>
+
+#define ACPI_EC_CLASS "embedded_controller"
+#define ACPI_EC_DEVICE_NAME "Embedded Controller"
+#define ACPI_EC_FILE_INFO "info"
+
+#undef PREFIX
+#define PREFIX "ACPI: EC: "
+
+/* EC status register */
+#define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */
+#define ACPI_EC_FLAG_IBF 0x02 /* Input buffer full */
+#define ACPI_EC_FLAG_BURST 0x10 /* burst mode */
+#define ACPI_EC_FLAG_SCI 0x20 /* EC-SCI occurred */
+
+/* EC commands */
+enum ec_command {
+ ACPI_EC_COMMAND_READ = 0x80,
+ ACPI_EC_COMMAND_WRITE = 0x81,
+ ACPI_EC_BURST_ENABLE = 0x82,
+ ACPI_EC_BURST_DISABLE = 0x83,
+ ACPI_EC_COMMAND_QUERY = 0x84,
+};
+
+#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
+#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
+#define ACPI_EC_UDELAY 100 /* Wait 100us before polling EC again */
+
+#define ACPI_EC_STORM_THRESHOLD 8 /* number of false interrupts
+ per one transaction */
+
+enum {
+ EC_FLAGS_QUERY_PENDING, /* Query is pending */
+ EC_FLAGS_GPE_MODE, /* Expect GPE to be sent
+ * for status change */
+ EC_FLAGS_NO_GPE, /* Don't use GPE mode */
+ EC_FLAGS_GPE_STORM, /* GPE storm detected */
+ EC_FLAGS_HANDLERS_INSTALLED /* Handlers for GPE and
+ * OpReg are installed */
+};
+
+/* If we find an EC via the ECDT, we need to keep a ptr to its context */
+/* External interfaces use the first EC only, so remember it */
+typedef int (*acpi_ec_query_func) (void *data);
+
+struct acpi_ec_query_handler {
+ struct list_head node;
+ acpi_ec_query_func func;
+ acpi_handle handle;
+ void *data;
+ u8 query_bit;
+};
+
+struct transaction {
+ const u8 *wdata;
+ u8 *rdata;
+ unsigned short irq_count;
+ u8 command;
+ u8 wi;
+ u8 ri;
+ u8 wlen;
+ u8 rlen;
+ bool done;
+};
+
+static struct acpi_ec {
+ acpi_handle handle;
+ unsigned long gpe;
+ unsigned long command_addr;
+ unsigned long data_addr;
+ unsigned long global_lock;
+ unsigned long flags;
+ struct mutex lock;
+ wait_queue_head_t wait;
+ struct list_head list;
+ struct transaction *curr;
+ spinlock_t curr_lock;
+} *boot_ec, *first_ec;
+
+/*
+ * Some Asus systems have the ECDT data/command I/O addresses exchanged.
+ */
+static int print_ecdt_error(const struct dmi_system_id *id)
+{
+ printk(KERN_NOTICE PREFIX "%s detected - "
+ "ECDT has exchanged control/data I/O address\n",
+ id->ident);
+ return 0;
+}
+
+static struct dmi_system_id __cpuinitdata ec_dmi_table[] = {
+ {
+ print_ecdt_error, "Asus L4R", {
+ DMI_MATCH(DMI_BIOS_VERSION, "1008.006"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),
+ DMI_MATCH(DMI_BOARD_NAME, "L4R") }, NULL},
+ {
+ print_ecdt_error, "Asus M6R", {
+ DMI_MATCH(DMI_BIOS_VERSION, "0207"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M6R"),
+ DMI_MATCH(DMI_BOARD_NAME, "M6R") }, NULL},
+ {},
+};
+
+/* --------------------------------------------------------------------------
+ Transaction Management
+ -------------------------------------------------------------------------- */
+
+static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
+{
+ u8 x = inb(ec->command_addr);
+ pr_debug(PREFIX "---> status = 0x%2.2x\n", x);
+ return x;
+}
+
+static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
+{
+ u8 x = inb(ec->data_addr);
+ pr_debug(PREFIX "---> data = 0x%2.2x\n", x);
+ return x;
+}
+
+static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
+{
+ pr_debug(PREFIX "<--- command = 0x%2.2x\n", command);
+ outb(command, ec->command_addr);
+}
+
+static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
+{
+ pr_debug(PREFIX "<--- data = 0x%2.2x\n", data);
+ outb(data, ec->data_addr);
+}
+
+static int ec_transaction_done(struct acpi_ec *ec)
+{
+ unsigned long flags;
+ int ret = 0;
+ spin_lock_irqsave(&ec->curr_lock, flags);
+ if (!ec->curr || ec->curr->done)
+ ret = 1;
+ spin_unlock_irqrestore(&ec->curr_lock, flags);
+ return ret;
+}
+
+static void start_transaction(struct acpi_ec *ec)
+{
+ ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
+ ec->curr->done = false;
+ acpi_ec_write_cmd(ec, ec->curr->command);
+}
+
+static void gpe_transaction(struct acpi_ec *ec, u8 status)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&ec->curr_lock, flags);
+ if (!ec->curr)
+ goto unlock;
+ if (ec->curr->wlen > ec->curr->wi) {
+ if ((status & ACPI_EC_FLAG_IBF) == 0)
+ acpi_ec_write_data(ec,
+ ec->curr->wdata[ec->curr->wi++]);
+ else
+ goto err;
+ } else if (ec->curr->rlen > ec->curr->ri) {
+ if ((status & ACPI_EC_FLAG_OBF) == 1) {
+ ec->curr->rdata[ec->curr->ri++] = acpi_ec_read_data(ec);
+ if (ec->curr->rlen == ec->curr->ri)
+ ec->curr->done = true;
+ } else
+ goto err;
+ } else if (ec->curr->wlen == ec->curr->wi &&
+ (status & ACPI_EC_FLAG_IBF) == 0)
+ ec->curr->done = true;
+ goto unlock;
+err:
+ /* false interrupt, state didn't change */
+ if (in_interrupt())
+ ++ec->curr->irq_count;
+unlock:
+ spin_unlock_irqrestore(&ec->curr_lock, flags);
+}
+
+static int acpi_ec_wait(struct acpi_ec *ec)
+{
+ if (wait_event_timeout(ec->wait, ec_transaction_done(ec),
+ msecs_to_jiffies(ACPI_EC_DELAY)))
+ return 0;
+ /* try restart command if we get any false interrupts */
+ if (ec->curr->irq_count &&
+ (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
+ pr_debug(PREFIX "controller reset, restart transaction\n");
+ start_transaction(ec);
+ if (wait_event_timeout(ec->wait, ec_transaction_done(ec),
+ msecs_to_jiffies(ACPI_EC_DELAY)))
+ return 0;
+ }
+ /* missing GPEs, switch back to poll mode */
+ if (printk_ratelimit())
+ pr_info(PREFIX "missing confirmations, "
+ "switch off interrupt mode.\n");
+ set_bit(EC_FLAGS_NO_GPE, &ec->flags);
+ clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
+ return 1;
+}
+
+static void acpi_ec_gpe_query(void *ec_cxt);
+
+static int ec_check_sci(struct acpi_ec *ec, u8 state)
+{
+ if (state & ACPI_EC_FLAG_SCI) {
+ if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
+ return acpi_os_execute(OSL_EC_BURST_HANDLER,
+ acpi_ec_gpe_query, ec);
+ }
+ return 0;
+}
+
+static int ec_poll(struct acpi_ec *ec)
+{
+ unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
+ udelay(ACPI_EC_UDELAY);
+ while (time_before(jiffies, delay)) {
+ gpe_transaction(ec, acpi_ec_read_status(ec));
+ udelay(ACPI_EC_UDELAY);
+ if (ec_transaction_done(ec))
+ return 0;
+ }
+ return -ETIME;
+}
+
+static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
+ struct transaction *t,
+ int force_poll)
+{
+ unsigned long tmp;
+ int ret = 0;
+ pr_debug(PREFIX "transaction start\n");
+ /* disable GPE during transaction if storm is detected */
+ if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
+ clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
+ acpi_disable_gpe(NULL, ec->gpe);
+ }
+ /* start transaction */
+ spin_lock_irqsave(&ec->curr_lock, tmp);
+ /* following two actions should be kept atomic */
+ ec->curr = t;
+ start_transaction(ec);
+ if (ec->curr->command == ACPI_EC_COMMAND_QUERY)
+ clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+ spin_unlock_irqrestore(&ec->curr_lock, tmp);
+ /* if we selected poll mode or failed in GPE-mode do a poll loop */
+ if (force_poll ||
+ !test_bit(EC_FLAGS_GPE_MODE, &ec->flags) ||
+ acpi_ec_wait(ec))
+ ret = ec_poll(ec);
+ pr_debug(PREFIX "transaction end\n");
+ spin_lock_irqsave(&ec->curr_lock, tmp);
+ ec->curr = NULL;
+ spin_unlock_irqrestore(&ec->curr_lock, tmp);
+ if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
+ /* check if we received SCI during transaction */
+ ec_check_sci(ec, acpi_ec_read_status(ec));
+ /* it is safe to enable GPE outside of transaction */
+ acpi_enable_gpe(NULL, ec->gpe);
+ } else if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags) &&
+ t->irq_count > ACPI_EC_STORM_THRESHOLD) {
+ pr_info(PREFIX "GPE storm detected, "
+ "transactions will use polling mode\n");
+ set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
+ }
+ return ret;
+}
+
+static int ec_check_ibf0(struct acpi_ec *ec)
+{
+ u8 status = acpi_ec_read_status(ec);
+ return (status & ACPI_EC_FLAG_IBF) == 0;
+}
+
+static int ec_wait_ibf0(struct acpi_ec *ec)
+{
+ unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
+	/* use a short manual-poll timeout if GPE mode is not active */
+ unsigned long timeout = test_bit(EC_FLAGS_GPE_MODE, &ec->flags) ?
+ msecs_to_jiffies(ACPI_EC_DELAY) : msecs_to_jiffies(1);
+ while (time_before(jiffies, delay))
+ if (wait_event_timeout(ec->wait, ec_check_ibf0(ec), timeout))
+ return 0;
+ return -ETIME;
+}
+
+static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t,
+ int force_poll)
+{
+ int status;
+ u32 glk;
+ if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
+ return -EINVAL;
+ if (t->rdata)
+ memset(t->rdata, 0, t->rlen);
+ mutex_lock(&ec->lock);
+ if (ec->global_lock) {
+ status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
+ if (ACPI_FAILURE(status)) {
+ status = -ENODEV;
+ goto unlock;
+ }
+ }
+ if (ec_wait_ibf0(ec)) {
+ pr_err(PREFIX "input buffer is not empty, "
+ "aborting transaction\n");
+ status = -ETIME;
+ goto end;
+ }
+ status = acpi_ec_transaction_unlocked(ec, t, force_poll);
+end:
+ if (ec->global_lock)
+ acpi_release_global_lock(glk);
+unlock:
+ mutex_unlock(&ec->lock);
+ return status;
+}
+
+/*
+ * Note: samsung nv5000 doesn't work with ec burst mode.
+ * http://bugzilla.kernel.org/show_bug.cgi?id=4980
+ */
+int acpi_ec_burst_enable(struct acpi_ec *ec)
+{
+ u8 d;
+ struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
+ .wdata = NULL, .rdata = &d,
+ .wlen = 0, .rlen = 1};
+
+ return acpi_ec_transaction(ec, &t, 0);
+}
+
+int acpi_ec_burst_disable(struct acpi_ec *ec)
+{
+ struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
+ .wdata = NULL, .rdata = NULL,
+ .wlen = 0, .rlen = 0};
+
+ return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
+ acpi_ec_transaction(ec, &t, 0) : 0;
+}
+
+static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data)
+{
+ int result;
+ u8 d;
+ struct transaction t = {.command = ACPI_EC_COMMAND_READ,
+ .wdata = &address, .rdata = &d,
+ .wlen = 1, .rlen = 1};
+
+ result = acpi_ec_transaction(ec, &t, 0);
+ *data = d;
+ return result;
+}
+
+static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
+{
+ u8 wdata[2] = { address, data };
+ struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
+ .wdata = wdata, .rdata = NULL,
+ .wlen = 2, .rlen = 0};
+
+ return acpi_ec_transaction(ec, &t, 0);
+}
+
+/*
+ * Externally callable EC access functions. For now, assume 1 EC only
+ */
+int ec_burst_enable(void)
+{
+ if (!first_ec)
+ return -ENODEV;
+ return acpi_ec_burst_enable(first_ec);
+}
+
+EXPORT_SYMBOL(ec_burst_enable);
+
+int ec_burst_disable(void)
+{
+ if (!first_ec)
+ return -ENODEV;
+ return acpi_ec_burst_disable(first_ec);
+}
+
+EXPORT_SYMBOL(ec_burst_disable);
+
+int ec_read(u8 addr, u8 * val)
+{
+ int err;
+ u8 temp_data;
+
+ if (!first_ec)
+ return -ENODEV;
+
+ err = acpi_ec_read(first_ec, addr, &temp_data);
+
+ if (!err) {
+ *val = temp_data;
+ return 0;
+ } else
+ return err;
+}
+
+EXPORT_SYMBOL(ec_read);
+
+int ec_write(u8 addr, u8 val)
+{
+ int err;
+
+ if (!first_ec)
+ return -ENODEV;
+
+ err = acpi_ec_write(first_ec, addr, val);
+
+ return err;
+}
+
+EXPORT_SYMBOL(ec_write);
+
+int ec_transaction(u8 command,
+ const u8 * wdata, unsigned wdata_len,
+ u8 * rdata, unsigned rdata_len,
+ int force_poll)
+{
+ struct transaction t = {.command = command,
+ .wdata = wdata, .rdata = rdata,
+ .wlen = wdata_len, .rlen = rdata_len};
+ if (!first_ec)
+ return -ENODEV;
+
+ return acpi_ec_transaction(first_ec, &t, force_poll);
+}
+
+EXPORT_SYMBOL(ec_transaction);
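A hedged usage sketch for the exported helpers above (not part of the patch): a platform driver might read-modify-write a single EC register like this. The offset 0x2f and the bit mask are made-up example values.

static int example_toggle_ec_bit(void)
{
	u8 val;
	int err;

	err = ec_read(0x2f, &val);	/* hypothetical EC register */
	if (err)
		return err;

	/* set bit 0 and write the value back */
	return ec_write(0x2f, val | 0x01);
}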
+
+static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
+{
+ int result;
+ u8 d;
+ struct transaction t = {.command = ACPI_EC_COMMAND_QUERY,
+ .wdata = NULL, .rdata = &d,
+ .wlen = 0, .rlen = 1};
+ if (!ec || !data)
+ return -EINVAL;
+
+ /*
+ * Query the EC to find out which _Qxx method we need to evaluate.
+ * Note that successful completion of the query causes the ACPI_EC_SCI
+ * bit to be cleared (and thus clearing the interrupt source).
+ */
+
+ result = acpi_ec_transaction(ec, &t, 0);
+ if (result)
+ return result;
+
+ if (!d)
+ return -ENODATA;
+
+ *data = d;
+ return 0;
+}
+
+/* --------------------------------------------------------------------------
+ Event Management
+ -------------------------------------------------------------------------- */
+int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
+ acpi_handle handle, acpi_ec_query_func func,
+ void *data)
+{
+ struct acpi_ec_query_handler *handler =
+ kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL);
+ if (!handler)
+ return -ENOMEM;
+
+ handler->query_bit = query_bit;
+ handler->handle = handle;
+ handler->func = func;
+ handler->data = data;
+ mutex_lock(&ec->lock);
+ list_add(&handler->node, &ec->list);
+ mutex_unlock(&ec->lock);
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
+
+void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
+{
+ struct acpi_ec_query_handler *handler, *tmp;
+ mutex_lock(&ec->lock);
+ list_for_each_entry_safe(handler, tmp, &ec->list, node) {
+ if (query_bit == handler->query_bit) {
+ list_del(&handler->node);
+ kfree(handler);
+ }
+ }
+ mutex_unlock(&ec->lock);
+}
+
+EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
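For illustration only (not part of the patch): a consumer that already holds a struct acpi_ec pointer could attach a callback for one _Qxx query value. The query value 0x50, the callback, and the 'ec' parameter are all assumptions in this sketch.

static int example_q50_handler(void *data)
{
	/* runs from the deferred query path when the EC returns 0x50 */
	return 0;
}

static int example_attach(struct acpi_ec *ec)
{
	/* NULL handle: call the function instead of evaluating a _Qxx method */
	return acpi_ec_add_query_handler(ec, 0x50, NULL,
					 example_q50_handler, NULL);
}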
+
+static void acpi_ec_gpe_query(void *ec_cxt)
+{
+ struct acpi_ec *ec = ec_cxt;
+ u8 value = 0;
+ struct acpi_ec_query_handler *handler, copy;
+
+ if (!ec || acpi_ec_query(ec, &value))
+ return;
+ mutex_lock(&ec->lock);
+ list_for_each_entry(handler, &ec->list, node) {
+ if (value == handler->query_bit) {
+ /* have custom handler for this bit */
+ memcpy(&copy, handler, sizeof(copy));
+ mutex_unlock(&ec->lock);
+ if (copy.func) {
+ copy.func(copy.data);
+ } else if (copy.handle) {
+ acpi_evaluate_object(copy.handle, NULL, NULL, NULL);
+ }
+ return;
+ }
+ }
+ mutex_unlock(&ec->lock);
+}
+
+static u32 acpi_ec_gpe_handler(void *data)
+{
+ struct acpi_ec *ec = data;
+ u8 status;
+
+ pr_debug(PREFIX "~~~> interrupt\n");
+ status = acpi_ec_read_status(ec);
+
+ if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) {
+ gpe_transaction(ec, status);
+ if (ec_transaction_done(ec) &&
+ (status & ACPI_EC_FLAG_IBF) == 0)
+ wake_up(&ec->wait);
+ }
+
+ ec_check_sci(ec, status);
+ if (!test_bit(EC_FLAGS_GPE_MODE, &ec->flags) &&
+ !test_bit(EC_FLAGS_NO_GPE, &ec->flags)) {
+ /* this is non-query, must be confirmation */
+ if (!test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
+ if (printk_ratelimit())
+ pr_info(PREFIX "non-query interrupt received,"
+ " switching to interrupt mode\n");
+ } else {
+ /* hush, STORM switches the mode every transaction */
+ pr_debug(PREFIX "non-query interrupt received,"
+ " switching to interrupt mode\n");
+ }
+ set_bit(EC_FLAGS_GPE_MODE, &ec->flags);
+ }
+ return ACPI_INTERRUPT_HANDLED;
+}
+
+/* --------------------------------------------------------------------------
+ Address Space Management
+ -------------------------------------------------------------------------- */
+
+static acpi_status
+acpi_ec_space_handler(u32 function, acpi_physical_address address,
+ u32 bits, acpi_integer *value,
+ void *handler_context, void *region_context)
+{
+ struct acpi_ec *ec = handler_context;
+ int result = 0, i;
+ u8 temp = 0;
+
+ if ((address > 0xFF) || !value || !handler_context)
+ return AE_BAD_PARAMETER;
+
+ if (function != ACPI_READ && function != ACPI_WRITE)
+ return AE_BAD_PARAMETER;
+
+ if (bits != 8 && acpi_strict)
+ return AE_BAD_PARAMETER;
+
+ acpi_ec_burst_enable(ec);
+
+ if (function == ACPI_READ) {
+ result = acpi_ec_read(ec, address, &temp);
+ *value = temp;
+ } else {
+ temp = 0xff & (*value);
+ result = acpi_ec_write(ec, address, temp);
+ }
+
+ for (i = 8; unlikely(bits - i > 0); i += 8) {
+ ++address;
+ if (function == ACPI_READ) {
+ result = acpi_ec_read(ec, address, &temp);
+ (*value) |= ((acpi_integer)temp) << i;
+ } else {
+ temp = 0xff & ((*value) >> i);
+ result = acpi_ec_write(ec, address, temp);
+ }
+ }
+
+ acpi_ec_burst_disable(ec);
+
+ switch (result) {
+ case -EINVAL:
+ return AE_BAD_PARAMETER;
+ case -ENODEV:
+ return AE_NOT_FOUND;
+ case -ETIME:
+ return AE_TIME;
+ default:
+ return AE_OK;
+ }
+}
+
+/* --------------------------------------------------------------------------
+ FS Interface (/proc)
+ -------------------------------------------------------------------------- */
+
+static struct proc_dir_entry *acpi_ec_dir;
+
+static int acpi_ec_read_info(struct seq_file *seq, void *offset)
+{
+ struct acpi_ec *ec = seq->private;
+
+ if (!ec)
+ goto end;
+
+ seq_printf(seq, "gpe:\t\t\t0x%02x\n", (u32) ec->gpe);
+ seq_printf(seq, "ports:\t\t\t0x%02x, 0x%02x\n",
+ (unsigned)ec->command_addr, (unsigned)ec->data_addr);
+ seq_printf(seq, "use global lock:\t%s\n",
+ ec->global_lock ? "yes" : "no");
+ end:
+ return 0;
+}
+
+static int acpi_ec_info_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_ec_read_info, PDE(inode)->data);
+}
+
+static struct file_operations acpi_ec_info_ops = {
+ .open = acpi_ec_info_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int acpi_ec_add_fs(struct acpi_device *device)
+{
+ struct proc_dir_entry *entry = NULL;
+
+ if (!acpi_device_dir(device)) {
+ acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
+ acpi_ec_dir);
+ if (!acpi_device_dir(device))
+ return -ENODEV;
+ }
+
+ entry = proc_create_data(ACPI_EC_FILE_INFO, S_IRUGO,
+ acpi_device_dir(device),
+ &acpi_ec_info_ops, acpi_driver_data(device));
+ if (!entry)
+ return -ENODEV;
+ return 0;
+}
+
+static int acpi_ec_remove_fs(struct acpi_device *device)
+{
+
+ if (acpi_device_dir(device)) {
+ remove_proc_entry(ACPI_EC_FILE_INFO, acpi_device_dir(device));
+ remove_proc_entry(acpi_device_bid(device), acpi_ec_dir);
+ acpi_device_dir(device) = NULL;
+ }
+
+ return 0;
+}
+
+/* --------------------------------------------------------------------------
+ Driver Interface
+ -------------------------------------------------------------------------- */
+static acpi_status
+ec_parse_io_ports(struct acpi_resource *resource, void *context);
+
+static struct acpi_ec *make_acpi_ec(void)
+{
+ struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);
+ if (!ec)
+ return NULL;
+ ec->flags = 1 << EC_FLAGS_QUERY_PENDING;
+ mutex_init(&ec->lock);
+ init_waitqueue_head(&ec->wait);
+ INIT_LIST_HEAD(&ec->list);
+ spin_lock_init(&ec->curr_lock);
+ return ec;
+}
+
+static acpi_status
+acpi_ec_register_query_methods(acpi_handle handle, u32 level,
+ void *context, void **return_value)
+{
+ struct acpi_namespace_node *node = handle;
+ struct acpi_ec *ec = context;
+ int value = 0;
+
+ if (sscanf(node->name.ascii, "_Q%2x", &value) == 1)
+ acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
+
+ return AE_OK;
+}
+
+static acpi_status
+ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
+{
+ acpi_status status;
+ unsigned long long tmp = 0;
+
+ struct acpi_ec *ec = context;
+ status = acpi_walk_resources(handle, METHOD_NAME__CRS,
+ ec_parse_io_ports, ec);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ /* Get GPE bit assignment (EC events). */
+ /* TODO: Add support for _GPE returning a package */
+ status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
+ if (ACPI_FAILURE(status))
+ return status;
+ ec->gpe = tmp;
+ /* Use the global lock for all EC transactions? */
+ tmp = 0;
+ acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
+ ec->global_lock = tmp;
+ ec->handle = handle;
+ return AE_CTRL_TERMINATE;
+}
+
+static void ec_remove_handlers(struct acpi_ec *ec)
+{
+ if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
+ ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
+ pr_err(PREFIX "failed to remove space handler\n");
+ if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
+ &acpi_ec_gpe_handler)))
+ pr_err(PREFIX "failed to remove gpe handler\n");
+ clear_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
+}
+
+static int acpi_ec_add(struct acpi_device *device)
+{
+ struct acpi_ec *ec = NULL;
+
+ if (!device)
+ return -EINVAL;
+ strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_EC_CLASS);
+
+ /* Check for boot EC */
+ if (boot_ec &&
+ (boot_ec->handle == device->handle ||
+ boot_ec->handle == ACPI_ROOT_OBJECT)) {
+ ec = boot_ec;
+ boot_ec = NULL;
+ } else {
+ ec = make_acpi_ec();
+ if (!ec)
+ return -ENOMEM;
+ if (ec_parse_device(device->handle, 0, ec, NULL) !=
+ AE_CTRL_TERMINATE) {
+ kfree(ec);
+ return -EINVAL;
+ }
+ }
+
+ ec->handle = device->handle;
+
+ /* Find and register all query methods */
+ acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
+ acpi_ec_register_query_methods, ec, NULL);
+
+ if (!first_ec)
+ first_ec = ec;
+ device->driver_data = ec;
+ acpi_ec_add_fs(device);
+ pr_info(PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
+ ec->gpe, ec->command_addr, ec->data_addr);
+ pr_info(PREFIX "driver started in %s mode\n",
+ (test_bit(EC_FLAGS_GPE_MODE, &ec->flags))?"interrupt":"poll");
+ return 0;
+}
+
+static int acpi_ec_remove(struct acpi_device *device, int type)
+{
+ struct acpi_ec *ec;
+ struct acpi_ec_query_handler *handler, *tmp;
+
+ if (!device)
+ return -EINVAL;
+
+ ec = acpi_driver_data(device);
+ mutex_lock(&ec->lock);
+ list_for_each_entry_safe(handler, tmp, &ec->list, node) {
+ list_del(&handler->node);
+ kfree(handler);
+ }
+ mutex_unlock(&ec->lock);
+ acpi_ec_remove_fs(device);
+ device->driver_data = NULL;
+ if (ec == first_ec)
+ first_ec = NULL;
+ kfree(ec);
+ return 0;
+}
+
+static acpi_status
+ec_parse_io_ports(struct acpi_resource *resource, void *context)
+{
+ struct acpi_ec *ec = context;
+
+ if (resource->type != ACPI_RESOURCE_TYPE_IO)
+ return AE_OK;
+
+ /*
+ * The first address region returned is the data port, and
+ * the second address region returned is the status/command
+ * port.
+ */
+ if (ec->data_addr == 0)
+ ec->data_addr = resource->data.io.minimum;
+ else if (ec->command_addr == 0)
+ ec->command_addr = resource->data.io.minimum;
+ else
+ return AE_CTRL_TERMINATE;
+
+ return AE_OK;
+}
+
+static int ec_install_handlers(struct acpi_ec *ec)
+{
+ acpi_status status;
+ if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
+ return 0;
+ status = acpi_install_gpe_handler(NULL, ec->gpe,
+ ACPI_GPE_EDGE_TRIGGERED,
+ &acpi_ec_gpe_handler, ec);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+ acpi_set_gpe_type(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME);
+ acpi_enable_gpe(NULL, ec->gpe);
+ status = acpi_install_address_space_handler(ec->handle,
+ ACPI_ADR_SPACE_EC,
+ &acpi_ec_space_handler,
+ NULL, ec);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_NOT_FOUND) {
+ /*
+ * The OS may have failed to evaluate the _REG object.
+ * Ignore the AE_NOT_FOUND error and continue
+ * initializing the EC anyway.
+ */
+ printk(KERN_ERR "Failed to evaluate the _REG object"
+ " of the EC device. A broken BIOS is suspected.\n");
+ } else {
+ acpi_remove_gpe_handler(NULL, ec->gpe,
+ &acpi_ec_gpe_handler);
+ return -ENODEV;
+ }
+ }
+
+ set_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
+ return 0;
+}
+
+static int acpi_ec_start(struct acpi_device *device)
+{
+ struct acpi_ec *ec;
+ int ret = 0;
+
+ if (!device)
+ return -EINVAL;
+
+ ec = acpi_driver_data(device);
+
+ if (!ec)
+ return -EINVAL;
+
+ ret = ec_install_handlers(ec);
+
+ /* EC is fully operational, allow queries */
+ clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+ return ret;
+}
+
+static int acpi_ec_stop(struct acpi_device *device, int type)
+{
+ struct acpi_ec *ec;
+ if (!device)
+ return -EINVAL;
+ ec = acpi_driver_data(device);
+ if (!ec)
+ return -EINVAL;
+ ec_remove_handlers(ec);
+
+ return 0;
+}
+
+int __init acpi_boot_ec_enable(void)
+{
+ if (!boot_ec || test_bit(EC_FLAGS_HANDLERS_INSTALLED, &boot_ec->flags))
+ return 0;
+ if (!ec_install_handlers(boot_ec)) {
+ first_ec = boot_ec;
+ return 0;
+ }
+ return -EFAULT;
+}
+
+static const struct acpi_device_id ec_device_ids[] = {
+ {"PNP0C09", 0},
+ {"", 0},
+};
+
+int __init acpi_ec_ecdt_probe(void)
+{
+ int ret;
+ acpi_status status;
+ struct acpi_table_ecdt *ecdt_ptr;
+
+ boot_ec = make_acpi_ec();
+ if (!boot_ec)
+ return -ENOMEM;
+ /*
+ * Generate a boot ec context
+ */
+ status = acpi_get_table(ACPI_SIG_ECDT, 1,
+ (struct acpi_table_header **)&ecdt_ptr);
+ if (ACPI_SUCCESS(status)) {
+ pr_info(PREFIX "EC description table is found, configuring boot EC\n");
+ boot_ec->command_addr = ecdt_ptr->control.address;
+ boot_ec->data_addr = ecdt_ptr->data.address;
+ if (dmi_check_system(ec_dmi_table)) {
+ /*
+ * If the board matches an entry in ec_dmi_table, its
+ * ECDT reports the command/status and data I/O
+ * addresses swapped, so swap them back here.
+ */
+ boot_ec->data_addr = ecdt_ptr->control.address;
+ boot_ec->command_addr = ecdt_ptr->data.address;
+ }
+ boot_ec->gpe = ecdt_ptr->gpe;
+ boot_ec->handle = ACPI_ROOT_OBJECT;
+ acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle);
+ } else {
+ /* This workaround is needed only on some broken machines,
+ * which require an early EC but fail to provide an ECDT */
+ acpi_handle x;
+ printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n");
+ status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
+ boot_ec, NULL);
+ /* Check that acpi_get_devices actually found something */
+ if (ACPI_FAILURE(status) || !boot_ec->handle)
+ goto error;
+ /* Limit this workaround: the only ASUS machines that need it
+ * have a fake EC._INI method, so use its presence as a flag.
+ * Keep the boot_ec struct, as it will be needed soon.
+ */
+ if (ACPI_FAILURE(acpi_get_handle(boot_ec->handle, "_INI", &x)))
+ return -ENODEV;
+ }
+
+ ret = ec_install_handlers(boot_ec);
+ if (!ret) {
+ first_ec = boot_ec;
+ return 0;
+ }
+ error:
+ kfree(boot_ec);
+ boot_ec = NULL;
+ return -ENODEV;
+}
+
+static int acpi_ec_suspend(struct acpi_device *device, pm_message_t state)
+{
+ struct acpi_ec *ec = acpi_driver_data(device);
+ /* Stop using GPE */
+ set_bit(EC_FLAGS_NO_GPE, &ec->flags);
+ clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
+ acpi_disable_gpe(NULL, ec->gpe);
+ return 0;
+}
+
+static int acpi_ec_resume(struct acpi_device *device)
+{
+ struct acpi_ec *ec = acpi_driver_data(device);
+ /* Enable use of GPE back */
+ clear_bit(EC_FLAGS_NO_GPE, &ec->flags);
+ acpi_enable_gpe(NULL, ec->gpe);
+ return 0;
+}
+
+static struct acpi_driver acpi_ec_driver = {
+ .name = "ec",
+ .class = ACPI_EC_CLASS,
+ .ids = ec_device_ids,
+ .ops = {
+ .add = acpi_ec_add,
+ .remove = acpi_ec_remove,
+ .start = acpi_ec_start,
+ .stop = acpi_ec_stop,
+ .suspend = acpi_ec_suspend,
+ .resume = acpi_ec_resume,
+ },
+};
+
+static int __init acpi_ec_init(void)
+{
+ int result = 0;
+
+ if (acpi_disabled)
+ return 0;
+
+ acpi_ec_dir = proc_mkdir(ACPI_EC_CLASS, acpi_root_dir);
+ if (!acpi_ec_dir)
+ return -ENODEV;
+
+ /* Now register the driver for the EC */
+ result = acpi_bus_register_driver(&acpi_ec_driver);
+ if (result < 0) {
+ remove_proc_entry(ACPI_EC_CLASS, acpi_root_dir);
+ return -ENODEV;
+ }
+
+ return result;
+}
+
+subsys_initcall(acpi_ec_init);
+
+/* EC driver currently not unloadable */
+#if 0
+static void __exit acpi_ec_exit(void)
+{
+
+ acpi_bus_unregister_driver(&acpi_ec_driver);
+
+ remove_proc_entry(ACPI_EC_CLASS, acpi_root_dir);
+
+ return;
+}
+#endif /* 0 */
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
new file mode 100644
index 0000000..0c24bd4
--- /dev/null
+++ b/drivers/acpi/event.c
@@ -0,0 +1,307 @@
+/*
+ * event.c - exporting ACPI events via procfs
+ *
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *
+ */
+
+#include <linux/spinlock.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <acpi/acpi_drivers.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#define _COMPONENT ACPI_SYSTEM_COMPONENT
+ACPI_MODULE_NAME("event");
+
+#ifdef CONFIG_ACPI_PROC_EVENT
+/* Global vars for handling event proc entry */
+static DEFINE_SPINLOCK(acpi_system_event_lock);
+int event_is_open = 0;
+extern struct list_head acpi_bus_event_list;
+extern wait_queue_head_t acpi_bus_event_queue;
+
+static int acpi_system_open_event(struct inode *inode, struct file *file)
+{
+ spin_lock_irq(&acpi_system_event_lock);
+
+ if (event_is_open)
+ goto out_busy;
+
+ event_is_open = 1;
+
+ spin_unlock_irq(&acpi_system_event_lock);
+ return 0;
+
+ out_busy:
+ spin_unlock_irq(&acpi_system_event_lock);
+ return -EBUSY;
+}
+
+static ssize_t
+acpi_system_read_event(struct file *file, char __user * buffer, size_t count,
+ loff_t * ppos)
+{
+ int result = 0;
+ struct acpi_bus_event event;
+ static char str[ACPI_MAX_STRING];
+ static int chars_remaining = 0;
+ static char *ptr;
+
+ if (!chars_remaining) {
+ memset(&event, 0, sizeof(struct acpi_bus_event));
+
+ if ((file->f_flags & O_NONBLOCK)
+ && (list_empty(&acpi_bus_event_list)))
+ return -EAGAIN;
+
+ result = acpi_bus_receive_event(&event);
+ if (result)
+ return result;
+
+ chars_remaining = sprintf(str, "%s %s %08x %08x\n",
+ event.device_class ? event.device_class : "<unknown>",
+ event.bus_id ? event.bus_id : "<unknown>",
+ event.type, event.data);
+ ptr = str;
+ }
+
+ if (chars_remaining < count) {
+ count = chars_remaining;
+ }
+
+ if (copy_to_user(buffer, ptr, count))
+ return -EFAULT;
+
+ *ppos += count;
+ chars_remaining -= count;
+ ptr += count;
+
+ return count;
+}
+
+static int acpi_system_close_event(struct inode *inode, struct file *file)
+{
+ spin_lock_irq(&acpi_system_event_lock);
+ event_is_open = 0;
+ spin_unlock_irq(&acpi_system_event_lock);
+ return 0;
+}
+
+static unsigned int acpi_system_poll_event(struct file *file, poll_table * wait)
+{
+ poll_wait(file, &acpi_bus_event_queue, wait);
+ if (!list_empty(&acpi_bus_event_list))
+ return POLLIN | POLLRDNORM;
+ return 0;
+}
+
+static const struct file_operations acpi_system_event_ops = {
+ .owner = THIS_MODULE,
+ .open = acpi_system_open_event,
+ .read = acpi_system_read_event,
+ .release = acpi_system_close_event,
+ .poll = acpi_system_poll_event,
+};
+#endif /* CONFIG_ACPI_PROC_EVENT */
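Each record read from /proc/acpi/event is produced by the sprintf() above as "<device_class> <bus_id> <type> <data>", with type and data printed as eight hex digits, and only one reader may hold the file open at a time. A minimal user-space reader, sketched here in plain POSIX C, could parse the stream like this:

	#include <stdio.h>

	int main(void)
	{
		char device_class[64], bus_id[64];
		unsigned int type, data;
		FILE *f = fopen("/proc/acpi/event", "r");

		if (!f)
			return 1;
		/* Reads block until an event arrives unless O_NONBLOCK is set. */
		while (fscanf(f, "%63s %63s %x %x",
			      device_class, bus_id, &type, &data) == 4)
			printf("%s %s %08x %08x\n", device_class, bus_id, type, data);
		fclose(f);
		return 0;
	}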
+
+/* ACPI notifier chain */
+static BLOCKING_NOTIFIER_HEAD(acpi_chain_head);
+
+int acpi_notifier_call_chain(struct acpi_device *dev, u32 type, u32 data)
+{
+ struct acpi_bus_event event;
+
+ strcpy(event.device_class, dev->pnp.device_class);
+ strcpy(event.bus_id, dev->pnp.bus_id);
+ event.type = type;
+ event.data = data;
+ return (blocking_notifier_call_chain(&acpi_chain_head, 0, (void *)&event)
+ == NOTIFY_BAD) ? -EINVAL : 0;
+}
+EXPORT_SYMBOL(acpi_notifier_call_chain);
+
+int register_acpi_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&acpi_chain_head, nb);
+}
+EXPORT_SYMBOL(register_acpi_notifier);
+
+int unregister_acpi_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&acpi_chain_head, nb);
+}
+EXPORT_SYMBOL(unregister_acpi_notifier);
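A consumer of this notifier chain receives the struct acpi_bus_event filled in by acpi_notifier_call_chain() above as the data argument. The sketch below is illustrative only; the handler and notifier_block names are made up, and the header locations are assumptions.

	#include <linux/kernel.h>
	#include <linux/notifier.h>
	#include <acpi/acpi_bus.h>	/* struct acpi_bus_event (assumed header) */

	static int example_acpi_notify(struct notifier_block *nb,
				       unsigned long action, void *data)
	{
		struct acpi_bus_event *event = data;

		pr_debug("acpi event: %s %s %08x %08x\n", event->device_class,
			 event->bus_id, event->type, event->data);
		return NOTIFY_OK;
	}

	static struct notifier_block example_acpi_nb = {
		.notifier_call = example_acpi_notify,
	};

	/* register_acpi_notifier(&example_acpi_nb) in the consumer's init path,
	 * unregister_acpi_notifier(&example_acpi_nb) on teardown. */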
+
+#ifdef CONFIG_NET
+static unsigned int acpi_event_seqnum;
+struct acpi_genl_event {
+ acpi_device_class device_class;
+ char bus_id[15];
+ u32 type;
+ u32 data;
+};
+
+/* attributes of acpi_genl_family */
+enum {
+ ACPI_GENL_ATTR_UNSPEC,
+ ACPI_GENL_ATTR_EVENT, /* ACPI event info needed by user space */
+ __ACPI_GENL_ATTR_MAX,
+};
+#define ACPI_GENL_ATTR_MAX (__ACPI_GENL_ATTR_MAX - 1)
+
+/* commands supported by the acpi_genl_family */
+enum {
+ ACPI_GENL_CMD_UNSPEC,
+ ACPI_GENL_CMD_EVENT, /* kernel->user notifications for ACPI events */
+ __ACPI_GENL_CMD_MAX,
+};
+#define ACPI_GENL_CMD_MAX (__ACPI_GENL_CMD_MAX - 1)
+
+#define ACPI_GENL_FAMILY_NAME "acpi_event"
+#define ACPI_GENL_VERSION 0x01
+#define ACPI_GENL_MCAST_GROUP_NAME "acpi_mc_group"
+
+static struct genl_family acpi_event_genl_family = {
+ .id = GENL_ID_GENERATE,
+ .name = ACPI_GENL_FAMILY_NAME,
+ .version = ACPI_GENL_VERSION,
+ .maxattr = ACPI_GENL_ATTR_MAX,
+};
+
+static struct genl_multicast_group acpi_event_mcgrp = {
+ .name = ACPI_GENL_MCAST_GROUP_NAME,
+};
+
+int acpi_bus_generate_netlink_event(const char *device_class,
+ const char *bus_id,
+ u8 type, int data)
+{
+ struct sk_buff *skb;
+ struct nlattr *attr;
+ struct acpi_genl_event *event;
+ void *msg_header;
+ int size;
+ int result;
+
+ /* allocate memory */
+ size = nla_total_size(sizeof(struct acpi_genl_event)) +
+ nla_total_size(0);
+
+ skb = genlmsg_new(size, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ /* add the genetlink message header */
+ msg_header = genlmsg_put(skb, 0, acpi_event_seqnum++,
+ &acpi_event_genl_family, 0,
+ ACPI_GENL_CMD_EVENT);
+ if (!msg_header) {
+ nlmsg_free(skb);
+ return -ENOMEM;
+ }
+
+ /* fill the data */
+ attr = nla_reserve(skb, ACPI_GENL_ATTR_EVENT,
+ sizeof(struct acpi_genl_event));
+ if (!attr) {
+ nlmsg_free(skb);
+ return -EINVAL;
+ }
+
+ event = nla_data(attr);
+ if (!event) {
+ nlmsg_free(skb);
+ return -EINVAL;
+ }
+
+ memset(event, 0, sizeof(struct acpi_genl_event));
+
+ strcpy(event->device_class, device_class);
+ strcpy(event->bus_id, bus_id);
+ event->type = type;
+ event->data = data;
+
+ /* send multicast genetlink message */
+ result = genlmsg_end(skb, msg_header);
+ if (result < 0) {
+ nlmsg_free(skb);
+ return result;
+ }
+
+ result = genlmsg_multicast(skb, 0, acpi_event_mcgrp.id, GFP_ATOMIC);
+ if (result)
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Failed to send a Genetlink message!\n"));
+ return 0;
+}
+
+EXPORT_SYMBOL(acpi_bus_generate_netlink_event);
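For reference, a typical caller forwards an ACPI notify value straight to user space, roughly as in this sketch; dev and event are placeholders for the caller's struct acpi_device pointer and notify code.

	/* dev: struct acpi_device *, event: ACPI notify value such as 0x80. */
	acpi_bus_generate_netlink_event(dev->pnp.device_class,
					dev->pnp.bus_id, event, 0);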
+
+static int acpi_event_genetlink_init(void)
+{
+ int result;
+
+ result = genl_register_family(&acpi_event_genl_family);
+ if (result)
+ return result;
+
+ result = genl_register_mc_group(&acpi_event_genl_family,
+ &acpi_event_mcgrp);
+ if (result)
+ genl_unregister_family(&acpi_event_genl_family);
+
+ return result;
+}
+
+#else
+int acpi_bus_generate_netlink_event(const char *device_class,
+ const char *bus_id,
+ u8 type, int data)
+{
+ return 0;
+}
+
+EXPORT_SYMBOL(acpi_bus_generate_netlink_event);
+
+static int acpi_event_genetlink_init(void)
+{
+ return -ENODEV;
+}
+#endif
+
+static int __init acpi_event_init(void)
+{
+#ifdef CONFIG_ACPI_PROC_EVENT
+ struct proc_dir_entry *entry;
+#endif
+ int error = 0;
+
+ if (acpi_disabled)
+ return 0;
+
+ /* create genetlink for acpi event */
+ error = acpi_event_genetlink_init();
+ if (error)
+ printk(KERN_WARNING PREFIX
+ "Failed to create genetlink family for ACPI event\n");
+
+#ifdef CONFIG_ACPI_PROC_EVENT
+ /* 'event' [R] */
+ entry = proc_create("event", S_IRUSR, acpi_root_dir,
+ &acpi_system_event_ops);
+ if (!entry)
+ return -ENODEV;
+#endif
+
+ return 0;
+}
+
+fs_initcall(acpi_event_init);
diff --git a/drivers/acpi/events/Makefile b/drivers/acpi/events/Makefile
new file mode 100644
index 0000000..d29f2ee
--- /dev/null
+++ b/drivers/acpi/events/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for all Linux ACPI interpreter subdirectories
+#
+
+obj-y := evevent.o evregion.o evsci.o evxfevnt.o \
+ evmisc.o evrgnini.o evxface.o evxfregn.o \
+ evgpe.o evgpeblk.o
+
+EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/events/evevent.c b/drivers/acpi/events/evevent.c
new file mode 100644
index 0000000..c56c5c6
--- /dev/null
+++ b/drivers/acpi/events/evevent.c
@@ -0,0 +1,312 @@
+/******************************************************************************
+ *
+ * Module Name: evevent - Fixed Event handling and dispatch
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acevents.h>
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evevent")
+
+/* Local prototypes */
+static acpi_status acpi_ev_fixed_event_initialize(void);
+
+static u32 acpi_ev_fixed_event_dispatch(u32 event);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_initialize_events
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initialize global data structures for ACPI events (Fixed, GPE)
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_initialize_events(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_initialize_events);
+
+ /*
+ * Initialize the Fixed and General Purpose Events. This is done prior to
+ * enabling SCIs to prevent interrupts from occurring before the handlers are
+ * installed.
+ */
+ status = acpi_ev_fixed_event_initialize();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Unable to initialize fixed events"));
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_ev_gpe_initialize();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Unable to initialize general purpose events"));
+ return_ACPI_STATUS(status);
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_install_fadt_gpes
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Completes initialization of the FADT-defined GPE blocks
+ * (0 and 1). This causes the _PRW methods to be run, so the HW
+ * must be fully initialized at this point, including global lock
+ * support.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_install_fadt_gpes(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_install_fadt_gpes);
+
+ /* Namespace must be locked */
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* FADT GPE Block 0 */
+
+ (void)acpi_ev_initialize_gpe_block(acpi_gbl_fadt_gpe_device,
+ acpi_gbl_gpe_fadt_blocks[0]);
+
+ /* FADT GPE Block 1 */
+
+ (void)acpi_ev_initialize_gpe_block(acpi_gbl_fadt_gpe_device,
+ acpi_gbl_gpe_fadt_blocks[1]);
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_install_xrupt_handlers
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install interrupt handlers for the SCI and Global Lock
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_install_xrupt_handlers(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_install_xrupt_handlers);
+
+ /* Install the SCI handler */
+
+ status = acpi_ev_install_sci_handler();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Unable to install System Control Interrupt handler"));
+ return_ACPI_STATUS(status);
+ }
+
+ /* Install the handler for the Global Lock */
+
+ status = acpi_ev_init_global_lock_handler();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Unable to initialize Global Lock handler"));
+ return_ACPI_STATUS(status);
+ }
+
+ acpi_gbl_events_initialized = TRUE;
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_fixed_event_initialize
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initialize the fixed event handler structures and disable the
+ * fixed events (each event is enabled when a handler is installed).
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_ev_fixed_event_initialize(void)
+{
+ u32 i;
+ acpi_status status;
+
+ /*
+ * Initialize the structure that keeps track of fixed event handlers
+ * and disable all fixed events.
+ */
+ for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
+ acpi_gbl_fixed_event_handlers[i].handler = NULL;
+ acpi_gbl_fixed_event_handlers[i].context = NULL;
+
+ /* Disable the fixed event; it is enabled when a handler is installed */
+
+ if (acpi_gbl_fixed_event_info[i].enable_register_id != 0xFF) {
+ status =
+ acpi_set_register(acpi_gbl_fixed_event_info[i].
+ enable_register_id, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+ }
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_fixed_event_detect
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
+ *
+ * DESCRIPTION: Checks the PM status register for active fixed events
+ *
+ ******************************************************************************/
+
+u32 acpi_ev_fixed_event_detect(void)
+{
+ u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
+ u32 fixed_status;
+ u32 fixed_enable;
+ u32 i;
+
+ ACPI_FUNCTION_NAME(ev_fixed_event_detect);
+
+ /*
+ * Read the fixed feature status and enable registers, as all the cases
+ * depend on their values. Ignore errors here.
+ */
+ (void)acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
+ (void)acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
+ "Fixed Event Block: Enable %08X Status %08X\n",
+ fixed_enable, fixed_status));
+
+ /*
+ * Check for all possible Fixed Events and dispatch those that are active
+ */
+ for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
+
+ /* Both the status and enable bits must be on for this event */
+
+ if ((fixed_status & acpi_gbl_fixed_event_info[i].
+ status_bit_mask)
+ && (fixed_enable & acpi_gbl_fixed_event_info[i].
+ enable_bit_mask)) {
+
+ /* Found an active (signalled) event */
+ acpi_os_fixed_event_count(i);
+ int_status |= acpi_ev_fixed_event_dispatch(i);
+ }
+ }
+
+ return (int_status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_fixed_event_dispatch
+ *
+ * PARAMETERS: Event - Event type
+ *
+ * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
+ *
+ * DESCRIPTION: Clears the status bit for the requested event and calls the
+ * handler that was previously registered for the event.
+ *
+ ******************************************************************************/
+
+static u32 acpi_ev_fixed_event_dispatch(u32 event)
+{
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* Clear the status bit */
+
+ (void)acpi_set_register(acpi_gbl_fixed_event_info[event].
+ status_register_id, 1);
+
+ /*
+ * Make sure we've got a handler. If not, report an error.
+ * The event is disabled to prevent further interrupts.
+ */
+ if (NULL == acpi_gbl_fixed_event_handlers[event].handler) {
+ (void)acpi_set_register(acpi_gbl_fixed_event_info[event].
+ enable_register_id, 0);
+
+ ACPI_ERROR((AE_INFO,
+ "No installed handler for fixed event [%08X]",
+ event));
+
+ return (ACPI_INTERRUPT_NOT_HANDLED);
+ }
+
+ /* Invoke the Fixed Event handler */
+
+ return ((acpi_gbl_fixed_event_handlers[event].
+ handler) (acpi_gbl_fixed_event_handlers[event].context));
+}
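The handlers invoked by acpi_ev_fixed_event_dispatch() are registered through the public acpi_install_fixed_event_handler() interface (implemented in evxface.c). A hedged sketch of installing a power-button handler, assuming the standard ACPICA prototype and that <acpi/acpi.h> is already included, looks like this:

	/* Runs from the fixed-event dispatch path above, at interrupt level. */
	static u32 example_power_button_handler(void *context)
	{
		return ACPI_INTERRUPT_HANDLED;
	}

	static acpi_status example_install(void)
	{
		return acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
							example_power_button_handler,
							NULL);
	}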
diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c
new file mode 100644
index 0000000..f45c74f
--- /dev/null
+++ b/drivers/acpi/events/evgpe.c
@@ -0,0 +1,725 @@
+/******************************************************************************
+ *
+ * Module Name: evgpe - General Purpose Event handling and dispatch
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acevents.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evgpe")
+
+/* Local prototypes */
+static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_set_gpe_type
+ *
+ * PARAMETERS: gpe_event_info - GPE to set
+ * Type - New type
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Sets the new type for the GPE (wake, run, or wake/run)
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_set_gpe_type);
+
+ /* Validate type and update register enable masks */
+
+ switch (type) {
+ case ACPI_GPE_TYPE_WAKE:
+ case ACPI_GPE_TYPE_RUNTIME:
+ case ACPI_GPE_TYPE_WAKE_RUN:
+ break;
+
+ default:
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Disable the GPE if currently enabled */
+
+ status = acpi_ev_disable_gpe(gpe_event_info);
+
+ /* Type was validated above */
+
+ gpe_event_info->flags &= ~ACPI_GPE_TYPE_MASK; /* Clear type bits */
+ gpe_event_info->flags |= type; /* Insert type */
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_update_gpe_enable_masks
+ *
+ * PARAMETERS: gpe_event_info - GPE to update
+ * Type - What to do: ACPI_GPE_DISABLE or
+ * ACPI_GPE_ENABLE
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Updates GPE register enable masks based on the GPE type
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
+ u8 type)
+{
+ struct acpi_gpe_register_info *gpe_register_info;
+ u8 register_bit;
+
+ ACPI_FUNCTION_TRACE(ev_update_gpe_enable_masks);
+
+ gpe_register_info = gpe_event_info->register_info;
+ if (!gpe_register_info) {
+ return_ACPI_STATUS(AE_NOT_EXIST);
+ }
+ register_bit = (u8)
+ (1 <<
+ (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));
+
+ /* 1) Disable case. Simply clear all enable bits */
+
+ if (type == ACPI_GPE_DISABLE) {
+ ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
+ register_bit);
+ ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* 2) Enable case. Set/Clear the appropriate enable bits */
+
+ switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
+ case ACPI_GPE_TYPE_WAKE:
+ ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
+ ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
+ break;
+
+ case ACPI_GPE_TYPE_RUNTIME:
+ ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
+ register_bit);
+ ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
+ break;
+
+ case ACPI_GPE_TYPE_WAKE_RUN:
+ ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
+ ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
+ break;
+
+ default:
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_enable_gpe
+ *
+ * PARAMETERS: gpe_event_info - GPE to enable
+ * write_to_hardware - Enable now, or just mark data structs
+ * (WAKE GPEs should be deferred)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Enable a GPE based on the GPE type
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info,
+ u8 write_to_hardware)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_enable_gpe);
+
+ /* Make sure HW enable masks are updated */
+
+ status =
+ acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_ENABLE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Mark wake-enabled or HW enable, or both */
+
+ switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
+ case ACPI_GPE_TYPE_WAKE:
+
+ ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
+ break;
+
+ case ACPI_GPE_TYPE_WAKE_RUN:
+
+ ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
+
+ /*lint -fallthrough */
+
+ case ACPI_GPE_TYPE_RUNTIME:
+
+ ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);
+
+ if (write_to_hardware) {
+
+ /* Clear the GPE (of stale events), then enable it */
+
+ status = acpi_hw_clear_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Enable the requested runtime GPE */
+
+ status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
+ }
+ break;
+
+ default:
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_disable_gpe
+ *
+ * PARAMETERS: gpe_event_info - GPE to disable
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Disable a GPE based on the GPE type
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_disable_gpe);
+
+ /* Make sure HW enable masks are updated */
+
+ status =
+ acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_DISABLE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Clear the appropriate enabled flags for this GPE */
+
+ switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
+ case ACPI_GPE_TYPE_WAKE:
+ ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
+ break;
+
+ case ACPI_GPE_TYPE_WAKE_RUN:
+ ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
+
+ /* fallthrough */
+
+ case ACPI_GPE_TYPE_RUNTIME:
+
+ /* Disable the requested runtime GPE */
+
+ ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);
+ break;
+
+ default:
+ break;
+ }
+
+ /*
+ * Even if we don't know the GPE type, make sure that we always
+ * disable it. low_disable_gpe will just clear the enable bit for this
+ * GPE and write it. It will not write out the current GPE enable mask,
+ * since this may inadvertently enable GPEs too early, if a rogue GPE has
+ * come in during ACPICA initialization - possibly as a result of AML or
+ * other code that has enabled the GPE.
+ */
+ status = acpi_hw_low_disable_gpe(gpe_event_info);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_get_gpe_event_info
+ *
+ * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1
+ * gpe_number - Raw GPE number
+ *
+ * RETURN: A GPE event_info struct. NULL if not a valid GPE
+ *
+ * DESCRIPTION: Returns the event_info struct associated with this GPE.
+ * Validates the gpe_block and the gpe_number
+ *
+ * Should be called only when the GPE lists are semaphore locked
+ * and not subject to change.
+ *
+ ******************************************************************************/
+
+struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
+ u32 gpe_number)
+{
+ union acpi_operand_object *obj_desc;
+ struct acpi_gpe_block_info *gpe_block;
+ u32 i;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* A NULL gpe_block means use the FADT-defined GPE block(s) */
+
+ if (!gpe_device) {
+
+ /* Examine GPE Block 0 and 1 (These blocks are permanent) */
+
+ for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
+ gpe_block = acpi_gbl_gpe_fadt_blocks[i];
+ if (gpe_block) {
+ if ((gpe_number >= gpe_block->block_base_number)
+ && (gpe_number <
+ gpe_block->block_base_number +
+ (gpe_block->register_count * 8))) {
+ return (&gpe_block->
+ event_info[gpe_number -
+ gpe_block->
+ block_base_number]);
+ }
+ }
+ }
+
+ /* The gpe_number was not in the range of either FADT GPE block */
+
+ return (NULL);
+ }
+
+ /* A Non-NULL gpe_device means this is a GPE Block Device */
+
+ obj_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *)
+ gpe_device);
+ if (!obj_desc || !obj_desc->device.gpe_block) {
+ return (NULL);
+ }
+
+ gpe_block = obj_desc->device.gpe_block;
+
+ if ((gpe_number >= gpe_block->block_base_number) &&
+ (gpe_number <
+ gpe_block->block_base_number + (gpe_block->register_count * 8))) {
+ return (&gpe_block->
+ event_info[gpe_number - gpe_block->block_base_number]);
+ }
+
+ return (NULL);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_gpe_detect
+ *
+ * PARAMETERS: gpe_xrupt_list - Interrupt block for this interrupt.
+ * Can have multiple GPE blocks attached.
+ *
+ * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
+ *
+ * DESCRIPTION: Detect if any GP events have occurred. This function is
+ * executed at interrupt level.
+ *
+ ******************************************************************************/
+
+u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
+{
+ acpi_status status;
+ struct acpi_gpe_block_info *gpe_block;
+ struct acpi_gpe_register_info *gpe_register_info;
+ u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
+ u8 enabled_status_byte;
+ u32 status_reg;
+ u32 enable_reg;
+ acpi_cpu_flags flags;
+ u32 i;
+ u32 j;
+
+ ACPI_FUNCTION_NAME(ev_gpe_detect);
+
+ /* Check for the case where there are no GPEs */
+
+ if (!gpe_xrupt_list) {
+ return (int_status);
+ }
+
+ /*
+ * We need to obtain the GPE lock for both the data structs and registers.
+ * Note: it is not necessary to obtain the hardware lock, since the GPE
+ * registers are owned by the gpe_lock.
+ */
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Examine all GPE blocks attached to this interrupt level */
+
+ gpe_block = gpe_xrupt_list->gpe_block_list_head;
+ while (gpe_block) {
+ /*
+ * Read all of the 8-bit GPE status and enable registers
+ * in this GPE block, saving all of them.
+ * Find all currently active GP events.
+ */
+ for (i = 0; i < gpe_block->register_count; i++) {
+
+ /* Get the next status/enable pair */
+
+ gpe_register_info = &gpe_block->register_info[i];
+
+ /* Read the Status Register */
+
+ status =
+ acpi_hw_low_level_read(ACPI_GPE_REGISTER_WIDTH,
+ &status_reg,
+ &gpe_register_info->
+ status_address);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+
+ /* Read the Enable Register */
+
+ status =
+ acpi_hw_low_level_read(ACPI_GPE_REGISTER_WIDTH,
+ &enable_reg,
+ &gpe_register_info->
+ enable_address);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
+ "Read GPE Register at GPE%X: Status=%02X, Enable=%02X\n",
+ gpe_register_info->base_gpe_number,
+ status_reg, enable_reg));
+
+ /* Check if there is anything active at all in this register */
+
+ enabled_status_byte = (u8) (status_reg & enable_reg);
+ if (!enabled_status_byte) {
+
+ /* No active GPEs in this register, move on */
+
+ continue;
+ }
+
+ /* Now look at the individual GPEs in this byte register */
+
+ for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
+
+ /* Examine one GPE bit */
+
+ if (enabled_status_byte & (1 << j)) {
+ /*
+ * Found an active GPE. Dispatch the event to a handler
+ * or method.
+ */
+ int_status |=
+ acpi_ev_gpe_dispatch(&gpe_block->event_info[
+ ((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j],
+ j + gpe_register_info->base_gpe_number);
+ }
+ }
+ }
+
+ gpe_block = gpe_block->next;
+ }
+
+ unlock_and_exit:
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return (int_status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_asynch_execute_gpe_method
+ *
+ * PARAMETERS: Context (gpe_event_info) - Info for this GPE
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Perform the actual execution of a GPE control method. This
+ * function is called from an invocation of acpi_os_execute and
+ * therefore does NOT execute at interrupt level - so that
+ * the control method itself is not executed in the context of
+ * an interrupt handler.
+ *
+ ******************************************************************************/
+static void acpi_ev_asynch_enable_gpe(void *context);
+
+static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
+{
+ struct acpi_gpe_event_info *gpe_event_info = (void *)context;
+ acpi_status status;
+ struct acpi_gpe_event_info local_gpe_event_info;
+ struct acpi_evaluate_info *info;
+
+ ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
+
+ /* Must revalidate the gpe_number/gpe_block */
+
+ if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
+ status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return_VOID;
+ }
+
+ /* Set the GPE flags for return to enabled state */
+
+ (void)acpi_ev_enable_gpe(gpe_event_info, FALSE);
+
+ /*
+ * Take a snapshot of the GPE info for this level - we copy the
+ * info to prevent a race condition with remove_handler/remove_block.
+ */
+ ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info,
+ sizeof(struct acpi_gpe_event_info));
+
+ status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
+
+ /*
+ * Must check for control method type dispatch one more
+ * time to avoid race with ev_gpe_install_handler
+ */
+ if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_METHOD) {
+
+ /* Allocate the evaluation information block */
+
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ status = AE_NO_MEMORY;
+ } else {
+ /*
+ * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx
+ * control method that corresponds to this GPE
+ */
+ info->prefix_node =
+ local_gpe_event_info.dispatch.method_node;
+ info->flags = ACPI_IGNORE_RETURN_VALUE;
+
+ status = acpi_ns_evaluate(info);
+ ACPI_FREE(info);
+ }
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "while evaluating GPE method [%4.4s]",
+ acpi_ut_get_node_name
+ (local_gpe_event_info.dispatch.
+ method_node)));
+ }
+ }
+ /* Defer enabling of GPE until all notify handlers are done */
+ acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe,
+ gpe_event_info);
+ return_VOID;
+}
+
+static void acpi_ev_asynch_enable_gpe(void *context)
+{
+ struct acpi_gpe_event_info *gpe_event_info = context;
+ acpi_status status;
+ if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
+ ACPI_GPE_LEVEL_TRIGGERED) {
+ /*
+ * GPE is level-triggered, we clear the GPE status bit after
+ * handling the event.
+ */
+ status = acpi_hw_clear_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
+ }
+
+ /* Enable this GPE */
+ (void)acpi_hw_write_gpe_enable_reg(gpe_event_info);
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_gpe_dispatch
+ *
+ * PARAMETERS: gpe_event_info - Info for this GPE
+ * gpe_number - Number relative to the parent GPE block
+ *
+ * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
+ *
+ * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC)
+ * or method (e.g. _Lxx/_Exx) handler.
+ *
+ * This function executes at interrupt level.
+ *
+ ******************************************************************************/
+
+u32
+acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
+
+ acpi_os_gpe_count(gpe_number);
+
+ /*
+ * If edge-triggered, clear the GPE status bit now. Note that
+ * level-triggered events are cleared after the GPE is serviced.
+ */
+ if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
+ ACPI_GPE_EDGE_TRIGGERED) {
+ status = acpi_hw_clear_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Unable to clear GPE[%2X]",
+ gpe_number));
+ return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
+ }
+ }
+
+ /*
+ * Dispatch the GPE to either an installed handler, or the control method
+ * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke
+ * it and do not attempt to run the method. If there is neither a handler
+ * nor a method, we disable this GPE to prevent further such pointless
+ * events from firing.
+ */
+ switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
+ case ACPI_GPE_DISPATCH_HANDLER:
+
+ /*
+ * Invoke the installed handler (at interrupt level)
+ * Ignore return status for now. TBD: leave GPE disabled on error?
+ */
+ (void)gpe_event_info->dispatch.handler->address(
+ gpe_event_info->dispatch.handler->context);
+
+ /* It is now safe to clear level-triggered events. */
+
+ if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
+ ACPI_GPE_LEVEL_TRIGGERED) {
+ status = acpi_hw_clear_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Unable to clear GPE[%2X]",
+ gpe_number));
+ return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
+ }
+ }
+ break;
+
+ case ACPI_GPE_DISPATCH_METHOD:
+
+ /*
+ * Disable the GPE, so it doesn't keep firing before the method has a
+ * chance to run (it runs asynchronously with interrupts enabled).
+ */
+ status = acpi_ev_disable_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Unable to disable GPE[%2X]",
+ gpe_number));
+ return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
+ }
+
+ /*
+ * Execute the method associated with the GPE
+ * NOTE: Level-triggered GPEs are cleared after the method completes.
+ */
+ status = acpi_os_execute(OSL_GPE_HANDLER,
+ acpi_ev_asynch_execute_gpe_method,
+ gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Unable to queue handler for GPE[%2X] - event disabled",
+ gpe_number));
+ }
+ break;
+
+ default:
+
+ /* No handler or method to run! */
+
+ ACPI_ERROR((AE_INFO,
+ "No handler or method for GPE[%2X], disabling event",
+ gpe_number));
+
+ /*
+ * Disable the GPE. The GPE will remain disabled until the ACPI
+ * Core Subsystem is restarted, or a handler is installed.
+ */
+ status = acpi_ev_disable_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Unable to disable GPE[%2X]",
+ gpe_number));
+ return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
+ }
+ break;
+ }
+
+ return_UINT32(ACPI_INTERRUPT_HANDLED);
+}
diff --git a/drivers/acpi/events/evgpeblk.c b/drivers/acpi/events/evgpeblk.c
new file mode 100644
index 0000000..73c058e
--- /dev/null
+++ b/drivers/acpi/events/evgpeblk.c
@@ -0,0 +1,1215 @@
+/******************************************************************************
+ *
+ * Module Name: evgpeblk - GPE block creation and initialization.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acevents.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evgpeblk")
+
+/* Local prototypes */
+static acpi_status
+acpi_ev_save_method_info(acpi_handle obj_handle,
+ u32 level, void *obj_desc, void **return_value);
+
+static acpi_status
+acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
+ u32 level, void *info, void **return_value);
+
+static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
+ interrupt_number);
+
+static acpi_status
+acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
+
+static acpi_status
+acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
+ u32 interrupt_number);
+
+static acpi_status
+acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_valid_gpe_event
+ *
+ * PARAMETERS: gpe_event_info - Info for this GPE
+ *
+ * RETURN: TRUE if the gpe_event is valid
+ *
+ * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
+ * Should be called only when the GPE lists are semaphore locked
+ * and not subject to change.
+ *
+ ******************************************************************************/
+
+u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
+{
+ struct acpi_gpe_xrupt_info *gpe_xrupt_block;
+ struct acpi_gpe_block_info *gpe_block;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* No need for spin lock since we are not changing any list elements */
+
+ /* Walk the GPE interrupt levels */
+
+ gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
+ while (gpe_xrupt_block) {
+ gpe_block = gpe_xrupt_block->gpe_block_list_head;
+
+ /* Walk the GPE blocks on this interrupt level */
+
+ while (gpe_block) {
+ if ((&gpe_block->event_info[0] <= gpe_event_info) &&
+ (&gpe_block->
+ event_info[((acpi_size) gpe_block->
+ register_count) * 8] >
+ gpe_event_info)) {
+ return (TRUE);
+ }
+
+ gpe_block = gpe_block->next;
+ }
+
+ gpe_xrupt_block = gpe_xrupt_block->next;
+ }
+
+ return (FALSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_walk_gpe_list
+ *
+ * PARAMETERS: gpe_walk_callback - Routine called for each GPE block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Walk the GPE lists.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback)
+{
+ struct acpi_gpe_block_info *gpe_block;
+ struct acpi_gpe_xrupt_info *gpe_xrupt_info;
+ acpi_status status = AE_OK;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Walk the interrupt level descriptor list */
+
+ gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
+ while (gpe_xrupt_info) {
+
+ /* Walk all Gpe Blocks attached to this interrupt level */
+
+ gpe_block = gpe_xrupt_info->gpe_block_list_head;
+ while (gpe_block) {
+
+ /* One callback per GPE block */
+
+ status = gpe_walk_callback(gpe_xrupt_info, gpe_block);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+
+ gpe_block = gpe_block->next;
+ }
+
+ gpe_xrupt_info = gpe_xrupt_info->next;
+ }
+
+ unlock_and_exit:
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_delete_gpe_handlers
+ *
+ * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
+ * gpe_block - Gpe Block info
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
+ * Used only prior to termination.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block)
+{
+ struct acpi_gpe_event_info *gpe_event_info;
+ u32 i;
+ u32 j;
+
+ ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
+
+ /* Examine each GPE Register within the block */
+
+ for (i = 0; i < gpe_block->register_count; i++) {
+
+ /* Now look at the individual GPEs in this byte register */
+
+ for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
+ gpe_event_info =
+ &gpe_block->
+ event_info[((acpi_size) i *
+ ACPI_GPE_REGISTER_WIDTH) + j];
+
+ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_HANDLER) {
+ ACPI_FREE(gpe_event_info->dispatch.handler);
+ gpe_event_info->dispatch.handler = NULL;
+ gpe_event_info->flags &=
+ ~ACPI_GPE_DISPATCH_MASK;
+ }
+ }
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_save_method_info
+ *
+ * PARAMETERS: Callback from walk_namespace
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
+ * control method under the _GPE portion of the namespace.
+ * Extract the name and GPE type from the object, saving this
+ * information for quick lookup during GPE dispatch
+ *
+ * The name of each GPE control method is of the form:
+ * "_Lxx" or "_Exx"
+ * Where:
+ * L - means that the GPE is level triggered
+ * E - means that the GPE is edge triggered
+ * xx - is the GPE number [in HEX]
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ev_save_method_info(acpi_handle obj_handle,
+ u32 level, void *obj_desc, void **return_value)
+{
+ struct acpi_gpe_block_info *gpe_block = (void *)obj_desc;
+ struct acpi_gpe_event_info *gpe_event_info;
+ u32 gpe_number;
+ char name[ACPI_NAME_SIZE + 1];
+ u8 type;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_save_method_info);
+
+ /*
+ * _Lxx and _Exx GPE method support
+ *
+ * 1) Extract the name from the object and convert to a string
+ */
+ ACPI_MOVE_32_TO_32(name,
+ &((struct acpi_namespace_node *)obj_handle)->name.
+ integer);
+ name[ACPI_NAME_SIZE] = 0;
+
+ /*
+ * 2) Edge/Level determination is based on the 2nd character
+ * of the method name
+ *
+ * NOTE: Default GPE type is RUNTIME. May be changed later to WAKE
+ * if a _PRW object is found that points to this GPE.
+ */
+ switch (name[1]) {
+ case 'L':
+ type = ACPI_GPE_LEVEL_TRIGGERED;
+ break;
+
+ case 'E':
+ type = ACPI_GPE_EDGE_TRIGGERED;
+ break;
+
+ default:
+ /* Unknown method type, just ignore it! */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "Ignoring unknown GPE method type: %s (name not of form _Lxx or _Exx)",
+ name));
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Convert the last two characters of the name to the GPE Number */
+
+ gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
+ if (gpe_number == ACPI_UINT32_MAX) {
+
+ /* Conversion failed; invalid method, just ignore it */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "Could not extract GPE number from name: %s (name is not of form _Lxx or _Exx)",
+ name));
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Ensure that we have a valid GPE number for this GPE block */
+
+ if ((gpe_number < gpe_block->block_base_number) ||
+ (gpe_number >=
+ (gpe_block->block_base_number +
+ (gpe_block->register_count * 8)))) {
+ /*
+ * Not valid for this GPE block, just ignore it
+ * However, it may be valid for a different GPE block, since GPE0 and GPE1
+ * methods both appear under \_GPE.
+ */
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Now we can add this information to the gpe_event_info block
+ * for use during dispatch of this GPE. Default type is RUNTIME, although
+ * this may change when the _PRW methods are executed later.
+ */
+ gpe_event_info =
+ &gpe_block->event_info[gpe_number - gpe_block->block_base_number];
+
+ gpe_event_info->flags = (u8)
+ (type | ACPI_GPE_DISPATCH_METHOD | ACPI_GPE_TYPE_RUNTIME);
+
+ gpe_event_info->dispatch.method_node =
+ (struct acpi_namespace_node *)obj_handle;
+
+ /* Update enable mask, but don't enable the HW GPE as of yet */
+
+ status = acpi_ev_enable_gpe(gpe_event_info, FALSE);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "Registered GPE method %s as GPE number 0x%.2X\n",
+ name, gpe_number));
+ return_ACPI_STATUS(status);
+}
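+
+/*
+ * Editor's note (illustrative sketch, not part of the original patch):
+ * the _Lxx/_Exx decoding above, restated as plain C. The helper name and
+ * the use of strtoul() from <stdlib.h> are assumptions for the example;
+ * the driver itself uses ACPI_MOVE_32_TO_32 and ACPI_STRTOUL. Guarded with
+ * #if 0 so it is documentation only, not compiled.
+ */
+#if 0
+#include <stdlib.h>
+
+/* Returns the GPE number, or -1 if Name is not of the form _Lxx or _Exx */
+
+static int example_decode_gpe_method_name(const char name[5], int *is_level)
+{
+	char *end;
+	unsigned long gpe_number;
+
+	if (name[0] != '_' || (name[1] != 'L' && name[1] != 'E')) {
+		return (-1);
+	}
+
+	*is_level = (name[1] == 'L');	/* 'L' = level, 'E' = edge triggered */
+
+	gpe_number = strtoul(&name[2], &end, 16);	/* "xx" is hex */
+	if (end != &name[4] || gpe_number > 0xFF) {
+		return (-1);
+	}
+
+	return ((int)gpe_number);
+}
+#endif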
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_match_prw_and_gpe
+ *
+ * PARAMETERS: Callback from walk_namespace
+ *
+ * RETURN: Status. NOTE: We ignore errors so that the _PRW walk is
+ * not aborted on a single _PRW failure.
+ *
+ * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
+ * Device. Run the _PRW method. If present, extract the GPE
+ * number and mark the GPE as a WAKE GPE.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
+ u32 level, void *info, void **return_value)
+{
+ struct acpi_gpe_walk_info *gpe_info = (void *)info;
+ struct acpi_namespace_node *gpe_device;
+ struct acpi_gpe_block_info *gpe_block;
+ struct acpi_namespace_node *target_gpe_device;
+ struct acpi_gpe_event_info *gpe_event_info;
+ union acpi_operand_object *pkg_desc;
+ union acpi_operand_object *obj_desc;
+ u32 gpe_number;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);
+
+ /* Check for a _PRW method under this device */
+
+ status = acpi_ut_evaluate_object(obj_handle, METHOD_NAME__PRW,
+ ACPI_BTYPE_PACKAGE, &pkg_desc);
+ if (ACPI_FAILURE(status)) {
+
+ /* Ignore all errors from _PRW, we don't want to abort the subsystem */
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* The returned _PRW package must have at least two elements */
+
+ if (pkg_desc->package.count < 2) {
+ goto cleanup;
+ }
+
+ /* Extract pointers from the input context */
+
+ gpe_device = gpe_info->gpe_device;
+ gpe_block = gpe_info->gpe_block;
+
+ /*
+ * The _PRW object must return a package, we are only interested
+ * in the first element
+ */
+ obj_desc = pkg_desc->package.elements[0];
+
+ if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
+
+ /* Use FADT-defined GPE device (from definition of _PRW) */
+
+ target_gpe_device = acpi_gbl_fadt_gpe_device;
+
+ /* Integer is the GPE number in the FADT described GPE blocks */
+
+ gpe_number = (u32) obj_desc->integer.value;
+ } else if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_PACKAGE) {
+
+ /* Package contains a GPE reference and GPE number within a GPE block */
+
+ if ((obj_desc->package.count < 2) ||
+ (ACPI_GET_OBJECT_TYPE(obj_desc->package.elements[0]) !=
+ ACPI_TYPE_LOCAL_REFERENCE)
+ || (ACPI_GET_OBJECT_TYPE(obj_desc->package.elements[1]) !=
+ ACPI_TYPE_INTEGER)) {
+ goto cleanup;
+ }
+
+ /* Get GPE block reference and decode */
+
+ target_gpe_device =
+ obj_desc->package.elements[0]->reference.node;
+ gpe_number = (u32) obj_desc->package.elements[1]->integer.value;
+ } else {
+ /* Unknown type, just ignore it */
+
+ goto cleanup;
+ }
+
+ /*
+ * Is this GPE within this block?
+ *
+ * TRUE iff these conditions are true:
+ * 1) The GPE devices match.
+ * 2) The GPE index(number) is within the range of the Gpe Block
+ * associated with the GPE device.
+ */
+ if ((gpe_device == target_gpe_device) &&
+ (gpe_number >= gpe_block->block_base_number) &&
+ (gpe_number <
+ gpe_block->block_base_number + (gpe_block->register_count * 8))) {
+ gpe_event_info =
+ &gpe_block->event_info[gpe_number -
+ gpe_block->block_base_number];
+
+ /* Mark GPE for WAKE-ONLY but WAKE_DISABLED */
+
+ gpe_event_info->flags &=
+ ~(ACPI_GPE_WAKE_ENABLED | ACPI_GPE_RUN_ENABLED);
+
+ status =
+ acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+ status =
+ acpi_ev_update_gpe_enable_masks(gpe_event_info,
+ ACPI_GPE_DISABLE);
+ }
+
+ cleanup:
+ acpi_ut_remove_reference(pkg_desc);
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_get_gpe_xrupt_block
+ *
+ * PARAMETERS: interrupt_number - Interrupt for a GPE block
+ *
+ * RETURN: A GPE interrupt block
+ *
+ * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
+ * block per unique interrupt level used for GPEs.
+ * Should be called only when the GPE lists are semaphore locked
+ * and not subject to change.
+ *
+ ******************************************************************************/
+
+static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
+ interrupt_number)
+{
+ struct acpi_gpe_xrupt_info *next_gpe_xrupt;
+ struct acpi_gpe_xrupt_info *gpe_xrupt;
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
+
+ /* No need for lock since we are not changing any list elements here */
+
+ next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
+ while (next_gpe_xrupt) {
+ if (next_gpe_xrupt->interrupt_number == interrupt_number) {
+ return_PTR(next_gpe_xrupt);
+ }
+
+ next_gpe_xrupt = next_gpe_xrupt->next;
+ }
+
+ /* Not found, must allocate a new xrupt descriptor */
+
+ gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
+ if (!gpe_xrupt) {
+ return_PTR(NULL);
+ }
+
+ gpe_xrupt->interrupt_number = interrupt_number;
+
+ /* Install new interrupt descriptor with spin lock */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ if (acpi_gbl_gpe_xrupt_list_head) {
+ next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
+ while (next_gpe_xrupt->next) {
+ next_gpe_xrupt = next_gpe_xrupt->next;
+ }
+
+ next_gpe_xrupt->next = gpe_xrupt;
+ gpe_xrupt->previous = next_gpe_xrupt;
+ } else {
+ acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
+ }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+ /* Install new interrupt handler if not SCI_INT */
+
+ if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
+ status = acpi_os_install_interrupt_handler(interrupt_number,
+ acpi_ev_gpe_xrupt_handler,
+ gpe_xrupt);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Could not install GPE interrupt handler at level 0x%X",
+ interrupt_number));
+ return_PTR(NULL);
+ }
+ }
+
+ return_PTR(gpe_xrupt);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_delete_gpe_xrupt
+ *
+ * PARAMETERS: gpe_xrupt - A GPE interrupt info block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
+ * interrupt handler if not the SCI interrupt.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
+{
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
+
+ /* We never want to remove the SCI interrupt handler */
+
+ if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
+ gpe_xrupt->gpe_block_list_head = NULL;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Disable this interrupt */
+
+ status =
+ acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
+ acpi_ev_gpe_xrupt_handler);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Unlink the interrupt block with lock */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ if (gpe_xrupt->previous) {
+ gpe_xrupt->previous->next = gpe_xrupt->next;
+ } else {
+ /* No previous, update list head */
+
+ acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
+ }
+
+ if (gpe_xrupt->next) {
+ gpe_xrupt->next->previous = gpe_xrupt->previous;
+ }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+ /* Free the block */
+
+ ACPI_FREE(gpe_xrupt);
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_install_gpe_block
+ *
+ * PARAMETERS: gpe_block - New GPE block
+ * interrupt_number - Xrupt to be associated with this GPE block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install new GPE block with mutex support
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
+ u32 interrupt_number)
+{
+ struct acpi_gpe_block_info *next_gpe_block;
+ struct acpi_gpe_xrupt_info *gpe_xrupt_block;
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(ev_install_gpe_block);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ gpe_xrupt_block = acpi_ev_get_gpe_xrupt_block(interrupt_number);
+ if (!gpe_xrupt_block) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
+
+ /* Install the new block at the end of the list with lock */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ if (gpe_xrupt_block->gpe_block_list_head) {
+ next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
+ while (next_gpe_block->next) {
+ next_gpe_block = next_gpe_block->next;
+ }
+
+ next_gpe_block->next = gpe_block;
+ gpe_block->previous = next_gpe_block;
+ } else {
+ gpe_xrupt_block->gpe_block_list_head = gpe_block;
+ }
+
+ gpe_block->xrupt_block = gpe_xrupt_block;
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+ unlock_and_exit:
+ status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_delete_gpe_block
+ *
+ * PARAMETERS: gpe_block - Existing GPE block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove a GPE block
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
+{
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+	ACPI_FUNCTION_TRACE(ev_delete_gpe_block);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Disable all GPEs in this block */
+
+ status = acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block);
+
+ if (!gpe_block->previous && !gpe_block->next) {
+
+ /* This is the last gpe_block on this interrupt */
+
+ status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+ } else {
+ /* Remove the block on this interrupt with lock */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ if (gpe_block->previous) {
+ gpe_block->previous->next = gpe_block->next;
+ } else {
+ gpe_block->xrupt_block->gpe_block_list_head =
+ gpe_block->next;
+ }
+
+ if (gpe_block->next) {
+ gpe_block->next->previous = gpe_block->previous;
+ }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ }
+
+ /* Free the gpe_block */
+
+ ACPI_FREE(gpe_block->register_info);
+ ACPI_FREE(gpe_block->event_info);
+ ACPI_FREE(gpe_block);
+
+ unlock_and_exit:
+ status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_create_gpe_info_blocks
+ *
+ * PARAMETERS: gpe_block - New GPE block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create the register_info and event_info blocks for this GPE block
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
+{
+ struct acpi_gpe_register_info *gpe_register_info = NULL;
+ struct acpi_gpe_event_info *gpe_event_info = NULL;
+ struct acpi_gpe_event_info *this_event;
+ struct acpi_gpe_register_info *this_register;
+ u32 i;
+ u32 j;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks);
+
+ /* Allocate the GPE register information block */
+
+ gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->
+ register_count *
+ sizeof(struct
+ acpi_gpe_register_info));
+ if (!gpe_register_info) {
+ ACPI_ERROR((AE_INFO,
+ "Could not allocate the GpeRegisterInfo table"));
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /*
+ * Allocate the GPE event_info block. There are eight distinct GPEs
+ * per register. Initialization to zeros is sufficient.
+ */
+ gpe_event_info = ACPI_ALLOCATE_ZEROED(((acpi_size) gpe_block->
+ register_count *
+ ACPI_GPE_REGISTER_WIDTH) *
+ sizeof(struct
+ acpi_gpe_event_info));
+ if (!gpe_event_info) {
+ ACPI_ERROR((AE_INFO,
+ "Could not allocate the GpeEventInfo table"));
+ status = AE_NO_MEMORY;
+ goto error_exit;
+ }
+
+ /* Save the new Info arrays in the GPE block */
+
+ gpe_block->register_info = gpe_register_info;
+ gpe_block->event_info = gpe_event_info;
+
+ /*
+ * Initialize the GPE Register and Event structures. A goal of these
+ * tables is to hide the fact that there are two separate GPE register sets
+ * in a given GPE hardware block, the status registers occupy the first half,
+ * and the enable registers occupy the second half.
+ */
+ this_register = gpe_register_info;
+ this_event = gpe_event_info;
+
+ for (i = 0; i < gpe_block->register_count; i++) {
+
+ /* Init the register_info for this GPE register (8 GPEs) */
+
+ this_register->base_gpe_number =
+ (u8) (gpe_block->block_base_number +
+ (i * ACPI_GPE_REGISTER_WIDTH));
+
+ this_register->status_address.address =
+ gpe_block->block_address.address + i;
+
+ this_register->enable_address.address =
+ gpe_block->block_address.address + i +
+ gpe_block->register_count;
+
+ this_register->status_address.space_id =
+ gpe_block->block_address.space_id;
+ this_register->enable_address.space_id =
+ gpe_block->block_address.space_id;
+ this_register->status_address.bit_width =
+ ACPI_GPE_REGISTER_WIDTH;
+ this_register->enable_address.bit_width =
+ ACPI_GPE_REGISTER_WIDTH;
+ this_register->status_address.bit_offset =
+ ACPI_GPE_REGISTER_WIDTH;
+ this_register->enable_address.bit_offset =
+ ACPI_GPE_REGISTER_WIDTH;
+
+ /* Init the event_info for each GPE within this register */
+
+ for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
+ this_event->gpe_number =
+ (u8) (this_register->base_gpe_number + j);
+ this_event->register_info = this_register;
+ this_event++;
+ }
+
+ /* Disable all GPEs within this register */
+
+ status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, 0x00,
+ &this_register->
+ enable_address);
+ if (ACPI_FAILURE(status)) {
+ goto error_exit;
+ }
+
+ /* Clear any pending GPE events within this register */
+
+ status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, 0xFF,
+ &this_register->
+ status_address);
+ if (ACPI_FAILURE(status)) {
+ goto error_exit;
+ }
+
+ this_register++;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+
+ error_exit:
+ if (gpe_register_info) {
+ ACPI_FREE(gpe_register_info);
+ }
+ if (gpe_event_info) {
+ ACPI_FREE(gpe_event_info);
+ }
+
+ return_ACPI_STATUS(status);
+}
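+
+/*
+ * Editor's note (illustrative sketch, not part of the original patch):
+ * the register layout produced above, restated as arithmetic. For a block
+ * of N register pairs starting at block_address, status register i lives at
+ * block_address + i and its matching enable register at
+ * block_address + i + N; each register covers ACPI_GPE_REGISTER_WIDTH (8)
+ * GPEs. The struct and function names are invented for the example.
+ * Guarded with #if 0 so it is documentation only, not compiled.
+ */
+#if 0
+struct example_gpe_register_layout {
+	u64 status_address;
+	u64 enable_address;
+	u8 base_gpe_number;
+};
+
+static struct example_gpe_register_layout
+example_layout_for_register(u64 block_address, u32 register_count,
+			    u8 block_base_number, u32 i)
+{
+	struct example_gpe_register_layout layout;
+
+	layout.status_address = block_address + i;	/* first half */
+	layout.enable_address = block_address + i + register_count;	/* second half */
+	layout.base_gpe_number =
+	    (u8) (block_base_number + (i * ACPI_GPE_REGISTER_WIDTH));
+	return (layout);
+}
+#endif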
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_create_gpe_block
+ *
+ * PARAMETERS: gpe_device - Handle to the parent GPE block
+ *              gpe_block_address   - Address and space_id
+ * register_count - Number of GPE register pairs in the block
+ * gpe_block_base_number - Starting GPE number for the block
+ * interrupt_number - H/W interrupt for the block
+ * return_gpe_block - Where the new block descriptor is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create and Install a block of GPE registers. All GPEs within
+ * the block are disabled at exit.
+ * Note: Assumes namespace is locked.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
+ struct acpi_generic_address *gpe_block_address,
+ u32 register_count,
+ u8 gpe_block_base_number,
+ u32 interrupt_number,
+ struct acpi_gpe_block_info **return_gpe_block)
+{
+ acpi_status status;
+ struct acpi_gpe_block_info *gpe_block;
+
+ ACPI_FUNCTION_TRACE(ev_create_gpe_block);
+
+ if (!register_count) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Allocate a new GPE block */
+
+ gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info));
+ if (!gpe_block) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Initialize the new GPE block */
+
+ gpe_block->node = gpe_device;
+ gpe_block->register_count = register_count;
+ gpe_block->block_base_number = gpe_block_base_number;
+
+ ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
+ sizeof(struct acpi_generic_address));
+
+ /*
+ * Create the register_info and event_info sub-structures
+ * Note: disables and clears all GPEs in the block
+ */
+ status = acpi_ev_create_gpe_info_blocks(gpe_block);
+ if (ACPI_FAILURE(status)) {
+ ACPI_FREE(gpe_block);
+ return_ACPI_STATUS(status);
+ }
+
+ /* Install the new block in the global lists */
+
+ status = acpi_ev_install_gpe_block(gpe_block, interrupt_number);
+ if (ACPI_FAILURE(status)) {
+ ACPI_FREE(gpe_block);
+ return_ACPI_STATUS(status);
+ }
+
+ /* Find all GPE methods (_Lxx, _Exx) for this block */
+
+ status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
+ ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
+ acpi_ev_save_method_info, gpe_block,
+ NULL);
+
+ /* Return the new block */
+
+ if (return_gpe_block) {
+ (*return_gpe_block) = gpe_block;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+ "GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
+ (u32) gpe_block->block_base_number,
+ (u32) (gpe_block->block_base_number +
+ ((gpe_block->register_count *
+ ACPI_GPE_REGISTER_WIDTH) - 1)),
+ gpe_device->name.ascii, gpe_block->register_count,
+ interrupt_number));
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_initialize_gpe_block
+ *
+ * PARAMETERS: gpe_device - Handle to the parent GPE block
+ * gpe_block - Gpe Block info
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initialize and enable a GPE block. First find and run any
+ *              _PRW methods associated with the block, then enable the
+ * appropriate GPEs.
+ * Note: Assumes namespace is locked.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
+ struct acpi_gpe_block_info *gpe_block)
+{
+ acpi_status status;
+ struct acpi_gpe_event_info *gpe_event_info;
+ struct acpi_gpe_walk_info gpe_info;
+ u32 wake_gpe_count;
+ u32 gpe_enabled_count;
+ u32 i;
+ u32 j;
+
+ ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);
+
+ /* Ignore a null GPE block (e.g., if no GPE block 1 exists) */
+
+ if (!gpe_block) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Runtime option: Should wake GPEs be enabled at runtime? The default
+ * is no, they should only be enabled just as the machine goes to sleep.
+ */
+ if (acpi_gbl_leave_wake_gpes_disabled) {
+ /*
+ * Differentiate runtime vs wake GPEs, via the _PRW control methods.
+ * Each GPE that has one or more _PRWs that reference it is by
+ * definition a wake GPE and will not be enabled while the machine
+ * is running.
+ */
+ gpe_info.gpe_block = gpe_block;
+ gpe_info.gpe_device = gpe_device;
+
+ status =
+ acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
+ acpi_ev_match_prw_and_gpe, &gpe_info,
+ NULL);
+ }
+
+ /*
+ * Enable all GPEs in this block that have these attributes:
+ * 1) are "runtime" or "run/wake" GPEs, and
+ * 2) have a corresponding _Lxx or _Exx method
+ *
+ * Any other GPEs within this block must be enabled via the acpi_enable_gpe()
+ * external interface.
+ */
+ wake_gpe_count = 0;
+ gpe_enabled_count = 0;
+
+ for (i = 0; i < gpe_block->register_count; i++) {
+ for (j = 0; j < 8; j++) {
+
+ /* Get the info block for this particular GPE */
+
+ gpe_event_info =
+ &gpe_block->
+ event_info[((acpi_size) i *
+ ACPI_GPE_REGISTER_WIDTH) + j];
+
+ if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_METHOD)
+ && (gpe_event_info->flags & ACPI_GPE_TYPE_RUNTIME)) {
+ gpe_enabled_count++;
+ }
+
+ if (gpe_event_info->flags & ACPI_GPE_TYPE_WAKE) {
+ wake_gpe_count++;
+ }
+ }
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+ "Found %u Wake, Enabled %u Runtime GPEs in this block\n",
+ wake_gpe_count, gpe_enabled_count));
+
+ /* Enable all valid runtime GPEs found above */
+
+ status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO, "Could not enable GPEs in GpeBlock %p",
+ gpe_block));
+ }
+
+ return_ACPI_STATUS(status);
+}
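+
+/*
+ * Editor's note (illustrative sketch, not part of the original patch): the
+ * counting loop above treats a GPE as auto-enabled at init time only when
+ * both conditions below hold; this predicate restates that test using the
+ * same flag masks. The function name is invented for the example. Guarded
+ * with #if 0 so it is documentation only, not compiled.
+ */
+#if 0
+static u8 example_gpe_auto_enabled_at_init(u8 flags)
+{
+	/* Must be dispatched to an _Lxx/_Exx method AND be a runtime GPE */
+
+	return (((flags & ACPI_GPE_DISPATCH_MASK) == ACPI_GPE_DISPATCH_METHOD)
+		&& (flags & ACPI_GPE_TYPE_RUNTIME)) ? TRUE : FALSE;
+}
+#endif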
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_gpe_initialize
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initialize the GPE data structures
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_gpe_initialize(void)
+{
+ u32 register_count0 = 0;
+ u32 register_count1 = 0;
+ u32 gpe_number_max = 0;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_gpe_initialize);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Initialize the GPE Block(s) defined in the FADT
+ *
+ * Why the GPE register block lengths are divided by 2: From the ACPI Spec,
+ * section "General-Purpose Event Registers", we have:
+ *
+ * "Each register block contains two registers of equal length
+ * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
+	 * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN.
+ * The length of the GPE1_STS and GPE1_EN registers is equal to
+ * half the GPE1_LEN. If a generic register block is not supported
+ * then its respective block pointer and block length values in the
+ * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
+ * to be the same size."
+ */
+
+ /*
+ * Determine the maximum GPE number for this machine.
+ *
+ * Note: both GPE0 and GPE1 are optional, and either can exist without
+ * the other.
+ *
+ * If EITHER the register length OR the block address are zero, then that
+ * particular block is not supported.
+ */
+ if (acpi_gbl_FADT.gpe0_block_length &&
+ acpi_gbl_FADT.xgpe0_block.address) {
+
+ /* GPE block 0 exists (has both length and address > 0) */
+
+ register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2);
+
+ gpe_number_max =
+ (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
+
+ /* Install GPE Block 0 */
+
+ status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
+ &acpi_gbl_FADT.xgpe0_block,
+ register_count0, 0,
+ acpi_gbl_FADT.sci_interrupt,
+ &acpi_gbl_gpe_fadt_blocks[0]);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not create GPE Block 0"));
+ }
+ }
+
+ if (acpi_gbl_FADT.gpe1_block_length &&
+ acpi_gbl_FADT.xgpe1_block.address) {
+
+ /* GPE block 1 exists (has both length and address > 0) */
+
+ register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2);
+
+ /* Check for GPE0/GPE1 overlap (if both banks exist) */
+
+ if ((register_count0) &&
+ (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
+ ACPI_ERROR((AE_INFO,
+ "GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1",
+ gpe_number_max, acpi_gbl_FADT.gpe1_base,
+ acpi_gbl_FADT.gpe1_base +
+ ((register_count1 *
+ ACPI_GPE_REGISTER_WIDTH) - 1)));
+
+ /* Ignore GPE1 block by setting the register count to zero */
+
+ register_count1 = 0;
+ } else {
+ /* Install GPE Block 1 */
+
+ status =
+ acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
+ &acpi_gbl_FADT.xgpe1_block,
+ register_count1,
+ acpi_gbl_FADT.gpe1_base,
+ acpi_gbl_FADT.
+ sci_interrupt,
+ &acpi_gbl_gpe_fadt_blocks
+ [1]);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not create GPE Block 1"));
+ }
+
+ /*
+ * GPE0 and GPE1 do not have to be contiguous in the GPE number
+ * space. However, GPE0 always starts at GPE number zero.
+ */
+ gpe_number_max = acpi_gbl_FADT.gpe1_base +
+ ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
+ }
+ }
+
+ /* Exit if there are no GPE registers */
+
+ if ((register_count0 + register_count1) == 0) {
+
+ /* GPEs are not required by ACPI, this is OK */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+ "There are no GPE blocks defined in the FADT\n"));
+ status = AE_OK;
+ goto cleanup;
+ }
+
+ /* Check for Max GPE number out-of-range */
+
+ if (gpe_number_max > ACPI_GPE_MAX) {
+ ACPI_ERROR((AE_INFO,
+ "Maximum GPE number from FADT is too large: 0x%X",
+ gpe_number_max));
+ status = AE_BAD_VALUE;
+ goto cleanup;
+ }
+
+ cleanup:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(AE_OK);
+}
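+
+/*
+ * Editor's note (illustrative sketch, not part of the original patch): the
+ * FADT arithmetic above. A GPE block of length L bytes holds L/2 status
+ * bytes and L/2 enable bytes, so it describes (L/2) * 8 GPEs. For example,
+ * a GPE0_LEN of 8 yields 4 register pairs and GPE numbers 0x00 through 0x1F.
+ * The function name is invented for the example. Guarded with #if 0 so it
+ * is documentation only, not compiled.
+ */
+#if 0
+static u32 example_max_gpe_number(u32 gpe_block_length, u32 gpe_base)
+{
+	u32 register_count = gpe_block_length / 2;	/* STS/EN register pairs */
+
+	return (gpe_base + (register_count * ACPI_GPE_REGISTER_WIDTH) - 1);
+}
+#endif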
diff --git a/drivers/acpi/events/evmisc.c b/drivers/acpi/events/evmisc.c
new file mode 100644
index 0000000..1d5670b
--- /dev/null
+++ b/drivers/acpi/events/evmisc.c
@@ -0,0 +1,631 @@
+/******************************************************************************
+ *
+ * Module Name: evmisc - Miscellaneous event manager support functions
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acevents.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acinterp.h>
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evmisc")
+
+/* Pointer to FACS needed for the Global Lock */
+static struct acpi_table_facs *facs = NULL;
+
+/* Local prototypes */
+
+static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context);
+
+static u32 acpi_ev_global_lock_handler(void *context);
+
+static acpi_status acpi_ev_remove_global_lock_handler(void);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_is_notify_object
+ *
+ * PARAMETERS: Node - Node to check
+ *
+ * RETURN: TRUE if notifies allowed on this object
+ *
+ * DESCRIPTION: Check the node type for an object that supports notifies.
+ *
+ * TBD: This could be replaced by a flag bit in the node.
+ *
+ ******************************************************************************/
+
+u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node)
+{
+ switch (node->type) {
+ case ACPI_TYPE_DEVICE:
+ case ACPI_TYPE_PROCESSOR:
+ case ACPI_TYPE_THERMAL:
+ /*
+ * These are the ONLY objects that can receive ACPI notifications
+ */
+ return (TRUE);
+
+ default:
+ return (FALSE);
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_queue_notify_request
+ *
+ * PARAMETERS: Node - NS node for the notified object
+ * notify_value - Value from the Notify() request
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Dispatch a device notification event to a previously
+ * installed handler.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
+ u32 notify_value)
+{
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object *handler_obj = NULL;
+ union acpi_generic_state *notify_info;
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_NAME(ev_queue_notify_request);
+
+ /*
+ * For value 3 (Ejection Request), some device method may need to be run.
+ * For value 2 (Device Wake) if _PRW exists, the _PS0 method may need
+ * to be run.
+ * For value 0x80 (Status Change) on the power button or sleep button,
+ * initiate soft-off or sleep operation?
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Dispatching Notify on [%4.4s] Node %p Value 0x%2.2X (%s)\n",
+ acpi_ut_get_node_name(node), node, notify_value,
+ acpi_ut_get_notify_name(notify_value)));
+
+ /* Get the notify object attached to the NS Node */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (obj_desc) {
+
+		/* We have the notify object; get the right handler */
+
+ switch (node->type) {
+
+ /* Notify allowed only on these types */
+
+ case ACPI_TYPE_DEVICE:
+ case ACPI_TYPE_THERMAL:
+ case ACPI_TYPE_PROCESSOR:
+
+ if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
+ handler_obj =
+ obj_desc->common_notify.system_notify;
+ } else {
+ handler_obj =
+ obj_desc->common_notify.device_notify;
+ }
+ break;
+
+ default:
+ /* All other types are not supported */
+ return (AE_TYPE);
+ }
+ }
+
+ /*
+ * If there is any handler to run, schedule the dispatcher.
+ * Check for:
+ * 1) Global system notify handler
+ * 2) Global device notify handler
+ * 3) Per-device notify handler
+ */
+ if ((acpi_gbl_system_notify.handler
+ && (notify_value <= ACPI_MAX_SYS_NOTIFY))
+ || (acpi_gbl_device_notify.handler
+ && (notify_value > ACPI_MAX_SYS_NOTIFY)) || handler_obj) {
+ notify_info = acpi_ut_create_generic_state();
+ if (!notify_info) {
+ return (AE_NO_MEMORY);
+ }
+
+ if (!handler_obj) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Executing system notify handler for Notify (%4.4s, %X) node %p\n",
+ acpi_ut_get_node_name(node),
+ notify_value, node));
+ }
+
+ notify_info->common.descriptor_type =
+ ACPI_DESC_TYPE_STATE_NOTIFY;
+ notify_info->notify.node = node;
+ notify_info->notify.value = (u16) notify_value;
+ notify_info->notify.handler_obj = handler_obj;
+
+ status =
+ acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch,
+ notify_info);
+ if (ACPI_FAILURE(status)) {
+ acpi_ut_delete_generic_state(notify_info);
+ }
+ } else {
+ /*
+ * There is no notify handler (per-device or system) for this device.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "No notify handler for Notify (%4.4s, %X) node %p\n",
+ acpi_ut_get_node_name(node), notify_value,
+ node));
+ }
+
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_notify_dispatch
+ *
+ * PARAMETERS: Context - To be passed to the notify handler
+ *
+ * RETURN: None.
+ *
+ * DESCRIPTION: Dispatch a device notification event to a previously
+ * installed handler.
+ *
+ ******************************************************************************/
+
+static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
+{
+ union acpi_generic_state *notify_info =
+ (union acpi_generic_state *)context;
+ acpi_notify_handler global_handler = NULL;
+ void *global_context = NULL;
+ union acpi_operand_object *handler_obj;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /*
+ * We will invoke a global notify handler if installed.
+ * This is done _before_ we invoke the per-device handler attached
+ * to the device.
+ */
+ if (notify_info->notify.value <= ACPI_MAX_SYS_NOTIFY) {
+
+ /* Global system notification handler */
+
+ if (acpi_gbl_system_notify.handler) {
+ global_handler = acpi_gbl_system_notify.handler;
+ global_context = acpi_gbl_system_notify.context;
+ }
+ } else {
+ /* Global driver notification handler */
+
+ if (acpi_gbl_device_notify.handler) {
+ global_handler = acpi_gbl_device_notify.handler;
+ global_context = acpi_gbl_device_notify.context;
+ }
+ }
+
+ /* Invoke the system handler first, if present */
+
+ if (global_handler) {
+ global_handler(notify_info->notify.node,
+ notify_info->notify.value, global_context);
+ }
+
+ /* Now invoke the per-device handler, if present */
+
+ handler_obj = notify_info->notify.handler_obj;
+ if (handler_obj) {
+ handler_obj->notify.handler(notify_info->notify.node,
+ notify_info->notify.value,
+ handler_obj->notify.context);
+ }
+
+ /* All done with the info object */
+
+ acpi_ut_delete_generic_state(notify_info);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_global_lock_handler
+ *
+ * PARAMETERS: Context - From thread interface, not used
+ *
+ * RETURN: ACPI_INTERRUPT_HANDLED
+ *
+ * DESCRIPTION: Invoked directly from the SCI handler when a global lock
+ * release interrupt occurs. Attempt to acquire the global lock,
+ * if successful, signal the thread waiting for the lock.
+ *
+ * NOTE: Assumes that the semaphore can be signaled from interrupt level. If
+ * this is not possible for some reason, a separate thread will have to be
+ * scheduled to do this.
+ *
+ ******************************************************************************/
+
+static u32 acpi_ev_global_lock_handler(void *context)
+{
+ u8 acquired = FALSE;
+
+ /*
+ * Attempt to get the lock.
+ *
+ * If we don't get it now, it will be marked pending and we will
+ * take another interrupt when it becomes free.
+ */
+ ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired);
+ if (acquired) {
+
+ /* Got the lock, now wake all threads waiting for it */
+
+ acpi_gbl_global_lock_acquired = TRUE;
+ /* Send a unit to the semaphore */
+
+ if (ACPI_FAILURE
+ (acpi_os_signal_semaphore
+ (acpi_gbl_global_lock_semaphore, 1))) {
+ ACPI_ERROR((AE_INFO,
+ "Could not signal Global Lock semaphore"));
+ }
+ }
+
+ return (ACPI_INTERRUPT_HANDLED);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_init_global_lock_handler
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a handler for the global lock release event
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_init_global_lock_handler(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
+
+ status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
+ ACPI_CAST_INDIRECT_PTR(struct
+ acpi_table_header,
+ &facs));
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ acpi_gbl_global_lock_present = TRUE;
+ status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
+ acpi_ev_global_lock_handler,
+ NULL);
+
+ /*
+ * If the global lock does not exist on this platform, the attempt
+	 * to enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick).
+	 * Map to AE_OK, but mark the global lock as not present.
+ * Any attempt to actually use the global lock will be flagged
+ * with an error.
+ */
+ if (status == AE_NO_HARDWARE_RESPONSE) {
+ ACPI_ERROR((AE_INFO,
+ "No response from Global Lock hardware, disabling lock"));
+
+ acpi_gbl_global_lock_present = FALSE;
+ status = AE_OK;
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_remove_global_lock_handler
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove the handler for the Global Lock
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_ev_remove_global_lock_handler(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler);
+
+ acpi_gbl_global_lock_present = FALSE;
+ status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL,
+ acpi_ev_global_lock_handler);
+
+ return_ACPI_STATUS(status);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_ev_acquire_global_lock
+ *
+ * PARAMETERS:  Timeout         - Max time to wait for the lock, in milliseconds.
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Attempt to gain ownership of the Global Lock.
+ *
+ * MUTEX: Interpreter must be locked
+ *
+ * Note: The original implementation allowed multiple threads to "acquire" the
+ * Global Lock, and the OS would hold the lock until the last thread had
+ * released it. However, this could potentially starve the BIOS out of the
+ * lock, especially in the case where there is a tight handshake between the
+ * Embedded Controller driver and the BIOS. Therefore, this implementation
+ * allows only one thread to acquire the HW Global Lock at a time, and makes
+ * the global lock appear as a standard mutex on the OS side.
+ *
+ *****************************************************************************/
+static acpi_thread_id acpi_ev_global_lock_thread_id;
+static int acpi_ev_global_lock_acquired;
+
+acpi_status acpi_ev_acquire_global_lock(u16 timeout)
+{
+ acpi_status status = AE_OK;
+ u8 acquired = FALSE;
+
+ ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
+
+ /*
+ * Only one thread can acquire the GL at a time, the global_lock_mutex
+ * enforces this. This interface releases the interpreter if we must wait.
+ */
+ status = acpi_ex_system_wait_mutex(
+ acpi_gbl_global_lock_mutex->mutex.os_mutex, 0);
+ if (status == AE_TIME) {
+ if (acpi_ev_global_lock_thread_id == acpi_os_get_thread_id()) {
+ acpi_ev_global_lock_acquired++;
+ return AE_OK;
+ }
+ }
+
+ if (ACPI_FAILURE(status)) {
+ status = acpi_ex_system_wait_mutex(
+ acpi_gbl_global_lock_mutex->mutex.os_mutex,
+ timeout);
+ }
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ acpi_ev_global_lock_thread_id = acpi_os_get_thread_id();
+ acpi_ev_global_lock_acquired++;
+
+ /*
+ * Update the global lock handle and check for wraparound. The handle is
+ * only used for the external global lock interfaces, but it is updated
+ * here to properly handle the case where a single thread may acquire the
+ * lock via both the AML and the acpi_acquire_global_lock interfaces. The
+ * handle is therefore updated on the first acquire from a given thread
+ * regardless of where the acquisition request originated.
+ */
+ acpi_gbl_global_lock_handle++;
+ if (acpi_gbl_global_lock_handle == 0) {
+ acpi_gbl_global_lock_handle = 1;
+ }
+
+ /*
+ * Make sure that a global lock actually exists. If not, just treat
+ * the lock as a standard mutex.
+ */
+ if (!acpi_gbl_global_lock_present) {
+ acpi_gbl_global_lock_acquired = TRUE;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Attempt to acquire the actual hardware lock */
+
+ ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired);
+ if (acquired) {
+
+ /* We got the lock */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Acquired hardware Global Lock\n"));
+
+ acpi_gbl_global_lock_acquired = TRUE;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Did not get the lock. The pending bit was set above, and we must now
+ * wait until we get the global lock released interrupt.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for hardware Global Lock\n"));
+
+ /*
+ * Wait for handshake with the global lock interrupt handler.
+ * This interface releases the interpreter if we must wait.
+ */
+ status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore,
+ ACPI_WAIT_FOREVER);
+
+ return_ACPI_STATUS(status);
+}
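+
+/*
+ * Editor's note (illustrative sketch, not part of the original patch): the
+ * ACPI_ACQUIRE_GLOBAL_LOCK macro used above implements the Global Lock
+ * protocol from the ACPI specification: bit 0 of the FACS lock field is the
+ * Pending bit, bit 1 is the Owned bit. The sketch below restates that
+ * protocol in C; the __sync compare-and-swap builtin and the function name
+ * are assumptions for the example (the real macro is per-architecture
+ * assembly). Guarded with #if 0 so it is documentation only, not compiled.
+ */
+#if 0
+static u8 example_acquire_hw_global_lock(volatile u32 *facs_lock)
+{
+	u32 old_value;
+	u32 new_value;
+
+	do {
+		old_value = *facs_lock;
+
+		/* Clear Pending, set Owned; if already owned, set Pending */
+
+		new_value = (old_value & ~(u32) 0x01) | 0x02;
+		if (old_value & 0x02) {
+			new_value |= 0x01;
+		}
+	} while (!__sync_bool_compare_and_swap(facs_lock, old_value,
+					       new_value));
+
+	/* Acquired only if the Owned bit was clear before the update */
+
+	return ((old_value & 0x02) ? FALSE : TRUE);
+}
+#endif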
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_release_global_lock
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Releases ownership of the Global Lock.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_release_global_lock(void)
+{
+ u8 pending = FALSE;
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(ev_release_global_lock);
+
+ /* Lock must be already acquired */
+
+ if (!acpi_gbl_global_lock_acquired) {
+ ACPI_WARNING((AE_INFO,
+ "Cannot release the ACPI Global Lock, it has not been acquired"));
+ return_ACPI_STATUS(AE_NOT_ACQUIRED);
+ }
+
+ acpi_ev_global_lock_acquired--;
+ if (acpi_ev_global_lock_acquired > 0) {
+ return AE_OK;
+ }
+
+ if (acpi_gbl_global_lock_present) {
+
+ /* Allow any thread to release the lock */
+
+ ACPI_RELEASE_GLOBAL_LOCK(facs, pending);
+
+ /*
+ * If the pending bit was set, we must write GBL_RLS to the control
+ * register
+ */
+ if (pending) {
+ status =
+ acpi_set_register(ACPI_BITREG_GLOBAL_LOCK_RELEASE,
+ 1);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Released hardware Global Lock\n"));
+ }
+
+ acpi_gbl_global_lock_acquired = FALSE;
+
+ /* Release the local GL mutex */
+ acpi_ev_global_lock_thread_id = NULL;
+ acpi_ev_global_lock_acquired = 0;
+ acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
+ return_ACPI_STATUS(status);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_ev_terminate
+ *
+ * PARAMETERS: none
+ *
+ * RETURN: none
+ *
+ * DESCRIPTION: Disable events and free memory allocated for table storage.
+ *
+ ******************************************************************************/
+
+void acpi_ev_terminate(void)
+{
+ u32 i;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_terminate);
+
+ if (acpi_gbl_events_initialized) {
+ /*
+ * Disable all event-related functionality.
+		 * In all cases, print a message on error, but do not abort.
+ */
+
+ /* Disable all fixed events */
+
+ for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
+ status = acpi_disable_event(i, 0);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Could not disable fixed event %d",
+ (u32) i));
+ }
+ }
+
+ /* Disable all GPEs in all GPE blocks */
+
+ status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block);
+
+ /* Remove SCI handler */
+
+ status = acpi_ev_remove_sci_handler();
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
+ }
+
+ status = acpi_ev_remove_global_lock_handler();
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Could not remove Global Lock handler"));
+ }
+ }
+
+ /* Deallocate all handler objects installed within GPE info structs */
+
+ status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers);
+
+ /* Return to original mode if necessary */
+
+ if (acpi_gbl_original_mode == ACPI_SYS_MODE_LEGACY) {
+ status = acpi_disable();
+ if (ACPI_FAILURE(status)) {
+ ACPI_WARNING((AE_INFO, "AcpiDisable failed"));
+ }
+ }
+ return_VOID;
+}
diff --git a/drivers/acpi/events/evregion.c b/drivers/acpi/events/evregion.c
new file mode 100644
index 0000000..236fbd1
--- /dev/null
+++ b/drivers/acpi/events/evregion.c
@@ -0,0 +1,1074 @@
+/******************************************************************************
+ *
+ * Module Name: evregion - ACPI address_space (op_region) handler dispatch
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acevents.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acinterp.h>
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evregion")
+#define ACPI_NUM_DEFAULT_SPACES 4
+static u8 acpi_gbl_default_address_spaces[ACPI_NUM_DEFAULT_SPACES] = {
+ ACPI_ADR_SPACE_SYSTEM_MEMORY,
+ ACPI_ADR_SPACE_SYSTEM_IO,
+ ACPI_ADR_SPACE_PCI_CONFIG,
+ ACPI_ADR_SPACE_DATA_TABLE
+};
+
+/* Local prototypes */
+
+static acpi_status
+acpi_ev_reg_run(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
+
+static acpi_status
+acpi_ev_install_handler(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_install_region_handlers
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Installs the core subsystem default address space handlers.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_install_region_handlers(void)
+{
+ acpi_status status;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(ev_install_region_handlers);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * All address spaces (PCI Config, EC, SMBus) are scope dependent
+ * and registration must occur for a specific device.
+ *
+ * In the case of the system memory and IO address spaces there is currently
+ * no device associated with the address space. For these we use the root.
+ *
+ * We install the default PCI config space handler at the root so
+	 * that this space is immediately available even though we have
+	 * not enumerated all the PCI Root Buses yet. This is to conform
+	 * to the ACPI specification, which states that the PCI config
+	 * space must always be available -- even though we are nowhere
+ * near ready to find the PCI root buses at this point.
+ *
+ * NOTE: We ignore AE_ALREADY_EXISTS because this means that a handler
+ * has already been installed (via acpi_install_address_space_handler).
+ * Similar for AE_SAME_HANDLER.
+ */
+ for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) {
+ status = acpi_ev_install_space_handler(acpi_gbl_root_node,
+ acpi_gbl_default_address_spaces
+ [i],
+ ACPI_DEFAULT_HANDLER,
+ NULL, NULL);
+ switch (status) {
+ case AE_OK:
+ case AE_SAME_HANDLER:
+ case AE_ALREADY_EXISTS:
+
+ /* These exceptions are all OK */
+
+ status = AE_OK;
+ break;
+
+ default:
+
+ goto unlock_and_exit;
+ }
+ }
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_initialize_op_regions
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Execute _REG methods for all Operation Regions that have
+ * an installed default region handler.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_initialize_op_regions(void)
+{
+ acpi_status status;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(ev_initialize_op_regions);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Run the _REG methods for op_regions in each default address space
+ */
+ for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) {
+
+ /* TBD: Make sure handler is the DEFAULT handler, otherwise
+ * _REG will have already been run.
+ */
+ status = acpi_ev_execute_reg_methods(acpi_gbl_root_node,
+ acpi_gbl_default_address_spaces
+ [i]);
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_execute_reg_method
+ *
+ * PARAMETERS: region_obj - Region object
+ * Function - Passed to _REG: On (1) or Off (0)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Execute _REG method for a region
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
+{
+ struct acpi_evaluate_info *info;
+ union acpi_operand_object *args[3];
+ union acpi_operand_object *region_obj2;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_execute_reg_method);
+
+ region_obj2 = acpi_ns_get_secondary_object(region_obj);
+ if (!region_obj2) {
+ return_ACPI_STATUS(AE_NOT_EXIST);
+ }
+
+ if (region_obj2->extra.method_REG == NULL) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Allocate and initialize the evaluation information block */
+
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ info->prefix_node = region_obj2->extra.method_REG;
+ info->pathname = NULL;
+ info->parameters = args;
+ info->flags = ACPI_IGNORE_RETURN_VALUE;
+
+ /*
+ * The _REG method has two arguments:
+ *
+ * Arg0 - Integer:
+	 *  Operation region space ID. Same value as region_obj->Region.space_id
+	 *
+	 * Arg1 - Integer:
+	 *  Connection status: 1 for connecting the handler, 0 for disconnecting
+	 *  the handler (passed as a parameter)
+ */
+ args[0] = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!args[0]) {
+ status = AE_NO_MEMORY;
+ goto cleanup1;
+ }
+
+ args[1] = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!args[1]) {
+ status = AE_NO_MEMORY;
+ goto cleanup2;
+ }
+
+ /* Setup the parameter objects */
+
+ args[0]->integer.value = region_obj->region.space_id;
+ args[1]->integer.value = function;
+ args[2] = NULL;
+
+ /* Execute the method, no return value */
+
+ ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+ (ACPI_TYPE_METHOD, info->prefix_node, NULL));
+
+ status = acpi_ns_evaluate(info);
+ acpi_ut_remove_reference(args[1]);
+
+ cleanup2:
+ acpi_ut_remove_reference(args[0]);
+
+ cleanup1:
+ ACPI_FREE(info);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_address_space_dispatch
+ *
+ * PARAMETERS: region_obj - Internal region object
+ * Function - Read or Write operation
+ * Address - Where in the space to read or write
+ * bit_width - Field width in bits (8, 16, 32, or 64)
+ * Value - Pointer to in or out value, must be
+ * full 64-bit acpi_integer
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Dispatch an address space or operation region access to
+ * a previously installed handler.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ u32 function,
+ acpi_physical_address address,
+ u32 bit_width, acpi_integer * value)
+{
+ acpi_status status;
+ acpi_adr_space_handler handler;
+ acpi_adr_space_setup region_setup;
+ union acpi_operand_object *handler_desc;
+ union acpi_operand_object *region_obj2;
+ void *region_context = NULL;
+
+ ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
+
+ region_obj2 = acpi_ns_get_secondary_object(region_obj);
+ if (!region_obj2) {
+ return_ACPI_STATUS(AE_NOT_EXIST);
+ }
+
+ /* Ensure that there is a handler associated with this region */
+
+ handler_desc = region_obj->region.handler;
+ if (!handler_desc) {
+ ACPI_ERROR((AE_INFO,
+ "No handler for Region [%4.4s] (%p) [%s]",
+ acpi_ut_get_node_name(region_obj->region.node),
+ region_obj,
+ acpi_ut_get_region_name(region_obj->region.
+ space_id)));
+
+ return_ACPI_STATUS(AE_NOT_EXIST);
+ }
+
+ /*
+	 * It may be the case that the region has never been initialized.
+	 * Some types of regions require special init code.
+ */
+ if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) {
+ /*
+ * This region has not been initialized yet, do it
+ */
+ region_setup = handler_desc->address_space.setup;
+ if (!region_setup) {
+
+ /* No initialization routine, exit with error */
+
+ ACPI_ERROR((AE_INFO,
+ "No init routine for region(%p) [%s]",
+ region_obj,
+ acpi_ut_get_region_name(region_obj->region.
+ space_id)));
+ return_ACPI_STATUS(AE_NOT_EXIST);
+ }
+
+ /*
+ * We must exit the interpreter because the region
+ * setup will potentially execute control methods
+ * (e.g., _REG method for this region)
+ */
+ acpi_ex_exit_interpreter();
+
+ status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
+ handler_desc->address_space.context,
+ &region_context);
+
+ /* Re-enter the interpreter */
+
+ acpi_ex_enter_interpreter();
+
+ /* Check for failure of the Region Setup */
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During region initialization: [%s]",
+ acpi_ut_get_region_name(region_obj->
+ region.
+ space_id)));
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Region initialization may have been completed by region_setup
+ */
+ if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) {
+ region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE;
+
+ if (region_obj2->extra.region_context) {
+
+ /* The handler for this region was already installed */
+
+ ACPI_FREE(region_context);
+ } else {
+ /*
+ * Save the returned context for use in all accesses to
+ * this particular region
+ */
+ region_obj2->extra.region_context =
+ region_context;
+ }
+ }
+ }
+
+ /* We have everything we need, we can invoke the address space handler */
+
+ handler = handler_desc->address_space.handler;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
+ &region_obj->region.handler->address_space, handler,
+ ACPI_FORMAT_NATIVE_UINT(address),
+ acpi_ut_get_region_name(region_obj->region.
+ space_id)));
+
+ if (!(handler_desc->address_space.handler_flags &
+ ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
+ /*
+ * For handlers other than the default (supplied) handlers, we must
+ * exit the interpreter because the handler *might* block -- we don't
+ * know what it will do, so we can't hold the lock on the interpreter.
+ */
+ acpi_ex_exit_interpreter();
+ }
+
+ /* Call the handler */
+
+ status = handler(function, address, bit_width, value,
+ handler_desc->address_space.context,
+ region_obj2->extra.region_context);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]",
+ acpi_ut_get_region_name(region_obj->region.
+ space_id)));
+ }
+
+ if (!(handler_desc->address_space.handler_flags &
+ ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
+ /*
+ * We just returned from a non-default handler, we must re-enter the
+ * interpreter
+ */
+ acpi_ex_enter_interpreter();
+ }
+
+ return_ACPI_STATUS(status);
+}
+
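For reference only (not part of this patch): acpi_ev_address_space_dispatch() above invokes the installed handler as handler(function, address, bit_width, value, handler_context, region_context). A minimal sketch of a handler with that shape follows, assuming the acpi_adr_space_handler prototype of this ACPICA version (Value points at a full 64-bit acpi_integer regardless of bit_width); my_opregion_handler and the read/write bodies are hypothetical placeholders.

static acpi_status
my_opregion_handler(u32 function,
		    acpi_physical_address address,
		    u32 bit_width,
		    acpi_integer *value,
		    void *handler_context, void *region_context)
{
	switch (function) {
	case ACPI_READ:

		/* Read bit_width bits at address and return them in *value */

		*value = 0;
		break;

	case ACPI_WRITE:

		/* Write the low bit_width bits of *value to address */

		break;

	default:
		return (AE_BAD_PARAMETER);
	}

	return (AE_OK);
}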
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_detach_region
+ *
+ * PARAMETERS: region_obj - Region Object
+ * acpi_ns_is_locked - Namespace Region Already Locked?
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Break the association between the handler and the region;
+ *              this is a two-way association.
+ *
+ ******************************************************************************/
+
+void
+acpi_ev_detach_region(union acpi_operand_object *region_obj,
+ u8 acpi_ns_is_locked)
+{
+ union acpi_operand_object *handler_obj;
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object **last_obj_ptr;
+ acpi_adr_space_setup region_setup;
+ void **region_context;
+ union acpi_operand_object *region_obj2;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_detach_region);
+
+ region_obj2 = acpi_ns_get_secondary_object(region_obj);
+ if (!region_obj2) {
+ return_VOID;
+ }
+ region_context = &region_obj2->extra.region_context;
+
+ /* Get the address handler from the region object */
+
+ handler_obj = region_obj->region.handler;
+ if (!handler_obj) {
+
+ /* This region has no handler, all done */
+
+ return_VOID;
+ }
+
+ /* Find this region in the handler's list */
+
+ obj_desc = handler_obj->address_space.region_list;
+ last_obj_ptr = &handler_obj->address_space.region_list;
+
+ while (obj_desc) {
+
+ /* Is this the correct Region? */
+
+ if (obj_desc == region_obj) {
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Removing Region %p from address handler %p\n",
+ region_obj, handler_obj));
+
+ /* This is it, remove it from the handler's list */
+
+ *last_obj_ptr = obj_desc->region.next;
+ obj_desc->region.next = NULL; /* Must clear field */
+
+ if (acpi_ns_is_locked) {
+ status =
+ acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
+ }
+
+ /* Now stop region accesses by executing the _REG method */
+
+ status = acpi_ev_execute_reg_method(region_obj, 0);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "from region _REG, [%s]",
+ acpi_ut_get_region_name
+ (region_obj->region.space_id)));
+ }
+
+ if (acpi_ns_is_locked) {
+ status =
+ acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
+ }
+
+ /*
+ * If the region has been activated, call the setup handler
+ * with the deactivate notification
+ */
+ if (region_obj->region.flags & AOPOBJ_SETUP_COMPLETE) {
+ region_setup = handler_obj->address_space.setup;
+ status =
+ region_setup(region_obj,
+ ACPI_REGION_DEACTIVATE,
+ handler_obj->address_space.
+ context, region_context);
+
+ /* Init routine may fail, just ignore errors */
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "from region handler - deactivate, [%s]",
+ acpi_ut_get_region_name
+ (region_obj->region.
+ space_id)));
+ }
+
+ region_obj->region.flags &=
+ ~(AOPOBJ_SETUP_COMPLETE);
+ }
+
+ /*
+ * Remove handler reference in the region
+ *
+ * NOTE: this doesn't mean that the region goes away, the region
+ * is just inaccessible as indicated to the _REG method
+ *
+ * If the region is on the handler's list, this must be the
+ * region's handler
+ */
+ region_obj->region.handler = NULL;
+ acpi_ut_remove_reference(handler_obj);
+
+ return_VOID;
+ }
+
+ /* Walk the linked list of handlers */
+
+ last_obj_ptr = &obj_desc->region.next;
+ obj_desc = obj_desc->region.next;
+ }
+
+ /* If we get here, the region was not in the handler's region list */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Cannot remove region %p from address handler %p\n",
+ region_obj, handler_obj));
+
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_attach_region
+ *
+ * PARAMETERS: handler_obj - Handler Object
+ * region_obj - Region Object
+ * acpi_ns_is_locked - Namespace Region Already Locked?
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create the association between the handler and the region;
+ *              this is a two-way association.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_attach_region(union acpi_operand_object *handler_obj,
+ union acpi_operand_object *region_obj,
+ u8 acpi_ns_is_locked)
+{
+
+ ACPI_FUNCTION_TRACE(ev_attach_region);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Adding Region [%4.4s] %p to address handler %p [%s]\n",
+ acpi_ut_get_node_name(region_obj->region.node),
+ region_obj, handler_obj,
+ acpi_ut_get_region_name(region_obj->region.
+ space_id)));
+
+ /* Link this region to the front of the handler's list */
+
+ region_obj->region.next = handler_obj->address_space.region_list;
+ handler_obj->address_space.region_list = region_obj;
+
+ /* Install the region's handler */
+
+ if (region_obj->region.handler) {
+ return_ACPI_STATUS(AE_ALREADY_EXISTS);
+ }
+
+ region_obj->region.handler = handler_obj;
+ acpi_ut_add_reference(handler_obj);
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_install_handler
+ *
+ * PARAMETERS: walk_namespace callback
+ *
+ * DESCRIPTION: This routine installs an address handler into objects that are
+ * of type Region or Device.
+ *
+ * If the Object is a Device and the device already has a
+ * handler of the same type, the search is terminated in that
+ * branch, because the existing handler is closer to any regions
+ * below it than the one we are trying to install.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ev_install_handler(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ union acpi_operand_object *handler_obj;
+ union acpi_operand_object *next_handler_obj;
+ union acpi_operand_object *obj_desc;
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ ACPI_FUNCTION_NAME(ev_install_handler);
+
+ handler_obj = (union acpi_operand_object *)context;
+
+ /* Parameter validation */
+
+ if (!handler_obj) {
+ return (AE_OK);
+ }
+
+ /* Convert and validate the device handle */
+
+ node = acpi_ns_map_handle_to_node(obj_handle);
+ if (!node) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /*
+ * We only care about regions and objects
+ * that are allowed to have address space handlers
+ */
+ if ((node->type != ACPI_TYPE_DEVICE) &&
+ (node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) {
+ return (AE_OK);
+ }
+
+ /* Check for an existing internal object */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+
+ /* No object, just exit */
+
+ return (AE_OK);
+ }
+
+ /* Devices are handled differently from regions */
+
+ if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_DEVICE) {
+
+ /* Check if this Device already has a handler for this address space */
+
+ next_handler_obj = obj_desc->device.handler;
+ while (next_handler_obj) {
+
+ /* Found a handler, is it for the same address space? */
+
+ if (next_handler_obj->address_space.space_id ==
+ handler_obj->address_space.space_id) {
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Found handler for region [%s] in device %p(%p) handler %p\n",
+ acpi_ut_get_region_name
+ (handler_obj->address_space.
+ space_id), obj_desc,
+ next_handler_obj,
+ handler_obj));
+
+ /*
+ * Since the object we found it on is a device, someone has
+ * already installed a handler for the branch of the namespace
+ * from this device on. Just bail out, telling the walk routine
+ * not to traverse this branch. This preserves the scoping
+ * rule for handlers.
+ */
+ return (AE_CTRL_DEPTH);
+ }
+
+ /* Walk the linked list of handlers attached to this device */
+
+ next_handler_obj = next_handler_obj->address_space.next;
+ }
+
+ /*
+ * As long as the device didn't have a handler for this
+ * space we don't care about it. We just ignore it and
+ * proceed.
+ */
+ return (AE_OK);
+ }
+
+ /* Object is a Region */
+
+ if (obj_desc->region.space_id != handler_obj->address_space.space_id) {
+ /*
+ * This region is for a different address space
+ * -- just ignore it
+ */
+ return (AE_OK);
+ }
+
+ /*
+ * Now we have a region and it is for the handler's address
+ * space type.
+ *
+ * First disconnect region for any previous handler (if any)
+ */
+ acpi_ev_detach_region(obj_desc, FALSE);
+
+ /* Connect the region to the new handler */
+
+ status = acpi_ev_attach_region(handler_obj, obj_desc, FALSE);
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_install_space_handler
+ *
+ * PARAMETERS: Node - Namespace node for the device
+ * space_id - The address space ID
+ * Handler - Address of the handler
+ * Setup - Address of the setup function
+ * Context - Value passed to the handler on each access
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a handler for all op_regions of a given space_id.
+ * Assumes namespace is locked
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_install_space_handler(struct acpi_namespace_node * node,
+ acpi_adr_space_type space_id,
+ acpi_adr_space_handler handler,
+ acpi_adr_space_setup setup, void *context)
+{
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object *handler_obj;
+ acpi_status status;
+ acpi_object_type type;
+ u8 flags = 0;
+
+ ACPI_FUNCTION_TRACE(ev_install_space_handler);
+
+ /*
+ * This registration is valid only for the object types below
+ * and the root node. This is where the default handlers
+ * get placed.
+ */
+ if ((node->type != ACPI_TYPE_DEVICE) &&
+ (node->type != ACPI_TYPE_PROCESSOR) &&
+ (node->type != ACPI_TYPE_THERMAL) && (node != acpi_gbl_root_node)) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ if (handler == ACPI_DEFAULT_HANDLER) {
+ flags = ACPI_ADDR_HANDLER_DEFAULT_INSTALLED;
+
+ switch (space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ handler = acpi_ex_system_memory_space_handler;
+ setup = acpi_ev_system_memory_region_setup;
+ break;
+
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ handler = acpi_ex_system_io_space_handler;
+ setup = acpi_ev_io_space_region_setup;
+ break;
+
+ case ACPI_ADR_SPACE_PCI_CONFIG:
+ handler = acpi_ex_pci_config_space_handler;
+ setup = acpi_ev_pci_config_region_setup;
+ break;
+
+ case ACPI_ADR_SPACE_CMOS:
+ handler = acpi_ex_cmos_space_handler;
+ setup = acpi_ev_cmos_region_setup;
+ break;
+
+ case ACPI_ADR_SPACE_PCI_BAR_TARGET:
+ handler = acpi_ex_pci_bar_space_handler;
+ setup = acpi_ev_pci_bar_region_setup;
+ break;
+
+ case ACPI_ADR_SPACE_DATA_TABLE:
+ handler = acpi_ex_data_table_space_handler;
+ setup = NULL;
+ break;
+
+ default:
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+ }
+
+ /* If the caller hasn't specified a setup routine, use the default */
+
+ if (!setup) {
+ setup = acpi_ev_default_region_setup;
+ }
+
+ /* Check for an existing internal object */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (obj_desc) {
+ /*
+ * The attached device object already exists.
+ * Make sure the handler is not already installed.
+ */
+ handler_obj = obj_desc->device.handler;
+
+ /* Walk the handler list for this device */
+
+ while (handler_obj) {
+
+ /* Same space_id indicates a handler already installed */
+
+ if (handler_obj->address_space.space_id == space_id) {
+ if (handler_obj->address_space.handler ==
+ handler) {
+ /*
+ * It is (relatively) OK to attempt to install the SAME
+ * handler twice. This can easily happen
+ * with PCI_Config space.
+ */
+ status = AE_SAME_HANDLER;
+ goto unlock_and_exit;
+ } else {
+ /* A handler is already installed */
+
+ status = AE_ALREADY_EXISTS;
+ }
+ goto unlock_and_exit;
+ }
+
+ /* Walk the linked list of handlers */
+
+ handler_obj = handler_obj->address_space.next;
+ }
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Creating object on Device %p while installing handler\n",
+ node));
+
+ /* obj_desc does not exist, create one */
+
+ if (node->type == ACPI_TYPE_ANY) {
+ type = ACPI_TYPE_DEVICE;
+ } else {
+ type = node->type;
+ }
+
+ obj_desc = acpi_ut_create_internal_object(type);
+ if (!obj_desc) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
+
+ /* Init new descriptor */
+
+ obj_desc->common.type = (u8) type;
+
+ /* Attach the new object to the Node */
+
+ status = acpi_ns_attach_object(node, obj_desc, type);
+
+ /* Remove local reference to the object */
+
+ acpi_ut_remove_reference(obj_desc);
+
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Installing address handler for region %s(%X) on Device %4.4s %p(%p)\n",
+ acpi_ut_get_region_name(space_id), space_id,
+ acpi_ut_get_node_name(node), node, obj_desc));
+
+ /*
+ * Install the handler
+ *
+ * At this point there is no existing handler.
+ * Just allocate the object for the handler and link it
+ * into the list.
+ */
+ handler_obj =
+ acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_ADDRESS_HANDLER);
+ if (!handler_obj) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
+
+ /* Init handler obj */
+
+ handler_obj->address_space.space_id = (u8) space_id;
+ handler_obj->address_space.handler_flags = flags;
+ handler_obj->address_space.region_list = NULL;
+ handler_obj->address_space.node = node;
+ handler_obj->address_space.handler = handler;
+ handler_obj->address_space.context = context;
+ handler_obj->address_space.setup = setup;
+
+ /* Install at head of Device.address_space list */
+
+ handler_obj->address_space.next = obj_desc->device.handler;
+
+ /*
+ * The Device object is the first reference on the handler_obj.
+ * Each region that uses the handler adds a reference.
+ */
+ obj_desc->device.handler = handler_obj;
+
+ /*
+ * Walk the namespace finding all of the regions this
+ * handler will manage.
+ *
+ * Start at the device and search the branch toward
+ * the leaf nodes until either the leaf is encountered or
+ * a device is detected that has an address handler of the
+ * same type.
+ *
+ * In either case, back up and search down the remainder
+ * of the branch
+ */
+ status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
+ ACPI_NS_WALK_UNLOCK,
+ acpi_ev_install_handler, handler_obj,
+ NULL);
+
+ unlock_and_exit:
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_execute_reg_methods
+ *
+ * PARAMETERS: Node - Namespace node for the device
+ * space_id - The address space ID
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Run all _REG methods for the input Space ID;
+ * Note: assumes the namespace is locked, or that this is system init time.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
+ acpi_adr_space_type space_id)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_execute_reg_methods);
+
+ /*
+ * Run all _REG methods for all Operation Regions for this
+ * space ID. This is a separate walk in order to handle any
+ * interdependencies between regions and _REG methods. (i.e. handlers
+ * must be installed for all regions of this Space ID before we
+ * can run any _REG methods)
+ */
+ status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
+ ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run,
+ &space_id, NULL);
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_reg_run
+ *
+ * PARAMETERS: walk_namespace callback
+ *
+ * DESCRIPTION: Run the _REG method for region objects of the requested space_id
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ev_reg_run(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ union acpi_operand_object *obj_desc;
+ struct acpi_namespace_node *node;
+ acpi_adr_space_type space_id;
+ acpi_status status;
+
+ space_id = *ACPI_CAST_PTR(acpi_adr_space_type, context);
+
+ /* Convert and validate the device handle */
+
+ node = acpi_ns_map_handle_to_node(obj_handle);
+ if (!node) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /*
+ * We only care about regions and objects
+ * that are allowed to have address space handlers
+ */
+ if ((node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) {
+ return (AE_OK);
+ }
+
+ /* Check for an existing internal object */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+
+ /* No object, just exit */
+
+ return (AE_OK);
+ }
+
+ /* Object is a Region */
+
+ if (obj_desc->region.space_id != space_id) {
+ /*
+ * This region is for a different address space
+ * -- just ignore it
+ */
+ return (AE_OK);
+ }
+
+ status = acpi_ev_execute_reg_method(obj_desc, 1);
+ return (status);
+}
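A hedged usage sketch (not part of this patch): drivers normally reach acpi_ev_install_space_handler() through the public acpi_install_address_space_handler() interface, for example to claim the Embedded Controller address space on a device they own. The names my_claim_ec_space, my_ec_handle, and my_context are hypothetical, and my_opregion_handler refers to the earlier sketch; passing a NULL setup routine falls back to acpi_ev_default_region_setup().

static acpi_status my_claim_ec_space(acpi_handle my_ec_handle, void *my_context)
{
	/* Install my_opregion_handler for the EC address space on this device */

	return acpi_install_address_space_handler(my_ec_handle,
						  ACPI_ADR_SPACE_EC,
						  my_opregion_handler,
						  NULL,	/* default region setup */
						  my_context);
}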
diff --git a/drivers/acpi/events/evrgnini.c b/drivers/acpi/events/evrgnini.c
new file mode 100644
index 0000000..6b94b38
--- /dev/null
+++ b/drivers/acpi/events/evrgnini.c
@@ -0,0 +1,688 @@
+/******************************************************************************
+ *
+ * Module Name: evrgnini - ACPI address_space (op_region) init
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acevents.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evrgnini")
+
+/* Local prototypes */
+static u8 acpi_ev_match_pci_root_bridge(char *id);
+
+static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_system_memory_region_setup
+ *
+ * PARAMETERS: Handle - Region we are interested in
+ * Function - Start or stop
+ * handler_context - Address space handler context
+ * region_context - Region specific context
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Setup a system_memory operation region
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_system_memory_region_setup(acpi_handle handle,
+ u32 function,
+ void *handler_context, void **region_context)
+{
+ union acpi_operand_object *region_desc =
+ (union acpi_operand_object *)handle;
+ struct acpi_mem_space_context *local_region_context;
+
+ ACPI_FUNCTION_TRACE(ev_system_memory_region_setup);
+
+ if (function == ACPI_REGION_DEACTIVATE) {
+ if (*region_context) {
+ local_region_context =
+ (struct acpi_mem_space_context *)*region_context;
+
+ /* Delete a cached mapping if present */
+
+ if (local_region_context->mapped_length) {
+ acpi_os_unmap_memory(local_region_context->
+ mapped_logical_address,
+ local_region_context->
+ mapped_length);
+ }
+ ACPI_FREE(local_region_context);
+ *region_context = NULL;
+ }
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Create a new context */
+
+ local_region_context =
+ ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_mem_space_context));
+ if (!(local_region_context)) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Save the region length and address for use in the handler */
+
+ local_region_context->length = region_desc->region.length;
+ local_region_context->address = region_desc->region.address;
+
+ *region_context = local_region_context;
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_io_space_region_setup
+ *
+ * PARAMETERS: Handle - Region we are interested in
+ * Function - Start or stop
+ * handler_context - Address space handler context
+ * region_context - Region specific context
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Setup an IO operation region
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_io_space_region_setup(acpi_handle handle,
+ u32 function,
+ void *handler_context, void **region_context)
+{
+ ACPI_FUNCTION_TRACE(ev_io_space_region_setup);
+
+ if (function == ACPI_REGION_DEACTIVATE) {
+ *region_context = NULL;
+ } else {
+ *region_context = handler_context;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_pci_config_region_setup
+ *
+ * PARAMETERS: Handle - Region we are interested in
+ * Function - Start or stop
+ * handler_context - Address space handler context
+ * region_context - Region specific context
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Setup a PCI_Config operation region
+ *
+ * MUTEX: Assumes namespace is not locked
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_pci_config_region_setup(acpi_handle handle,
+ u32 function,
+ void *handler_context, void **region_context)
+{
+ acpi_status status = AE_OK;
+ acpi_integer pci_value;
+ struct acpi_pci_id *pci_id = *region_context;
+ union acpi_operand_object *handler_obj;
+ struct acpi_namespace_node *parent_node;
+ struct acpi_namespace_node *pci_root_node;
+ struct acpi_namespace_node *pci_device_node;
+ union acpi_operand_object *region_obj =
+ (union acpi_operand_object *)handle;
+
+ ACPI_FUNCTION_TRACE(ev_pci_config_region_setup);
+
+ handler_obj = region_obj->region.handler;
+ if (!handler_obj) {
+ /*
+ * No installed handler. This shouldn't happen because the dispatch
+ * routine checks before we get here, but we check again just in case.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Attempting to init a region %p, with no handler\n",
+ region_obj));
+ return_ACPI_STATUS(AE_NOT_EXIST);
+ }
+
+ *region_context = NULL;
+ if (function == ACPI_REGION_DEACTIVATE) {
+ if (pci_id) {
+ ACPI_FREE(pci_id);
+ }
+ return_ACPI_STATUS(status);
+ }
+
+ parent_node = acpi_ns_get_parent_node(region_obj->region.node);
+
+ /*
+ * Get the _SEG and _BBN values from the device upon which the handler
+ * is installed.
+ *
+ * We need to get the _SEG and _BBN objects relative to the PCI BUS device.
+ * This is the device the handler has been registered to handle.
+ */
+
+ /*
+ * If the address_space.Node is still pointing to the root, we need
+ * to scan upward for a PCI Root bridge and re-associate the op_region
+ * handlers with that device.
+ */
+ if (handler_obj->address_space.node == acpi_gbl_root_node) {
+
+ /* Start search from the parent object */
+
+ pci_root_node = parent_node;
+ while (pci_root_node != acpi_gbl_root_node) {
+
+ /* Get the _HID/_CID in order to detect a root_bridge */
+
+ if (acpi_ev_is_pci_root_bridge(pci_root_node)) {
+
+ /* Install a handler for this PCI root bridge */
+
+ status =
+ acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_SAME_HANDLER) {
+ /*
+ * It is OK if the handler is already installed on the root
+ * bridge. Still need to return a context object for the
+ * new PCI_Config operation region, however.
+ */
+ status = AE_OK;
+ } else {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not install PciConfig handler for Root Bridge %4.4s",
+ acpi_ut_get_node_name
+ (pci_root_node)));
+ }
+ }
+ break;
+ }
+
+ pci_root_node = acpi_ns_get_parent_node(pci_root_node);
+ }
+
+ /* PCI root bridge not found, use namespace root node */
+ } else {
+ pci_root_node = handler_obj->address_space.node;
+ }
+
+ /*
+ * If this region is now initialized, we are done.
+ * (install_address_space_handler could have initialized it)
+ */
+ if (region_obj->region.flags & AOPOBJ_SETUP_COMPLETE) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Region is still not initialized. Create a new context */
+
+ pci_id = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pci_id));
+ if (!pci_id) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /*
+ * For PCI_Config space access, we need the segment, bus,
+ * device and function numbers. Acquire them here.
+ *
+ * Find the parent device object. (This allows the operation region to be
+ * within a subscope under the device, such as a control method.)
+ */
+ pci_device_node = region_obj->region.node;
+ while (pci_device_node && (pci_device_node->type != ACPI_TYPE_DEVICE)) {
+ pci_device_node = acpi_ns_get_parent_node(pci_device_node);
+ }
+
+ if (!pci_device_node) {
+ ACPI_FREE(pci_id);
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ /*
+ * Get the PCI device and function numbers from the _ADR object
+ * contained in the parent's scope.
+ */
+ status =
+ acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, pci_device_node,
+ &pci_value);
+
+ /*
+ * The default is zero, and since the allocation above zeroed
+ * the data, just do nothing on failure.
+ */
+ if (ACPI_SUCCESS(status)) {
+ pci_id->device = ACPI_HIWORD(ACPI_LODWORD(pci_value));
+ pci_id->function = ACPI_LOWORD(ACPI_LODWORD(pci_value));
+ }
+
+ /* The PCI segment number comes from the _SEG method */
+
+ status =
+ acpi_ut_evaluate_numeric_object(METHOD_NAME__SEG, pci_root_node,
+ &pci_value);
+ if (ACPI_SUCCESS(status)) {
+ pci_id->segment = ACPI_LOWORD(pci_value);
+ }
+
+ /* The PCI bus number comes from the _BBN method */
+
+ status =
+ acpi_ut_evaluate_numeric_object(METHOD_NAME__BBN, pci_root_node,
+ &pci_value);
+ if (ACPI_SUCCESS(status)) {
+ pci_id->bus = ACPI_LOWORD(pci_value);
+ }
+
+ /* Complete this device's pci_id */
+
+ acpi_os_derive_pci_id(pci_root_node, region_obj->region.node, &pci_id);
+
+ *region_context = pci_id;
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_match_pci_root_bridge
+ *
+ * PARAMETERS: Id - The HID/CID in string format
+ *
+ * RETURN: TRUE if the Id is a match for a PCI/PCI-Express Root Bridge
+ *
+ * DESCRIPTION: Determine if the input ID is a PCI Root Bridge ID.
+ *
+ ******************************************************************************/
+
+static u8 acpi_ev_match_pci_root_bridge(char *id)
+{
+
+ /*
+ * Check if this is a PCI root.
+ * ACPI 3.0+: check for a PCI Express root also.
+ */
+ if (!(ACPI_STRNCMP(id,
+ PCI_ROOT_HID_STRING,
+ sizeof(PCI_ROOT_HID_STRING))) ||
+ !(ACPI_STRNCMP(id,
+ PCI_EXPRESS_ROOT_HID_STRING,
+ sizeof(PCI_EXPRESS_ROOT_HID_STRING)))) {
+ return (TRUE);
+ }
+
+ return (FALSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_is_pci_root_bridge
+ *
+ * PARAMETERS: Node - Device node being examined
+ *
+ * RETURN: TRUE if device is a PCI/PCI-Express Root Bridge
+ *
+ * DESCRIPTION: Determine if the input device represents a PCI Root Bridge by
+ * examining the _HID and _CID for the device.
+ *
+ ******************************************************************************/
+
+static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
+{
+ acpi_status status;
+ struct acpica_device_id hid;
+ struct acpi_compatible_id_list *cid;
+ u32 i;
+
+ /*
+ * Get the _HID and check for a PCI Root Bridge
+ */
+ status = acpi_ut_execute_HID(node, &hid);
+ if (ACPI_FAILURE(status)) {
+ return (FALSE);
+ }
+
+ if (acpi_ev_match_pci_root_bridge(hid.value)) {
+ return (TRUE);
+ }
+
+ /*
+ * The _HID did not match.
+ * Get the _CID and check for a PCI Root Bridge
+ */
+ status = acpi_ut_execute_CID(node, &cid);
+ if (ACPI_FAILURE(status)) {
+ return (FALSE);
+ }
+
+ /* Check all _CIDs in the returned list */
+
+ for (i = 0; i < cid->count; i++) {
+ if (acpi_ev_match_pci_root_bridge(cid->id[i].value)) {
+ ACPI_FREE(cid);
+ return (TRUE);
+ }
+ }
+
+ ACPI_FREE(cid);
+ return (FALSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_pci_bar_region_setup
+ *
+ * PARAMETERS: Handle - Region we are interested in
+ * Function - Start or stop
+ * handler_context - Address space handler context
+ * region_context - Region specific context
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Setup a pci_BAR operation region
+ *
+ * MUTEX: Assumes namespace is not locked
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_pci_bar_region_setup(acpi_handle handle,
+ u32 function,
+ void *handler_context, void **region_context)
+{
+ ACPI_FUNCTION_TRACE(ev_pci_bar_region_setup);
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_cmos_region_setup
+ *
+ * PARAMETERS: Handle - Region we are interested in
+ * Function - Start or stop
+ * handler_context - Address space handler context
+ * region_context - Region specific context
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Setup a CMOS operation region
+ *
+ * MUTEX: Assumes namespace is not locked
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_cmos_region_setup(acpi_handle handle,
+ u32 function,
+ void *handler_context, void **region_context)
+{
+ ACPI_FUNCTION_TRACE(ev_cmos_region_setup);
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_default_region_setup
+ *
+ * PARAMETERS: Handle - Region we are interested in
+ * Function - Start or stop
+ * handler_context - Address space handler context
+ * region_context - Region specific context
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Default region initialization
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_default_region_setup(acpi_handle handle,
+ u32 function,
+ void *handler_context, void **region_context)
+{
+ ACPI_FUNCTION_TRACE(ev_default_region_setup);
+
+ if (function == ACPI_REGION_DEACTIVATE) {
+ *region_context = NULL;
+ } else {
+ *region_context = handler_context;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_initialize_region
+ *
+ * PARAMETERS: region_obj - Region we are initializing
+ * acpi_ns_locked - Is namespace locked?
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initializes the region, finds any _REG methods and saves them
+ * for execution at a later time
+ *
+ * Get the appropriate address space handler for a newly
+ * created region.
+ *
+ * This also performs address space specific initialization. For
+ * example, PCI regions must have an _ADR object that contains
+ * a PCI address in the scope of the definition. This address is
+ * required to perform an access to PCI config space.
+ *
+ * MUTEX: Interpreter should be unlocked, because we may run the _REG
+ * method for this region.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_initialize_region(union acpi_operand_object *region_obj,
+ u8 acpi_ns_locked)
+{
+ union acpi_operand_object *handler_obj;
+ union acpi_operand_object *obj_desc;
+ acpi_adr_space_type space_id;
+ struct acpi_namespace_node *node;
+ acpi_status status;
+ struct acpi_namespace_node *method_node;
+ acpi_name *reg_name_ptr = (acpi_name *) METHOD_NAME__REG;
+ union acpi_operand_object *region_obj2;
+
+ ACPI_FUNCTION_TRACE_U32(ev_initialize_region, acpi_ns_locked);
+
+ if (!region_obj) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ if (region_obj->common.flags & AOPOBJ_OBJECT_INITIALIZED) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ region_obj2 = acpi_ns_get_secondary_object(region_obj);
+ if (!region_obj2) {
+ return_ACPI_STATUS(AE_NOT_EXIST);
+ }
+
+ node = acpi_ns_get_parent_node(region_obj->region.node);
+ space_id = region_obj->region.space_id;
+
+ /* Setup defaults */
+
+ region_obj->region.handler = NULL;
+ region_obj2->extra.method_REG = NULL;
+ region_obj->common.flags &= ~(AOPOBJ_SETUP_COMPLETE);
+ region_obj->common.flags |= AOPOBJ_OBJECT_INITIALIZED;
+
+ /* Find any "_REG" method associated with this region definition */
+
+ status =
+ acpi_ns_search_one_scope(*reg_name_ptr, node, ACPI_TYPE_METHOD,
+ &method_node);
+ if (ACPI_SUCCESS(status)) {
+ /*
+ * The _REG method is optional and there can be only one per region
+ * definition. This will be executed when the handler is attached
+ * or removed
+ */
+ region_obj2->extra.method_REG = method_node;
+ }
+
+ /*
+ * The following loop depends upon the root Node having no parent,
+ * i.e., acpi_gbl_root_node->parent_entry being set to NULL.
+ */
+ while (node) {
+
+ /* Check to see if a handler exists */
+
+ handler_obj = NULL;
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (obj_desc) {
+
+ /* Can only be a handler if the object exists */
+
+ switch (node->type) {
+ case ACPI_TYPE_DEVICE:
+
+ handler_obj = obj_desc->device.handler;
+ break;
+
+ case ACPI_TYPE_PROCESSOR:
+
+ handler_obj = obj_desc->processor.handler;
+ break;
+
+ case ACPI_TYPE_THERMAL:
+
+ handler_obj = obj_desc->thermal_zone.handler;
+ break;
+
+ default:
+ /* Ignore other objects */
+ break;
+ }
+
+ while (handler_obj) {
+
+ /* Is this handler of the correct type? */
+
+ if (handler_obj->address_space.space_id ==
+ space_id) {
+
+ /* Found correct handler */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Found handler %p for region %p in obj %p\n",
+ handler_obj,
+ region_obj,
+ obj_desc));
+
+ status =
+ acpi_ev_attach_region(handler_obj,
+ region_obj,
+ acpi_ns_locked);
+
+ /*
+ * Tell all users that this region is usable by running the _REG
+ * method
+ */
+ if (acpi_ns_locked) {
+ status =
+ acpi_ut_release_mutex
+ (ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS
+ (status);
+ }
+ }
+
+ status =
+ acpi_ev_execute_reg_method
+ (region_obj, 1);
+
+ if (acpi_ns_locked) {
+ status =
+ acpi_ut_acquire_mutex
+ (ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS
+ (status);
+ }
+ }
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Try next handler in the list */
+
+ handler_obj = handler_obj->address_space.next;
+ }
+ }
+
+ /*
+ * This node does not have the handler we need;
+ * Pop up one level
+ */
+ node = acpi_ns_get_parent_node(node);
+ }
+
+ /* If we get here, there is no handler for this region */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "No handler for RegionType %s(%X) (RegionObj %p)\n",
+ acpi_ut_get_region_name(space_id), space_id,
+ region_obj));
+
+ return_ACPI_STATUS(AE_NOT_EXIST);
+}
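For illustration only (not part of this patch): a custom acpi_adr_space_setup callback follows the same activate/deactivate pattern as the setup routines above, allocating per-region state on ACPI_REGION_ACTIVATE and releasing it on ACPI_REGION_DEACTIVATE. The names struct my_region_state and my_region_setup are hypothetical.

struct my_region_state {
	u32 access_count;	/* example per-region bookkeeping */
};

static acpi_status
my_region_setup(acpi_handle handle,
		u32 function, void *handler_context, void **region_context)
{
	if (function == ACPI_REGION_DEACTIVATE) {

		/* Release the per-region state created on activation */

		if (*region_context) {
			ACPI_FREE(*region_context);
			*region_context = NULL;
		}
		return (AE_OK);
	}

	/* ACPI_REGION_ACTIVATE: allocate the per-region state */

	*region_context = ACPI_ALLOCATE_ZEROED(sizeof(struct my_region_state));
	if (!*region_context) {
		return (AE_NO_MEMORY);
	}

	return (AE_OK);
}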
diff --git a/drivers/acpi/events/evsci.c b/drivers/acpi/events/evsci.c
new file mode 100644
index 0000000..2a8b778
--- /dev/null
+++ b/drivers/acpi/events/evsci.c
@@ -0,0 +1,184 @@
+/*******************************************************************************
+ *
+ * Module Name: evsci - System Control Interrupt configuration and
+ * legacy to ACPI mode state transition functions
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acevents.h>
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evsci")
+
+/* Local prototypes */
+static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_sci_xrupt_handler
+ *
+ * PARAMETERS: Context - Calling Context
+ *
+ * RETURN: Status code indicates whether interrupt was handled.
+ *
+ * DESCRIPTION: Interrupt handler that will figure out what function or
+ * control method to call to deal with a SCI.
+ *
+ ******************************************************************************/
+
+static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
+{
+ struct acpi_gpe_xrupt_info *gpe_xrupt_list = context;
+ u32 interrupt_handled = ACPI_INTERRUPT_NOT_HANDLED;
+
+ ACPI_FUNCTION_TRACE(ev_sci_xrupt_handler);
+
+ /*
+ * We are guaranteed by the ACPI CA initialization/shutdown code that
+ * if this interrupt handler is installed, ACPI is enabled.
+ */
+
+ /*
+ * Fixed Events:
+ * Check for and dispatch any Fixed Events that have occurred
+ */
+ interrupt_handled |= acpi_ev_fixed_event_detect();
+
+ /*
+ * General Purpose Events:
+ * Check for and dispatch any GPEs that have occurred
+ */
+ interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
+
+ return_UINT32(interrupt_handled);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_gpe_xrupt_handler
+ *
+ * PARAMETERS: Context - Calling Context
+ *
+ * RETURN: Status code indicates whether interrupt was handled.
+ *
+ * DESCRIPTION: Handler for GPE Block Device interrupts
+ *
+ ******************************************************************************/
+
+u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context)
+{
+ struct acpi_gpe_xrupt_info *gpe_xrupt_list = context;
+ u32 interrupt_handled = ACPI_INTERRUPT_NOT_HANDLED;
+
+ ACPI_FUNCTION_TRACE(ev_gpe_xrupt_handler);
+
+ /*
+ * We are guaranteed by the ACPI CA initialization/shutdown code that
+ * if this interrupt handler is installed, ACPI is enabled.
+ */
+
+ /*
+ * GPEs:
+ * Check for and dispatch any GPEs that have occurred
+ */
+ interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
+
+ return_UINT32(interrupt_handled);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_ev_install_sci_handler
+ *
+ * PARAMETERS: none
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Installs SCI handler.
+ *
+ ******************************************************************************/
+
+u32 acpi_ev_install_sci_handler(void)
+{
+ u32 status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(ev_install_sci_handler);
+
+ status =
+ acpi_os_install_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
+ acpi_ev_sci_xrupt_handler,
+ acpi_gbl_gpe_xrupt_list_head);
+ return_ACPI_STATUS(status);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_ev_remove_sci_handler
+ *
+ * PARAMETERS: none
+ *
+ * RETURN: AE_OK if handler uninstalled OK, AE_ERROR if handler was not
+ * installed to begin with
+ *
+ * DESCRIPTION: Remove the SCI interrupt handler. No further SCIs will be
+ * taken.
+ *
+ * Note: It doesn't seem important to disable all events or set the event
+ * enable registers to their original values. The OS should disable
+ * the SCI interrupt level when the handler is removed, so no more
+ * events will come in.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_remove_sci_handler(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_remove_sci_handler);
+
+ /* Just let the OS remove the handler and disable the level */
+
+ status =
+ acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
+ acpi_ev_sci_xrupt_handler);
+
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/events/evxface.c b/drivers/acpi/events/evxface.c
new file mode 100644
index 0000000..94a6efe
--- /dev/null
+++ b/drivers/acpi/events/evxface.c
@@ -0,0 +1,820 @@
+/******************************************************************************
+ *
+ * Module Name: evxface - External interfaces for ACPI events
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acevents.h>
+#include <acpi/acinterp.h>
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evxface")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_install_exception_handler
+ *
+ * PARAMETERS: Handler - Pointer to the handler function for the
+ * event
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Saves the pointer to the handler function
+ *
+ ******************************************************************************/
+#ifdef ACPI_FUTURE_USAGE
+acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_install_exception_handler);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Don't allow two handlers. */
+
+ if (acpi_gbl_exception_handler) {
+ status = AE_ALREADY_EXISTS;
+ goto cleanup;
+ }
+
+ /* Install the handler */
+
+ acpi_gbl_exception_handler = handler;
+
+ cleanup:
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
+#endif /* ACPI_FUTURE_USAGE */
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_install_fixed_event_handler
+ *
+ * PARAMETERS: Event - Event type to enable.
+ * Handler - Pointer to the handler function for the
+ * event
+ * Context - Value passed to the handler on each GPE
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Saves the pointer to the handler function and then enables the
+ * event.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_fixed_event_handler(u32 event,
+ acpi_event_handler handler, void *context)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_install_fixed_event_handler);
+
+ /* Parameter validation */
+
+ if (event > ACPI_EVENT_MAX) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Don't allow two handlers. */
+
+ if (NULL != acpi_gbl_fixed_event_handlers[event].handler) {
+ status = AE_ALREADY_EXISTS;
+ goto cleanup;
+ }
+
+ /* Install the handler before enabling the event */
+
+ acpi_gbl_fixed_event_handlers[event].handler = handler;
+ acpi_gbl_fixed_event_handlers[event].context = context;
+
+ status = acpi_clear_event(event);
+ if (ACPI_SUCCESS(status))
+ status = acpi_enable_event(event, 0);
+ if (ACPI_FAILURE(status)) {
+ ACPI_WARNING((AE_INFO, "Could not enable fixed event %X",
+ event));
+
+ /* Remove the handler */
+
+ acpi_gbl_fixed_event_handlers[event].handler = NULL;
+ acpi_gbl_fixed_event_handlers[event].context = NULL;
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Enabled fixed event %X, Handler=%p\n", event,
+ handler));
+ }
+
+ cleanup:
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_fixed_event_handler)
+
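A hedged example of using the interface above (not part of this patch): installing a handler for the fixed power button event. The names my_power_button_handler and my_register_power_button are hypothetical; the handler returns ACPI_INTERRUPT_HANDLED once the event has been serviced.

static u32 my_power_button_handler(void *context)
{
	/* React to the button press, e.g. wake a waiting thread */

	return (ACPI_INTERRUPT_HANDLED);
}

static acpi_status my_register_power_button(void)
{
	return acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
						my_power_button_handler, NULL);
}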
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_remove_fixed_event_handler
+ *
+ * PARAMETERS: Event - Event type to disable.
+ * Handler - Address of the handler
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Disables the event and unregisters the event handler.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(acpi_remove_fixed_event_handler);
+
+ /* Parameter validation */
+
+ if (event > ACPI_EVENT_MAX) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Disable the event before removing the handler */
+
+ status = acpi_disable_event(event, 0);
+
+ /* Always Remove the handler */
+
+ acpi_gbl_fixed_event_handlers[event].handler = NULL;
+ acpi_gbl_fixed_event_handlers[event].context = NULL;
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_WARNING((AE_INFO,
+ "Could not write to fixed event enable register %X",
+ event));
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n",
+ event));
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_install_notify_handler
+ *
+ * PARAMETERS: Device - The device for which notifies will be handled
+ * handler_type - The type of handler:
+ * ACPI_SYSTEM_NOTIFY: system_handler (00-7f)
+ * ACPI_DEVICE_NOTIFY: driver_handler (80-ff)
+ * ACPI_ALL_NOTIFY: both system and device
+ * Handler - Address of the handler
+ * Context - Value passed to the handler on each GPE
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a handler for notifies on an ACPI device
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_notify_handler(acpi_handle device,
+ u32 handler_type,
+ acpi_notify_handler handler, void *context)
+{
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object *notify_obj;
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_install_notify_handler);
+
+ /* Parameter validation */
+
+ if ((!device) ||
+ (!handler) || (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Convert and validate the device handle */
+
+ node = acpi_ns_map_handle_to_node(device);
+ if (!node) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /*
+ * Root Object:
+ * Registering a notify handler on the root object indicates that the
+ * caller wishes to receive notifications for all objects. Note that
+ * only one <external> global handler can be registered (per notify type).
+ */
+ if (device == ACPI_ROOT_OBJECT) {
+
+ /* Make sure the handler is not already installed */
+
+ if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
+ acpi_gbl_system_notify.handler) ||
+ ((handler_type & ACPI_DEVICE_NOTIFY) &&
+ acpi_gbl_device_notify.handler)) {
+ status = AE_ALREADY_EXISTS;
+ goto unlock_and_exit;
+ }
+
+ if (handler_type & ACPI_SYSTEM_NOTIFY) {
+ acpi_gbl_system_notify.node = node;
+ acpi_gbl_system_notify.handler = handler;
+ acpi_gbl_system_notify.context = context;
+ }
+
+ if (handler_type & ACPI_DEVICE_NOTIFY) {
+ acpi_gbl_device_notify.node = node;
+ acpi_gbl_device_notify.handler = handler;
+ acpi_gbl_device_notify.context = context;
+ }
+
+ /* Global notify handler installed */
+ }
+
+ /*
+ * All Other Objects:
+ * Caller will only receive notifications specific to the target object.
+ * Note that only certain object types can receive notifications.
+ */
+ else {
+ /* Notifies allowed on this object? */
+
+ if (!acpi_ev_is_notify_object(node)) {
+ status = AE_TYPE;
+ goto unlock_and_exit;
+ }
+
+ /* Check for an existing internal object */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (obj_desc) {
+
+ /* Object exists - make sure there's no handler */
+
+ if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
+ obj_desc->common_notify.system_notify) ||
+ ((handler_type & ACPI_DEVICE_NOTIFY) &&
+ obj_desc->common_notify.device_notify)) {
+ status = AE_ALREADY_EXISTS;
+ goto unlock_and_exit;
+ }
+ } else {
+ /* Create a new object */
+
+ obj_desc = acpi_ut_create_internal_object(node->type);
+ if (!obj_desc) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
+
+ /* Attach new object to the Node */
+
+ status =
+ acpi_ns_attach_object(device, obj_desc, node->type);
+
+ /* Remove local reference to the object */
+
+ acpi_ut_remove_reference(obj_desc);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+ }
+
+ /* Install the handler */
+
+ notify_obj =
+ acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_NOTIFY);
+ if (!notify_obj) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
+
+ notify_obj->notify.node = node;
+ notify_obj->notify.handler = handler;
+ notify_obj->notify.context = context;
+
+ if (handler_type & ACPI_SYSTEM_NOTIFY) {
+ obj_desc->common_notify.system_notify = notify_obj;
+ }
+
+ if (handler_type & ACPI_DEVICE_NOTIFY) {
+ obj_desc->common_notify.device_notify = notify_obj;
+ }
+
+ if (handler_type == ACPI_ALL_NOTIFY) {
+
+ /* Extra ref if installed in both */
+
+ acpi_ut_add_reference(notify_obj);
+ }
+ }
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_notify_handler)
+
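A hedged usage sketch (not part of this patch): a driver-level notify handler and its installation with acpi_install_notify_handler(). The names my_notify_handler, my_register_notify, and my_device_handle are hypothetical; ACPI_DEVICE_NOTIFY limits delivery to device-specific notify values (0x80-0xFF).

static void my_notify_handler(acpi_handle device, u32 event, void *context)
{
	/* Handle the device-specific notify value delivered in 'event' */
}

static acpi_status my_register_notify(acpi_handle my_device_handle)
{
	return acpi_install_notify_handler(my_device_handle,
					   ACPI_DEVICE_NOTIFY,
					   my_notify_handler, NULL);
}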
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_remove_notify_handler
+ *
+ * PARAMETERS: Device - The device for which notifies will be handled
+ * handler_type - The type of handler:
+ * ACPI_SYSTEM_NOTIFY: system_handler (00-7f)
+ * ACPI_DEVICE_NOTIFY: driver_handler (80-ff)
+ * ACPI_ALL_NOTIFY: both system and device
+ * Handler - Address of the handler
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove a handler for notifies on an ACPI device
+ *
+ ******************************************************************************/
+acpi_status
+acpi_remove_notify_handler(acpi_handle device,
+ u32 handler_type, acpi_notify_handler handler)
+{
+ union acpi_operand_object *notify_obj;
+ union acpi_operand_object *obj_desc;
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_remove_notify_handler);
+
+ /* Parameter validation */
+
+ if ((!device) ||
+ (!handler) || (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) {
+ status = AE_BAD_PARAMETER;
+ goto exit;
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /* Convert and validate the device handle */
+
+ node = acpi_ns_map_handle_to_node(device);
+ if (!node) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Root Object */
+
+ if (device == ACPI_ROOT_OBJECT) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Removing notify handler for namespace root object\n"));
+
+ if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
+ !acpi_gbl_system_notify.handler) ||
+ ((handler_type & ACPI_DEVICE_NOTIFY) &&
+ !acpi_gbl_device_notify.handler)) {
+ status = AE_NOT_EXIST;
+ goto unlock_and_exit;
+ }
+
+ /* Make sure all deferred tasks are completed */
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ acpi_os_wait_events_complete(NULL);
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ if (handler_type & ACPI_SYSTEM_NOTIFY) {
+ acpi_gbl_system_notify.node = NULL;
+ acpi_gbl_system_notify.handler = NULL;
+ acpi_gbl_system_notify.context = NULL;
+ }
+
+ if (handler_type & ACPI_DEVICE_NOTIFY) {
+ acpi_gbl_device_notify.node = NULL;
+ acpi_gbl_device_notify.handler = NULL;
+ acpi_gbl_device_notify.context = NULL;
+ }
+ }
+
+ /* All Other Objects */
+
+ else {
+ /* Notifies allowed on this object? */
+
+ if (!acpi_ev_is_notify_object(node)) {
+ status = AE_TYPE;
+ goto unlock_and_exit;
+ }
+
+ /* Check for an existing internal object */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+ status = AE_NOT_EXIST;
+ goto unlock_and_exit;
+ }
+
+ /* Object exists - make sure there's an existing handler */
+
+ if (handler_type & ACPI_SYSTEM_NOTIFY) {
+ notify_obj = obj_desc->common_notify.system_notify;
+ if (!notify_obj) {
+ status = AE_NOT_EXIST;
+ goto unlock_and_exit;
+ }
+
+ if (notify_obj->notify.handler != handler) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+ /* Make sure all deferred tasks are completed */
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ acpi_os_wait_events_complete(NULL);
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /* Remove the handler */
+ obj_desc->common_notify.system_notify = NULL;
+ acpi_ut_remove_reference(notify_obj);
+ }
+
+ if (handler_type & ACPI_DEVICE_NOTIFY) {
+ notify_obj = obj_desc->common_notify.device_notify;
+ if (!notify_obj) {
+ status = AE_NOT_EXIST;
+ goto unlock_and_exit;
+ }
+
+ if (notify_obj->notify.handler != handler) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+ /* Make sure all deferred tasks are completed */
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ acpi_os_wait_events_complete(NULL);
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /* Remove the handler */
+ obj_desc->common_notify.device_notify = NULL;
+ acpi_ut_remove_reference(notify_obj);
+ }
+ }
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ exit:
+ if (ACPI_FAILURE(status))
+ ACPI_EXCEPTION((AE_INFO, status, "Removing notify handler"));
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_notify_handler)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_install_gpe_handler
+ *
+ * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT
+ * defined GPEs)
+ * gpe_number - The GPE number within the GPE block
+ * Type - Whether this GPE should be treated as an
+ * edge- or level-triggered interrupt.
+ * Address - Address of the handler
+ * Context - Value passed to the handler on each GPE
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a handler for a General Purpose Event.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_gpe_handler(acpi_handle gpe_device,
+ u32 gpe_number,
+ u32 type, acpi_event_handler address, void *context)
+{
+ struct acpi_gpe_event_info *gpe_event_info;
+ struct acpi_handler_info *handler;
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(acpi_install_gpe_handler);
+
+ /* Parameter validation */
+
+ if ((!address) || (type > ACPI_GPE_XRUPT_TYPE_MASK)) {
+ status = AE_BAD_PARAMETER;
+ goto exit;
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (!gpe_event_info) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Make sure that there isn't a handler there already */
+
+ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_HANDLER) {
+ status = AE_ALREADY_EXISTS;
+ goto unlock_and_exit;
+ }
+
+ /* Allocate and init handler object */
+
+ handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_handler_info));
+ if (!handler) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
+
+ handler->address = address;
+ handler->context = context;
+ handler->method_node = gpe_event_info->dispatch.method_node;
+
+ /* Disable the GPE before installing the handler */
+
+ status = acpi_ev_disable_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+
+ /* Install the handler */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ gpe_event_info->dispatch.handler = handler;
+
+	/* Set up dispatch flags to indicate handler (vs. method) */
+
+ gpe_event_info->flags &= ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); /* Clear bits */
+ gpe_event_info->flags |= (u8) (type | ACPI_GPE_DISPATCH_HANDLER);
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ exit:
+ if (ACPI_FAILURE(status))
+ ACPI_EXCEPTION((AE_INFO, status,
+				"Installing GPE handler failed"));
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler)
+
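+/*
+ * Usage sketch (illustrative; my_gpe_handler, gpe_number and my_data are
+ * hypothetical). The handler is an acpi_event_handler, i.e. u32 (*)(void *),
+ * and passing NULL for gpe_device selects the FADT-defined GPE block:
+ *
+ *     static u32 my_gpe_handler(void *context)
+ *     {
+ *         ...
+ *         return ACPI_INTERRUPT_HANDLED;
+ *     }
+ *
+ *     status = acpi_install_gpe_handler(NULL, gpe_number,
+ *                                       ACPI_GPE_EDGE_TRIGGERED,
+ *                                       my_gpe_handler, my_data);
+ */
+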
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_remove_gpe_handler
+ *
+ * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT
+ * defined GPEs)
+ * gpe_number - The GPE number for which the handler is being removed
+ * Address - Address of the handler
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove a handler for a General Purpose acpi_event.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_remove_gpe_handler(acpi_handle gpe_device,
+ u32 gpe_number, acpi_event_handler address)
+{
+ struct acpi_gpe_event_info *gpe_event_info;
+ struct acpi_handler_info *handler;
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(acpi_remove_gpe_handler);
+
+ /* Parameter validation */
+
+ if (!address) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (!gpe_event_info) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Make sure that a handler is indeed installed */
+
+ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) !=
+ ACPI_GPE_DISPATCH_HANDLER) {
+ status = AE_NOT_EXIST;
+ goto unlock_and_exit;
+ }
+
+ /* Make sure that the installed handler is the same */
+
+ if (gpe_event_info->dispatch.handler->address != address) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Disable the GPE before removing the handler */
+
+ status = acpi_ev_disable_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+
+ /* Make sure all deferred tasks are completed */
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ acpi_os_wait_events_complete(NULL);
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Remove the handler */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ handler = gpe_event_info->dispatch.handler;
+
+ /* Restore Method node (if any), set dispatch flags */
+
+ gpe_event_info->dispatch.method_node = handler->method_node;
+ gpe_event_info->flags &= ~ACPI_GPE_DISPATCH_MASK; /* Clear bits */
+ if (handler->method_node) {
+ gpe_event_info->flags |= ACPI_GPE_DISPATCH_METHOD;
+ }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+ /* Now we can free the handler object */
+
+ ACPI_FREE(handler);
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_gpe_handler)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_acquire_global_lock
+ *
+ * PARAMETERS: Timeout - How long the caller is willing to wait
+ * Handle - Where the handle to the lock is returned
+ * (if acquired)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Acquire the ACPI Global Lock
+ *
+ * Note: Allows callers with the same thread ID to acquire the global lock
+ * multiple times. In other words, externally, the behavior of the global lock
+ * is identical to an AML mutex. On the first acquire, a new handle is
+ * returned. On any subsequent calls to acquire by the same thread, the same
+ * handle is returned.
+ *
+ ******************************************************************************/
+acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
+{
+ acpi_status status;
+
+ if (!handle) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /* Must lock interpreter to prevent race conditions */
+
+ acpi_ex_enter_interpreter();
+
+ status = acpi_ex_acquire_mutex_object(timeout,
+ acpi_gbl_global_lock_mutex,
+ acpi_os_get_thread_id());
+
+ if (ACPI_SUCCESS(status)) {
+
+ /* Return the global lock handle (updated in acpi_ev_acquire_global_lock) */
+
+ *handle = acpi_gbl_global_lock_handle;
+ }
+
+ acpi_ex_exit_interpreter();
+ return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_acquire_global_lock)
+
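+/*
+ * Usage sketch (illustrative): acquire with a timeout, use the returned
+ * handle, and release via acpi_release_global_lock() below:
+ *
+ *     u32 lock_handle;
+ *
+ *     status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &lock_handle);
+ *     if (ACPI_SUCCESS(status)) {
+ *         ... access hardware shared with the firmware ...
+ *         acpi_release_global_lock(lock_handle);
+ *     }
+ */
+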
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_release_global_lock
+ *
+ * PARAMETERS: Handle - Returned from acpi_acquire_global_lock
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Release the ACPI Global Lock. The handle must be valid.
+ *
+ ******************************************************************************/
+acpi_status acpi_release_global_lock(u32 handle)
+{
+ acpi_status status;
+
+ if (!handle || (handle != acpi_gbl_global_lock_handle)) {
+ return (AE_NOT_ACQUIRED);
+ }
+
+ status = acpi_ex_release_mutex_object(acpi_gbl_global_lock_mutex);
+ return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_release_global_lock)
diff --git a/drivers/acpi/events/evxfevnt.c b/drivers/acpi/events/evxfevnt.c
new file mode 100644
index 0000000..41554f7
--- /dev/null
+++ b/drivers/acpi/events/evxfevnt.c
@@ -0,0 +1,719 @@
+/******************************************************************************
+ *
+ * Module Name: evxfevnt - External Interfaces, ACPI event disable/enable
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acevents.h>
+#include <acpi/acnamesp.h>
+#include <acpi/actables.h>
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evxfevnt")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_enable
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Transfers the system into ACPI mode.
+ *
+ ******************************************************************************/
+acpi_status acpi_enable(void)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(acpi_enable);
+
+ /* ACPI tables must be present */
+
+ if (!acpi_tb_tables_loaded()) {
+ return_ACPI_STATUS(AE_NO_ACPI_TABLES);
+ }
+
+ /* Check current mode */
+
+ if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+ "System is already in ACPI mode\n"));
+ } else {
+ /* Transition to ACPI mode */
+
+ status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Could not transition to ACPI mode"));
+ return_ACPI_STATUS(status);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+ "Transition to ACPI mode successful\n"));
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enable)
+
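+/*
+ * Usage sketch (illustrative): the host OS normally calls this once during
+ * ACPI initialization, after the tables have been loaded:
+ *
+ *     status = acpi_enable();
+ *     if (ACPI_FAILURE(status))
+ *         ... the platform could not be switched to ACPI mode ...
+ */
+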
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_disable
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Transfers the system into LEGACY (non-ACPI) mode.
+ *
+ ******************************************************************************/
+acpi_status acpi_disable(void)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(acpi_disable);
+
+ if (acpi_hw_get_mode() == ACPI_SYS_MODE_LEGACY) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+ "System is already in legacy (non-ACPI) mode\n"));
+ } else {
+ /* Transition to LEGACY mode */
+
+ status = acpi_hw_set_mode(ACPI_SYS_MODE_LEGACY);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Could not exit ACPI mode to legacy mode"));
+ return_ACPI_STATUS(status);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI mode disabled\n"));
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_disable)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_enable_event
+ *
+ * PARAMETERS: Event - The fixed event to be enabled
+ * Flags - Reserved
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Enable an ACPI event (fixed)
+ *
+ ******************************************************************************/
+acpi_status acpi_enable_event(u32 event, u32 flags)
+{
+ acpi_status status = AE_OK;
+ u32 value;
+
+ ACPI_FUNCTION_TRACE(acpi_enable_event);
+
+ /* Decode the Fixed Event */
+
+ if (event > ACPI_EVENT_MAX) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /*
+ * Enable the requested fixed event (by writing a one to the
+ * enable register bit)
+ */
+ status =
+ acpi_set_register(acpi_gbl_fixed_event_info[event].
+ enable_register_id, 1);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Make sure that the hardware responded */
+
+ status =
+ acpi_get_register(acpi_gbl_fixed_event_info[event].
+ enable_register_id, &value);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ if (value != 1) {
+ ACPI_ERROR((AE_INFO,
+ "Could not enable %s event",
+ acpi_ut_get_event_name(event)));
+ return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enable_event)
+
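+/*
+ * Usage sketch (illustrative): enabling the power button fixed event.
+ * The Flags argument is reserved and is normally passed as zero:
+ *
+ *     status = acpi_enable_event(ACPI_EVENT_POWER_BUTTON, 0);
+ *     if (ACPI_FAILURE(status))
+ *         ... the enable bit did not stick in hardware ...
+ */
+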
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_set_gpe_type
+ *
+ * PARAMETERS: gpe_device - Parent GPE Device
+ * gpe_number - GPE level within the GPE block
+ * Type - New GPE type
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Set the type of an individual GPE
+ *
+ ******************************************************************************/
+acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type)
+{
+ acpi_status status = AE_OK;
+ struct acpi_gpe_event_info *gpe_event_info;
+
+ ACPI_FUNCTION_TRACE(acpi_set_gpe_type);
+
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (!gpe_event_info) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ if ((gpe_event_info->flags & ACPI_GPE_TYPE_MASK) == type) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Set the new type (will disable GPE if currently enabled) */
+
+ status = acpi_ev_set_gpe_type(gpe_event_info, type);
+
+ unlock_and_exit:
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_set_gpe_type)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_enable_gpe
+ *
+ * PARAMETERS: gpe_device - Parent GPE Device
+ * gpe_number - GPE level within the GPE block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Enable an ACPI event (general purpose)
+ *
+ ******************************************************************************/
+acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
+{
+ acpi_status status = AE_OK;
+ acpi_cpu_flags flags;
+ struct acpi_gpe_event_info *gpe_event_info;
+
+ ACPI_FUNCTION_TRACE(acpi_enable_gpe);
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (!gpe_event_info) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Perform the enable */
+
+ status = acpi_ev_enable_gpe(gpe_event_info, TRUE);
+
+ unlock_and_exit:
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
+
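+/*
+ * Usage sketch (illustrative; gpe_number is hypothetical): a driver
+ * typically sets the GPE type first and then enables the GPE:
+ *
+ *     acpi_set_gpe_type(NULL, gpe_number, ACPI_GPE_TYPE_RUNTIME);
+ *     status = acpi_enable_gpe(NULL, gpe_number);
+ */
+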
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_disable_gpe
+ *
+ * PARAMETERS: gpe_device - Parent GPE Device
+ * gpe_number - GPE level within the GPE block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Disable an ACPI event (general purpose)
+ *
+ ******************************************************************************/
+acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
+{
+ acpi_status status = AE_OK;
+ acpi_cpu_flags flags;
+ struct acpi_gpe_event_info *gpe_event_info;
+
+ ACPI_FUNCTION_TRACE(acpi_disable_gpe);
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (!gpe_event_info) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ status = acpi_ev_disable_gpe(gpe_event_info);
+
+unlock_and_exit:
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_disable_event
+ *
+ * PARAMETERS: Event - The fixed event to be disabled
+ * Flags - Reserved
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Disable an ACPI event (fixed)
+ *
+ ******************************************************************************/
+acpi_status acpi_disable_event(u32 event, u32 flags)
+{
+ acpi_status status = AE_OK;
+ u32 value;
+
+ ACPI_FUNCTION_TRACE(acpi_disable_event);
+
+ /* Decode the Fixed Event */
+
+ if (event > ACPI_EVENT_MAX) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /*
+ * Disable the requested fixed event (by writing a zero to the
+ * enable register bit)
+ */
+ status =
+ acpi_set_register(acpi_gbl_fixed_event_info[event].
+ enable_register_id, 0);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status =
+ acpi_get_register(acpi_gbl_fixed_event_info[event].
+ enable_register_id, &value);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ if (value != 0) {
+ ACPI_ERROR((AE_INFO,
+ "Could not disable %s events",
+ acpi_ut_get_event_name(event)));
+ return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_disable_event)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_clear_event
+ *
+ * PARAMETERS: Event - The fixed event to be cleared
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Clear an ACPI event (fixed)
+ *
+ ******************************************************************************/
+acpi_status acpi_clear_event(u32 event)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(acpi_clear_event);
+
+ /* Decode the Fixed Event */
+
+ if (event > ACPI_EVENT_MAX) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /*
+ * Clear the requested fixed event (By writing a one to the
+ * status register bit)
+ */
+ status =
+ acpi_set_register(acpi_gbl_fixed_event_info[event].
+ status_register_id, 1);
+
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_clear_event)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_clear_gpe
+ *
+ * PARAMETERS: gpe_device - Parent GPE Device
+ * gpe_number - GPE level within the GPE block
+ * Flags - Called from an ISR or not
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Clear an ACPI event (general purpose)
+ *
+ ******************************************************************************/
+acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
+{
+ acpi_status status = AE_OK;
+ struct acpi_gpe_event_info *gpe_event_info;
+
+ ACPI_FUNCTION_TRACE(acpi_clear_gpe);
+
+ /* Use semaphore lock if not executing at interrupt level */
+
+ if (flags & ACPI_NOT_ISR) {
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (!gpe_event_info) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ status = acpi_hw_clear_gpe(gpe_event_info);
+
+ unlock_and_exit:
+ if (flags & ACPI_NOT_ISR) {
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ }
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_event_status
+ *
+ * PARAMETERS: Event - The fixed event
+ * event_status - Where the current status of the event will
+ * be returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Obtains and returns the current status of the event
+ *
+ ******************************************************************************/
+acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
+{
+ acpi_status status = AE_OK;
+ u32 value;
+
+ ACPI_FUNCTION_TRACE(acpi_get_event_status);
+
+ if (!event_status) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Decode the Fixed Event */
+
+ if (event > ACPI_EVENT_MAX) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Get the status of the requested fixed event */
+
+ status =
+ acpi_get_register(acpi_gbl_fixed_event_info[event].
+ enable_register_id, &value);
+ if (ACPI_FAILURE(status))
+ return_ACPI_STATUS(status);
+
+ *event_status = value;
+
+ status =
+ acpi_get_register(acpi_gbl_fixed_event_info[event].
+ status_register_id, &value);
+ if (ACPI_FAILURE(status))
+ return_ACPI_STATUS(status);
+
+ if (value)
+ *event_status |= ACPI_EVENT_FLAG_SET;
+
+ if (acpi_gbl_fixed_event_handlers[event].handler)
+ *event_status |= ACPI_EVENT_FLAG_HANDLE;
+
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_event_status)
+
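+/*
+ * Usage sketch (illustrative): polling a fixed event and clearing it when
+ * its status bit is set:
+ *
+ *     acpi_event_status event_status;
+ *
+ *     status = acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &event_status);
+ *     if (ACPI_SUCCESS(status) && (event_status & ACPI_EVENT_FLAG_SET))
+ *         acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
+ */
+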
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_gpe_status
+ *
+ * PARAMETERS: gpe_device - Parent GPE Device
+ * gpe_number - GPE level within the GPE block
+ * Flags - Called from an ISR or not
+ * event_status - Where the current status of the event will
+ * be returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Get status of an event (general purpose)
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_gpe_status(acpi_handle gpe_device,
+ u32 gpe_number, u32 flags, acpi_event_status * event_status)
+{
+ acpi_status status = AE_OK;
+ struct acpi_gpe_event_info *gpe_event_info;
+
+ ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
+
+ /* Use semaphore lock if not executing at interrupt level */
+
+ if (flags & ACPI_NOT_ISR) {
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (!gpe_event_info) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Obtain status on the requested GPE number */
+
+ status = acpi_hw_get_gpe_status(gpe_event_info, event_status);
+
+ if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
+ *event_status |= ACPI_EVENT_FLAG_HANDLE;
+
+ unlock_and_exit:
+ if (flags & ACPI_NOT_ISR) {
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ }
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_install_gpe_block
+ *
+ * PARAMETERS: gpe_device - Handle to the parent GPE Block Device
+ * gpe_block_address - Address and space_id
+ * register_count - Number of GPE register pairs in the block
+ * interrupt_number - H/W interrupt for the block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create and Install a block of GPE registers
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_gpe_block(acpi_handle gpe_device,
+ struct acpi_generic_address *gpe_block_address,
+ u32 register_count, u32 interrupt_number)
+{
+ acpi_status status;
+ union acpi_operand_object *obj_desc;
+ struct acpi_namespace_node *node;
+ struct acpi_gpe_block_info *gpe_block;
+
+ ACPI_FUNCTION_TRACE(acpi_install_gpe_block);
+
+ if ((!gpe_device) || (!gpe_block_address) || (!register_count)) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ node = acpi_ns_map_handle_to_node(gpe_device);
+ if (!node) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /*
+ * For user-installed GPE Block Devices, the gpe_block_base_number
+ * is always zero
+ */
+ status =
+ acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0,
+ interrupt_number, &gpe_block);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+
+ /* Run the _PRW methods and enable the GPEs */
+
+ status = acpi_ev_initialize_gpe_block(node, gpe_block);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+
+ /* Get the device_object attached to the node */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+
+ /* No object, create a new one */
+
+ obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE);
+ if (!obj_desc) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
+
+ status =
+ acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE);
+
+ /* Remove local reference to the object */
+
+ acpi_ut_remove_reference(obj_desc);
+
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+ }
+
+ /* Install the GPE block in the device_object */
+
+ obj_desc->device.gpe_block = gpe_block;
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_gpe_block)
+
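+/*
+ * Usage sketch (illustrative; block_device, block_address, register_count
+ * and irq are hypothetical values for a GPE Block Device found by a driver):
+ *
+ *     struct acpi_generic_address block_address;
+ *
+ *     ... fill in block_address.space_id, block_address.address, etc. ...
+ *
+ *     status = acpi_install_gpe_block(block_device, &block_address,
+ *                                     register_count, irq);
+ */
+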
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_remove_gpe_block
+ *
+ * PARAMETERS: gpe_device - Handle to the parent GPE Block Device
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove a previously installed block of GPE registers
+ *
+ ******************************************************************************/
+acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
+{
+ union acpi_operand_object *obj_desc;
+ acpi_status status;
+ struct acpi_namespace_node *node;
+
+ ACPI_FUNCTION_TRACE(acpi_remove_gpe_block);
+
+ if (!gpe_device) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ node = acpi_ns_map_handle_to_node(gpe_device);
+ if (!node) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Get the device_object attached to the node */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc || !obj_desc->device.gpe_block) {
+ return_ACPI_STATUS(AE_NULL_OBJECT);
+ }
+
+ /* Delete the GPE block (but not the device_object) */
+
+ status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block);
+ if (ACPI_SUCCESS(status)) {
+ obj_desc->device.gpe_block = NULL;
+ }
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
diff --git a/drivers/acpi/events/evxfregn.c b/drivers/acpi/events/evxfregn.c
new file mode 100644
index 0000000..e875080
--- /dev/null
+++ b/drivers/acpi/events/evxfregn.c
@@ -0,0 +1,253 @@
+/******************************************************************************
+ *
+ * Module Name: evxfregn - External Interfaces, ACPI Operation Regions and
+ * Address Spaces.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acevents.h>
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evxfregn")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_install_address_space_handler
+ *
+ * PARAMETERS: Device - Handle for the device
+ * space_id - The address space ID
+ * Handler - Address of the handler
+ * Setup - Address of the setup function
+ * Context - Value passed to the handler on each access
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a handler for all op_regions of a given space_id.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_address_space_handler(acpi_handle device,
+ acpi_adr_space_type space_id,
+ acpi_adr_space_handler handler,
+ acpi_adr_space_setup setup, void *context)
+{
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_install_address_space_handler);
+
+ /* Parameter validation */
+
+ if (!device) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Convert and validate the device handle */
+
+ node = acpi_ns_map_handle_to_node(device);
+ if (!node) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Install the handler for all Regions for this Space ID */
+
+ status =
+ acpi_ev_install_space_handler(node, space_id, handler, setup,
+ context);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+
+ /* Run all _REG methods for this address space */
+
+ status = acpi_ev_execute_reg_methods(node, space_id);
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_address_space_handler)
+
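+/*
+ * Usage sketch (illustrative; my_space_handler and my_context are
+ * hypothetical). Installing a handler for, e.g., the Embedded Controller
+ * space routes every op_region of that space_id under the device to the
+ * handler and runs the matching _REG methods. Setup may be NULL when no
+ * region setup callback is needed:
+ *
+ *     status = acpi_install_address_space_handler(device_handle,
+ *                                                 ACPI_ADR_SPACE_EC,
+ *                                                 my_space_handler,
+ *                                                 NULL, my_context);
+ */
+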
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_remove_address_space_handler
+ *
+ * PARAMETERS: Device - Handle for the device
+ * space_id - The address space ID
+ * Handler - Address of the handler
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove a previously installed handler.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_remove_address_space_handler(acpi_handle device,
+ acpi_adr_space_type space_id,
+ acpi_adr_space_handler handler)
+{
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object *handler_obj;
+ union acpi_operand_object *region_obj;
+ union acpi_operand_object **last_obj_ptr;
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_remove_address_space_handler);
+
+ /* Parameter validation */
+
+ if (!device) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Convert and validate the device handle */
+
+ node = acpi_ns_map_handle_to_node(device);
+ if (!node ||
+ ((node->type != ACPI_TYPE_DEVICE) &&
+ (node->type != ACPI_TYPE_PROCESSOR) &&
+ (node->type != ACPI_TYPE_THERMAL) &&
+ (node != acpi_gbl_root_node))) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Make sure the internal object exists */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+ status = AE_NOT_EXIST;
+ goto unlock_and_exit;
+ }
+
+ /* Find the address handler the user requested */
+
+ handler_obj = obj_desc->device.handler;
+ last_obj_ptr = &obj_desc->device.handler;
+ while (handler_obj) {
+
+ /* We have a handler, see if user requested this one */
+
+ if (handler_obj->address_space.space_id == space_id) {
+
+ /* Handler must be the same as the installed handler */
+
+ if (handler_obj->address_space.handler != handler) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Matched space_id, first dereference this in the Regions */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Removing address handler %p(%p) for region %s on Device %p(%p)\n",
+ handler_obj, handler,
+ acpi_ut_get_region_name(space_id),
+ node, obj_desc));
+
+ region_obj = handler_obj->address_space.region_list;
+
+ /* Walk the handler's region list */
+
+ while (region_obj) {
+ /*
+ * First disassociate the handler from the region.
+ *
+ * NOTE: this doesn't mean that the region goes away
+ * The region is just inaccessible as indicated to
+ * the _REG method
+ */
+ acpi_ev_detach_region(region_obj, TRUE);
+
+ /*
+ * Walk the list: Just grab the head because the
+ * detach_region removed the previous head.
+ */
+ region_obj =
+ handler_obj->address_space.region_list;
+
+ }
+
+ /* Remove this Handler object from the list */
+
+ *last_obj_ptr = handler_obj->address_space.next;
+
+ /* Now we can delete the handler object */
+
+ acpi_ut_remove_reference(handler_obj);
+ goto unlock_and_exit;
+ }
+
+ /* Walk the linked list of handlers */
+
+ last_obj_ptr = &handler_obj->address_space.next;
+ handler_obj = handler_obj->address_space.next;
+ }
+
+ /* The handler does not exist */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Unable to remove address handler %p for %s(%X), DevNode %p, obj %p\n",
+ handler, acpi_ut_get_region_name(space_id), space_id,
+ node, obj_desc));
+
+ status = AE_NOT_EXIST;
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler)
diff --git a/drivers/acpi/executer/Makefile b/drivers/acpi/executer/Makefile
new file mode 100644
index 0000000..e09998a
--- /dev/null
+++ b/drivers/acpi/executer/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for all Linux ACPI interpreter subdirectories
+#
+
+obj-y := exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
+ exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
+ excreate.o exmisc.o exoparg2.o exregion.o exstore.o exutils.o \
+ exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o
+
+EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/executer/exconfig.c b/drivers/acpi/executer/exconfig.c
new file mode 100644
index 0000000..74da6fa
--- /dev/null
+++ b/drivers/acpi/executer/exconfig.c
@@ -0,0 +1,535 @@
+/******************************************************************************
+ *
+ * Module Name: exconfig - Namespace reconfiguration (Load/Unload opcodes)
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acinterp.h>
+#include <acpi/acnamesp.h>
+#include <acpi/actables.h>
+#include <acpi/acdispat.h>
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exconfig")
+
+/* Local prototypes */
+static acpi_status
+acpi_ex_add_table(u32 table_index,
+ struct acpi_namespace_node *parent_node,
+ union acpi_operand_object **ddb_handle);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_add_table
+ *
+ * PARAMETERS: table_index - Index of the table to be added (within the root table list)
+ * parent_node - Where to load the table (scope)
+ * ddb_handle - Where to return the table handle.
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Common function to Install and Load an ACPI table with a
+ * returned table handle.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ex_add_table(u32 table_index,
+ struct acpi_namespace_node *parent_node,
+ union acpi_operand_object **ddb_handle)
+{
+ acpi_status status;
+ union acpi_operand_object *obj_desc;
+
+ ACPI_FUNCTION_TRACE(ex_add_table);
+
+ /* Create an object to be the table handle */
+
+ obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_REFERENCE);
+ if (!obj_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Init the table handle */
+
+ obj_desc->reference.class = ACPI_REFCLASS_TABLE;
+ *ddb_handle = obj_desc;
+
+ /* Install the new table into the local data structures */
+
+ obj_desc->reference.value = table_index;
+
+ /* Add the table to the namespace */
+
+ status = acpi_ns_load_table(table_index, parent_node);
+ if (ACPI_FAILURE(status)) {
+ acpi_ut_remove_reference(obj_desc);
+ *ddb_handle = NULL;
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_load_table_op
+ *
+ * PARAMETERS: walk_state - Current state with operands
+ * return_desc - Where to store the return object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Load an ACPI table from the RSDT/XSDT
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
+ union acpi_operand_object **return_desc)
+{
+ acpi_status status;
+ union acpi_operand_object **operand = &walk_state->operands[0];
+ struct acpi_namespace_node *parent_node;
+ struct acpi_namespace_node *start_node;
+ struct acpi_namespace_node *parameter_node = NULL;
+ union acpi_operand_object *ddb_handle;
+ struct acpi_table_header *table;
+ u32 table_index;
+
+ ACPI_FUNCTION_TRACE(ex_load_table_op);
+
+	/* Validate lengths for the signature_string, oem_id_string, and oem_table_id_string */
+
+ if ((operand[0]->string.length > ACPI_NAME_SIZE) ||
+ (operand[1]->string.length > ACPI_OEM_ID_SIZE) ||
+ (operand[2]->string.length > ACPI_OEM_TABLE_ID_SIZE)) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Find the ACPI table in the RSDT/XSDT */
+
+ status = acpi_tb_find_table(operand[0]->string.pointer,
+ operand[1]->string.pointer,
+ operand[2]->string.pointer, &table_index);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Table not found, return an Integer=0 and AE_OK */
+
+ ddb_handle = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!ddb_handle) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ ddb_handle->integer.value = 0;
+ *return_desc = ddb_handle;
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Default nodes */
+
+ start_node = walk_state->scope_info->scope.node;
+ parent_node = acpi_gbl_root_node;
+
+ /* root_path (optional parameter) */
+
+ if (operand[3]->string.length > 0) {
+ /*
+ * Find the node referenced by the root_path_string. This is the
+ * location within the namespace where the table will be loaded.
+ */
+ status =
+ acpi_ns_get_node(start_node, operand[3]->string.pointer,
+ ACPI_NS_SEARCH_PARENT, &parent_node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /* parameter_path (optional parameter) */
+
+ if (operand[4]->string.length > 0) {
+ if ((operand[4]->string.pointer[0] != '\\') &&
+ (operand[4]->string.pointer[0] != '^')) {
+ /*
+ * Path is not absolute, so it will be relative to the node
+ * referenced by the root_path_string (or the NS root if omitted)
+ */
+ start_node = parent_node;
+ }
+
+ /* Find the node referenced by the parameter_path_string */
+
+ status =
+ acpi_ns_get_node(start_node, operand[4]->string.pointer,
+ ACPI_NS_SEARCH_PARENT, &parameter_node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /* Load the table into the namespace */
+
+ status = acpi_ex_add_table(table_index, parent_node, &ddb_handle);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Parameter Data (optional) */
+
+ if (parameter_node) {
+
+ /* Store the parameter data into the optional parameter object */
+
+ status = acpi_ex_store(operand[5],
+ ACPI_CAST_PTR(union acpi_operand_object,
+ parameter_node),
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ (void)acpi_ex_unload_table(ddb_handle);
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ status = acpi_get_table_by_index(table_index, &table);
+ if (ACPI_SUCCESS(status)) {
+ ACPI_INFO((AE_INFO,
+ "Dynamic OEM Table Load - [%.4s] OemId [%.6s] OemTableId [%.8s]",
+ table->signature, table->oem_id,
+ table->oem_table_id));
+ }
+
+ /* Invoke table handler if present */
+
+ if (acpi_gbl_table_handler) {
+ (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD, table,
+ acpi_gbl_table_handler_context);
+ }
+
+ *return_desc = ddb_handle;
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_load_op
+ *
+ * PARAMETERS: obj_desc - Region or Buffer/Field where the table will be
+ * obtained
+ * Target - Where a handle to the table will be stored
+ * walk_state - Current state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Load an ACPI table from a field or operation region
+ *
+ * NOTE: Region Fields (Field, bank_field, index_fields) are resolved to buffer
+ * objects before this code is reached.
+ *
+ * If source is an operation region, it must refer to system_memory, as
+ * per the ACPI specification.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_load_op(union acpi_operand_object *obj_desc,
+ union acpi_operand_object *target,
+ struct acpi_walk_state *walk_state)
+{
+ union acpi_operand_object *ddb_handle;
+ struct acpi_table_header *table;
+ struct acpi_table_desc table_desc;
+ u32 table_index;
+ acpi_status status;
+ u32 length;
+
+ ACPI_FUNCTION_TRACE(ex_load_op);
+
+ ACPI_MEMSET(&table_desc, 0, sizeof(struct acpi_table_desc));
+
+ /* Source Object can be either an op_region or a Buffer/Field */
+
+ switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+ case ACPI_TYPE_REGION:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Load table from Region %p\n", obj_desc));
+
+ /* Region must be system_memory (from ACPI spec) */
+
+ if (obj_desc->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ /*
+ * If the Region Address and Length have not been previously evaluated,
+ * evaluate them now and save the results.
+ */
+ if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) {
+ status = acpi_ds_get_region_arguments(obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /*
+ * Map the table header and get the actual table length. The region
+ * length is not guaranteed to be the same as the table length.
+ */
+ table = acpi_os_map_memory(obj_desc->region.address,
+ sizeof(struct acpi_table_header));
+ if (!table) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ length = table->length;
+ acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
+
+ /* Must have at least an ACPI table header */
+
+ if (length < sizeof(struct acpi_table_header)) {
+ return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
+ }
+
+ /*
+ * The memory region is not guaranteed to remain stable and we must
+ * copy the table to a local buffer. For example, the memory region
+ * is corrupted after suspend on some machines. Dynamically loaded
+ * tables are usually small, so this overhead is minimal.
+ */
+
+ /* Allocate a buffer for the table */
+
+ table_desc.pointer = ACPI_ALLOCATE(length);
+ if (!table_desc.pointer) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Map the entire table and copy it */
+
+ table = acpi_os_map_memory(obj_desc->region.address, length);
+ if (!table) {
+ ACPI_FREE(table_desc.pointer);
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ ACPI_MEMCPY(table_desc.pointer, table, length);
+ acpi_os_unmap_memory(table, length);
+
+ table_desc.address = obj_desc->region.address;
+ break;
+
+ case ACPI_TYPE_BUFFER: /* Buffer or resolved region_field */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Load table from Buffer or Field %p\n",
+ obj_desc));
+
+ /* Must have at least an ACPI table header */
+
+ if (obj_desc->buffer.length < sizeof(struct acpi_table_header)) {
+ return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
+ }
+
+ /* Get the actual table length from the table header */
+
+ table =
+ ACPI_CAST_PTR(struct acpi_table_header,
+ obj_desc->buffer.pointer);
+ length = table->length;
+
+ /* Table cannot extend beyond the buffer */
+
+ if (length > obj_desc->buffer.length) {
+ return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
+ }
+ if (length < sizeof(struct acpi_table_header)) {
+ return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
+ }
+
+ /*
+ * Copy the table from the buffer because the buffer could be modified
+ * or even deleted in the future
+ */
+ table_desc.pointer = ACPI_ALLOCATE(length);
+ if (!table_desc.pointer) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ ACPI_MEMCPY(table_desc.pointer, table, length);
+ table_desc.address = ACPI_TO_INTEGER(table_desc.pointer);
+ break;
+
+ default:
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ /* Validate table checksum (will not get validated in tb_add_table) */
+
+ status = acpi_tb_verify_checksum(table_desc.pointer, length);
+ if (ACPI_FAILURE(status)) {
+ ACPI_FREE(table_desc.pointer);
+ return_ACPI_STATUS(status);
+ }
+
+ /* Complete the table descriptor */
+
+ table_desc.length = length;
+ table_desc.flags = ACPI_TABLE_ORIGIN_ALLOCATED;
+
+ /* Install the new table into the local data structures */
+
+ status = acpi_tb_add_table(&table_desc, &table_index);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ /*
+ * Add the table to the namespace.
+ *
+ * Note: Load the table objects relative to the root of the namespace.
+ * This appears to go against the ACPI specification, but we do it for
+ * compatibility with other ACPI implementations.
+ */
+ status =
+ acpi_ex_add_table(table_index, acpi_gbl_root_node, &ddb_handle);
+ if (ACPI_FAILURE(status)) {
+
+ /* On error, table_ptr was deallocated above */
+
+ return_ACPI_STATUS(status);
+ }
+
+ /* Store the ddb_handle into the Target operand */
+
+ status = acpi_ex_store(ddb_handle, target, walk_state);
+ if (ACPI_FAILURE(status)) {
+ (void)acpi_ex_unload_table(ddb_handle);
+
+ /* table_ptr was deallocated above */
+
+ acpi_ut_remove_reference(ddb_handle);
+ return_ACPI_STATUS(status);
+ }
+
+ /* Invoke table handler if present */
+
+ if (acpi_gbl_table_handler) {
+ (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD,
+ table_desc.pointer,
+ acpi_gbl_table_handler_context);
+ }
+
+ cleanup:
+ if (ACPI_FAILURE(status)) {
+
+ /* Delete allocated table buffer */
+
+ acpi_tb_delete_table(&table_desc);
+ }
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_unload_table
+ *
+ * PARAMETERS: ddb_handle - Handle to a previously loaded table
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Unload an ACPI table
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
+{
+ acpi_status status = AE_OK;
+ union acpi_operand_object *table_desc = ddb_handle;
+ u32 table_index;
+ struct acpi_table_header *table;
+
+ ACPI_FUNCTION_TRACE(ex_unload_table);
+
+ /*
+ * Validate the handle
+ * Although the handle is partially validated in acpi_ex_reconfiguration(),
+ * when it calls acpi_ex_resolve_operands(), the handle is more completely
+ * validated here.
+ */
+ if ((!ddb_handle) ||
+ (ACPI_GET_DESCRIPTOR_TYPE(ddb_handle) != ACPI_DESC_TYPE_OPERAND) ||
+ (ACPI_GET_OBJECT_TYPE(ddb_handle) != ACPI_TYPE_LOCAL_REFERENCE)) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Get the table index from the ddb_handle */
+
+ table_index = table_desc->reference.value;
+
+ /* Invoke table handler if present */
+
+ if (acpi_gbl_table_handler) {
+ status = acpi_get_table_by_index(table_index, &table);
+ if (ACPI_SUCCESS(status)) {
+ (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_UNLOAD,
+ table,
+ acpi_gbl_table_handler_context);
+ }
+ }
+
+ /*
+ * Delete the entire namespace under this table Node
+ * (Offset contains the table_id)
+ */
+ acpi_tb_delete_namespace_by_owner(table_index);
+ (void)acpi_tb_release_owner_id(table_index);
+
+ acpi_tb_set_table_loaded_flag(table_index, FALSE);
+
+ /* Table unloaded, remove a reference to the ddb_handle object */
+
+ acpi_ut_remove_reference(ddb_handle);
+ return_ACPI_STATUS(AE_OK);
+}
diff --git a/drivers/acpi/executer/exconvrt.c b/drivers/acpi/executer/exconvrt.c
new file mode 100644
index 0000000..1d1f35a
--- /dev/null
+++ b/drivers/acpi/executer/exconvrt.c
@@ -0,0 +1,691 @@
+/******************************************************************************
+ *
+ * Module Name: exconvrt - Object conversion routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acinterp.h>
+#include <acpi/amlcode.h>
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exconvrt")
+
+/* Local prototypes */
+static u32
+acpi_ex_convert_to_ascii(acpi_integer integer,
+ u16 base, u8 * string, u8 max_length);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_convert_to_integer
+ *
+ * PARAMETERS: obj_desc - Object to be converted. Must be an
+ * Integer, Buffer, or String
+ * result_desc - Where the new Integer object is returned
+ * Flags - Used for string conversion
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Convert an ACPI Object to an integer.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
+ union acpi_operand_object **result_desc, u32 flags)
+{
+ union acpi_operand_object *return_desc;
+ u8 *pointer;
+ acpi_integer result;
+ u32 i;
+ u32 count;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_convert_to_integer, obj_desc);
+
+ switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+ case ACPI_TYPE_INTEGER:
+
+ /* No conversion necessary */
+
+ *result_desc = obj_desc;
+ return_ACPI_STATUS(AE_OK);
+
+ case ACPI_TYPE_BUFFER:
+ case ACPI_TYPE_STRING:
+
+ /* Note: Takes advantage of common buffer/string fields */
+
+ pointer = obj_desc->buffer.pointer;
+ count = obj_desc->buffer.length;
+ break;
+
+ default:
+ return_ACPI_STATUS(AE_TYPE);
+ }
+
+ /*
+ * Convert the buffer/string to an integer. A string is interpreted as
+ * a numeral (hexadecimal, or decimal for to_integer - see below); a
+ * buffer is treated as raw data.
+ *
+ * For the buffer case, there are two terminating conditions for the loop:
+ * 1) The size of an integer has been reached, or
+ * 2) The end of the buffer has been reached
+ */
+ result = 0;
+
+ /* String conversion is different than Buffer conversion */
+
+ switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+ case ACPI_TYPE_STRING:
+
+ /*
+ * Convert string to an integer - for most cases, the string must be
+ * hexadecimal as per the ACPI specification. The only exception (as
+ * of ACPI 3.0) is that the to_integer() operator allows both decimal
+ * and hexadecimal strings (hex prefixed with "0x").
+ */
+ status = acpi_ut_strtoul64((char *)pointer, flags, &result);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ /* Check for zero-length buffer */
+
+ if (!count) {
+ return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
+ }
+
+ /* Transfer no more than an integer's worth of data */
+
+ if (count > acpi_gbl_integer_byte_width) {
+ count = acpi_gbl_integer_byte_width;
+ }
+
+ /*
+ * Convert buffer to an integer - we simply grab enough raw data
+ * from the buffer to fill an integer
+ */
+ for (i = 0; i < count; i++) {
+ /*
+ * Get next byte and shift it into the Result.
+ * Little endian is used, meaning that the first byte of the buffer
+ * is the LSB of the integer
+ */
+ result |= (((acpi_integer) pointer[i]) << (i * 8));
+ }
+ break;
+
+ default:
+
+ /* No other types can get here */
+ break;
+ }
+
+ /* Create a new integer */
+
+ return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!return_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
+ ACPI_FORMAT_UINT64(result)));
+
+ /* Save the Result */
+
+ return_desc->integer.value = result;
+ acpi_ex_truncate_for32bit_table(return_desc);
+ *result_desc = return_desc;
+ return_ACPI_STATUS(AE_OK);
+}
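
The buffer branch above packs raw bytes LSB-first; a minimal, self-contained sketch of that rule in plain C (the helper name and the fixed 8-byte cap are illustrative, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

/* Sketch: pack up to 8 raw buffer bytes into an integer; the first
 * byte of the buffer becomes the least significant byte of the result. */
static uint64_t buffer_to_integer(const uint8_t *buf, size_t len)
{
	uint64_t result = 0;
	size_t count = (len > sizeof(result)) ? sizeof(result) : len;
	size_t i;

	for (i = 0; i < count; i++)
		result |= ((uint64_t)buf[i]) << (i * 8);
	return result;
}

int main(void)
{
	const uint8_t buf[] = { 0x78, 0x56, 0x34, 0x12 };

	/* Prints 12345678: buf[0] lands in the low byte */
	printf("%llX\n", (unsigned long long)buffer_to_integer(buf, sizeof(buf)));
	return 0;
}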
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_convert_to_buffer
+ *
+ * PARAMETERS: obj_desc - Object to be converted. Must be an
+ * Integer, Buffer, or String
+ * result_desc - Where the new buffer object is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Convert an ACPI Object to a Buffer
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
+ union acpi_operand_object **result_desc)
+{
+ union acpi_operand_object *return_desc;
+ u8 *new_buf;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_convert_to_buffer, obj_desc);
+
+ switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+ case ACPI_TYPE_BUFFER:
+
+ /* No conversion necessary */
+
+ *result_desc = obj_desc;
+ return_ACPI_STATUS(AE_OK);
+
+ case ACPI_TYPE_INTEGER:
+
+ /*
+ * Create a new Buffer object.
+ * Need enough space for one integer
+ */
+ return_desc =
+ acpi_ut_create_buffer_object(acpi_gbl_integer_byte_width);
+ if (!return_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Copy the integer to the buffer, LSB first */
+
+ new_buf = return_desc->buffer.pointer;
+ ACPI_MEMCPY(new_buf,
+ &obj_desc->integer.value,
+ acpi_gbl_integer_byte_width);
+ break;
+
+ case ACPI_TYPE_STRING:
+
+ /*
+ * Create a new Buffer object
+ * Size will be the string length
+ *
+ * NOTE: Add one to the string length to include the null terminator.
+ * The ACPI spec is unclear on this subject, but there is existing
+ * ASL/AML code that depends on the null being transferred to the new
+ * buffer.
+ */
+ return_desc = acpi_ut_create_buffer_object((acpi_size)
+ obj_desc->string.
+ length + 1);
+ if (!return_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Copy the string to the buffer */
+
+ new_buf = return_desc->buffer.pointer;
+ ACPI_STRNCPY((char *)new_buf, (char *)obj_desc->string.pointer,
+ obj_desc->string.length);
+ break;
+
+ default:
+ return_ACPI_STATUS(AE_TYPE);
+ }
+
+ /* Mark buffer initialized */
+
+ return_desc->common.flags |= AOPOBJ_DATA_VALID;
+ *result_desc = return_desc;
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_convert_to_ascii
+ *
+ * PARAMETERS: Integer - Value to be converted
+ * Base - ACPI_STRING_DECIMAL or ACPI_STRING_HEX
+ * String - Where the string is returned
+ * data_width - Size of data item to be converted, in bytes
+ *
+ * RETURN: Actual string length
+ *
+ * DESCRIPTION: Convert an ACPI Integer to a hex or decimal string
+ *
+ ******************************************************************************/
+
+static u32
+acpi_ex_convert_to_ascii(acpi_integer integer,
+ u16 base, u8 * string, u8 data_width)
+{
+ acpi_integer digit;
+ u32 i;
+ u32 j;
+ u32 k = 0;
+ u32 hex_length;
+ u32 decimal_length;
+ u32 remainder;
+ u8 suppress_zeros;
+
+ ACPI_FUNCTION_ENTRY();
+
+ switch (base) {
+ case 10:
+
+ /* Setup max length for the decimal number */
+
+ switch (data_width) {
+ case 1:
+ decimal_length = ACPI_MAX8_DECIMAL_DIGITS;
+ break;
+
+ case 4:
+ decimal_length = ACPI_MAX32_DECIMAL_DIGITS;
+ break;
+
+ case 8:
+ default:
+ decimal_length = ACPI_MAX64_DECIMAL_DIGITS;
+ break;
+ }
+
+ suppress_zeros = TRUE; /* No leading zeros */
+ remainder = 0;
+
+ for (i = decimal_length; i > 0; i--) {
+
+ /* Divide by nth factor of 10 */
+
+ digit = integer;
+ for (j = 0; j < i; j++) {
+ (void)acpi_ut_short_divide(digit, 10, &digit,
+ &remainder);
+ }
+
+ /* Handle leading zeros */
+
+ if (remainder != 0) {
+ suppress_zeros = FALSE;
+ }
+
+ if (!suppress_zeros) {
+ string[k] = (u8) (ACPI_ASCII_ZERO + remainder);
+ k++;
+ }
+ }
+ break;
+
+ case 16:
+
+ /* hex_length: 2 ascii hex chars per data byte */
+
+ hex_length = ACPI_MUL_2(data_width);
+ for (i = 0, j = (hex_length - 1); i < hex_length; i++, j--) {
+
+ /* Get one hex digit, most significant digits first */
+
+ string[k] =
+ (u8) acpi_ut_hex_to_ascii_char(integer,
+ ACPI_MUL_4(j));
+ k++;
+ }
+ break;
+
+ default:
+ return (0);
+ }
+
+ /*
+ * Since leading zeros are suppressed, we must check for the case where
+ * the integer equals 0
+ *
+ * Finally, null terminate the string and return the length
+ */
+ if (!k) {
+ string[0] = ACPI_ASCII_ZERO;
+ k = 1;
+ }
+
+ string[k] = 0;
+ return ((u32) k);
+}
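
For the base-10 path above, the leading-zero suppression is easier to see in a standalone sketch that uses the native / and % operators where the patch calls acpi_ut_short_divide (the helper name and the hard-coded 20-digit limit are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

/* Sketch: emit a 64-bit value as decimal, most significant digit first,
 * suppressing leading zeros; a value of zero still yields "0". */
static unsigned int u64_to_decimal(uint64_t value, char *out)
{
	unsigned int k = 0;
	int suppress = 1;
	int i, j;

	for (i = 20; i > 0; i--) {	/* 20 = max decimal digits of a u64 */
		uint64_t digit = value;
		unsigned int remainder = 0;

		/* Divide by the nth factor of 10 */
		for (j = 0; j < i; j++) {
			remainder = (unsigned int)(digit % 10);
			digit /= 10;
		}
		if (remainder != 0)
			suppress = 0;
		if (!suppress)
			out[k++] = (char)('0' + remainder);
	}
	if (!k)
		out[k++] = '0';	/* value was zero */
	out[k] = '\0';
	return k;
}

int main(void)
{
	char buf[32];

	u64_to_decimal(90210, buf);
	printf("%s\n", buf);	/* 90210 */
	return 0;
}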
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_convert_to_string
+ *
+ * PARAMETERS: obj_desc - Object to be converted. Must be an
+ * Integer, Buffer, or String
+ * result_desc - Where the string object is returned
+ * Type - String flags (base and conversion type)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Convert an ACPI Object to a string
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
+ union acpi_operand_object ** result_desc, u32 type)
+{
+ union acpi_operand_object *return_desc;
+ u8 *new_buf;
+ u32 i;
+ u32 string_length = 0;
+ u16 base = 16;
+ u8 separator = ',';
+
+ ACPI_FUNCTION_TRACE_PTR(ex_convert_to_string, obj_desc);
+
+ switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+ case ACPI_TYPE_STRING:
+
+ /* No conversion necessary */
+
+ *result_desc = obj_desc;
+ return_ACPI_STATUS(AE_OK);
+
+ case ACPI_TYPE_INTEGER:
+
+ switch (type) {
+ case ACPI_EXPLICIT_CONVERT_DECIMAL:
+
+ /* Make room for maximum decimal number */
+
+ string_length = ACPI_MAX_DECIMAL_DIGITS;
+ base = 10;
+ break;
+
+ default:
+
+ /* Two hex string characters for each integer byte */
+
+ string_length = ACPI_MUL_2(acpi_gbl_integer_byte_width);
+ break;
+ }
+
+ /*
+ * Create a new String
+ * Need enough space for one ASCII integer (plus null terminator)
+ */
+ return_desc =
+ acpi_ut_create_string_object((acpi_size) string_length);
+ if (!return_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ new_buf = return_desc->buffer.pointer;
+
+ /* Convert integer to string */
+
+ string_length =
+ acpi_ex_convert_to_ascii(obj_desc->integer.value, base,
+ new_buf,
+ acpi_gbl_integer_byte_width);
+
+ /* Null terminate at the correct place */
+
+ return_desc->string.length = string_length;
+ new_buf[string_length] = 0;
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ /* Setup string length, base, and separator */
+
+ switch (type) {
+ case ACPI_EXPLICIT_CONVERT_DECIMAL: /* Used by to_decimal_string */
+ /*
+ * From ACPI: "If Data is a buffer, it is converted to a string of
+ * decimal values separated by commas."
+ */
+ base = 10;
+
+ /*
+ * Calculate the final string length. Individual string values
+ * are variable length (include separator for each)
+ */
+ for (i = 0; i < obj_desc->buffer.length; i++) {
+ if (obj_desc->buffer.pointer[i] >= 100) {
+ string_length += 4;
+ } else if (obj_desc->buffer.pointer[i] >= 10) {
+ string_length += 3;
+ } else {
+ string_length += 2;
+ }
+ }
+ break;
+
+ case ACPI_IMPLICIT_CONVERT_HEX:
+ /*
+ * From the ACPI spec:
+ * "The entire contents of the buffer are converted to a string of
+ * two-character hexadecimal numbers, each separated by a space."
+ */
+ separator = ' ';
+ string_length = (obj_desc->buffer.length * 3);
+ break;
+
+ case ACPI_EXPLICIT_CONVERT_HEX: /* Used by to_hex_string */
+ /*
+ * From ACPI: "If Data is a buffer, it is converted to a string of
+ * hexadecimal values separated by commas."
+ */
+ string_length = (obj_desc->buffer.length * 3);
+ break;
+
+ default:
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /*
+ * Create a new string object and string buffer
+ * (-1 because of extra separator included in string_length from above)
+ * Allow creation of zero-length strings from zero-length buffers.
+ */
+ if (string_length) {
+ string_length--;
+ }
+
+ return_desc = acpi_ut_create_string_object((acpi_size)
+ string_length);
+ if (!return_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ new_buf = return_desc->buffer.pointer;
+
+ /*
+ * Convert buffer bytes to hex or decimal values
+ * (separated by commas or spaces)
+ */
+ for (i = 0; i < obj_desc->buffer.length; i++) {
+ new_buf += acpi_ex_convert_to_ascii((acpi_integer)
+ obj_desc->buffer.
+ pointer[i], base,
+ new_buf, 1);
+ *new_buf++ = separator; /* each separated by a comma or space */
+ }
+
+ /*
+ * Null terminate the string
+ * (overwrites final comma/space from above)
+ */
+ if (obj_desc->buffer.length) {
+ new_buf--;
+ }
+ *new_buf = 0;
+ break;
+
+ default:
+ return_ACPI_STATUS(AE_TYPE);
+ }
+
+ *result_desc = return_desc;
+ return_ACPI_STATUS(AE_OK);
+}
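
The length arithmetic for the buffer case above (three characters reserved per byte, with the final separator slot reused for the terminator) can be shown in isolation; in this sketch the helper name is invented and malloc/sprintf stand in for the ACPICA object machinery:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Sketch: render a byte buffer as "0A,1B,FF" the way to_hex_string does -
 * 3 chars reserved per byte, last separator overwritten by the null. */
static char *buffer_to_hex_string(const uint8_t *buf, size_t len)
{
	size_t string_length = len * 3;	/* "XX," per byte */
	char *str, *p;
	size_t i;

	if (string_length)
		string_length--;	/* final ',' becomes the terminator */

	str = malloc(string_length + 1);
	if (!str)
		return NULL;

	p = str;
	for (i = 0; i < len; i++) {
		p += sprintf(p, "%2.2X", buf[i]);
		*p++ = ',';
	}
	if (len)
		p--;			/* back up over the trailing separator */
	*p = '\0';
	return str;
}

int main(void)
{
	const uint8_t buf[] = { 0x0A, 0x1B, 0xFF };
	char *s = buffer_to_hex_string(buf, sizeof(buf));

	printf("%s\n", s);		/* 0A,1B,FF */
	free(s);
	return 0;
}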
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_convert_to_target_type
+ *
+ * PARAMETERS: destination_type - Current type of the destination
+ * source_desc - Source object to be converted.
+ * result_desc - Where the converted object is returned
+ * walk_state - Current method state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Implements "implicit conversion" rules for storing an object.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_convert_to_target_type(acpi_object_type destination_type,
+ union acpi_operand_object *source_desc,
+ union acpi_operand_object **result_desc,
+ struct acpi_walk_state *walk_state)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(ex_convert_to_target_type);
+
+ /* Default behavior */
+
+ *result_desc = source_desc;
+
+ /*
+ * If required by the target,
+ * perform implicit conversion on the source before we store it.
+ */
+ switch (GET_CURRENT_ARG_TYPE(walk_state->op_info->runtime_args)) {
+ case ARGI_SIMPLE_TARGET:
+ case ARGI_FIXED_TARGET:
+ case ARGI_INTEGER_REF: /* Handles Increment, Decrement cases */
+
+ switch (destination_type) {
+ case ACPI_TYPE_LOCAL_REGION_FIELD:
+ /*
+ * Named field can always handle conversions
+ */
+ break;
+
+ default:
+ /* No conversion allowed for these types */
+
+ if (destination_type !=
+ ACPI_GET_OBJECT_TYPE(source_desc)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Explicit operator, will store (%s) over existing type (%s)\n",
+ acpi_ut_get_object_type_name
+ (source_desc),
+ acpi_ut_get_type_name
+ (destination_type)));
+ status = AE_TYPE;
+ }
+ }
+ break;
+
+ case ARGI_TARGETREF:
+
+ switch (destination_type) {
+ case ACPI_TYPE_INTEGER:
+ case ACPI_TYPE_BUFFER_FIELD:
+ case ACPI_TYPE_LOCAL_BANK_FIELD:
+ case ACPI_TYPE_LOCAL_INDEX_FIELD:
+ /*
+ * These types require an Integer operand. We can convert
+ * a Buffer or a String to an Integer if necessary.
+ */
+ status =
+ acpi_ex_convert_to_integer(source_desc, result_desc,
+ 16);
+ break;
+
+ case ACPI_TYPE_STRING:
+ /*
+ * The operand must be a String. We can convert an
+ * Integer or Buffer if necessary
+ */
+ status =
+ acpi_ex_convert_to_string(source_desc, result_desc,
+ ACPI_IMPLICIT_CONVERT_HEX);
+ break;
+
+ case ACPI_TYPE_BUFFER:
+ /*
+ * The operand must be a Buffer. We can convert an
+ * Integer or String if necessary
+ */
+ status =
+ acpi_ex_convert_to_buffer(source_desc, result_desc);
+ break;
+
+ default:
+ ACPI_ERROR((AE_INFO,
+ "Bad destination type during conversion: %X",
+ destination_type));
+ status = AE_AML_INTERNAL;
+ break;
+ }
+ break;
+
+ case ARGI_REFERENCE:
+ /*
+ * create_xxxx_field cases - we are storing the field object into the name
+ */
+ break;
+
+ default:
+ ACPI_ERROR((AE_INFO,
+ "Unknown Target type ID 0x%X AmlOpcode %X DestType %s",
+ GET_CURRENT_ARG_TYPE(walk_state->op_info->
+ runtime_args),
+ walk_state->opcode,
+ acpi_ut_get_type_name(destination_type)));
+ status = AE_AML_INTERNAL;
+ }
+
+ /*
+ * Source-to-Target conversion semantics:
+ *
+ * If conversion to the target type cannot be performed, then simply
+ * overwrite the target with the new object and type.
+ */
+ if (status == AE_TYPE) {
+ status = AE_OK;
+ }
+
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/executer/excreate.c b/drivers/acpi/executer/excreate.c
new file mode 100644
index 0000000..ad09696
--- /dev/null
+++ b/drivers/acpi/executer/excreate.c
@@ -0,0 +1,521 @@
+/******************************************************************************
+ *
+ * Module Name: excreate - Named object creation
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acinterp.h>
+#include <acpi/amlcode.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("excreate")
+#ifndef ACPI_NO_METHOD_EXECUTION
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_create_alias
+ *
+ * PARAMETERS: walk_state - Current state, contains operands
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create a new named alias
+ *
+ ******************************************************************************/
+acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state)
+{
+ struct acpi_namespace_node *target_node;
+ struct acpi_namespace_node *alias_node;
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(ex_create_alias);
+
+ /* Get the source/alias operands (both namespace nodes) */
+
+ alias_node = (struct acpi_namespace_node *)walk_state->operands[0];
+ target_node = (struct acpi_namespace_node *)walk_state->operands[1];
+
+ if ((target_node->type == ACPI_TYPE_LOCAL_ALIAS) ||
+ (target_node->type == ACPI_TYPE_LOCAL_METHOD_ALIAS)) {
+ /*
+ * Dereference an existing alias so that we don't create a chain
+ * of aliases. With this code, we guarantee that an alias is
+ * always exactly one level of indirection away from the
+ * actual aliased name.
+ */
+ target_node =
+ ACPI_CAST_PTR(struct acpi_namespace_node,
+ target_node->object);
+ }
+
+ /*
+ * For objects that can never change (i.e., the NS node will
+ * permanently point to the same object), we can simply attach
+ * the object to the new NS node. For other objects (such as
+ * Integers, buffers, etc.), we have to point the Alias node
+ * to the original Node.
+ */
+ switch (target_node->type) {
+
+ /* For these types, the sub-object can change dynamically via a Store */
+
+ case ACPI_TYPE_INTEGER:
+ case ACPI_TYPE_STRING:
+ case ACPI_TYPE_BUFFER:
+ case ACPI_TYPE_PACKAGE:
+ case ACPI_TYPE_BUFFER_FIELD:
+
+ /*
+ * These types open a new scope, so we need the NS node in order to access
+ * any children.
+ */
+ case ACPI_TYPE_DEVICE:
+ case ACPI_TYPE_POWER:
+ case ACPI_TYPE_PROCESSOR:
+ case ACPI_TYPE_THERMAL:
+ case ACPI_TYPE_LOCAL_SCOPE:
+
+ /*
+ * The new alias has the type ALIAS and points to the original
+ * NS node, not the object itself.
+ */
+ alias_node->type = ACPI_TYPE_LOCAL_ALIAS;
+ alias_node->object =
+ ACPI_CAST_PTR(union acpi_operand_object, target_node);
+ break;
+
+ case ACPI_TYPE_METHOD:
+
+ /*
+ * Control method aliases need to be differentiated
+ */
+ alias_node->type = ACPI_TYPE_LOCAL_METHOD_ALIAS;
+ alias_node->object =
+ ACPI_CAST_PTR(union acpi_operand_object, target_node);
+ break;
+
+ default:
+
+ /* Attach the original source object to the new Alias Node */
+
+ /*
+ * The new alias assumes the type of the target, and it points
+ * to the same object. The reference count of the object is
+ * incremented to prevent deletion out from under either the
+ * target node or the alias Node
+ */
+ status = acpi_ns_attach_object(alias_node,
+ acpi_ns_get_attached_object
+ (target_node),
+ target_node->type);
+ break;
+ }
+
+ /* Since both operands are Nodes, we don't need to delete them */
+
+ return_ACPI_STATUS(status);
+}
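
A standalone sketch of the "aliases are always exactly one level deep" rule enforced above (the struct and field names are invented for illustration):

#include <stdio.h>

/* Sketch: if the target is itself an alias, dereference it once before
 * linking the new alias, so chains of aliases can never form. */
struct node {
	const char *name;
	int is_alias;
	struct node *target;	/* only meaningful when is_alias is set */
};

static void create_alias(struct node *alias, struct node *target)
{
	if (target->is_alias)
		target = target->target;	/* never chain aliases */

	alias->is_alias = 1;
	alias->target = target;
}

int main(void)
{
	struct node real = { "REAL", 0, NULL };
	struct node a1 = { "A1", 0, NULL };
	struct node a2 = { "A2", 0, NULL };

	create_alias(&a1, &real);
	create_alias(&a2, &a1);	/* a2 ends up pointing straight at REAL */

	printf("A2 -> %s\n", a2.target->name);
	return 0;
}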
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_create_event
+ *
+ * PARAMETERS: walk_state - Current state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create a new event object
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_create_event(struct acpi_walk_state *walk_state)
+{
+ acpi_status status;
+ union acpi_operand_object *obj_desc;
+
+ ACPI_FUNCTION_TRACE(ex_create_event);
+
+ obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_EVENT);
+ if (!obj_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ /*
+ * Create the actual OS semaphore, with zero initial units -- meaning
+ * that the event is created in an unsignalled state
+ */
+ status = acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0,
+ &obj_desc->event.os_semaphore);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ /* Attach object to the Node */
+
+ status =
+ acpi_ns_attach_object((struct acpi_namespace_node *)walk_state->
+ operands[0], obj_desc, ACPI_TYPE_EVENT);
+
+ cleanup:
+ /*
+ * Remove local reference to the object (on error, will cause deletion
+ * of both object and semaphore if present.)
+ */
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_create_mutex
+ *
+ * PARAMETERS: walk_state - Current state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create a new mutex object
+ *
+ * Mutex (Name[0], sync_level[1])
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
+{
+ acpi_status status = AE_OK;
+ union acpi_operand_object *obj_desc;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_create_mutex, ACPI_WALK_OPERANDS);
+
+ /* Create the new mutex object */
+
+ obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX);
+ if (!obj_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ /* Create the actual OS Mutex */
+
+ status = acpi_os_create_mutex(&obj_desc->mutex.os_mutex);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ /* Init object and attach to NS node */
+
+ obj_desc->mutex.sync_level =
+ (u8) walk_state->operands[1]->integer.value;
+ obj_desc->mutex.node =
+ (struct acpi_namespace_node *)walk_state->operands[0];
+
+ status =
+ acpi_ns_attach_object(obj_desc->mutex.node, obj_desc,
+ ACPI_TYPE_MUTEX);
+
+ cleanup:
+ /*
+ * Remove local reference to the object (on error, will cause deletion
+ * of both object and semaphore if present.)
+ */
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_create_region
+ *
+ * PARAMETERS: aml_start - Pointer to the region declaration AML
+ * aml_length - Max length of the declaration AML
+ * region_space - space_iD for the region
+ * walk_state - Current state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create a new operation region object
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_create_region(u8 * aml_start,
+ u32 aml_length,
+ u8 region_space, struct acpi_walk_state *walk_state)
+{
+ acpi_status status;
+ union acpi_operand_object *obj_desc;
+ struct acpi_namespace_node *node;
+ union acpi_operand_object *region_obj2;
+
+ ACPI_FUNCTION_TRACE(ex_create_region);
+
+ /* Get the Namespace Node */
+
+ node = walk_state->op->common.node;
+
+ /*
+ * If the region object is already attached to this node,
+ * just return
+ */
+ if (acpi_ns_get_attached_object(node)) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Space ID must be one of the predefined IDs, or in the user-defined
+ * range
+ */
+ if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
+ (region_space < ACPI_USER_REGION_BEGIN)) {
+ ACPI_ERROR((AE_INFO, "Invalid AddressSpace type %X",
+ region_space));
+ return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (%X)\n",
+ acpi_ut_get_region_name(region_space), region_space));
+
+ /* Create the region descriptor */
+
+ obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_REGION);
+ if (!obj_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ /*
+ * Remember location in AML stream of address & length
+ * operands since they need to be evaluated at run time.
+ */
+ region_obj2 = obj_desc->common.next_object;
+ region_obj2->extra.aml_start = aml_start;
+ region_obj2->extra.aml_length = aml_length;
+
+ /* Init the region from the operands */
+
+ obj_desc->region.space_id = region_space;
+ obj_desc->region.address = 0;
+ obj_desc->region.length = 0;
+ obj_desc->region.node = node;
+
+ /* Install the new region object in the parent Node */
+
+ status = acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_REGION);
+
+ cleanup:
+
+ /* Remove local reference to the object */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_create_processor
+ *
+ * PARAMETERS: walk_state - Current state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create a new processor object and populate the fields
+ *
+ * Processor (Name[0], cpu_id[1], pblock_addr[2], pblock_length[3])
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_create_processor(struct acpi_walk_state *walk_state)
+{
+ union acpi_operand_object **operand = &walk_state->operands[0];
+ union acpi_operand_object *obj_desc;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_create_processor, walk_state);
+
+ /* Create the processor object */
+
+ obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_PROCESSOR);
+ if (!obj_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Initialize the processor object from the operands */
+
+ obj_desc->processor.proc_id = (u8) operand[1]->integer.value;
+ obj_desc->processor.length = (u8) operand[3]->integer.value;
+ obj_desc->processor.address =
+ (acpi_io_address) operand[2]->integer.value;
+
+ /* Install the processor object in the parent Node */
+
+ status = acpi_ns_attach_object((struct acpi_namespace_node *)operand[0],
+ obj_desc, ACPI_TYPE_PROCESSOR);
+
+ /* Remove local reference to the object */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_create_power_resource
+ *
+ * PARAMETERS: walk_state - Current state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create a new power_resource object and populate the fields
+ *
+ * power_resource (Name[0], system_level[1], resource_order[2])
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_create_power_resource(struct acpi_walk_state *walk_state)
+{
+ union acpi_operand_object **operand = &walk_state->operands[0];
+ acpi_status status;
+ union acpi_operand_object *obj_desc;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_create_power_resource, walk_state);
+
+ /* Create the power resource object */
+
+ obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_POWER);
+ if (!obj_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Initialize the power object from the operands */
+
+ obj_desc->power_resource.system_level = (u8) operand[1]->integer.value;
+ obj_desc->power_resource.resource_order =
+ (u16) operand[2]->integer.value;
+
+ /* Install the power resource object in the parent Node */
+
+ status = acpi_ns_attach_object((struct acpi_namespace_node *)operand[0],
+ obj_desc, ACPI_TYPE_POWER);
+
+ /* Remove local reference to the object */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_create_method
+ *
+ * PARAMETERS: aml_start - First byte of the method's AML
+ * aml_length - AML byte count for this method
+ * walk_state - Current state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create a new method object
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_create_method(u8 * aml_start,
+ u32 aml_length, struct acpi_walk_state *walk_state)
+{
+ union acpi_operand_object **operand = &walk_state->operands[0];
+ union acpi_operand_object *obj_desc;
+ acpi_status status;
+ u8 method_flags;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_create_method, walk_state);
+
+ /* Create a new method object */
+
+ obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_METHOD);
+ if (!obj_desc) {
+ status = AE_NO_MEMORY;
+ goto exit;
+ }
+
+ /* Save the method's AML pointer and length */
+
+ obj_desc->method.aml_start = aml_start;
+ obj_desc->method.aml_length = aml_length;
+
+ /*
+ * Disassemble the method flags. Split off the Arg Count
+ * for efficiency
+ */
+ method_flags = (u8) operand[1]->integer.value;
+
+ obj_desc->method.method_flags =
+ (u8) (method_flags & ~AML_METHOD_ARG_COUNT);
+ obj_desc->method.param_count =
+ (u8) (method_flags & AML_METHOD_ARG_COUNT);
+
+ /*
+ * Get the sync_level. If method is serialized, a mutex will be
+ * created for this method when it is parsed.
+ */
+ if (method_flags & AML_METHOD_SERIALIZED) {
+ /*
+ * ACPI 1.0: sync_level = 0
+ * ACPI 2.0: sync_level = sync_level in method declaration
+ */
+ obj_desc->method.sync_level = (u8)
+ ((method_flags & AML_METHOD_SYNCH_LEVEL) >> 4);
+ }
+
+ /* Attach the new object to the method Node */
+
+ status = acpi_ns_attach_object((struct acpi_namespace_node *)operand[0],
+ obj_desc, ACPI_TYPE_METHOD);
+
+ /* Remove local reference to the object */
+
+ acpi_ut_remove_reference(obj_desc);
+
+ exit:
+ /* Remove a reference to the operand */
+
+ acpi_ut_remove_reference(operand[1]);
+ return_ACPI_STATUS(status);
+}
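
The flag handling above splits one AML MethodFlags byte; a small sketch with mask values mirroring the AML encoding (bits 0-2 argument count, bit 3 serialized, bits 4-7 sync level - the macro names below are stand-ins for the amlcode.h definitions):

#include <stdint.h>
#include <stdio.h>

/* Sketch of the MethodFlags byte layout */
#define METHOD_ARG_COUNT	0x07	/* bits 0-2 */
#define METHOD_SERIALIZED	0x08	/* bit 3 */
#define METHOD_SYNC_LEVEL	0xF0	/* bits 4-7 */

int main(void)
{
	uint8_t method_flags = 0x3A;	/* example: 2 args, serialized, sync level 3 */

	printf("param_count = %u\n", (unsigned int)(method_flags & METHOD_ARG_COUNT));
	printf("serialized  = %u\n", (method_flags & METHOD_SERIALIZED) ? 1u : 0u);
	printf("sync_level  = %u\n", (unsigned int)((method_flags & METHOD_SYNC_LEVEL) >> 4));
	return 0;
}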
diff --git a/drivers/acpi/executer/exdump.c b/drivers/acpi/executer/exdump.c
new file mode 100644
index 0000000..d087a7d
--- /dev/null
+++ b/drivers/acpi/executer/exdump.c
@@ -0,0 +1,1059 @@
+/******************************************************************************
+ *
+ * Module Name: exdump - Interpreter debug output routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acinterp.h>
+#include <acpi/amlcode.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exdump")
+
+/*
+ * The following routines are used for debug output only
+ */
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+/* Local prototypes */
+static void acpi_ex_out_string(char *title, char *value);
+
+static void acpi_ex_out_pointer(char *title, void *value);
+
+static void
+acpi_ex_dump_object(union acpi_operand_object *obj_desc,
+ struct acpi_exdump_info *info);
+
+static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc);
+
+static void
+acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
+ u32 level, u32 index);
+
+/*******************************************************************************
+ *
+ * Object Descriptor info tables
+ *
+ * Note: The first table entry must be an INIT opcode and must contain
+ * the table length (number of table entries)
+ *
+ ******************************************************************************/
+
+static struct acpi_exdump_info acpi_ex_dump_integer[2] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_integer), NULL},
+ {ACPI_EXD_UINT64, ACPI_EXD_OFFSET(integer.value), "Value"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_string[4] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_string), NULL},
+ {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(string.length), "Length"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(string.pointer), "Pointer"},
+ {ACPI_EXD_STRING, 0, NULL}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_buffer[5] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_buffer), NULL},
+ {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(buffer.length), "Length"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.pointer), "Pointer"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.node), "Parent Node"},
+ {ACPI_EXD_BUFFER, 0, NULL}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_package[5] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_package), NULL},
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(package.flags), "Flags"},
+ {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(package.count), "Elements"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(package.elements), "Element List"},
+ {ACPI_EXD_PACKAGE, 0, NULL}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_device[4] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_device), NULL},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.handler), "Handler"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.system_notify),
+ "System Notify"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.device_notify),
+ "Device Notify"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_event[2] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_event), NULL},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(event.os_semaphore), "OsSemaphore"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_method[8] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_method), NULL},
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count), "ParamCount"},
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.sync_level), "Sync Level"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.mutex), "Mutex"},
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.owner_id), "Owner Id"},
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.thread_count), "Thread Count"},
+ {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(method.aml_length), "Aml Length"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.aml_start), "Aml Start"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_mutex[5] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_mutex), NULL},
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(mutex.sync_level), "Sync Level"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread), "Owner Thread"},
+ {ACPI_EXD_UINT16, ACPI_EXD_OFFSET(mutex.acquisition_depth),
+ "Acquire Depth"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.os_mutex), "OsMutex"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_region[7] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_region), NULL},
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(region.space_id), "Space Id"},
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(region.flags), "Flags"},
+ {ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(region.address), "Address"},
+ {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(region.length), "Length"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(region.handler), "Handler"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(region.next), "Next"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_power[5] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_power), NULL},
+ {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(power_resource.system_level),
+ "System Level"},
+ {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(power_resource.resource_order),
+ "Resource Order"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.system_notify),
+ "System Notify"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.device_notify),
+ "Device Notify"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_processor[7] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_processor), NULL},
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.proc_id), "Processor ID"},
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.length), "Length"},
+ {ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(processor.address), "Address"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.system_notify),
+ "System Notify"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.device_notify),
+ "Device Notify"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.handler), "Handler"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_thermal[4] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_thermal), NULL},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.system_notify),
+ "System Notify"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.device_notify),
+ "Device Notify"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.handler), "Handler"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_buffer_field[3] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_buffer_field), NULL},
+ {ACPI_EXD_FIELD, 0, NULL},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer_field.buffer_obj),
+ "Buffer Object"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_region_field[3] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_region_field), NULL},
+ {ACPI_EXD_FIELD, 0, NULL},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.region_obj), "Region Object"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_bank_field[5] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_bank_field), NULL},
+ {ACPI_EXD_FIELD, 0, NULL},
+ {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(bank_field.value), "Value"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(bank_field.region_obj),
+ "Region Object"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(bank_field.bank_obj), "Bank Object"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_index_field[5] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_index_field), NULL},
+ {ACPI_EXD_FIELD, 0, NULL},
+ {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(index_field.value), "Value"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(index_field.index_obj),
+ "Index Object"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(index_field.data_obj), "Data Object"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_reference[8] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_reference), NULL},
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(reference.class), "Class"},
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(reference.target_type), "Target Type"},
+ {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(reference.value), "Value"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.object), "Object Desc"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.node), "Node"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.where), "Where"},
+ {ACPI_EXD_REFERENCE, 0, NULL}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_address_handler[6] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_address_handler),
+ NULL},
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(address_space.space_id), "Space Id"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.next), "Next"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.region_list),
+ "Region List"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.node), "Node"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.context), "Context"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_notify[3] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_notify), NULL},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.node), "Node"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.context), "Context"}
+};
+
+/* Miscellaneous tables */
+
+static struct acpi_exdump_info acpi_ex_dump_common[4] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_common), NULL},
+ {ACPI_EXD_TYPE, 0, NULL},
+ {ACPI_EXD_UINT16, ACPI_EXD_OFFSET(common.reference_count),
+ "Reference Count"},
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common.flags), "Flags"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_field_common[7] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_field_common), NULL},
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common_field.field_flags),
+ "Field Flags"},
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common_field.access_byte_width),
+ "Access Byte Width"},
+ {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(common_field.bit_length),
+ "Bit Length"},
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common_field.start_field_bit_offset),
+ "Field Bit Offset"},
+ {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(common_field.base_byte_offset),
+ "Base Byte Offset"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(common_field.node), "Parent Node"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_node[5] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_node), NULL},
+ {ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(flags), "Flags"},
+ {ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(owner_id), "Owner Id"},
+ {ACPI_EXD_POINTER, ACPI_EXD_NSOFFSET(child), "Child List"},
+ {ACPI_EXD_POINTER, ACPI_EXD_NSOFFSET(peer), "Next Peer"}
+};
+
+/* Dispatch table, indexed by object type */
+
+static struct acpi_exdump_info *acpi_ex_dump_info[] = {
+ NULL,
+ acpi_ex_dump_integer,
+ acpi_ex_dump_string,
+ acpi_ex_dump_buffer,
+ acpi_ex_dump_package,
+ NULL,
+ acpi_ex_dump_device,
+ acpi_ex_dump_event,
+ acpi_ex_dump_method,
+ acpi_ex_dump_mutex,
+ acpi_ex_dump_region,
+ acpi_ex_dump_power,
+ acpi_ex_dump_processor,
+ acpi_ex_dump_thermal,
+ acpi_ex_dump_buffer_field,
+ NULL,
+ NULL,
+ acpi_ex_dump_region_field,
+ acpi_ex_dump_bank_field,
+ acpi_ex_dump_index_field,
+ acpi_ex_dump_reference,
+ NULL,
+ NULL,
+ acpi_ex_dump_notify,
+ acpi_ex_dump_address_handler,
+ NULL,
+ NULL,
+ NULL
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_dump_object
+ *
+ * PARAMETERS: obj_desc - Descriptor to dump
+ * Info - Info table corresponding to this object
+ * type
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Walk the info table for this object
+ *
+ ******************************************************************************/
+
+static void
+acpi_ex_dump_object(union acpi_operand_object *obj_desc,
+ struct acpi_exdump_info *info)
+{
+ u8 *target;
+ char *name;
+ u8 count;
+
+ if (!info) {
+ acpi_os_printf
+ ("ExDumpObject: Display not implemented for object type %s\n",
+ acpi_ut_get_object_type_name(obj_desc));
+ return;
+ }
+
+ /* First table entry must contain the table length (# of table entries) */
+
+ count = info->offset;
+
+ while (count) {
+ target = ACPI_ADD_PTR(u8, obj_desc, info->offset);
+ name = info->name;
+
+ switch (info->opcode) {
+ case ACPI_EXD_INIT:
+ break;
+
+ case ACPI_EXD_TYPE:
+ acpi_ex_out_string("Type",
+ acpi_ut_get_object_type_name
+ (obj_desc));
+ break;
+
+ case ACPI_EXD_UINT8:
+
+ acpi_os_printf("%20s : %2.2X\n", name, *target);
+ break;
+
+ case ACPI_EXD_UINT16:
+
+ acpi_os_printf("%20s : %4.4X\n", name,
+ ACPI_GET16(target));
+ break;
+
+ case ACPI_EXD_UINT32:
+
+ acpi_os_printf("%20s : %8.8X\n", name,
+ ACPI_GET32(target));
+ break;
+
+ case ACPI_EXD_UINT64:
+
+ acpi_os_printf("%20s : %8.8X%8.8X\n", "Value",
+ ACPI_FORMAT_UINT64(ACPI_GET64(target)));
+ break;
+
+ case ACPI_EXD_POINTER:
+ case ACPI_EXD_ADDRESS:
+
+ acpi_ex_out_pointer(name,
+ *ACPI_CAST_PTR(void *, target));
+ break;
+
+ case ACPI_EXD_STRING:
+
+ acpi_ut_print_string(obj_desc->string.pointer,
+ ACPI_UINT8_MAX);
+ acpi_os_printf("\n");
+ break;
+
+ case ACPI_EXD_BUFFER:
+
+ ACPI_DUMP_BUFFER(obj_desc->buffer.pointer,
+ obj_desc->buffer.length);
+ break;
+
+ case ACPI_EXD_PACKAGE:
+
+ /* Dump the package contents */
+
+ acpi_os_printf("\nPackage Contents:\n");
+ acpi_ex_dump_package_obj(obj_desc, 0, 0);
+ break;
+
+ case ACPI_EXD_FIELD:
+
+ acpi_ex_dump_object(obj_desc,
+ acpi_ex_dump_field_common);
+ break;
+
+ case ACPI_EXD_REFERENCE:
+
+ acpi_ex_out_string("Class Name",
+ (char *)
+ acpi_ut_get_reference_name
+ (obj_desc));
+ acpi_ex_dump_reference_obj(obj_desc);
+ break;
+
+ default:
+ acpi_os_printf("**** Invalid table opcode [%X] ****\n",
+ info->opcode);
+ return;
+ }
+
+ info++;
+ count--;
+ }
+}
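
The dump routine above is table-driven: the first info entry carries the entry count, and each remaining entry pairs a field offset with a formatting opcode. A minimal standalone sketch of the same pattern (every name below is hypothetical):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sample {
	uint32_t length;
	uint8_t flags;
};

enum dump_op { DUMP_INIT, DUMP_UINT32, DUMP_UINT8 };

struct dump_info {
	enum dump_op op;
	size_t offset;		/* for DUMP_INIT: number of table entries */
	const char *name;
};

static const struct dump_info sample_info[] = {
	{DUMP_INIT, 3, NULL},	/* first entry holds the table length */
	{DUMP_UINT32, offsetof(struct sample, length), "Length"},
	{DUMP_UINT8, offsetof(struct sample, flags), "Flags"},
};

static void dump_object(const void *obj, const struct dump_info *info)
{
	size_t count = info->offset;	/* taken from the INIT entry */

	while (count--) {
		const uint8_t *target = (const uint8_t *)obj + info->offset;

		switch (info->op) {
		case DUMP_INIT:
			break;
		case DUMP_UINT32:
			printf("%20s : %8.8X\n", info->name,
			       (unsigned int)*(const uint32_t *)target);
			break;
		case DUMP_UINT8:
			printf("%20s : %2.2X\n", info->name, (unsigned int)*target);
			break;
		}
		info++;
	}
}

int main(void)
{
	struct sample s = { 0x1234, 0x5A };

	dump_object(&s, sample_info);
	return 0;
}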
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_dump_operand
+ *
+ * PARAMETERS: *obj_desc - Pointer to entry to be dumped
+ * Depth - Current nesting depth
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Dump an operand object
+ *
+ ******************************************************************************/
+
+void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
+{
+ u32 length;
+ u32 index;
+
+ ACPI_FUNCTION_NAME(ex_dump_operand)
+
+ if (!((ACPI_LV_EXEC & acpi_dbg_level)
+ && (_COMPONENT & acpi_dbg_layer))) {
+ return;
+ }
+
+ if (!obj_desc) {
+
+ /* This could be a null element of a package */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Null Object Descriptor\n"));
+ return;
+ }
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_NAMED) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%p Namespace Node: ",
+ obj_desc));
+ ACPI_DUMP_ENTRY(obj_desc, ACPI_LV_EXEC);
+ return;
+ }
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "%p is not a node or operand object: [%s]\n",
+ obj_desc,
+ acpi_ut_get_descriptor_name(obj_desc)));
+ ACPI_DUMP_BUFFER(obj_desc, sizeof(union acpi_operand_object));
+ return;
+ }
+
+ /* obj_desc is a valid object */
+
+ if (depth > 0) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%*s[%u] %p ",
+ depth, " ", depth, obj_desc));
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%p ", obj_desc));
+ }
+
+ /* Decode object type */
+
+ switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+ case ACPI_TYPE_LOCAL_REFERENCE:
+
+ acpi_os_printf("Reference: [%s] ",
+ acpi_ut_get_reference_name(obj_desc));
+
+ switch (obj_desc->reference.class) {
+ case ACPI_REFCLASS_DEBUG:
+
+ acpi_os_printf("\n");
+ break;
+
+ case ACPI_REFCLASS_INDEX:
+
+ acpi_os_printf("%p\n", obj_desc->reference.object);
+ break;
+
+ case ACPI_REFCLASS_TABLE:
+
+ acpi_os_printf("Table Index %X\n",
+ obj_desc->reference.value);
+ break;
+
+ case ACPI_REFCLASS_REFOF:
+
+ acpi_os_printf("%p [%s]\n", obj_desc->reference.object,
+ acpi_ut_get_type_name(((union
+ acpi_operand_object
+ *)
+ obj_desc->
+ reference.
+ object)->common.
+ type));
+ break;
+
+ case ACPI_REFCLASS_ARG:
+
+ acpi_os_printf("%X", obj_desc->reference.value);
+
+ if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
+
+ /* Value is an Integer */
+
+ acpi_os_printf(" value is [%8.8X%8.8x]",
+ ACPI_FORMAT_UINT64(obj_desc->
+ integer.
+ value));
+ }
+
+ acpi_os_printf("\n");
+ break;
+
+ case ACPI_REFCLASS_LOCAL:
+
+ acpi_os_printf("%X", obj_desc->reference.value);
+
+ if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
+
+ /* Value is an Integer */
+
+ acpi_os_printf(" value is [%8.8X%8.8x]",
+ ACPI_FORMAT_UINT64(obj_desc->
+ integer.
+ value));
+ }
+
+ acpi_os_printf("\n");
+ break;
+
+ case ACPI_REFCLASS_NAME:
+
+ acpi_os_printf("- [%4.4s]\n",
+ obj_desc->reference.node->name.ascii);
+ break;
+
+ default: /* Unknown reference class */
+
+ acpi_os_printf("%2.2X\n", obj_desc->reference.class);
+ break;
+ }
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ acpi_os_printf("Buffer length %.2X @ %p\n",
+ obj_desc->buffer.length,
+ obj_desc->buffer.pointer);
+
+ /* Debug only -- dump the buffer contents */
+
+ if (obj_desc->buffer.pointer) {
+ length = obj_desc->buffer.length;
+ if (length > 128) {
+ length = 128;
+ }
+
+ acpi_os_printf
+ ("Buffer Contents: (displaying length 0x%.2X)\n",
+ length);
+ ACPI_DUMP_BUFFER(obj_desc->buffer.pointer, length);
+ }
+ break;
+
+ case ACPI_TYPE_INTEGER:
+
+ acpi_os_printf("Integer %8.8X%8.8X\n",
+ ACPI_FORMAT_UINT64(obj_desc->integer.value));
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+
+ acpi_os_printf("Package [Len %X] ElementArray %p\n",
+ obj_desc->package.count,
+ obj_desc->package.elements);
+
+ /*
+ * If elements exist, package element pointer is valid,
+ * and debug_level exceeds 1, dump package's elements.
+ */
+ if (obj_desc->package.count &&
+ obj_desc->package.elements && acpi_dbg_level > 1) {
+ for (index = 0; index < obj_desc->package.count;
+ index++) {
+ acpi_ex_dump_operand(obj_desc->package.
+ elements[index],
+ depth + 1);
+ }
+ }
+ break;
+
+ case ACPI_TYPE_REGION:
+
+ acpi_os_printf("Region %s (%X)",
+ acpi_ut_get_region_name(obj_desc->region.
+ space_id),
+ obj_desc->region.space_id);
+
+ /*
+ * If the address and length have not been evaluated,
+ * don't print them.
+ */
+ if (!(obj_desc->region.flags & AOPOBJ_DATA_VALID)) {
+ acpi_os_printf("\n");
+ } else {
+ acpi_os_printf(" base %8.8X%8.8X Length %X\n",
+ ACPI_FORMAT_NATIVE_UINT(obj_desc->region.
+ address),
+ obj_desc->region.length);
+ }
+ break;
+
+ case ACPI_TYPE_STRING:
+
+ acpi_os_printf("String length %X @ %p ",
+ obj_desc->string.length,
+ obj_desc->string.pointer);
+
+ acpi_ut_print_string(obj_desc->string.pointer, ACPI_UINT8_MAX);
+ acpi_os_printf("\n");
+ break;
+
+ case ACPI_TYPE_LOCAL_BANK_FIELD:
+
+ acpi_os_printf("BankField\n");
+ break;
+
+ case ACPI_TYPE_LOCAL_REGION_FIELD:
+
+ acpi_os_printf
+ ("RegionField: Bits=%X AccWidth=%X Lock=%X Update=%X at byte=%X bit=%X of below:\n",
+ obj_desc->field.bit_length,
+ obj_desc->field.access_byte_width,
+ obj_desc->field.field_flags & AML_FIELD_LOCK_RULE_MASK,
+ obj_desc->field.field_flags & AML_FIELD_UPDATE_RULE_MASK,
+ obj_desc->field.base_byte_offset,
+ obj_desc->field.start_field_bit_offset);
+
+ acpi_ex_dump_operand(obj_desc->field.region_obj, depth + 1);
+ break;
+
+ case ACPI_TYPE_LOCAL_INDEX_FIELD:
+
+ acpi_os_printf("IndexField\n");
+ break;
+
+ case ACPI_TYPE_BUFFER_FIELD:
+
+ acpi_os_printf("BufferField: %X bits at byte %X bit %X of\n",
+ obj_desc->buffer_field.bit_length,
+ obj_desc->buffer_field.base_byte_offset,
+ obj_desc->buffer_field.start_field_bit_offset);
+
+ if (!obj_desc->buffer_field.buffer_obj) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "*NULL*\n"));
+ } else
+ if (ACPI_GET_OBJECT_TYPE(obj_desc->buffer_field.buffer_obj)
+ != ACPI_TYPE_BUFFER) {
+ acpi_os_printf("*not a Buffer*\n");
+ } else {
+ acpi_ex_dump_operand(obj_desc->buffer_field.buffer_obj,
+ depth + 1);
+ }
+ break;
+
+ case ACPI_TYPE_EVENT:
+
+ acpi_os_printf("Event\n");
+ break;
+
+ case ACPI_TYPE_METHOD:
+
+ acpi_os_printf("Method(%X) @ %p:%X\n",
+ obj_desc->method.param_count,
+ obj_desc->method.aml_start,
+ obj_desc->method.aml_length);
+ break;
+
+ case ACPI_TYPE_MUTEX:
+
+ acpi_os_printf("Mutex\n");
+ break;
+
+ case ACPI_TYPE_DEVICE:
+
+ acpi_os_printf("Device\n");
+ break;
+
+ case ACPI_TYPE_POWER:
+
+ acpi_os_printf("Power\n");
+ break;
+
+ case ACPI_TYPE_PROCESSOR:
+
+ acpi_os_printf("Processor\n");
+ break;
+
+ case ACPI_TYPE_THERMAL:
+
+ acpi_os_printf("Thermal\n");
+ break;
+
+ default:
+ /* Unknown Type */
+
+ acpi_os_printf("Unknown Type %X\n",
+ ACPI_GET_OBJECT_TYPE(obj_desc));
+ break;
+ }
+
+ return;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_dump_operands
+ *
+ * PARAMETERS: Operands - A list of Operand objects
+ * opcode_name - AML opcode name
+ * num_operands - Operand count for this opcode
+ *
+ * DESCRIPTION: Dump the operands associated with the opcode
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_dump_operands(union acpi_operand_object **operands,
+ const char *opcode_name, u32 num_operands)
+{
+ ACPI_FUNCTION_NAME(ex_dump_operands);
+
+ if (!opcode_name) {
+ opcode_name = "UNKNOWN";
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "**** Start operand dump for opcode [%s], %d operands\n",
+ opcode_name, num_operands));
+
+ if (num_operands == 0) {
+ num_operands = 1;
+ }
+
+ /* Dump the individual operands */
+
+ while (num_operands) {
+ acpi_ex_dump_operand(*operands, 0);
+ operands++;
+ num_operands--;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "**** End operand dump for [%s]\n", opcode_name));
+ return;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_out* functions
+ *
+ * PARAMETERS: Title - Descriptive text
+ * Value - Value to be displayed
+ *
+ * DESCRIPTION: Object dump output formatting functions. These functions
+ * reduce the number of format strings required and keep them
+ * all in one place for easy modification.
+ *
+ ******************************************************************************/
+
+static void acpi_ex_out_string(char *title, char *value)
+{
+ acpi_os_printf("%20s : %s\n", title, value);
+}
+
+static void acpi_ex_out_pointer(char *title, void *value)
+{
+ acpi_os_printf("%20s : %p\n", title, value);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_dump_namespace_node
+ *
+ * PARAMETERS: Node - Descriptor to dump
+ * Flags - Force display if TRUE
+ *
+ * DESCRIPTION: Dumps the members of the given Node
+ *
+ ******************************************************************************/
+
+void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags)
+{
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (!flags) {
+ if (!((ACPI_LV_OBJECTS & acpi_dbg_level)
+ && (_COMPONENT & acpi_dbg_layer))) {
+ return;
+ }
+ }
+
+ acpi_os_printf("%20s : %4.4s\n", "Name", acpi_ut_get_node_name(node));
+ acpi_ex_out_string("Type", acpi_ut_get_type_name(node->type));
+ acpi_ex_out_pointer("Attached Object",
+ acpi_ns_get_attached_object(node));
+ acpi_ex_out_pointer("Parent", acpi_ns_get_parent_node(node));
+
+ acpi_ex_dump_object(ACPI_CAST_PTR(union acpi_operand_object, node),
+ acpi_ex_dump_node);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_dump_reference_obj
+ *
+ * PARAMETERS: Object - Descriptor to dump
+ *
+ * DESCRIPTION: Dumps a reference object
+ *
+ ******************************************************************************/
+
+static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc)
+{
+ struct acpi_buffer ret_buf;
+ acpi_status status;
+
+ ret_buf.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+
+ if (obj_desc->reference.class == ACPI_REFCLASS_NAME) {
+ acpi_os_printf(" %p ", obj_desc->reference.node);
+
+ status =
+ acpi_ns_handle_to_pathname(obj_desc->reference.node,
+ &ret_buf);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf(" Could not convert name to pathname\n");
+ } else {
+ acpi_os_printf("%s\n", (char *)ret_buf.pointer);
+ ACPI_FREE(ret_buf.pointer);
+ }
+ } else if (obj_desc->reference.object) {
+ if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) ==
+ ACPI_DESC_TYPE_OPERAND) {
+ acpi_os_printf(" Target: %p",
+ obj_desc->reference.object);
+ if (obj_desc->reference.class == ACPI_REFCLASS_TABLE) {
+ acpi_os_printf(" Table Index: %X\n",
+ obj_desc->reference.value);
+ } else {
+ acpi_os_printf(" Target: %p [%s]\n",
+ obj_desc->reference.object,
+ acpi_ut_get_type_name(((union
+ acpi_operand_object
+ *)
+ obj_desc->
+ reference.
+ object)->
+ common.
+ type));
+ }
+ } else {
+ acpi_os_printf(" Target: %p\n",
+ obj_desc->reference.object);
+ }
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_dump_package_obj
+ *
+ * PARAMETERS: obj_desc - Descriptor to dump
+ * Level - Indentation Level
+ * Index - Package index for this object
+ *
+ * DESCRIPTION: Dumps the elements of the package
+ *
+ ******************************************************************************/
+
+static void
+acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
+ u32 level, u32 index)
+{
+ u32 i;
+
+ /* Indentation and index output */
+
+ if (level > 0) {
+ for (i = 0; i < level; i++) {
+ acpi_os_printf(" ");
+ }
+
+ acpi_os_printf("[%.2d] ", index);
+ }
+
+ acpi_os_printf("%p ", obj_desc);
+
+ /* Null package elements are allowed */
+
+ if (!obj_desc) {
+ acpi_os_printf("[Null Object]\n");
+ return;
+ }
+
+ /* Packages may only contain a few object types */
+
+ switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+ case ACPI_TYPE_INTEGER:
+
+ acpi_os_printf("[Integer] = %8.8X%8.8X\n",
+ ACPI_FORMAT_UINT64(obj_desc->integer.value));
+ break;
+
+ case ACPI_TYPE_STRING:
+
+ acpi_os_printf("[String] Value: ");
+ for (i = 0; i < obj_desc->string.length; i++) {
+ acpi_os_printf("%c", obj_desc->string.pointer[i]);
+ }
+ acpi_os_printf("\n");
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ acpi_os_printf("[Buffer] Length %.2X = ",
+ obj_desc->buffer.length);
+ if (obj_desc->buffer.length) {
+ acpi_ut_dump_buffer(ACPI_CAST_PTR
+ (u8, obj_desc->buffer.pointer),
+ obj_desc->buffer.length,
+ DB_DWORD_DISPLAY, _COMPONENT);
+ } else {
+ acpi_os_printf("\n");
+ }
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+
+ acpi_os_printf("[Package] Contains %d Elements:\n",
+ obj_desc->package.count);
+
+ for (i = 0; i < obj_desc->package.count; i++) {
+ acpi_ex_dump_package_obj(obj_desc->package.elements[i],
+ level + 1, i);
+ }
+ break;
+
+ case ACPI_TYPE_LOCAL_REFERENCE:
+
+ acpi_os_printf("[Object Reference] Type [%s] %2.2X",
+ acpi_ut_get_reference_name(obj_desc),
+ obj_desc->reference.class);
+ acpi_ex_dump_reference_obj(obj_desc);
+ break;
+
+ default:
+
+ acpi_os_printf("[Unknown Type] %X\n",
+ ACPI_GET_OBJECT_TYPE(obj_desc));
+ break;
+ }
+}
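For readers following the dump logic, the sketch below is a minimal plain-C analogue of the recursive, indented package walk performed by acpi_ex_dump_package_obj: each nesting level indents the output and prints the element index before recursing into sub-packages. The struct pkg type and helper names are invented stand-ins for the ACPICA operand objects, not real ACPICA API.

#include <stdio.h>

struct pkg {
	int is_package;          /* 1 = nested package, 0 = leaf integer */
	unsigned value;          /* leaf value */
	int count;               /* element count if a package */
	struct pkg *elements;
};

static void dump_pkg(const struct pkg *p, int level, int index)
{
	int i;

	if (level > 0) {
		for (i = 0; i < level; i++)
			printf("  ");
		printf("[%.2d] ", index);
	}

	if (!p->is_package) {
		printf("[Integer] = %X\n", p->value);
		return;
	}

	printf("[Package] Contains %d Elements:\n", p->count);
	for (i = 0; i < p->count; i++)
		dump_pkg(&p->elements[i], level + 1, i);
}

int main(void)
{
	struct pkg leaves[2] = { { 0, 0x10, 0, NULL }, { 0, 0x20, 0, NULL } };
	struct pkg root = { 1, 0, 2, leaves };

	dump_pkg(&root, 0, 0);
	return 0;
}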
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_dump_object_descriptor
+ *
+ * PARAMETERS: obj_desc - Descriptor to dump
+ * Flags - Force display if TRUE
+ *
+ * DESCRIPTION: Dumps the members of the object descriptor given.
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
+{
+ ACPI_FUNCTION_TRACE(ex_dump_object_descriptor);
+
+ if (!obj_desc) {
+ return_VOID;
+ }
+
+ if (!flags) {
+ if (!((ACPI_LV_OBJECTS & acpi_dbg_level)
+ && (_COMPONENT & acpi_dbg_layer))) {
+ return_VOID;
+ }
+ }
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_NAMED) {
+ acpi_ex_dump_namespace_node((struct acpi_namespace_node *)
+ obj_desc, flags);
+
+ acpi_os_printf("\nAttached Object (%p):\n",
+ ((struct acpi_namespace_node *)obj_desc)->
+ object);
+
+ acpi_ex_dump_object_descriptor(((struct acpi_namespace_node *)
+ obj_desc)->object, flags);
+ return_VOID;
+ }
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) {
+ acpi_os_printf
+ ("ExDumpObjectDescriptor: %p is not an ACPI operand object: [%s]\n",
+ obj_desc, acpi_ut_get_descriptor_name(obj_desc));
+ return_VOID;
+ }
+
+ if (obj_desc->common.type > ACPI_TYPE_NS_NODE_MAX) {
+ return_VOID;
+ }
+
+ /* Common Fields */
+
+ acpi_ex_dump_object(obj_desc, acpi_ex_dump_common);
+
+ /* Object-specific fields */
+
+ acpi_ex_dump_object(obj_desc, acpi_ex_dump_info[obj_desc->common.type]);
+ return_VOID;
+}
+
+#endif
diff --git a/drivers/acpi/executer/exfield.c b/drivers/acpi/executer/exfield.c
new file mode 100644
index 0000000..3e440d8
--- /dev/null
+++ b/drivers/acpi/executer/exfield.c
@@ -0,0 +1,339 @@
+/******************************************************************************
+ *
+ * Module Name: exfield - ACPI AML (p-code) execution - field manipulation
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acdispat.h>
+#include <acpi/acinterp.h>
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exfield")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_read_data_from_field
+ *
+ * PARAMETERS: walk_state - Current execution state
+ * obj_desc - The named field
+ * ret_buffer_desc - Where the return data object is stored
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Read from a named field. Returns either an Integer or a
+ * Buffer, depending on the size of the field.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
+ union acpi_operand_object *obj_desc,
+ union acpi_operand_object **ret_buffer_desc)
+{
+ acpi_status status;
+ union acpi_operand_object *buffer_desc;
+ acpi_size length;
+ void *buffer;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_read_data_from_field, obj_desc);
+
+ /* Parameter validation */
+
+ if (!obj_desc) {
+ return_ACPI_STATUS(AE_AML_NO_OPERAND);
+ }
+ if (!ret_buffer_desc) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_BUFFER_FIELD) {
+ /*
+ * If the buffer_field arguments have not been previously evaluated,
+ * evaluate them now and save the results.
+ */
+ if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) {
+ status = acpi_ds_get_buffer_field_arguments(obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+ } else
+ if ((ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_REGION_FIELD)
+ && (obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_SMBUS)) {
+ /*
+ * This is an SMBus read. We must create a buffer to hold the data
+ * and directly access the region handler.
+ */
+ buffer_desc =
+ acpi_ut_create_buffer_object(ACPI_SMBUS_BUFFER_SIZE);
+ if (!buffer_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Lock entire transaction if requested */
+
+ acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+
+ /*
+ * Perform the read.
+ * Note: Smbus protocol value is passed in upper 16-bits of Function
+ */
+ status = acpi_ex_access_region(obj_desc, 0,
+ ACPI_CAST_PTR(acpi_integer,
+ buffer_desc->
+ buffer.pointer),
+ ACPI_READ | (obj_desc->field.
+ attribute << 16));
+ acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
+ goto exit;
+ }
+
+ /*
+ * Allocate a buffer for the contents of the field.
+ *
+ * If the field is larger than the size of an acpi_integer, create
+ * a BUFFER to hold it. Otherwise, use an INTEGER. This allows
+ * the use of arithmetic operators on the returned value if the
+	 * field size is equal to or smaller than an Integer.
+ *
+ * Note: Field.length is in bits.
+ */
+ length =
+ (acpi_size) ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
+ if (length > acpi_gbl_integer_byte_width) {
+
+ /* Field is too large for an Integer, create a Buffer instead */
+
+ buffer_desc = acpi_ut_create_buffer_object(length);
+ if (!buffer_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+ buffer = buffer_desc->buffer.pointer;
+ } else {
+ /* Field will fit within an Integer (normal case) */
+
+ buffer_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!buffer_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ length = acpi_gbl_integer_byte_width;
+ buffer_desc->integer.value = 0;
+ buffer = &buffer_desc->integer.value;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "FieldRead [TO]: Obj %p, Type %X, Buf %p, ByteLen %X\n",
+ obj_desc, ACPI_GET_OBJECT_TYPE(obj_desc), buffer,
+ (u32) length));
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "FieldRead [FROM]: BitLen %X, BitOff %X, ByteOff %X\n",
+ obj_desc->common_field.bit_length,
+ obj_desc->common_field.start_field_bit_offset,
+ obj_desc->common_field.base_byte_offset));
+
+ /* Lock entire transaction if requested */
+
+ acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+
+ /* Read from the field */
+
+ status = acpi_ex_extract_from_field(obj_desc, buffer, (u32) length);
+ acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
+
+ exit:
+ if (ACPI_FAILURE(status)) {
+ acpi_ut_remove_reference(buffer_desc);
+ } else {
+ *ret_buffer_desc = buffer_desc;
+ }
+
+ return_ACPI_STATUS(status);
+}
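The Integer-versus-Buffer decision above is worth seeing in isolation: the field's bit length is rounded up to bytes and compared against the AML integer width. A minimal sketch, assuming a hypothetical 8-byte integer width in place of acpi_gbl_integer_byte_width:

#include <stdio.h>

#define INTEGER_BYTE_WIDTH 8                    /* assumed 64-bit AML integers */
#define ROUND_BITS_UP_TO_BYTES(bits) (((bits) + 7) / 8)

static const char *field_return_type(unsigned bit_length)
{
	/* Larger than an integer: return a Buffer; otherwise an Integer */
	return (ROUND_BITS_UP_TO_BYTES(bit_length) > INTEGER_BYTE_WIDTH)
		? "Buffer" : "Integer";
}

int main(void)
{
	printf("33-bit field -> %s\n", field_return_type(33));  /* Integer */
	printf("65-bit field -> %s\n", field_return_type(65));  /* Buffer */
	return 0;
}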
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_write_data_to_field
+ *
+ * PARAMETERS: source_desc - Contains data to write
+ * obj_desc - The named field
+ * result_desc - Where the return value is returned, if any
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Write to a named field
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
+ union acpi_operand_object *obj_desc,
+ union acpi_operand_object **result_desc)
+{
+ acpi_status status;
+ u32 length;
+ void *buffer;
+ union acpi_operand_object *buffer_desc;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_write_data_to_field, obj_desc);
+
+ /* Parameter validation */
+
+ if (!source_desc || !obj_desc) {
+ return_ACPI_STATUS(AE_AML_NO_OPERAND);
+ }
+
+ if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_BUFFER_FIELD) {
+ /*
+ * If the buffer_field arguments have not been previously evaluated,
+ * evaluate them now and save the results.
+ */
+ if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) {
+ status = acpi_ds_get_buffer_field_arguments(obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+ } else
+ if ((ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_REGION_FIELD)
+ && (obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_SMBUS)) {
+ /*
+ * This is an SMBus write. We will bypass the entire field mechanism
+ * and handoff the buffer directly to the handler.
+ *
+ * Source must be a buffer of sufficient size (ACPI_SMBUS_BUFFER_SIZE).
+ */
+ if (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_BUFFER) {
+ ACPI_ERROR((AE_INFO,
+ "SMBus write requires Buffer, found type %s",
+ acpi_ut_get_object_type_name(source_desc)));
+
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ if (source_desc->buffer.length < ACPI_SMBUS_BUFFER_SIZE) {
+ ACPI_ERROR((AE_INFO,
+ "SMBus write requires Buffer of length %X, found length %X",
+ ACPI_SMBUS_BUFFER_SIZE,
+ source_desc->buffer.length));
+
+ return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
+ }
+
+ buffer_desc =
+ acpi_ut_create_buffer_object(ACPI_SMBUS_BUFFER_SIZE);
+ if (!buffer_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ buffer = buffer_desc->buffer.pointer;
+ ACPI_MEMCPY(buffer, source_desc->buffer.pointer,
+ ACPI_SMBUS_BUFFER_SIZE);
+
+ /* Lock entire transaction if requested */
+
+ acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+
+ /*
+ * Perform the write (returns status and perhaps data in the
+ * same buffer)
+ * Note: SMBus protocol type is passed in upper 16-bits of Function.
+ */
+ status = acpi_ex_access_region(obj_desc, 0,
+ (acpi_integer *) buffer,
+ ACPI_WRITE | (obj_desc->field.
+ attribute << 16));
+ acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
+
+ *result_desc = buffer_desc;
+ return_ACPI_STATUS(status);
+ }
+
+ /* Get a pointer to the data to be written */
+
+ switch (ACPI_GET_OBJECT_TYPE(source_desc)) {
+ case ACPI_TYPE_INTEGER:
+ buffer = &source_desc->integer.value;
+ length = sizeof(source_desc->integer.value);
+ break;
+
+ case ACPI_TYPE_BUFFER:
+ buffer = source_desc->buffer.pointer;
+ length = source_desc->buffer.length;
+ break;
+
+ case ACPI_TYPE_STRING:
+ buffer = source_desc->string.pointer;
+ length = source_desc->string.length;
+ break;
+
+ default:
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "FieldWrite [FROM]: Obj %p (%s:%X), Buf %p, ByteLen %X\n",
+ source_desc,
+ acpi_ut_get_type_name(ACPI_GET_OBJECT_TYPE
+ (source_desc)),
+ ACPI_GET_OBJECT_TYPE(source_desc), buffer, length));
+
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "FieldWrite [TO]: Obj %p (%s:%X), BitLen %X, BitOff %X, ByteOff %X\n",
+ obj_desc,
+ acpi_ut_get_type_name(ACPI_GET_OBJECT_TYPE(obj_desc)),
+ ACPI_GET_OBJECT_TYPE(obj_desc),
+ obj_desc->common_field.bit_length,
+ obj_desc->common_field.start_field_bit_offset,
+ obj_desc->common_field.base_byte_offset));
+
+ /* Lock entire transaction if requested */
+
+ acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+
+ /* Write to the field */
+
+ status = acpi_ex_insert_into_field(obj_desc, buffer, length);
+ acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
+
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/executer/exfldio.c b/drivers/acpi/executer/exfldio.c
new file mode 100644
index 0000000..9ff9d1f
--- /dev/null
+++ b/drivers/acpi/executer/exfldio.c
@@ -0,0 +1,957 @@
+/******************************************************************************
+ *
+ * Module Name: exfldio - Aml Field I/O
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acinterp.h>
+#include <acpi/amlcode.h>
+#include <acpi/acevents.h>
+#include <acpi/acdispat.h>
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exfldio")
+
+/* Local prototypes */
+static acpi_status
+acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
+ u32 field_datum_byte_offset,
+ acpi_integer * value, u32 read_write);
+
+static u8
+acpi_ex_register_overflow(union acpi_operand_object *obj_desc,
+ acpi_integer value);
+
+static acpi_status
+acpi_ex_setup_region(union acpi_operand_object *obj_desc,
+ u32 field_datum_byte_offset);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_setup_region
+ *
+ * PARAMETERS: obj_desc - Field to be read or written
+ * field_datum_byte_offset - Byte offset of this datum within the
+ * parent field
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Common processing for acpi_ex_extract_from_field and
+ * acpi_ex_insert_into_field. Initialize the Region if necessary and
+ * validate the request.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ex_setup_region(union acpi_operand_object *obj_desc,
+ u32 field_datum_byte_offset)
+{
+ acpi_status status = AE_OK;
+ union acpi_operand_object *rgn_desc;
+
+ ACPI_FUNCTION_TRACE_U32(ex_setup_region, field_datum_byte_offset);
+
+ rgn_desc = obj_desc->common_field.region_obj;
+
+ /* We must have a valid region */
+
+ if (ACPI_GET_OBJECT_TYPE(rgn_desc) != ACPI_TYPE_REGION) {
+ ACPI_ERROR((AE_INFO, "Needed Region, found type %X (%s)",
+ ACPI_GET_OBJECT_TYPE(rgn_desc),
+ acpi_ut_get_object_type_name(rgn_desc)));
+
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ /*
+ * If the Region Address and Length have not been previously evaluated,
+ * evaluate them now and save the results.
+ */
+ if (!(rgn_desc->common.flags & AOPOBJ_DATA_VALID)) {
+ status = acpi_ds_get_region_arguments(rgn_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /* Exit if Address/Length have been disallowed by the host OS */
+
+ if (rgn_desc->common.flags & AOPOBJ_INVALID) {
+ return_ACPI_STATUS(AE_AML_ILLEGAL_ADDRESS);
+ }
+
+ /*
+	 * Exit now for the SMBus address space; it has a non-linear address space
+ * and the request cannot be directly validated
+ */
+ if (rgn_desc->region.space_id == ACPI_ADR_SPACE_SMBUS) {
+
+ /* SMBus has a non-linear address space */
+
+ return_ACPI_STATUS(AE_OK);
+ }
+#ifdef ACPI_UNDER_DEVELOPMENT
+ /*
+ * If the Field access is any_acc, we can now compute the optimal
+	 * access (because we now know the length of the parent region)
+ */
+ if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) {
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+#endif
+
+ /*
+ * Validate the request. The entire request from the byte offset for a
+ * length of one field datum (access width) must fit within the region.
+ * (Region length is specified in bytes)
+ */
+ if (rgn_desc->region.length <
+ (obj_desc->common_field.base_byte_offset +
+ field_datum_byte_offset +
+ obj_desc->common_field.access_byte_width)) {
+ if (acpi_gbl_enable_interpreter_slack) {
+ /*
+ * Slack mode only: We will go ahead and allow access to this
+ * field if it is within the region length rounded up to the next
+ * access width boundary. acpi_size cast for 64-bit compile.
+ */
+ if (ACPI_ROUND_UP(rgn_desc->region.length,
+ obj_desc->common_field.
+ access_byte_width) >=
+ ((acpi_size) obj_desc->common_field.
+ base_byte_offset +
+ obj_desc->common_field.access_byte_width +
+ field_datum_byte_offset)) {
+ return_ACPI_STATUS(AE_OK);
+ }
+ }
+
+ if (rgn_desc->region.length <
+ obj_desc->common_field.access_byte_width) {
+ /*
+ * This is the case where the access_type (acc_word, etc.) is wider
+ * than the region itself. For example, a region of length one
+ * byte, and a field with Dword access specified.
+ */
+ ACPI_ERROR((AE_INFO,
+ "Field [%4.4s] access width (%d bytes) too large for region [%4.4s] (length %X)",
+ acpi_ut_get_node_name(obj_desc->
+ common_field.node),
+ obj_desc->common_field.access_byte_width,
+ acpi_ut_get_node_name(rgn_desc->region.
+ node),
+ rgn_desc->region.length));
+ }
+
+ /*
+ * Offset rounded up to next multiple of field width
+ * exceeds region length, indicate an error
+ */
+ ACPI_ERROR((AE_INFO,
+ "Field [%4.4s] Base+Offset+Width %X+%X+%X is beyond end of region [%4.4s] (length %X)",
+ acpi_ut_get_node_name(obj_desc->common_field.node),
+ obj_desc->common_field.base_byte_offset,
+ field_datum_byte_offset,
+ obj_desc->common_field.access_byte_width,
+ acpi_ut_get_node_name(rgn_desc->region.node),
+ rgn_desc->region.length));
+
+ return_ACPI_STATUS(AE_AML_REGION_LIMIT);
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
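The bounds check above, including the slack-mode relaxation, condenses to a few lines. The sketch below uses plain integers and a hypothetical request_fits() helper rather than the ACPICA field descriptors; it only shows when the AE_AML_REGION_LIMIT path would be avoided.

#include <stdbool.h>
#include <stdio.h>

#define ROUND_UP(value, boundary) \
	((((value) + (boundary) - 1) / (boundary)) * (boundary))

static bool request_fits(unsigned region_len, unsigned base_offset,
			 unsigned datum_offset, unsigned access_width,
			 bool slack_mode)
{
	unsigned end = base_offset + datum_offset + access_width;

	if (region_len >= end)
		return true;                    /* normal case: fully inside */

	/* Slack mode: allow it if the rounded-up region length covers it */
	return slack_mode && ROUND_UP(region_len, access_width) >= end;
}

int main(void)
{
	/* 3-byte region, Dword (4-byte) access at offset 0 */
	printf("strict: %d\n", request_fits(3, 0, 0, 4, false)); /* 0 */
	printf("slack : %d\n", request_fits(3, 0, 0, 4, true));  /* 1 */
	return 0;
}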
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_access_region
+ *
+ * PARAMETERS: obj_desc - Field to be read
+ * field_datum_byte_offset - Byte offset of this datum within the
+ * parent field
+ *              Value                   - Where to store value (must be at least
+ * the size of acpi_integer)
+ * Function - Read or Write flag plus other region-
+ * dependent flags
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Read or Write a single field datum to an Operation Region.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_access_region(union acpi_operand_object *obj_desc,
+ u32 field_datum_byte_offset,
+ acpi_integer * value, u32 function)
+{
+ acpi_status status;
+ union acpi_operand_object *rgn_desc;
+ acpi_physical_address address;
+
+ ACPI_FUNCTION_TRACE(ex_access_region);
+
+ /*
+ * Ensure that the region operands are fully evaluated and verify
+ * the validity of the request
+ */
+ status = acpi_ex_setup_region(obj_desc, field_datum_byte_offset);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * The physical address of this field datum is:
+ *
+ * 1) The base of the region, plus
+ * 2) The base offset of the field, plus
+ * 3) The current offset into the field
+ */
+ rgn_desc = obj_desc->common_field.region_obj;
+ address = rgn_desc->region.address +
+ obj_desc->common_field.base_byte_offset + field_datum_byte_offset;
+
+ if ((function & ACPI_IO_MASK) == ACPI_READ) {
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "[READ]"));
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "[WRITE]"));
+ }
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD,
+ " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %p\n",
+ acpi_ut_get_region_name(rgn_desc->region.
+ space_id),
+ rgn_desc->region.space_id,
+ obj_desc->common_field.access_byte_width,
+ obj_desc->common_field.base_byte_offset,
+ field_datum_byte_offset, ACPI_CAST_PTR(void,
+ address)));
+
+ /* Invoke the appropriate address_space/op_region handler */
+
+ status = acpi_ev_address_space_dispatch(rgn_desc, function,
+ address,
+ ACPI_MUL_8(obj_desc->
+ common_field.
+ access_byte_width),
+ value);
+
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_NOT_IMPLEMENTED) {
+ ACPI_ERROR((AE_INFO,
+ "Region %s(%X) not implemented",
+ acpi_ut_get_region_name(rgn_desc->region.
+ space_id),
+ rgn_desc->region.space_id));
+ } else if (status == AE_NOT_EXIST) {
+ ACPI_ERROR((AE_INFO,
+ "Region %s(%X) has no handler",
+ acpi_ut_get_region_name(rgn_desc->region.
+ space_id),
+ rgn_desc->region.space_id));
+ }
+ }
+
+ return_ACPI_STATUS(status);
+}
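The three-part address computation described above is simple enough to state standalone. A sketch with hypothetical names (datum_address() is not an ACPICA function):

#include <stdint.h>
#include <stdio.h>

static uint64_t datum_address(uint64_t region_base,
			      uint32_t field_base_byte_offset,
			      uint32_t field_datum_byte_offset)
{
	/* Region base + field base offset + current offset into the field */
	return region_base + field_base_byte_offset + field_datum_byte_offset;
}

int main(void)
{
	/* Region at 0x1000, field starts 0x20 bytes in, second Dword datum */
	printf("0x%llX\n",
	       (unsigned long long)datum_address(0x1000, 0x20, 4)); /* 0x1024 */
	return 0;
}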
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_register_overflow
+ *
+ * PARAMETERS: obj_desc - Register(Field) to be written
+ * Value - Value to be stored
+ *
+ * RETURN: TRUE if value overflows the field, FALSE otherwise
+ *
+ * DESCRIPTION: Check if a value is out of range of the field being written.
+ * Used to check if the values written to Index and Bank registers
+ * are out of range. Normally, the value is simply truncated
+ * to fit the field, but this case is most likely a serious
+ * coding error in the ASL.
+ *
+ ******************************************************************************/
+
+static u8
+acpi_ex_register_overflow(union acpi_operand_object *obj_desc,
+ acpi_integer value)
+{
+
+ if (obj_desc->common_field.bit_length >= ACPI_INTEGER_BIT_SIZE) {
+ /*
+ * The field is large enough to hold the maximum integer, so we can
+ * never overflow it.
+ */
+ return (FALSE);
+ }
+
+ if (value >= ((acpi_integer) 1 << obj_desc->common_field.bit_length)) {
+ /*
+ * The Value is larger than the maximum value that can fit into
+ * the register.
+ */
+ return (TRUE);
+ }
+
+ /* The Value will fit into the field with no truncation */
+
+ return (FALSE);
+}
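A standalone restatement of the overflow test above, assuming 64-bit integers; the helper name is hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool register_overflow(unsigned field_bit_length, uint64_t value)
{
	if (field_bit_length >= 64)
		return false;           /* field can hold any integer value */

	/* Value needs more bits than the field provides */
	return value >= ((uint64_t)1 << field_bit_length);
}

int main(void)
{
	printf("%d\n", register_overflow(8, 0xFF));   /* 0: fits */
	printf("%d\n", register_overflow(8, 0x100));  /* 1: overflows */
	return 0;
}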
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_field_datum_io
+ *
+ * PARAMETERS: obj_desc - Field to be read
+ * field_datum_byte_offset - Byte offset of this datum within the
+ * parent field
+ * Value - Where to store value (must be 64 bits)
+ * read_write - Read or Write flag
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Read or Write a single datum of a field. The field_type is
+ * demultiplexed here to handle the different types of fields
+ * (buffer_field, region_field, index_field, bank_field)
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
+ u32 field_datum_byte_offset,
+ acpi_integer * value, u32 read_write)
+{
+ acpi_status status;
+ acpi_integer local_value;
+
+ ACPI_FUNCTION_TRACE_U32(ex_field_datum_io, field_datum_byte_offset);
+
+ if (read_write == ACPI_READ) {
+ if (!value) {
+ local_value = 0;
+
+ /* To support reads without saving return value */
+ value = &local_value;
+ }
+
+ /* Clear the entire return buffer first, [Very Important!] */
+
+ *value = 0;
+ }
+
+ /*
+ * The four types of fields are:
+ *
+ * buffer_field - Read/write from/to a Buffer
+	 * region_field - Read/write from/to an Operation Region.
+ * bank_field - Write to a Bank Register, then read/write from/to an
+ * operation_region
+ * index_field - Write to an Index Register, then read/write from/to a
+ * Data Register
+ */
+ switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+ case ACPI_TYPE_BUFFER_FIELD:
+ /*
+ * If the buffer_field arguments have not been previously evaluated,
+ * evaluate them now and save the results.
+ */
+ if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) {
+ status = acpi_ds_get_buffer_field_arguments(obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ if (read_write == ACPI_READ) {
+ /*
+ * Copy the data from the source buffer.
+ * Length is the field width in bytes.
+ */
+ ACPI_MEMCPY(value,
+ (obj_desc->buffer_field.buffer_obj)->buffer.
+ pointer +
+ obj_desc->buffer_field.base_byte_offset +
+ field_datum_byte_offset,
+ obj_desc->common_field.access_byte_width);
+ } else {
+ /*
+ * Copy the data to the target buffer.
+ * Length is the field width in bytes.
+ */
+ ACPI_MEMCPY((obj_desc->buffer_field.buffer_obj)->buffer.
+ pointer +
+ obj_desc->buffer_field.base_byte_offset +
+ field_datum_byte_offset, value,
+ obj_desc->common_field.access_byte_width);
+ }
+
+ status = AE_OK;
+ break;
+
+ case ACPI_TYPE_LOCAL_BANK_FIELD:
+
+ /*
+ * Ensure that the bank_value is not beyond the capacity of
+ * the register
+ */
+ if (acpi_ex_register_overflow(obj_desc->bank_field.bank_obj,
+ (acpi_integer) obj_desc->
+ bank_field.value)) {
+ return_ACPI_STATUS(AE_AML_REGISTER_LIMIT);
+ }
+
+ /*
+ * For bank_fields, we must write the bank_value to the bank_register
+ * (itself a region_field) before we can access the data.
+ */
+ status =
+ acpi_ex_insert_into_field(obj_desc->bank_field.bank_obj,
+ &obj_desc->bank_field.value,
+ sizeof(obj_desc->bank_field.
+ value));
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Now that the Bank has been selected, fall through to the
+ * region_field case and write the datum to the Operation Region
+ */
+
+ /*lint -fallthrough */
+
+ case ACPI_TYPE_LOCAL_REGION_FIELD:
+ /*
+ * For simple region_fields, we just directly access the owning
+ * Operation Region.
+ */
+ status =
+ acpi_ex_access_region(obj_desc, field_datum_byte_offset,
+ value, read_write);
+ break;
+
+ case ACPI_TYPE_LOCAL_INDEX_FIELD:
+
+ /*
+ * Ensure that the index_value is not beyond the capacity of
+ * the register
+ */
+ if (acpi_ex_register_overflow(obj_desc->index_field.index_obj,
+ (acpi_integer) obj_desc->
+ index_field.value)) {
+ return_ACPI_STATUS(AE_AML_REGISTER_LIMIT);
+ }
+
+ /* Write the index value to the index_register (itself a region_field) */
+
+ field_datum_byte_offset += obj_desc->index_field.value;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "Write to Index Register: Value %8.8X\n",
+ field_datum_byte_offset));
+
+ status =
+ acpi_ex_insert_into_field(obj_desc->index_field.index_obj,
+ &field_datum_byte_offset,
+ sizeof(field_datum_byte_offset));
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "I/O to Data Register: ValuePtr %p\n",
+ value));
+
+ if (read_write == ACPI_READ) {
+
+ /* Read the datum from the data_register */
+
+ status =
+ acpi_ex_extract_from_field(obj_desc->index_field.
+ data_obj, value,
+ sizeof(acpi_integer));
+ } else {
+ /* Write the datum to the data_register */
+
+ status =
+ acpi_ex_insert_into_field(obj_desc->index_field.
+ data_obj, value,
+ sizeof(acpi_integer));
+ }
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO, "Wrong object type in field I/O %X",
+ ACPI_GET_OBJECT_TYPE(obj_desc)));
+ status = AE_AML_INTERNAL;
+ break;
+ }
+
+ if (ACPI_SUCCESS(status)) {
+ if (read_write == ACPI_READ) {
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "Value Read %8.8X%8.8X, Width %d\n",
+ ACPI_FORMAT_UINT64(*value),
+ obj_desc->common_field.
+ access_byte_width));
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "Value Written %8.8X%8.8X, Width %d\n",
+ ACPI_FORMAT_UINT64(*value),
+ obj_desc->common_field.
+ access_byte_width));
+ }
+ }
+
+ return_ACPI_STATUS(status);
+}
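Of the four field types listed above, index_field has the least obvious access pattern: the datum offset is first written to an index register, and only then is the data register read or written. The sketch below emulates such a register pair with a plain array; the names and the backing store are invented for illustration and do not model the extra index_field.value base offset.

#include <stdint.h>
#include <stdio.h>

static uint32_t index_reg;
static uint32_t backing_store[16];      /* what the data register exposes */

static void write_index(uint32_t offset) { index_reg = offset; }
static void write_data(uint32_t value)   { backing_store[index_reg] = value; }
static uint32_t read_data(void)          { return backing_store[index_reg]; }

int main(void)
{
	write_index(3);                 /* select the datum ... */
	write_data(0xCAFE);             /* ... then write through the pair */

	write_index(3);                 /* select again ... */
	printf("slot 3 = 0x%X\n", read_data());  /* ... then read it back */
	return 0;
}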
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_write_with_update_rule
+ *
+ * PARAMETERS: obj_desc - Field to be written
+ * Mask - bitmask within field datum
+ * field_value - Value to write
+ * field_datum_byte_offset - Offset of datum within field
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Apply the field update rule to a field write
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
+ acpi_integer mask,
+ acpi_integer field_value,
+ u32 field_datum_byte_offset)
+{
+ acpi_status status = AE_OK;
+ acpi_integer merged_value;
+ acpi_integer current_value;
+
+ ACPI_FUNCTION_TRACE_U32(ex_write_with_update_rule, mask);
+
+ /* Start with the new bits */
+
+ merged_value = field_value;
+
+ /* If the mask is all ones, we don't need to worry about the update rule */
+
+ if (mask != ACPI_INTEGER_MAX) {
+
+ /* Decode the update rule */
+
+ switch (obj_desc->common_field.
+ field_flags & AML_FIELD_UPDATE_RULE_MASK) {
+ case AML_FIELD_UPDATE_PRESERVE:
+ /*
+ * Check if update rule needs to be applied (not if mask is all
+			 * ones). The left shift drops the bits we want to ignore.
+ */
+ if ((~mask << (ACPI_MUL_8(sizeof(mask)) -
+ ACPI_MUL_8(obj_desc->common_field.
+ access_byte_width))) != 0) {
+ /*
+ * Read the current contents of the byte/word/dword containing
+ * the field, and merge with the new field value.
+ */
+ status =
+ acpi_ex_field_datum_io(obj_desc,
+ field_datum_byte_offset,
+ &current_value,
+ ACPI_READ);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ merged_value |= (current_value & ~mask);
+ }
+ break;
+
+ case AML_FIELD_UPDATE_WRITE_AS_ONES:
+
+ /* Set positions outside the field to all ones */
+
+ merged_value |= ~mask;
+ break;
+
+ case AML_FIELD_UPDATE_WRITE_AS_ZEROS:
+
+ /* Set positions outside the field to all zeros */
+
+ merged_value &= mask;
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO,
+ "Unknown UpdateRule value: %X",
+ (obj_desc->common_field.
+ field_flags &
+ AML_FIELD_UPDATE_RULE_MASK)));
+ return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
+ }
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "Mask %8.8X%8.8X, DatumOffset %X, Width %X, Value %8.8X%8.8X, MergedValue %8.8X%8.8X\n",
+ ACPI_FORMAT_UINT64(mask),
+ field_datum_byte_offset,
+ obj_desc->common_field.access_byte_width,
+ ACPI_FORMAT_UINT64(field_value),
+ ACPI_FORMAT_UINT64(merged_value)));
+
+ /* Write the merged value */
+
+ status = acpi_ex_field_datum_io(obj_desc, field_datum_byte_offset,
+ &merged_value, ACPI_WRITE);
+
+ return_ACPI_STATUS(status);
+}
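The three update rules decoded above determine what happens to the bits of the datum that lie outside the field (outside Mask). A compact sketch of the merge step, using plain C and an invented enum rather than the AML_FIELD_UPDATE_* flag values:

#include <stdint.h>
#include <stdio.h>

enum update_rule { PRESERVE, WRITE_AS_ONES, WRITE_AS_ZEROS };

static uint64_t merge_datum(enum update_rule rule, uint64_t mask,
			    uint64_t field_value, uint64_t current_value)
{
	uint64_t merged = field_value & mask;   /* start with the new bits */

	switch (rule) {
	case PRESERVE:
		return merged | (current_value & ~mask); /* keep old bits */
	case WRITE_AS_ONES:
		return merged | ~mask;          /* force outside bits to 1 */
	case WRITE_AS_ZEROS:
	default:
		return merged;                  /* outside bits stay 0 */
	}
}

int main(void)
{
	/* Field occupies bits 8..15; new value 0xAB00, current datum 0x1234 */
	printf("preserve: 0x%llX\n", (unsigned long long)
	       merge_datum(PRESERVE, 0xFF00, 0xAB00, 0x1234)); /* 0xAB34 */
	return 0;
}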
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_extract_from_field
+ *
+ * PARAMETERS: obj_desc - Field to be read
+ * Buffer - Where to store the field data
+ * buffer_length - Length of Buffer
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Retrieve the current value of the given field
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
+ void *buffer, u32 buffer_length)
+{
+ acpi_status status;
+ acpi_integer raw_datum;
+ acpi_integer merged_datum;
+ u32 field_offset = 0;
+ u32 buffer_offset = 0;
+ u32 buffer_tail_bits;
+ u32 datum_count;
+ u32 field_datum_count;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(ex_extract_from_field);
+
+ /* Validate target buffer and clear it */
+
+ if (buffer_length <
+ ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length)) {
+ ACPI_ERROR((AE_INFO,
+ "Field size %X (bits) is too large for buffer (%X)",
+ obj_desc->common_field.bit_length, buffer_length));
+
+ return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
+ }
+ ACPI_MEMSET(buffer, 0, buffer_length);
+
+ /* Compute the number of datums (access width data items) */
+
+ datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length,
+ obj_desc->common_field.access_bit_width);
+ field_datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length +
+ obj_desc->common_field.
+ start_field_bit_offset,
+ obj_desc->common_field.
+ access_bit_width);
+
+ /* Priming read from the field */
+
+ status =
+ acpi_ex_field_datum_io(obj_desc, field_offset, &raw_datum,
+ ACPI_READ);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ merged_datum =
+ raw_datum >> obj_desc->common_field.start_field_bit_offset;
+
+ /* Read the rest of the field */
+
+ for (i = 1; i < field_datum_count; i++) {
+
+ /* Get next input datum from the field */
+
+ field_offset += obj_desc->common_field.access_byte_width;
+ status = acpi_ex_field_datum_io(obj_desc, field_offset,
+ &raw_datum, ACPI_READ);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Merge with previous datum if necessary.
+ *
+ * Note: Before the shift, check if the shift value will be larger than
+ * the integer size. If so, there is no need to perform the operation.
+ * This avoids the differences in behavior between different compilers
+ * concerning shift values larger than the target data width.
+ */
+ if ((obj_desc->common_field.access_bit_width -
+ obj_desc->common_field.start_field_bit_offset) <
+ ACPI_INTEGER_BIT_SIZE) {
+ merged_datum |=
+ raw_datum << (obj_desc->common_field.
+ access_bit_width -
+ obj_desc->common_field.
+ start_field_bit_offset);
+ }
+
+ if (i == datum_count) {
+ break;
+ }
+
+ /* Write merged datum to target buffer */
+
+ ACPI_MEMCPY(((char *)buffer) + buffer_offset, &merged_datum,
+ ACPI_MIN(obj_desc->common_field.access_byte_width,
+ buffer_length - buffer_offset));
+
+ buffer_offset += obj_desc->common_field.access_byte_width;
+ merged_datum =
+ raw_datum >> obj_desc->common_field.start_field_bit_offset;
+ }
+
+ /* Mask off any extra bits in the last datum */
+
+ buffer_tail_bits = obj_desc->common_field.bit_length %
+ obj_desc->common_field.access_bit_width;
+ if (buffer_tail_bits) {
+ merged_datum &= ACPI_MASK_BITS_ABOVE(buffer_tail_bits);
+ }
+
+ /* Write the last datum to the buffer */
+
+ ACPI_MEMCPY(((char *)buffer) + buffer_offset, &merged_datum,
+ ACPI_MIN(obj_desc->common_field.access_byte_width,
+ buffer_length - buffer_offset));
+
+ return_ACPI_STATUS(AE_OK);
+}
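The merging arithmetic above is easiest to follow with concrete numbers. The sketch below extracts a field that spans two 32-bit datums: the first datum is shifted down by the starting bit offset, the upper bits come from the next datum, and the tail is masked. Plain C, limited to fields of at most 32 bits; not the ACPICA routine.

#include <stdint.h>
#include <stdio.h>

/* Extract up to 32 bits starting at 'start_bit' from two consecutive datums */
static uint32_t extract(const uint32_t datums[2],
			unsigned start_bit, unsigned bit_length)
{
	uint32_t merged = datums[0] >> start_bit;

	/* Merge in the next datum only if the field spills past the first one
	 * (also avoids an undefined shift by 32 when start_bit is 0) */
	if (start_bit && bit_length > 32 - start_bit)
		merged |= datums[1] << (32 - start_bit);

	if (bit_length < 32)
		merged &= (1u << bit_length) - 1;   /* mask extra tail bits */

	return merged;
}

int main(void)
{
	uint32_t raw[2] = { 0xABCD1234, 0x000000EF };

	/* 16-bit field at bit 24: low byte from raw[0], high byte from raw[1] */
	printf("0x%X\n", extract(raw, 24, 16));     /* 0xEFAB */
	return 0;
}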
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_insert_into_field
+ *
+ * PARAMETERS: obj_desc - Field to be written
+ * Buffer - Data to be written
+ * buffer_length - Length of Buffer
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Store the Buffer contents into the given field
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
+ void *buffer, u32 buffer_length)
+{
+ acpi_status status;
+ acpi_integer mask;
+ acpi_integer width_mask;
+ acpi_integer merged_datum;
+ acpi_integer raw_datum = 0;
+ u32 field_offset = 0;
+ u32 buffer_offset = 0;
+ u32 buffer_tail_bits;
+ u32 datum_count;
+ u32 field_datum_count;
+ u32 i;
+ u32 required_length;
+ void *new_buffer;
+
+ ACPI_FUNCTION_TRACE(ex_insert_into_field);
+
+ /* Validate input buffer */
+
+ new_buffer = NULL;
+ required_length =
+ ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length);
+ /*
+ * We must have a buffer that is at least as long as the field
+ * we are writing to. This is because individual fields are
+ * indivisible and partial writes are not supported -- as per
+ * the ACPI specification.
+ */
+ if (buffer_length < required_length) {
+
+ /* We need to create a new buffer */
+
+ new_buffer = ACPI_ALLOCATE_ZEROED(required_length);
+ if (!new_buffer) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /*
+ * Copy the original data to the new buffer, starting
+ * at Byte zero. All unused (upper) bytes of the
+ * buffer will be 0.
+ */
+ ACPI_MEMCPY((char *)new_buffer, (char *)buffer, buffer_length);
+ buffer = new_buffer;
+ buffer_length = required_length;
+ }
+
+ /*
+ * Create the bitmasks used for bit insertion.
+ * Note: This if/else is used to bypass compiler differences with the
+ * shift operator
+ */
+ if (obj_desc->common_field.access_bit_width == ACPI_INTEGER_BIT_SIZE) {
+ width_mask = ACPI_INTEGER_MAX;
+ } else {
+ width_mask =
+ ACPI_MASK_BITS_ABOVE(obj_desc->common_field.
+ access_bit_width);
+ }
+
+ mask = width_mask &
+ ACPI_MASK_BITS_BELOW(obj_desc->common_field.start_field_bit_offset);
+
+ /* Compute the number of datums (access width data items) */
+
+ datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length,
+ obj_desc->common_field.access_bit_width);
+
+ field_datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length +
+ obj_desc->common_field.
+ start_field_bit_offset,
+ obj_desc->common_field.
+ access_bit_width);
+
+ /* Get initial Datum from the input buffer */
+
+ ACPI_MEMCPY(&raw_datum, buffer,
+ ACPI_MIN(obj_desc->common_field.access_byte_width,
+ buffer_length - buffer_offset));
+
+ merged_datum =
+ raw_datum << obj_desc->common_field.start_field_bit_offset;
+
+ /* Write the entire field */
+
+ for (i = 1; i < field_datum_count; i++) {
+
+ /* Write merged datum to the target field */
+
+ merged_datum &= mask;
+ status = acpi_ex_write_with_update_rule(obj_desc, mask,
+ merged_datum,
+ field_offset);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ field_offset += obj_desc->common_field.access_byte_width;
+
+ /*
+ * Start new output datum by merging with previous input datum
+ * if necessary.
+ *
+ * Note: Before the shift, check if the shift value will be larger than
+ * the integer size. If so, there is no need to perform the operation.
+ * This avoids the differences in behavior between different compilers
+ * concerning shift values larger than the target data width.
+ */
+ if ((obj_desc->common_field.access_bit_width -
+ obj_desc->common_field.start_field_bit_offset) <
+ ACPI_INTEGER_BIT_SIZE) {
+ merged_datum =
+ raw_datum >> (obj_desc->common_field.
+ access_bit_width -
+ obj_desc->common_field.
+ start_field_bit_offset);
+ } else {
+ merged_datum = 0;
+ }
+
+ mask = width_mask;
+
+ if (i == datum_count) {
+ break;
+ }
+
+ /* Get the next input datum from the buffer */
+
+ buffer_offset += obj_desc->common_field.access_byte_width;
+ ACPI_MEMCPY(&raw_datum, ((char *)buffer) + buffer_offset,
+ ACPI_MIN(obj_desc->common_field.access_byte_width,
+ buffer_length - buffer_offset));
+ merged_datum |=
+ raw_datum << obj_desc->common_field.start_field_bit_offset;
+ }
+
+ /* Mask off any extra bits in the last datum */
+
+ buffer_tail_bits = (obj_desc->common_field.bit_length +
+ obj_desc->common_field.start_field_bit_offset) %
+ obj_desc->common_field.access_bit_width;
+ if (buffer_tail_bits) {
+ mask &= ACPI_MASK_BITS_ABOVE(buffer_tail_bits);
+ }
+
+ /* Write the last datum to the field */
+
+ merged_datum &= mask;
+ status = acpi_ex_write_with_update_rule(obj_desc,
+ mask, merged_datum,
+ field_offset);
+
+ exit:
+ /* Free temporary buffer if we used one */
+
+ if (new_buffer) {
+ ACPI_FREE(new_buffer);
+ }
+ return_ACPI_STATUS(status);
+}
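The two masks built near the top of acpi_ex_insert_into_field select which bits of the first datum actually belong to the field: everything from the starting bit offset up to the access width. A sketch of just that mask computation, with hypothetical helper names mirroring the intent of ACPI_MASK_BITS_ABOVE/BELOW:

#include <stdint.h>
#include <stdio.h>

/* Keep only the bits below 'position' (mask off bits at and above it) */
static uint64_t mask_bits_above(unsigned position)
{
	return (position >= 64) ? ~0ULL : (((uint64_t)1 << position) - 1);
}

static uint64_t first_datum_mask(unsigned access_bit_width,
				 unsigned start_field_bit_offset)
{
	uint64_t width_mask = mask_bits_above(access_bit_width);
	uint64_t below_mask = ~mask_bits_above(start_field_bit_offset);

	return width_mask & below_mask;   /* bits [offset, access width) */
}

int main(void)
{
	/* Dword access, field starts at bit 4: mask covers bits 4..31 */
	printf("0x%llX\n",
	       (unsigned long long)first_datum_mask(32, 4)); /* 0xFFFFFFF0 */
	return 0;
}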
diff --git a/drivers/acpi/executer/exmisc.c b/drivers/acpi/executer/exmisc.c
new file mode 100644
index 0000000..efb1913
--- /dev/null
+++ b/drivers/acpi/executer/exmisc.c
@@ -0,0 +1,725 @@
+
+/******************************************************************************
+ *
+ * Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acinterp.h>
+#include <acpi/amlcode.h>
+#include <acpi/amlresrc.h>
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exmisc")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_get_object_reference
+ *
+ * PARAMETERS: obj_desc - Create a reference to this object
+ * return_desc - Where to store the reference
+ * walk_state - Current state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Obtain and return a "reference" to the target object
+ * Common code for the ref_of_op and the cond_ref_of_op.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
+ union acpi_operand_object **return_desc,
+ struct acpi_walk_state *walk_state)
+{
+ union acpi_operand_object *reference_obj;
+ union acpi_operand_object *referenced_obj;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_get_object_reference, obj_desc);
+
+ *return_desc = NULL;
+
+ switch (ACPI_GET_DESCRIPTOR_TYPE(obj_desc)) {
+ case ACPI_DESC_TYPE_OPERAND:
+
+ if (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_LOCAL_REFERENCE) {
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ /*
+ * Must be a reference to a Local or Arg
+ */
+ switch (obj_desc->reference.class) {
+ case ACPI_REFCLASS_LOCAL:
+ case ACPI_REFCLASS_ARG:
+ case ACPI_REFCLASS_DEBUG:
+
+ /* The referenced object is the pseudo-node for the local/arg */
+
+ referenced_obj = obj_desc->reference.object;
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO, "Unknown Reference Class %2.2X",
+ obj_desc->reference.class));
+ return_ACPI_STATUS(AE_AML_INTERNAL);
+ }
+ break;
+
+ case ACPI_DESC_TYPE_NAMED:
+
+ /*
+ * A named reference that has already been resolved to a Node
+ */
+ referenced_obj = obj_desc;
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO, "Invalid descriptor type %X",
+ ACPI_GET_DESCRIPTOR_TYPE(obj_desc)));
+ return_ACPI_STATUS(AE_TYPE);
+ }
+
+ /* Create a new reference object */
+
+ reference_obj =
+ acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_REFERENCE);
+ if (!reference_obj) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ reference_obj->reference.class = ACPI_REFCLASS_REFOF;
+ reference_obj->reference.object = referenced_obj;
+ *return_desc = reference_obj;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Object %p Type [%s], returning Reference %p\n",
+ obj_desc, acpi_ut_get_object_type_name(obj_desc),
+ *return_desc));
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_concat_template
+ *
+ * PARAMETERS: Operand0 - First source object
+ * Operand1 - Second source object
+ * actual_return_desc - Where to place the return object
+ * walk_state - Current walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Concatenate two resource templates
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_concat_template(union acpi_operand_object *operand0,
+ union acpi_operand_object *operand1,
+ union acpi_operand_object **actual_return_desc,
+ struct acpi_walk_state *walk_state)
+{
+ acpi_status status;
+ union acpi_operand_object *return_desc;
+ u8 *new_buf;
+ u8 *end_tag;
+ acpi_size length0;
+ acpi_size length1;
+ acpi_size new_length;
+
+ ACPI_FUNCTION_TRACE(ex_concat_template);
+
+ /*
+ * Find the end_tag descriptor in each resource template.
+ * Note1: returned pointers point TO the end_tag, not past it.
+ * Note2: zero-length buffers are allowed; treated like one end_tag
+ */
+
+ /* Get the length of the first resource template */
+
+ status = acpi_ut_get_resource_end_tag(operand0, &end_tag);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ length0 = ACPI_PTR_DIFF(end_tag, operand0->buffer.pointer);
+
+ /* Get the length of the second resource template */
+
+ status = acpi_ut_get_resource_end_tag(operand1, &end_tag);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ length1 = ACPI_PTR_DIFF(end_tag, operand1->buffer.pointer);
+
+ /* Combine both lengths, minimum size will be 2 for end_tag */
+
+ new_length = length0 + length1 + sizeof(struct aml_resource_end_tag);
+
+ /* Create a new buffer object for the result (with one end_tag) */
+
+ return_desc = acpi_ut_create_buffer_object(new_length);
+ if (!return_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /*
+ * Copy the templates to the new buffer, 0 first, then 1 follows. One
+	 * new end_tag descriptor is appended after the second template.
+ */
+ new_buf = return_desc->buffer.pointer;
+ ACPI_MEMCPY(new_buf, operand0->buffer.pointer, length0);
+ ACPI_MEMCPY(new_buf + length0, operand1->buffer.pointer, length1);
+
+ /* Insert end_tag and set the checksum to zero, means "ignore checksum" */
+
+ new_buf[new_length - 1] = 0;
+ new_buf[new_length - 2] = ACPI_RESOURCE_NAME_END_TAG | 1;
+
+ /* Return the completed resource template */
+
+ *actual_return_desc = return_desc;
+ return_ACPI_STATUS(AE_OK);
+}
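The buffer layout produced above can be sketched in a few lines: both templates are copied with their end_tags stripped, then one fresh two-byte end_tag (tag byte plus a zero checksum, meaning "ignore checksum") is appended. The descriptor bytes in main() are made up for the example.

#include <stdio.h>
#include <string.h>

#define END_TAG_BYTE 0x79       /* small-resource end_tag, length 1 */

static size_t concat_templates(unsigned char *out,
			       const unsigned char *t0, size_t len0,
			       const unsigned char *t1, size_t len1)
{
	memcpy(out, t0, len0);              /* template 0, end_tag stripped */
	memcpy(out + len0, t1, len1);       /* template 1, end_tag stripped */

	out[len0 + len1] = END_TAG_BYTE;    /* one new end_tag ... */
	out[len0 + len1 + 1] = 0;           /* ... checksum 0 = "ignore" */

	return len0 + len1 + 2;
}

int main(void)
{
	unsigned char a[] = { 0x22, 0x01, 0x00 };   /* invented descriptor bytes */
	unsigned char b[] = { 0x23, 0x02, 0x00 };
	unsigned char out[16];

	printf("new length = %zu\n",
	       concat_templates(out, a, sizeof(a), b, sizeof(b)));  /* 8 */
	return 0;
}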
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_do_concatenate
+ *
+ * PARAMETERS: Operand0 - First source object
+ * Operand1 - Second source object
+ * actual_return_desc - Where to place the return object
+ * walk_state - Current walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Concatenate two objects OF THE SAME TYPE.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_do_concatenate(union acpi_operand_object *operand0,
+ union acpi_operand_object *operand1,
+ union acpi_operand_object **actual_return_desc,
+ struct acpi_walk_state *walk_state)
+{
+ union acpi_operand_object *local_operand1 = operand1;
+ union acpi_operand_object *return_desc;
+ char *new_buf;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ex_do_concatenate);
+
+ /*
+ * Convert the second operand if necessary. The first operand
+ * determines the type of the second operand, (See the Data Types
+ * section of the ACPI specification.) Both object types are
+ * guaranteed to be either Integer/String/Buffer by the operand
+ * resolution mechanism.
+ */
+ switch (ACPI_GET_OBJECT_TYPE(operand0)) {
+ case ACPI_TYPE_INTEGER:
+ status =
+ acpi_ex_convert_to_integer(operand1, &local_operand1, 16);
+ break;
+
+ case ACPI_TYPE_STRING:
+ status = acpi_ex_convert_to_string(operand1, &local_operand1,
+ ACPI_IMPLICIT_CONVERT_HEX);
+ break;
+
+ case ACPI_TYPE_BUFFER:
+ status = acpi_ex_convert_to_buffer(operand1, &local_operand1);
+ break;
+
+ default:
+ ACPI_ERROR((AE_INFO, "Invalid object type: %X",
+ ACPI_GET_OBJECT_TYPE(operand0)));
+ status = AE_AML_INTERNAL;
+ }
+
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ /*
+ * Both operands are now known to be the same object type
+ * (Both are Integer, String, or Buffer), and we can now perform the
+ * concatenation.
+ */
+
+ /*
+ * There are three cases to handle:
+ *
+ * 1) Two Integers concatenated to produce a new Buffer
+ * 2) Two Strings concatenated to produce a new String
+ * 3) Two Buffers concatenated to produce a new Buffer
+ */
+ switch (ACPI_GET_OBJECT_TYPE(operand0)) {
+ case ACPI_TYPE_INTEGER:
+
+ /* Result of two Integers is a Buffer */
+ /* Need enough buffer space for two integers */
+
+ return_desc = acpi_ut_create_buffer_object((acpi_size)
+ ACPI_MUL_2
+ (acpi_gbl_integer_byte_width));
+ if (!return_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ new_buf = (char *)return_desc->buffer.pointer;
+
+ /* Copy the first integer, LSB first */
+
+ ACPI_MEMCPY(new_buf, &operand0->integer.value,
+ acpi_gbl_integer_byte_width);
+
+ /* Copy the second integer (LSB first) after the first */
+
+ ACPI_MEMCPY(new_buf + acpi_gbl_integer_byte_width,
+ &local_operand1->integer.value,
+ acpi_gbl_integer_byte_width);
+ break;
+
+ case ACPI_TYPE_STRING:
+
+ /* Result of two Strings is a String */
+
+ return_desc = acpi_ut_create_string_object(((acpi_size)
+ operand0->string.
+ length +
+ local_operand1->
+ string.length));
+ if (!return_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ new_buf = return_desc->string.pointer;
+
+ /* Concatenate the strings */
+
+ ACPI_STRCPY(new_buf, operand0->string.pointer);
+ ACPI_STRCPY(new_buf + operand0->string.length,
+ local_operand1->string.pointer);
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ /* Result of two Buffers is a Buffer */
+
+ return_desc = acpi_ut_create_buffer_object(((acpi_size)
+ operand0->buffer.
+ length +
+ local_operand1->
+ buffer.length));
+ if (!return_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ new_buf = (char *)return_desc->buffer.pointer;
+
+ /* Concatenate the buffers */
+
+ ACPI_MEMCPY(new_buf, operand0->buffer.pointer,
+ operand0->buffer.length);
+ ACPI_MEMCPY(new_buf + operand0->buffer.length,
+ local_operand1->buffer.pointer,
+ local_operand1->buffer.length);
+ break;
+
+ default:
+
+ /* Invalid object type, should not happen here */
+
+ ACPI_ERROR((AE_INFO, "Invalid object type: %X",
+ ACPI_GET_OBJECT_TYPE(operand0)));
+ status = AE_AML_INTERNAL;
+ goto cleanup;
+ }
+
+ *actual_return_desc = return_desc;
+
+ cleanup:
+ if (local_operand1 != operand1) {
+ acpi_ut_remove_reference(local_operand1);
+ }
+ return_ACPI_STATUS(status);
+}
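Case 1 above (Integer + Integer) is the least intuitive of the three: the result is a Buffer holding both values back to back, least-significant byte first. A sketch assuming 8-byte AML integers and a little-endian host (the memcpy relies on that):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define INTEGER_BYTE_WIDTH 8    /* assumed 64-bit AML integers */

static void concat_integers(unsigned char out[2 * INTEGER_BYTE_WIDTH],
			    uint64_t value0, uint64_t value1)
{
	/* Little-endian host: a byte copy is already "LSB first" */
	memcpy(out, &value0, INTEGER_BYTE_WIDTH);
	memcpy(out + INTEGER_BYTE_WIDTH, &value1, INTEGER_BYTE_WIDTH);
}

int main(void)
{
	unsigned char buf[2 * INTEGER_BYTE_WIDTH];

	concat_integers(buf, 0x1122334455667788ULL, 0xAABBCCDDEEFF0011ULL);
	printf("buf[0]=0x%02X buf[8]=0x%02X\n", buf[0], buf[8]); /* 0x88 0x11 */
	return 0;
}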
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_do_math_op
+ *
+ * PARAMETERS: Opcode - AML opcode
+ * Integer0 - Integer operand #0
+ * Integer1 - Integer operand #1
+ *
+ * RETURN: Integer result of the operation
+ *
+ * DESCRIPTION: Execute a math AML opcode. The purpose of having all of the
+ * math functions here is to prevent a lot of pointer dereferencing
+ * to obtain the operands.
+ *
+ ******************************************************************************/
+
+acpi_integer
+acpi_ex_do_math_op(u16 opcode, acpi_integer integer0, acpi_integer integer1)
+{
+
+ ACPI_FUNCTION_ENTRY();
+
+ switch (opcode) {
+ case AML_ADD_OP: /* Add (Integer0, Integer1, Result) */
+
+ return (integer0 + integer1);
+
+ case AML_BIT_AND_OP: /* And (Integer0, Integer1, Result) */
+
+ return (integer0 & integer1);
+
+ case AML_BIT_NAND_OP: /* NAnd (Integer0, Integer1, Result) */
+
+ return (~(integer0 & integer1));
+
+ case AML_BIT_OR_OP: /* Or (Integer0, Integer1, Result) */
+
+ return (integer0 | integer1);
+
+ case AML_BIT_NOR_OP: /* NOr (Integer0, Integer1, Result) */
+
+ return (~(integer0 | integer1));
+
+ case AML_BIT_XOR_OP: /* XOr (Integer0, Integer1, Result) */
+
+ return (integer0 ^ integer1);
+
+ case AML_MULTIPLY_OP: /* Multiply (Integer0, Integer1, Result) */
+
+ return (integer0 * integer1);
+
+ case AML_SHIFT_LEFT_OP: /* shift_left (Operand, shift_count, Result) */
+
+ /*
+ * We need to check if the shiftcount is larger than the integer bit
+ * width since the behavior of this is not well-defined in the C language.
+ */
+ if (integer1 >= acpi_gbl_integer_bit_width) {
+ return (0);
+ }
+ return (integer0 << integer1);
+
+ case AML_SHIFT_RIGHT_OP: /* shift_right (Operand, shift_count, Result) */
+
+ /*
+ * We need to check if the shiftcount is larger than the integer bit
+ * width since the behavior of this is not well-defined in the C language.
+ */
+ if (integer1 >= acpi_gbl_integer_bit_width) {
+ return (0);
+ }
+ return (integer0 >> integer1);
+
+ case AML_SUBTRACT_OP: /* Subtract (Integer0, Integer1, Result) */
+
+ return (integer0 - integer1);
+
+ default:
+
+ return (0);
+ }
+}
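The shift guard above exists because C leaves shifts by the full operand width (or more) undefined, so the interpreter returns zero instead of relying on compiler-specific behavior. A minimal sketch assuming 64-bit integers:

#include <stdint.h>
#include <stdio.h>

#define INTEGER_BIT_WIDTH 64

static uint64_t aml_shift_left(uint64_t value, uint64_t shift_count)
{
	if (shift_count >= INTEGER_BIT_WIDTH)
		return 0;               /* avoid undefined C shift behavior */

	return value << shift_count;
}

int main(void)
{
	printf("0x%llX\n", (unsigned long long)aml_shift_left(1, 4));   /* 0x10 */
	printf("0x%llX\n", (unsigned long long)aml_shift_left(1, 64));  /* 0 */
	return 0;
}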
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_do_logical_numeric_op
+ *
+ * PARAMETERS: Opcode - AML opcode
+ * Integer0 - Integer operand #0
+ * Integer1 - Integer operand #1
+ * logical_result - TRUE/FALSE result of the operation
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Execute a logical "Numeric" AML opcode. For these Numeric
+ * operators (LAnd and LOr), both operands must be integers.
+ *
+ * Note: cleanest machine code seems to be produced by the code
+ * below, rather than using statements of the form:
+ * Result = (Integer0 && Integer1);
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_do_logical_numeric_op(u16 opcode,
+ acpi_integer integer0,
+ acpi_integer integer1, u8 * logical_result)
+{
+ acpi_status status = AE_OK;
+ u8 local_result = FALSE;
+
+ ACPI_FUNCTION_TRACE(ex_do_logical_numeric_op);
+
+ switch (opcode) {
+ case AML_LAND_OP: /* LAnd (Integer0, Integer1) */
+
+ if (integer0 && integer1) {
+ local_result = TRUE;
+ }
+ break;
+
+ case AML_LOR_OP: /* LOr (Integer0, Integer1) */
+
+ if (integer0 || integer1) {
+ local_result = TRUE;
+ }
+ break;
+
+ default:
+ status = AE_AML_INTERNAL;
+ break;
+ }
+
+ /* Return the logical result and status */
+
+ *logical_result = local_result;
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_do_logical_op
+ *
+ * PARAMETERS: Opcode - AML opcode
+ * Operand0 - operand #0
+ * Operand1 - operand #1
+ * logical_result - TRUE/FALSE result of the operation
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Execute a logical AML opcode. The purpose of having all of the
+ * functions here is to prevent a lot of pointer dereferencing
+ * to obtain the operands and to simplify the generation of the
+ * logical value. For the Numeric operators (LAnd and LOr), both
+ * operands must be integers. For the other logical operators,
+ * operands can be any combination of Integer/String/Buffer. The
+ * first operand determines the type to which the second operand
+ * will be converted.
+ *
+ * Note: cleanest machine code seems to be produced by the code
+ * below, rather than using statements of the form:
+ * Result = (Operand0 == Operand1);
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_do_logical_op(u16 opcode,
+ union acpi_operand_object *operand0,
+ union acpi_operand_object *operand1, u8 * logical_result)
+{
+ union acpi_operand_object *local_operand1 = operand1;
+ acpi_integer integer0;
+ acpi_integer integer1;
+ u32 length0;
+ u32 length1;
+ acpi_status status = AE_OK;
+ u8 local_result = FALSE;
+ int compare;
+
+ ACPI_FUNCTION_TRACE(ex_do_logical_op);
+
+ /*
+	 * Convert the second operand if necessary.  The first operand
+	 * determines the type of the second operand (see the Data Types
+	 * section of the ACPI 3.0+ specification).  Both object types are
+ * guaranteed to be either Integer/String/Buffer by the operand
+ * resolution mechanism.
+ */
+ switch (ACPI_GET_OBJECT_TYPE(operand0)) {
+ case ACPI_TYPE_INTEGER:
+ status =
+ acpi_ex_convert_to_integer(operand1, &local_operand1, 16);
+ break;
+
+ case ACPI_TYPE_STRING:
+ status = acpi_ex_convert_to_string(operand1, &local_operand1,
+ ACPI_IMPLICIT_CONVERT_HEX);
+ break;
+
+ case ACPI_TYPE_BUFFER:
+ status = acpi_ex_convert_to_buffer(operand1, &local_operand1);
+ break;
+
+ default:
+ status = AE_AML_INTERNAL;
+ break;
+ }
+
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ /*
+ * Two cases: 1) Both Integers, 2) Both Strings or Buffers
+ */
+ if (ACPI_GET_OBJECT_TYPE(operand0) == ACPI_TYPE_INTEGER) {
+ /*
+ * 1) Both operands are of type integer
+ * Note: local_operand1 may have changed above
+ */
+ integer0 = operand0->integer.value;
+ integer1 = local_operand1->integer.value;
+
+ switch (opcode) {
+ case AML_LEQUAL_OP: /* LEqual (Operand0, Operand1) */
+
+ if (integer0 == integer1) {
+ local_result = TRUE;
+ }
+ break;
+
+ case AML_LGREATER_OP: /* LGreater (Operand0, Operand1) */
+
+ if (integer0 > integer1) {
+ local_result = TRUE;
+ }
+ break;
+
+ case AML_LLESS_OP: /* LLess (Operand0, Operand1) */
+
+ if (integer0 < integer1) {
+ local_result = TRUE;
+ }
+ break;
+
+ default:
+ status = AE_AML_INTERNAL;
+ break;
+ }
+ } else {
+ /*
+ * 2) Both operands are Strings or both are Buffers
+ * Note: Code below takes advantage of common Buffer/String
+ * object fields. local_operand1 may have changed above. Use
+ * memcmp to handle nulls in buffers.
+ */
+ length0 = operand0->buffer.length;
+ length1 = local_operand1->buffer.length;
+
+ /* Lexicographic compare: compare the data bytes */
+
+ compare = ACPI_MEMCMP(operand0->buffer.pointer,
+ local_operand1->buffer.pointer,
+ (length0 > length1) ? length1 : length0);
+
+ switch (opcode) {
+ case AML_LEQUAL_OP: /* LEqual (Operand0, Operand1) */
+
+ /* Length and all bytes must be equal */
+
+ if ((length0 == length1) && (compare == 0)) {
+
+ /* Length and all bytes match ==> TRUE */
+
+ local_result = TRUE;
+ }
+ break;
+
+ case AML_LGREATER_OP: /* LGreater (Operand0, Operand1) */
+
+ if (compare > 0) {
+ local_result = TRUE;
+ goto cleanup; /* TRUE */
+ }
+ if (compare < 0) {
+ goto cleanup; /* FALSE */
+ }
+
+ /* Bytes match (to shortest length), compare lengths */
+
+ if (length0 > length1) {
+ local_result = TRUE;
+ }
+ break;
+
+ case AML_LLESS_OP: /* LLess (Operand0, Operand1) */
+
+ if (compare > 0) {
+ goto cleanup; /* FALSE */
+ }
+ if (compare < 0) {
+ local_result = TRUE;
+ goto cleanup; /* TRUE */
+ }
+
+ /* Bytes match (to shortest length), compare lengths */
+
+ if (length0 < length1) {
+ local_result = TRUE;
+ }
+ break;
+
+ default:
+ status = AE_AML_INTERNAL;
+ break;
+ }
+ }
+
+ cleanup:
+
+ /* New object was created if implicit conversion performed - delete */
+
+ if (local_operand1 != operand1) {
+ acpi_ut_remove_reference(local_operand1);
+ }
+
+ /* Return the logical result and status */
+
+ *logical_result = local_result;
+ return_ACPI_STATUS(status);
+}
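The String/Buffer branch above orders operands by comparing the common-length prefix with memcmp and then breaking ties on length. A self-contained sketch of that ordering rule (hypothetical helper name, not an ACPICA interface):

	#include <string.h>

	/* Returns <0, 0 or >0, like strcmp, using the rule applied above */
	static int aml_buffer_compare(const unsigned char *a, unsigned int len_a,
				      const unsigned char *b, unsigned int len_b)
	{
		unsigned int min_len = (len_a < len_b) ? len_a : len_b;
		int compare = memcmp(a, b, min_len);

		if (compare != 0) {
			return compare;			/* Prefix decides */
		}
		return (int)len_a - (int)len_b;		/* Shorter operand sorts first */
	}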
diff --git a/drivers/acpi/executer/exmutex.c b/drivers/acpi/executer/exmutex.c
new file mode 100644
index 0000000..a8bf3d7
--- /dev/null
+++ b/drivers/acpi/executer/exmutex.c
@@ -0,0 +1,473 @@
+/******************************************************************************
+ *
+ * Module Name: exmutex - ASL Mutex Acquire/Release functions
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acinterp.h>
+#include <acpi/acevents.h>
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exmutex")
+
+/* Local prototypes */
+static void
+acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
+ struct acpi_thread_state *thread);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_unlink_mutex
+ *
+ * PARAMETERS: obj_desc - The mutex to be unlinked
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Remove a mutex from the "AcquiredMutex" list
+ *
+ ******************************************************************************/
+
+void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
+{
+ struct acpi_thread_state *thread = obj_desc->mutex.owner_thread;
+
+ if (!thread) {
+ return;
+ }
+
+ /* Doubly linked list */
+
+ if (obj_desc->mutex.next) {
+ (obj_desc->mutex.next)->mutex.prev = obj_desc->mutex.prev;
+ }
+
+ if (obj_desc->mutex.prev) {
+ (obj_desc->mutex.prev)->mutex.next = obj_desc->mutex.next;
+ } else {
+ thread->acquired_mutex_list = obj_desc->mutex.next;
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_link_mutex
+ *
+ * PARAMETERS: obj_desc - The mutex to be linked
+ * Thread - Current executing thread object
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Add a mutex to the "AcquiredMutex" list for this walk
+ *
+ ******************************************************************************/
+
+static void
+acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
+ struct acpi_thread_state *thread)
+{
+ union acpi_operand_object *list_head;
+
+ list_head = thread->acquired_mutex_list;
+
+ /* This object will be the first object in the list */
+
+ obj_desc->mutex.prev = NULL;
+ obj_desc->mutex.next = list_head;
+
+ /* Update old first object to point back to this object */
+
+ if (list_head) {
+ list_head->mutex.prev = obj_desc;
+ }
+
+ /* Update list head */
+
+ thread->acquired_mutex_list = obj_desc;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_acquire_mutex_object
+ *
+ * PARAMETERS:  Timeout             - Timeout in milliseconds
+ *              obj_desc            - Mutex object
+ *              thread_id           - ID of the acquiring thread
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Acquire an AML mutex, low-level interface. Provides a common
+ * path that supports multiple acquires by the same thread.
+ *
+ * MUTEX: Interpreter must be locked
+ *
+ * NOTE: This interface is called from three places:
+ * 1) From acpi_ex_acquire_mutex, via an AML Acquire() operator
+ * 2) From acpi_ex_acquire_global_lock when an AML Field access requires the
+ * global lock
+ * 3) From the external interface, acpi_acquire_global_lock
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_acquire_mutex_object(u16 timeout,
+ union acpi_operand_object *obj_desc,
+ acpi_thread_id thread_id)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_acquire_mutex_object, obj_desc);
+
+ if (!obj_desc) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Support for multiple acquires by the owning thread */
+
+ if (obj_desc->mutex.thread_id == thread_id) {
+ /*
+ * The mutex is already owned by this thread, just increment the
+ * acquisition depth
+ */
+ obj_desc->mutex.acquisition_depth++;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Acquire the mutex, wait if necessary. Special case for Global Lock */
+
+ if (obj_desc == acpi_gbl_global_lock_mutex) {
+ status = acpi_ev_acquire_global_lock(timeout);
+ } else {
+ status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex,
+ timeout);
+ }
+
+ if (ACPI_FAILURE(status)) {
+
+		/* Includes failure from a timeout expiration */
+
+ return_ACPI_STATUS(status);
+ }
+
+ /* Acquired the mutex: update mutex object */
+
+ obj_desc->mutex.thread_id = thread_id;
+ obj_desc->mutex.acquisition_depth = 1;
+ obj_desc->mutex.original_sync_level = 0;
+ obj_desc->mutex.owner_thread = NULL; /* Used only for AML Acquire() */
+
+ return_ACPI_STATUS(AE_OK);
+}
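A hypothetical call sequence (timeout value and surrounding variables are illustrative) showing how acquisition_depth lets one thread nest Acquire/Release pairs without touching the OS mutex again:

	status = acpi_ex_acquire_mutex_object(100, obj_desc, thread_id); /* Depth 1, OS mutex taken */
	status = acpi_ex_acquire_mutex_object(100, obj_desc, thread_id); /* Same owner: depth 2, no wait */
	status = acpi_ex_release_mutex_object(obj_desc);                 /* Depth 1, still owned */
	status = acpi_ex_release_mutex_object(obj_desc);                 /* Depth 0, OS mutex released */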
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_acquire_mutex
+ *
+ * PARAMETERS: time_desc - Timeout integer
+ * obj_desc - Mutex object
+ * walk_state - Current method execution state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Acquire an AML mutex
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
+ union acpi_operand_object *obj_desc,
+ struct acpi_walk_state *walk_state)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_acquire_mutex, obj_desc);
+
+ if (!obj_desc) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Must have a valid thread ID */
+
+ if (!walk_state->thread) {
+ ACPI_ERROR((AE_INFO,
+ "Cannot acquire Mutex [%4.4s], null thread info",
+ acpi_ut_get_node_name(obj_desc->mutex.node)));
+ return_ACPI_STATUS(AE_AML_INTERNAL);
+ }
+
+ /*
+ * Current sync level must be less than or equal to the sync level of the
+ * mutex. This mechanism provides some deadlock prevention
+ */
+ if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {
+ ACPI_ERROR((AE_INFO,
+ "Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%d)",
+ acpi_ut_get_node_name(obj_desc->mutex.node),
+ walk_state->thread->current_sync_level));
+ return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
+ }
+
+ status = acpi_ex_acquire_mutex_object((u16) time_desc->integer.value,
+ obj_desc,
+ walk_state->thread->thread_id);
+ if (ACPI_SUCCESS(status) && obj_desc->mutex.acquisition_depth == 1) {
+
+ /* Save Thread object, original/current sync levels */
+
+ obj_desc->mutex.owner_thread = walk_state->thread;
+ obj_desc->mutex.original_sync_level =
+ walk_state->thread->current_sync_level;
+ walk_state->thread->current_sync_level =
+ obj_desc->mutex.sync_level;
+
+ /* Link the mutex to the current thread for force-unlock at method exit */
+
+ acpi_ex_link_mutex(obj_desc, walk_state->thread);
+ }
+
+ return_ACPI_STATUS(status);
+}
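The SyncLevel test above enforces that a thread acquires mutexes in non-decreasing level order, which is the deadlock-prevention rule from the ACPI specification. A minimal sketch of the predicate in isolation (hypothetical helper, not part of the patch):

	/* TRUE if a mutex of mutex_level may be acquired while at current_level */
	static int sync_level_allows_acquire(unsigned int current_level,
					     unsigned int mutex_level)
	{
		return current_level <= mutex_level;
	}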
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_release_mutex_object
+ *
+ * PARAMETERS: obj_desc - The object descriptor for this op
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Release a previously acquired Mutex, low level interface.
+ * Provides a common path that supports multiple releases (after
+ * previous multiple acquires) by the same thread.
+ *
+ * MUTEX: Interpreter must be locked
+ *
+ * NOTE: This interface is called from three places:
+ * 1) From acpi_ex_release_mutex, via an AML Release() operator
+ * 2) From acpi_ex_release_global_lock when an AML Field access requires the
+ * global lock
+ * 3) From the external interface, acpi_release_global_lock
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_release_mutex_object(union acpi_operand_object *obj_desc)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(ex_release_mutex_object);
+
+ if (obj_desc->mutex.acquisition_depth == 0) {
+		return_ACPI_STATUS(AE_NOT_ACQUIRED);
+ }
+
+ /* Match multiple Acquires with multiple Releases */
+
+ obj_desc->mutex.acquisition_depth--;
+ if (obj_desc->mutex.acquisition_depth != 0) {
+
+ /* Just decrement the depth and return */
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ if (obj_desc->mutex.owner_thread) {
+
+ /* Unlink the mutex from the owner's list */
+
+ acpi_ex_unlink_mutex(obj_desc);
+ obj_desc->mutex.owner_thread = NULL;
+ }
+
+ /* Release the mutex, special case for Global Lock */
+
+ if (obj_desc == acpi_gbl_global_lock_mutex) {
+ status = acpi_ev_release_global_lock();
+ } else {
+ acpi_os_release_mutex(obj_desc->mutex.os_mutex);
+ }
+
+ /* Clear mutex info */
+
+ obj_desc->mutex.thread_id = NULL;
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_release_mutex
+ *
+ * PARAMETERS: obj_desc - The object descriptor for this op
+ * walk_state - Current method execution state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Release a previously acquired Mutex.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
+ struct acpi_walk_state *walk_state)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(ex_release_mutex);
+
+ if (!obj_desc) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+	/* The mutex must have been previously acquired in order to release it */
+
+	if (!obj_desc->mutex.owner_thread) {
+		ACPI_ERROR((AE_INFO,
+			    "Cannot release Mutex [%4.4s], not acquired",
+			    acpi_ut_get_node_name(obj_desc->mutex.node)));
+		return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED);
+	}
+
+	/* Must have a valid thread ID (checked before it is dereferenced below) */
+
+	if (!walk_state->thread) {
+		ACPI_ERROR((AE_INFO,
+			    "Cannot release Mutex [%4.4s], null thread info",
+			    acpi_ut_get_node_name(obj_desc->mutex.node)));
+		return_ACPI_STATUS(AE_AML_INTERNAL);
+	}
+
+	/*
+	 * The Mutex is owned, but this thread must be the owner.
+	 * Special case for Global Lock, any thread can release
+	 */
+	if ((obj_desc->mutex.owner_thread->thread_id !=
+	     walk_state->thread->thread_id)
+	    && (obj_desc != acpi_gbl_global_lock_mutex)) {
+		ACPI_ERROR((AE_INFO,
+			    "Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX",
+			    (unsigned long)walk_state->thread->thread_id,
+			    acpi_ut_get_node_name(obj_desc->mutex.node),
+			    (unsigned long)obj_desc->mutex.owner_thread->
+			    thread_id));
+		return_ACPI_STATUS(AE_AML_NOT_OWNER);
+	}
+
+ /*
+ * The sync level of the mutex must be less than or equal to the current
+ * sync level
+ */
+ if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) {
+ ACPI_ERROR((AE_INFO,
+ "Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %d current %d",
+ acpi_ut_get_node_name(obj_desc->mutex.node),
+ obj_desc->mutex.sync_level,
+ walk_state->thread->current_sync_level));
+ return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
+ }
+
+ status = acpi_ex_release_mutex_object(obj_desc);
+
+ if (obj_desc->mutex.acquisition_depth == 0) {
+
+ /* Restore the original sync_level */
+
+ walk_state->thread->current_sync_level =
+ obj_desc->mutex.original_sync_level;
+ }
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_release_all_mutexes
+ *
+ * PARAMETERS: Thread - Current executing thread object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Release all mutexes held by this thread
+ *
+ * NOTE: This function is called as the thread is exiting the interpreter.
+ * Mutexes are not released when an individual control method is exited, but
+ * only when the parent thread actually exits the interpreter. This allows one
+ * method to acquire a mutex, and a different method to release it, as long as
+ * this is performed underneath a single parent control method.
+ *
+ ******************************************************************************/
+
+void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
+{
+ union acpi_operand_object *next = thread->acquired_mutex_list;
+ union acpi_operand_object *obj_desc;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* Traverse the list of owned mutexes, releasing each one */
+
+ while (next) {
+ obj_desc = next;
+ next = obj_desc->mutex.next;
+
+ obj_desc->mutex.prev = NULL;
+ obj_desc->mutex.next = NULL;
+ obj_desc->mutex.acquisition_depth = 0;
+
+ /* Release the mutex, special case for Global Lock */
+
+ if (obj_desc == acpi_gbl_global_lock_mutex) {
+
+ /* Ignore errors */
+
+ (void)acpi_ev_release_global_lock();
+ } else {
+ acpi_os_release_mutex(obj_desc->mutex.os_mutex);
+ }
+
+ /* Mark mutex unowned */
+
+ obj_desc->mutex.owner_thread = NULL;
+ obj_desc->mutex.thread_id = NULL;
+
+ /* Update Thread sync_level (Last mutex is the important one) */
+
+ thread->current_sync_level =
+ obj_desc->mutex.original_sync_level;
+ }
+}
diff --git a/drivers/acpi/executer/exnames.c b/drivers/acpi/executer/exnames.c
new file mode 100644
index 0000000..817e67b
--- /dev/null
+++ b/drivers/acpi/executer/exnames.c
@@ -0,0 +1,435 @@
+/******************************************************************************
+ *
+ * Module Name: exnames - interpreter/scanner name load/execute
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acinterp.h>
+#include <acpi/amlcode.h>
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exnames")
+
+/* Local prototypes */
+static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs);
+
+static acpi_status
+acpi_ex_name_segment(u8 ** in_aml_address, char *name_string);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_allocate_name_string
+ *
+ * PARAMETERS: prefix_count - Count of parent levels. Special cases:
+ * (-1)==root, 0==none
+ * num_name_segs - count of 4-character name segments
+ *
+ * RETURN: A pointer to the allocated string segment. This segment must
+ * be deleted by the caller.
+ *
+ * DESCRIPTION: Allocate a buffer for a name string. Ensure allocated name
+ * string is long enough, and set up prefix if any.
+ *
+ ******************************************************************************/
+
+static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
+{
+ char *temp_ptr;
+ char *name_string;
+ u32 size_needed;
+
+ ACPI_FUNCTION_TRACE(ex_allocate_name_string);
+
+ /*
+ * Allow room for all \ and ^ prefixes, all segments and a multi_name_prefix.
+ * Also, one byte for the null terminator.
+ * This may actually be somewhat longer than needed.
+ */
+ if (prefix_count == ACPI_UINT32_MAX) {
+
+ /* Special case for root */
+
+ size_needed = 1 + (ACPI_NAME_SIZE * num_name_segs) + 2 + 1;
+ } else {
+ size_needed =
+ prefix_count + (ACPI_NAME_SIZE * num_name_segs) + 2 + 1;
+ }
+
+ /*
+ * Allocate a buffer for the name.
+ * This buffer must be deleted by the caller!
+ */
+ name_string = ACPI_ALLOCATE(size_needed);
+ if (!name_string) {
+ ACPI_ERROR((AE_INFO,
+ "Could not allocate size %d", size_needed));
+ return_PTR(NULL);
+ }
+
+ temp_ptr = name_string;
+
+ /* Set up Root or Parent prefixes if needed */
+
+ if (prefix_count == ACPI_UINT32_MAX) {
+ *temp_ptr++ = AML_ROOT_PREFIX;
+ } else {
+ while (prefix_count--) {
+ *temp_ptr++ = AML_PARENT_PREFIX;
+ }
+ }
+
+ /* Set up Dual or Multi prefixes if needed */
+
+ if (num_name_segs > 2) {
+
+ /* Set up multi prefixes */
+
+ *temp_ptr++ = AML_MULTI_NAME_PREFIX_OP;
+ *temp_ptr++ = (char)num_name_segs;
+ } else if (2 == num_name_segs) {
+
+ /* Set up dual prefixes */
+
+ *temp_ptr++ = AML_DUAL_NAME_PREFIX;
+ }
+
+ /*
+ * Terminate string following prefixes. acpi_ex_name_segment() will
+ * append the segment(s)
+ */
+ *temp_ptr = 0;
+
+ return_PTR(name_string);
+}
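As a worked example of the sizing above (illustrative values, not from the patch): a namepath with two parent prefixes and two name segments, such as ^^ABCD.EFGH, gets a buffer of 2 + (4 * 2) + 2 + 1 = 13 bytes, covering the two '^' characters, eight name-segment bytes, room for a dual/multi-name prefix pair, and the null terminator; as the comment notes, this is slightly more than strictly required.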
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_name_segment
+ *
+ * PARAMETERS: in_aml_address - Pointer to the name in the AML code
+ * name_string - Where to return the name. The name is appended
+ * to any existing string to form a namepath
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Extract an ACPI name (4 bytes) from the AML byte stream
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
+{
+ char *aml_address = (void *)*in_aml_address;
+ acpi_status status = AE_OK;
+ u32 index;
+ char char_buf[5];
+
+ ACPI_FUNCTION_TRACE(ex_name_segment);
+
+ /*
+ * If first character is a digit, then we know that we aren't looking at a
+ * valid name segment
+ */
+ char_buf[0] = *aml_address;
+
+ if ('0' <= char_buf[0] && char_buf[0] <= '9') {
+ ACPI_ERROR((AE_INFO, "Invalid leading digit: %c", char_buf[0]));
+ return_ACPI_STATUS(AE_CTRL_PENDING);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Bytes from stream:\n"));
+
+ for (index = 0; (index < ACPI_NAME_SIZE)
+ && (acpi_ut_valid_acpi_char(*aml_address, 0)); index++) {
+ char_buf[index] = *aml_address++;
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "%c\n", char_buf[index]));
+ }
+
+ /* Valid name segment */
+
+ if (index == 4) {
+
+ /* Found 4 valid characters */
+
+ char_buf[4] = '\0';
+
+ if (name_string) {
+ ACPI_STRCAT(name_string, char_buf);
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "Appended to - %s\n", name_string));
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "No Name string - %s\n", char_buf));
+ }
+ } else if (index == 0) {
+ /*
+ * First character was not a valid name character,
+ * so we are looking at something other than a name.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Leading character is not alpha: %02Xh (not a name)\n",
+ char_buf[0]));
+ status = AE_CTRL_PENDING;
+ } else {
+ /*
+ * Segment started with one or more valid characters, but fewer than
+ * the required 4
+ */
+ status = AE_AML_BAD_NAME;
+ ACPI_ERROR((AE_INFO,
+ "Bad character %02x in name, at %p",
+ *aml_address, aml_address));
+ }
+
+ *in_aml_address = ACPI_CAST_PTR(u8, aml_address);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_get_name_string
+ *
+ * PARAMETERS: data_type - Object type to be associated with this
+ * name
+ * in_aml_address - Pointer to the namestring in the AML code
+ * out_name_string - Where the namestring is returned
+ * out_name_length - Length of the returned string
+ *
+ * RETURN: Status, namestring and length
+ *
+ * DESCRIPTION: Extract a full namepath from the AML byte stream,
+ * including any prefixes.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_get_name_string(acpi_object_type data_type,
+ u8 * in_aml_address,
+ char **out_name_string, u32 * out_name_length)
+{
+ acpi_status status = AE_OK;
+ u8 *aml_address = in_aml_address;
+ char *name_string = NULL;
+ u32 num_segments;
+ u32 prefix_count = 0;
+ u8 has_prefix = FALSE;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_get_name_string, aml_address);
+
+ if (ACPI_TYPE_LOCAL_REGION_FIELD == data_type ||
+ ACPI_TYPE_LOCAL_BANK_FIELD == data_type ||
+ ACPI_TYPE_LOCAL_INDEX_FIELD == data_type) {
+
+ /* Disallow prefixes for types associated with field_unit names */
+
+ name_string = acpi_ex_allocate_name_string(0, 1);
+ if (!name_string) {
+ status = AE_NO_MEMORY;
+ } else {
+ status =
+ acpi_ex_name_segment(&aml_address, name_string);
+ }
+ } else {
+ /*
+ * data_type is not a field name.
+ * Examine first character of name for root or parent prefix operators
+ */
+ switch (*aml_address) {
+ case AML_ROOT_PREFIX:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "RootPrefix(\\) at %p\n",
+ aml_address));
+
+ /*
+ * Remember that we have a root_prefix --
+ * see comment in acpi_ex_allocate_name_string()
+ */
+ aml_address++;
+ prefix_count = ACPI_UINT32_MAX;
+ has_prefix = TRUE;
+ break;
+
+ case AML_PARENT_PREFIX:
+
+ /* Increment past possibly multiple parent prefixes */
+
+ do {
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "ParentPrefix (^) at %p\n",
+ aml_address));
+
+ aml_address++;
+ prefix_count++;
+
+ } while (*aml_address == AML_PARENT_PREFIX);
+
+ has_prefix = TRUE;
+ break;
+
+ default:
+
+ /* Not a prefix character */
+
+ break;
+ }
+
+ /* Examine first character of name for name segment prefix operator */
+
+ switch (*aml_address) {
+ case AML_DUAL_NAME_PREFIX:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "DualNamePrefix at %p\n",
+ aml_address));
+
+ aml_address++;
+ name_string =
+ acpi_ex_allocate_name_string(prefix_count, 2);
+ if (!name_string) {
+ status = AE_NO_MEMORY;
+ break;
+ }
+
+ /* Indicate that we processed a prefix */
+
+ has_prefix = TRUE;
+
+ status =
+ acpi_ex_name_segment(&aml_address, name_string);
+ if (ACPI_SUCCESS(status)) {
+ status =
+ acpi_ex_name_segment(&aml_address,
+ name_string);
+ }
+ break;
+
+ case AML_MULTI_NAME_PREFIX_OP:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "MultiNamePrefix at %p\n",
+ aml_address));
+
+ /* Fetch count of segments remaining in name path */
+
+ aml_address++;
+ num_segments = *aml_address;
+
+ name_string =
+ acpi_ex_allocate_name_string(prefix_count,
+ num_segments);
+ if (!name_string) {
+ status = AE_NO_MEMORY;
+ break;
+ }
+
+ /* Indicate that we processed a prefix */
+
+ aml_address++;
+ has_prefix = TRUE;
+
+ while (num_segments &&
+ (status =
+ acpi_ex_name_segment(&aml_address,
+ name_string)) == AE_OK) {
+ num_segments--;
+ }
+
+ break;
+
+ case 0:
+
+ /* null_name valid as of 8-12-98 ASL/AML Grammar Update */
+
+ if (prefix_count == ACPI_UINT32_MAX) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "NameSeg is \"\\\" followed by NULL\n"));
+ }
+
+ /* Consume the NULL byte */
+
+ aml_address++;
+ name_string =
+ acpi_ex_allocate_name_string(prefix_count, 0);
+ if (!name_string) {
+ status = AE_NO_MEMORY;
+ break;
+ }
+
+ break;
+
+ default:
+
+ /* Name segment string */
+
+ name_string =
+ acpi_ex_allocate_name_string(prefix_count, 1);
+ if (!name_string) {
+ status = AE_NO_MEMORY;
+ break;
+ }
+
+ status =
+ acpi_ex_name_segment(&aml_address, name_string);
+ break;
+ }
+ }
+
+ if (AE_CTRL_PENDING == status && has_prefix) {
+
+ /* Ran out of segments after processing a prefix */
+
+ ACPI_ERROR((AE_INFO, "Malformed Name at %p", name_string));
+ status = AE_AML_BAD_NAME;
+ }
+
+ if (ACPI_FAILURE(status)) {
+ if (name_string) {
+ ACPI_FREE(name_string);
+ }
+ return_ACPI_STATUS(status);
+ }
+
+ *out_name_string = name_string;
+ *out_name_length = (u32) (aml_address - in_aml_address);
+
+ return_ACPI_STATUS(status);
+}
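For reference, a sketch of the AML byte layout that acpi_ex_get_name_string() and acpi_ex_name_segment() walk for a rooted multi-segment path (the example namepath is hypothetical; the opcode values come from the AML grammar):

	/* Encoding of \_SB.PCI0.LPCB as consumed by the code above */
	static const unsigned char example_namepath[] = {
		'\\',			/* AML_ROOT_PREFIX */
		0x2F,			/* AML_MULTI_NAME_PREFIX_OP */
		3,			/* Segment count */
		'_', 'S', 'B', '_',	/* Segments are exactly ACPI_NAME_SIZE (4) bytes */
		'P', 'C', 'I', '0',
		'L', 'P', 'C', 'B',
	};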
diff --git a/drivers/acpi/executer/exoparg1.c b/drivers/acpi/executer/exoparg1.c
new file mode 100644
index 0000000..f622f9e
--- /dev/null
+++ b/drivers/acpi/executer/exoparg1.c
@@ -0,0 +1,1049 @@
+/******************************************************************************
+ *
+ * Module Name: exoparg1 - AML execution - opcodes with 1 argument
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acparser.h>
+#include <acpi/acdispat.h>
+#include <acpi/acinterp.h>
+#include <acpi/amlcode.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exoparg1")
+
+/*!
+ * Naming convention for AML interpreter execution routines.
+ *
+ * The routines that begin execution of AML opcodes are named with a common
+ * convention based upon the number of arguments, the number of target operands,
+ * and whether or not a value is returned:
+ *
+ * AcpiExOpcode_xA_yT_zR
+ *
+ * Where:
+ *
+ * xA - ARGUMENTS: The number of arguments (input operands) that are
+ * required for this opcode type (0 through 6 args).
+ * yT - TARGETS: The number of targets (output operands) that are required
+ * for this opcode type (0, 1, or 2 targets).
+ * zR - RETURN VALUE: Indicates whether this opcode type returns a value
+ * as the function return (0 or 1).
+ *
+ * The AcpiExOpcode* functions are called via the Dispatcher component with
+ * fully resolved operands.
+!*/
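For example, under this convention Timer(), which takes no arguments, writes no target, and returns a value, is handled by acpi_ex_opcode_0A_0T_1R() immediately below, while Sleep(msec_time), with one argument, no target, and no return value, is dispatched to acpi_ex_opcode_1A_0T_0R() later in this file.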
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_opcode_0A_0T_1R
+ *
+ * PARAMETERS: walk_state - Current state (contains AML opcode)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Execute operator with no operands, one return value
+ *
+ ******************************************************************************/
+acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state)
+{
+ acpi_status status = AE_OK;
+ union acpi_operand_object *return_desc = NULL;
+
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_0A_0T_1R,
+ acpi_ps_get_opcode_name(walk_state->opcode));
+
+ /* Examine the AML opcode */
+
+ switch (walk_state->opcode) {
+ case AML_TIMER_OP: /* Timer () */
+
+ /* Create a return object of type Integer */
+
+ return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!return_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+ return_desc->integer.value = acpi_os_get_timer();
+ break;
+
+ default: /* Unknown opcode */
+
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ walk_state->opcode));
+ status = AE_AML_BAD_OPCODE;
+ break;
+ }
+
+ cleanup:
+
+ /* Delete return object on error */
+
+ if ((ACPI_FAILURE(status)) || walk_state->result_obj) {
+ acpi_ut_remove_reference(return_desc);
+ walk_state->result_obj = NULL;
+ } else {
+ /* Save the return value */
+
+ walk_state->result_obj = return_desc;
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_opcode_1A_0T_0R
+ *
+ * PARAMETERS: walk_state - Current state (contains AML opcode)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Execute Type 1 monadic operator with numeric operand on
+ * object stack
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_opcode_1A_0T_0R(struct acpi_walk_state *walk_state)
+{
+ union acpi_operand_object **operand = &walk_state->operands[0];
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_0T_0R,
+ acpi_ps_get_opcode_name(walk_state->opcode));
+
+ /* Examine the AML opcode */
+
+ switch (walk_state->opcode) {
+ case AML_RELEASE_OP: /* Release (mutex_object) */
+
+ status = acpi_ex_release_mutex(operand[0], walk_state);
+ break;
+
+ case AML_RESET_OP: /* Reset (event_object) */
+
+ status = acpi_ex_system_reset_event(operand[0]);
+ break;
+
+ case AML_SIGNAL_OP: /* Signal (event_object) */
+
+ status = acpi_ex_system_signal_event(operand[0]);
+ break;
+
+ case AML_SLEEP_OP: /* Sleep (msec_time) */
+
+ status = acpi_ex_system_do_suspend(operand[0]->integer.value);
+ break;
+
+ case AML_STALL_OP: /* Stall (usec_time) */
+
+ status =
+ acpi_ex_system_do_stall((u32) operand[0]->integer.value);
+ break;
+
+ case AML_UNLOAD_OP: /* Unload (Handle) */
+
+ status = acpi_ex_unload_table(operand[0]);
+ break;
+
+ default: /* Unknown opcode */
+
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ walk_state->opcode));
+ status = AE_AML_BAD_OPCODE;
+ break;
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_opcode_1A_1T_0R
+ *
+ * PARAMETERS: walk_state - Current state (contains AML opcode)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Execute opcode with one argument, one target, and no
+ * return value.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_opcode_1A_1T_0R(struct acpi_walk_state *walk_state)
+{
+ acpi_status status = AE_OK;
+ union acpi_operand_object **operand = &walk_state->operands[0];
+
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_1T_0R,
+ acpi_ps_get_opcode_name(walk_state->opcode));
+
+ /* Examine the AML opcode */
+
+ switch (walk_state->opcode) {
+ case AML_LOAD_OP:
+
+ status = acpi_ex_load_op(operand[0], operand[1], walk_state);
+ break;
+
+ default: /* Unknown opcode */
+
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ walk_state->opcode));
+ status = AE_AML_BAD_OPCODE;
+ goto cleanup;
+ }
+
+ cleanup:
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_opcode_1A_1T_1R
+ *
+ * PARAMETERS: walk_state - Current state (contains AML opcode)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Execute opcode with one argument, one target, and a
+ * return value.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
+{
+ acpi_status status = AE_OK;
+ union acpi_operand_object **operand = &walk_state->operands[0];
+ union acpi_operand_object *return_desc = NULL;
+ union acpi_operand_object *return_desc2 = NULL;
+ u32 temp32;
+ u32 i;
+ acpi_integer power_of_ten;
+ acpi_integer digit;
+
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_1T_1R,
+ acpi_ps_get_opcode_name(walk_state->opcode));
+
+ /* Examine the AML opcode */
+
+ switch (walk_state->opcode) {
+ case AML_BIT_NOT_OP:
+ case AML_FIND_SET_LEFT_BIT_OP:
+ case AML_FIND_SET_RIGHT_BIT_OP:
+ case AML_FROM_BCD_OP:
+ case AML_TO_BCD_OP:
+ case AML_COND_REF_OF_OP:
+
+ /* Create a return object of type Integer for these opcodes */
+
+ return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!return_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ switch (walk_state->opcode) {
+ case AML_BIT_NOT_OP: /* Not (Operand, Result) */
+
+ return_desc->integer.value = ~operand[0]->integer.value;
+ break;
+
+ case AML_FIND_SET_LEFT_BIT_OP: /* find_set_left_bit (Operand, Result) */
+
+ return_desc->integer.value = operand[0]->integer.value;
+
+ /*
+			 * The ACPI specification describes Integer type as a little
+			 * endian unsigned value, so this boundary condition is valid.
+ */
+ for (temp32 = 0; return_desc->integer.value &&
+ temp32 < ACPI_INTEGER_BIT_SIZE; ++temp32) {
+ return_desc->integer.value >>= 1;
+ }
+
+ return_desc->integer.value = temp32;
+ break;
+
+ case AML_FIND_SET_RIGHT_BIT_OP: /* find_set_right_bit (Operand, Result) */
+
+ return_desc->integer.value = operand[0]->integer.value;
+
+ /*
+			 * The ACPI specification describes Integer type as a little
+			 * endian unsigned value, so this boundary condition is valid.
+ */
+ for (temp32 = 0; return_desc->integer.value &&
+ temp32 < ACPI_INTEGER_BIT_SIZE; ++temp32) {
+ return_desc->integer.value <<= 1;
+ }
+
+			/* Bit position is one-based; subtract from 33 (32-bit) or 65 (64-bit) */
+
+ return_desc->integer.value =
+ temp32 ==
+ 0 ? 0 : (ACPI_INTEGER_BIT_SIZE + 1) - temp32;
+ break;
+
+ case AML_FROM_BCD_OP: /* from_bcd (BCDValue, Result) */
+
+ /*
+ * The 64-bit ACPI integer can hold 16 4-bit BCD characters
+ * (if table is 32-bit, integer can hold 8 BCD characters)
+ * Convert each 4-bit BCD value
+ */
+ power_of_ten = 1;
+ return_desc->integer.value = 0;
+ digit = operand[0]->integer.value;
+
+ /* Convert each BCD digit (each is one nybble wide) */
+
+ for (i = 0;
+ (i < acpi_gbl_integer_nybble_width) && (digit > 0);
+ i++) {
+
+ /* Get the least significant 4-bit BCD digit */
+
+ temp32 = ((u32) digit) & 0xF;
+
+ /* Check the range of the digit */
+
+ if (temp32 > 9) {
+ ACPI_ERROR((AE_INFO,
+ "BCD digit too large (not decimal): 0x%X",
+ temp32));
+
+ status = AE_AML_NUMERIC_OVERFLOW;
+ goto cleanup;
+ }
+
+ /* Sum the digit into the result with the current power of 10 */
+
+ return_desc->integer.value +=
+ (((acpi_integer) temp32) * power_of_ten);
+
+ /* Shift to next BCD digit */
+
+ digit >>= 4;
+
+ /* Next power of 10 */
+
+ power_of_ten *= 10;
+ }
+ break;
+
+ case AML_TO_BCD_OP: /* to_bcd (Operand, Result) */
+
+ return_desc->integer.value = 0;
+ digit = operand[0]->integer.value;
+
+ /* Each BCD digit is one nybble wide */
+
+ for (i = 0;
+ (i < acpi_gbl_integer_nybble_width) && (digit > 0);
+ i++) {
+ (void)acpi_ut_short_divide(digit, 10, &digit,
+ &temp32);
+
+ /*
+ * Insert the BCD digit that resides in the
+ * remainder from above
+ */
+ return_desc->integer.value |=
+ (((acpi_integer) temp32) << ACPI_MUL_4(i));
+ }
+
+ /* Overflow if there is any data left in Digit */
+
+ if (digit > 0) {
+ ACPI_ERROR((AE_INFO,
+ "Integer too large to convert to BCD: %8.8X%8.8X",
+ ACPI_FORMAT_UINT64(operand[0]->
+ integer.value)));
+ status = AE_AML_NUMERIC_OVERFLOW;
+ goto cleanup;
+ }
+ break;
+
+ case AML_COND_REF_OF_OP: /* cond_ref_of (source_object, Result) */
+
+ /*
+ * This op is a little strange because the internal return value is
+ * different than the return value stored in the result descriptor
+ * (There are really two return values)
+ */
+ if ((struct acpi_namespace_node *)operand[0] ==
+ acpi_gbl_root_node) {
+ /*
+ * This means that the object does not exist in the namespace,
+ * return FALSE
+ */
+ return_desc->integer.value = 0;
+ goto cleanup;
+ }
+
+ /* Get the object reference, store it, and remove our reference */
+
+ status = acpi_ex_get_object_reference(operand[0],
+ &return_desc2,
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ status =
+ acpi_ex_store(return_desc2, operand[1], walk_state);
+ acpi_ut_remove_reference(return_desc2);
+
+ /* The object exists in the namespace, return TRUE */
+
+ return_desc->integer.value = ACPI_INTEGER_MAX;
+ goto cleanup;
+
+ default:
+ /* No other opcodes get here */
+ break;
+ }
+ break;
+
+ case AML_STORE_OP: /* Store (Source, Target) */
+
+ /*
+ * A store operand is typically a number, string, buffer or lvalue
+ * Be careful about deleting the source object,
+ * since the object itself may have been stored.
+ */
+ status = acpi_ex_store(operand[0], operand[1], walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* It is possible that the Store already produced a return object */
+
+ if (!walk_state->result_obj) {
+ /*
+ * Normally, we would remove a reference on the Operand[0]
+ * parameter; But since it is being used as the internal return
+ * object (meaning we would normally increment it), the two
+ * cancel out, and we simply don't do anything.
+ */
+ walk_state->result_obj = operand[0];
+ walk_state->operands[0] = NULL; /* Prevent deletion */
+ }
+ return_ACPI_STATUS(status);
+
+ /*
+ * ACPI 2.0 Opcodes
+ */
+ case AML_COPY_OP: /* Copy (Source, Target) */
+
+ status =
+ acpi_ut_copy_iobject_to_iobject(operand[0], &return_desc,
+ walk_state);
+ break;
+
+ case AML_TO_DECSTRING_OP: /* to_decimal_string (Data, Result) */
+
+ status = acpi_ex_convert_to_string(operand[0], &return_desc,
+ ACPI_EXPLICIT_CONVERT_DECIMAL);
+ if (return_desc == operand[0]) {
+
+ /* No conversion performed, add ref to handle return value */
+ acpi_ut_add_reference(return_desc);
+ }
+ break;
+
+ case AML_TO_HEXSTRING_OP: /* to_hex_string (Data, Result) */
+
+ status = acpi_ex_convert_to_string(operand[0], &return_desc,
+ ACPI_EXPLICIT_CONVERT_HEX);
+ if (return_desc == operand[0]) {
+
+ /* No conversion performed, add ref to handle return value */
+ acpi_ut_add_reference(return_desc);
+ }
+ break;
+
+ case AML_TO_BUFFER_OP: /* to_buffer (Data, Result) */
+
+ status = acpi_ex_convert_to_buffer(operand[0], &return_desc);
+ if (return_desc == operand[0]) {
+
+ /* No conversion performed, add ref to handle return value */
+ acpi_ut_add_reference(return_desc);
+ }
+ break;
+
+ case AML_TO_INTEGER_OP: /* to_integer (Data, Result) */
+
+ status = acpi_ex_convert_to_integer(operand[0], &return_desc,
+ ACPI_ANY_BASE);
+ if (return_desc == operand[0]) {
+
+ /* No conversion performed, add ref to handle return value */
+ acpi_ut_add_reference(return_desc);
+ }
+ break;
+
+ case AML_SHIFT_LEFT_BIT_OP: /* shift_left_bit (Source, bit_num) */
+ case AML_SHIFT_RIGHT_BIT_OP: /* shift_right_bit (Source, bit_num) */
+
+ /* These are two obsolete opcodes */
+
+ ACPI_ERROR((AE_INFO,
+ "%s is obsolete and not implemented",
+ acpi_ps_get_opcode_name(walk_state->opcode)));
+ status = AE_SUPPORT;
+ goto cleanup;
+
+ default: /* Unknown opcode */
+
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ walk_state->opcode));
+ status = AE_AML_BAD_OPCODE;
+ goto cleanup;
+ }
+
+ if (ACPI_SUCCESS(status)) {
+
+ /* Store the return value computed above into the target object */
+
+ status = acpi_ex_store(return_desc, operand[1], walk_state);
+ }
+
+ cleanup:
+
+ /* Delete return object on error */
+
+ if (ACPI_FAILURE(status)) {
+ acpi_ut_remove_reference(return_desc);
+ }
+
+ /* Save return object on success */
+
+ else if (!walk_state->result_obj) {
+ walk_state->result_obj = return_desc;
+ }
+
+ return_ACPI_STATUS(status);
+}
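The FromBCD case above accumulates one decimal digit per nybble. A compact standalone sketch of the same loop (hypothetical helper name; it omits the digit-range check that the interpreter performs):

	#include <stdint.h>

	static uint64_t bcd_to_binary(uint64_t bcd)
	{
		uint64_t result = 0;
		uint64_t power_of_ten = 1;

		while (bcd) {
			result += (bcd & 0xF) * power_of_ten;	/* Least significant BCD digit */
			bcd >>= 4;
			power_of_ten *= 10;
		}
		return result;		/* e.g. bcd_to_binary(0x1234) == 1234 */
	}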
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_opcode_1A_0T_1R
+ *
+ * PARAMETERS: walk_state - Current state (contains AML opcode)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Execute opcode with one argument, no target, and a return value
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
+{
+ union acpi_operand_object **operand = &walk_state->operands[0];
+ union acpi_operand_object *temp_desc;
+ union acpi_operand_object *return_desc = NULL;
+ acpi_status status = AE_OK;
+ u32 type;
+ acpi_integer value;
+
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_0T_1R,
+ acpi_ps_get_opcode_name(walk_state->opcode));
+
+ /* Examine the AML opcode */
+
+ switch (walk_state->opcode) {
+ case AML_LNOT_OP: /* LNot (Operand) */
+
+ return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!return_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ /*
+ * Set result to ONES (TRUE) if Value == 0. Note:
+ * return_desc->Integer.Value is initially == 0 (FALSE) from above.
+ */
+ if (!operand[0]->integer.value) {
+ return_desc->integer.value = ACPI_INTEGER_MAX;
+ }
+ break;
+
+ case AML_DECREMENT_OP: /* Decrement (Operand) */
+ case AML_INCREMENT_OP: /* Increment (Operand) */
+
+ /*
+ * Create a new integer. Can't just get the base integer and
+ * increment it because it may be an Arg or Field.
+ */
+ return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!return_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ /*
+ * Since we are expecting a Reference operand, it can be either a
+ * NS Node or an internal object.
+ */
+ temp_desc = operand[0];
+ if (ACPI_GET_DESCRIPTOR_TYPE(temp_desc) ==
+ ACPI_DESC_TYPE_OPERAND) {
+
+ /* Internal reference object - prevent deletion */
+
+ acpi_ut_add_reference(temp_desc);
+ }
+
+ /*
+ * Convert the Reference operand to an Integer (This removes a
+ * reference on the Operand[0] object)
+ *
+ * NOTE: We use LNOT_OP here in order to force resolution of the
+ * reference operand to an actual integer.
+ */
+ status =
+ acpi_ex_resolve_operands(AML_LNOT_OP, &temp_desc,
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "While resolving operands for [%s]",
+ acpi_ps_get_opcode_name(walk_state->
+ opcode)));
+
+ goto cleanup;
+ }
+
+ /*
+ * temp_desc is now guaranteed to be an Integer object --
+ * Perform the actual increment or decrement
+ */
+ if (walk_state->opcode == AML_INCREMENT_OP) {
+ return_desc->integer.value =
+ temp_desc->integer.value + 1;
+ } else {
+ return_desc->integer.value =
+ temp_desc->integer.value - 1;
+ }
+
+ /* Finished with this Integer object */
+
+ acpi_ut_remove_reference(temp_desc);
+
+ /*
+ * Store the result back (indirectly) through the original
+ * Reference object
+ */
+ status = acpi_ex_store(return_desc, operand[0], walk_state);
+ break;
+
+ case AML_TYPE_OP: /* object_type (source_object) */
+
+ /*
+ * Note: The operand is not resolved at this point because we want to
+ * get the associated object, not its value. For example, we don't
+ * want to resolve a field_unit to its value, we want the actual
+ * field_unit object.
+ */
+
+ /* Get the type of the base object */
+
+ status =
+ acpi_ex_resolve_multiple(walk_state, operand[0], &type,
+ NULL);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ /* Allocate a descriptor to hold the type. */
+
+ return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!return_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ return_desc->integer.value = type;
+ break;
+
+ case AML_SIZE_OF_OP: /* size_of (source_object) */
+
+ /*
+ * Note: The operand is not resolved at this point because we want to
+ * get the associated object, not its value.
+ */
+
+ /* Get the base object */
+
+ status = acpi_ex_resolve_multiple(walk_state,
+ operand[0], &type,
+ &temp_desc);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ /*
+ * The type of the base object must be integer, buffer, string, or
+ * package. All others are not supported.
+ *
+		 * NOTE: Integer is not specifically supported by the ACPI spec,
+		 * but is supported implicitly via implicit operand conversion.
+		 * Rather than bother with conversion, we just use the integer
+		 * byte width global (4 or 8 bytes).
+ */
+ switch (type) {
+ case ACPI_TYPE_INTEGER:
+ value = acpi_gbl_integer_byte_width;
+ break;
+
+ case ACPI_TYPE_STRING:
+ value = temp_desc->string.length;
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ /* Buffer arguments may not be evaluated at this point */
+
+ status = acpi_ds_get_buffer_arguments(temp_desc);
+ value = temp_desc->buffer.length;
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+
+ /* Package arguments may not be evaluated at this point */
+
+ status = acpi_ds_get_package_arguments(temp_desc);
+ value = temp_desc->package.count;
+ break;
+
+ default:
+ ACPI_ERROR((AE_INFO,
+ "Operand must be Buffer/Integer/String/Package - found type %s",
+ acpi_ut_get_type_name(type)));
+ status = AE_AML_OPERAND_TYPE;
+ goto cleanup;
+ }
+
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ /*
+ * Now that we have the size of the object, create a result
+ * object to hold the value
+ */
+ return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!return_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ return_desc->integer.value = value;
+ break;
+
+ case AML_REF_OF_OP: /* ref_of (source_object) */
+
+ status =
+ acpi_ex_get_object_reference(operand[0], &return_desc,
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+ break;
+
+ case AML_DEREF_OF_OP: /* deref_of (obj_reference | String) */
+
+ /* Check for a method local or argument, or standalone String */
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(operand[0]) ==
+ ACPI_DESC_TYPE_NAMED) {
+ temp_desc =
+ acpi_ns_get_attached_object((struct
+ acpi_namespace_node *)
+ operand[0]);
+ if (temp_desc
+ &&
+ ((ACPI_GET_OBJECT_TYPE(temp_desc) ==
+ ACPI_TYPE_STRING)
+ || (ACPI_GET_OBJECT_TYPE(temp_desc) ==
+ ACPI_TYPE_LOCAL_REFERENCE))) {
+ operand[0] = temp_desc;
+ acpi_ut_add_reference(temp_desc);
+ } else {
+ status = AE_AML_OPERAND_TYPE;
+ goto cleanup;
+ }
+ } else {
+ switch (ACPI_GET_OBJECT_TYPE(operand[0])) {
+ case ACPI_TYPE_LOCAL_REFERENCE:
+ /*
+ * This is a deref_of (local_x | arg_x)
+ *
+ * Must resolve/dereference the local/arg reference first
+ */
+ switch (operand[0]->reference.class) {
+ case ACPI_REFCLASS_LOCAL:
+ case ACPI_REFCLASS_ARG:
+
+ /* Set Operand[0] to the value of the local/arg */
+
+ status =
+ acpi_ds_method_data_get_value
+ (operand[0]->reference.class,
+ operand[0]->reference.value,
+ walk_state, &temp_desc);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ /*
+ * Delete our reference to the input object and
+ * point to the object just retrieved
+ */
+ acpi_ut_remove_reference(operand[0]);
+ operand[0] = temp_desc;
+ break;
+
+ case ACPI_REFCLASS_REFOF:
+
+ /* Get the object to which the reference refers */
+
+ temp_desc =
+ operand[0]->reference.object;
+ acpi_ut_remove_reference(operand[0]);
+ operand[0] = temp_desc;
+ break;
+
+ default:
+
+ /* Must be an Index op - handled below */
+ break;
+ }
+ break;
+
+ case ACPI_TYPE_STRING:
+ break;
+
+ default:
+ status = AE_AML_OPERAND_TYPE;
+ goto cleanup;
+ }
+ }
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(operand[0]) !=
+ ACPI_DESC_TYPE_NAMED) {
+ if (ACPI_GET_OBJECT_TYPE(operand[0]) ==
+ ACPI_TYPE_STRING) {
+ /*
+ * This is a deref_of (String). The string is a reference
+ * to a named ACPI object.
+ *
+ * 1) Find the owning Node
+ * 2) Dereference the node to an actual object. Could be a
+ * Field, so we need to resolve the node to a value.
+ */
+ status =
+ acpi_ns_get_node(walk_state->scope_info->
+ scope.node,
+ operand[0]->string.pointer,
+ ACPI_NS_SEARCH_PARENT,
+ ACPI_CAST_INDIRECT_PTR
+ (struct
+ acpi_namespace_node,
+ &return_desc));
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ status =
+ acpi_ex_resolve_node_to_value
+ (ACPI_CAST_INDIRECT_PTR
+ (struct acpi_namespace_node, &return_desc),
+ walk_state);
+ goto cleanup;
+ }
+ }
+
+ /* Operand[0] may have changed from the code above */
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(operand[0]) ==
+ ACPI_DESC_TYPE_NAMED) {
+ /*
+ * This is a deref_of (object_reference)
+ * Get the actual object from the Node (This is the dereference).
+ * This case may only happen when a local_x or arg_x is
+ * dereferenced above.
+ */
+ return_desc = acpi_ns_get_attached_object((struct
+ acpi_namespace_node
+ *)
+ operand[0]);
+ acpi_ut_add_reference(return_desc);
+ } else {
+ /*
+ * This must be a reference object produced by either the
+ * Index() or ref_of() operator
+ */
+ switch (operand[0]->reference.class) {
+ case ACPI_REFCLASS_INDEX:
+
+ /*
+ * The target type for the Index operator must be
+ * either a Buffer or a Package
+ */
+ switch (operand[0]->reference.target_type) {
+ case ACPI_TYPE_BUFFER_FIELD:
+
+ temp_desc =
+ operand[0]->reference.object;
+
+ /*
+ * Create a new object that contains one element of the
+ * buffer -- the element pointed to by the index.
+ *
+ * NOTE: index into a buffer is NOT a pointer to a
+ * sub-buffer of the main buffer, it is only a pointer to a
+ * single element (byte) of the buffer!
+ */
+ return_desc =
+ acpi_ut_create_internal_object
+ (ACPI_TYPE_INTEGER);
+ if (!return_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ /*
+ * Since we are returning the value of the buffer at the
+ * indexed location, we don't need to add an additional
+ * reference to the buffer itself.
+ */
+ return_desc->integer.value =
+ temp_desc->buffer.
+ pointer[operand[0]->reference.
+ value];
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+
+ /*
+ * Return the referenced element of the package. We must
+ * add another reference to the referenced object, however.
+ */
+ return_desc =
+ *(operand[0]->reference.where);
+ if (return_desc) {
+ acpi_ut_add_reference
+ (return_desc);
+ }
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO,
+ "Unknown Index TargetType %X in reference object %p",
+ operand[0]->reference.
+ target_type, operand[0]));
+ status = AE_AML_OPERAND_TYPE;
+ goto cleanup;
+ }
+ break;
+
+ case ACPI_REFCLASS_REFOF:
+
+ return_desc = operand[0]->reference.object;
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(return_desc) ==
+ ACPI_DESC_TYPE_NAMED) {
+ return_desc =
+ acpi_ns_get_attached_object((struct
+ acpi_namespace_node
+ *)
+ return_desc);
+ }
+
+ /* Add another reference to the object! */
+
+ acpi_ut_add_reference(return_desc);
+ break;
+
+ default:
+ ACPI_ERROR((AE_INFO,
+ "Unknown class in reference(%p) - %2.2X",
+ operand[0],
+ operand[0]->reference.class));
+
+ status = AE_TYPE;
+ goto cleanup;
+ }
+ }
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ walk_state->opcode));
+ status = AE_AML_BAD_OPCODE;
+ goto cleanup;
+ }
+
+ cleanup:
+
+ /* Delete return object on error */
+
+ if (ACPI_FAILURE(status)) {
+ acpi_ut_remove_reference(return_desc);
+ }
+
+ /* Save return object on success */
+
+ else {
+ walk_state->result_obj = return_desc;
+ }
+
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/executer/exoparg2.c b/drivers/acpi/executer/exoparg2.c
new file mode 100644
index 0000000..368def5
--- /dev/null
+++ b/drivers/acpi/executer/exoparg2.c
@@ -0,0 +1,604 @@
+/******************************************************************************
+ *
+ * Module Name: exoparg2 - AML execution - opcodes with 2 arguments
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acparser.h>
+#include <acpi/acinterp.h>
+#include <acpi/acevents.h>
+#include <acpi/amlcode.h>
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exoparg2")
+
+/*!
+ * Naming convention for AML interpreter execution routines.
+ *
+ * The routines that begin execution of AML opcodes are named with a common
+ * convention based upon the number of arguments, the number of target operands,
+ * and whether or not a value is returned:
+ *
+ * AcpiExOpcode_xA_yT_zR
+ *
+ * Where:
+ *
+ * xA - ARGUMENTS: The number of arguments (input operands) that are
+ * required for this opcode type (1 through 6 args).
+ * yT - TARGETS: The number of targets (output operands) that are required
+ * for this opcode type (0, 1, or 2 targets).
+ * zR - RETURN VALUE: Indicates whether this opcode type returns a value
+ * as the function return (0 or 1).
+ *
+ * The AcpiExOpcode* functions are called via the Dispatcher component with
+ * fully resolved operands.
+!*/
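+/*
+ * Illustrative example of the convention above: acpi_ex_opcode_2A_1T_1R in
+ * this file handles two-argument opcodes such as Add and Concatenate, which
+ * take one target operand and return one value.
+ */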
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_opcode_2A_0T_0R
+ *
+ * PARAMETERS: walk_state - Current walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Execute opcode with two arguments, no target, and no return
+ * value.
+ *
+ * ALLOCATION: Deletes both operands
+ *
+ ******************************************************************************/
+acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
+{
+ union acpi_operand_object **operand = &walk_state->operands[0];
+ struct acpi_namespace_node *node;
+ u32 value;
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_0T_0R,
+ acpi_ps_get_opcode_name(walk_state->opcode));
+
+ /* Examine the opcode */
+
+ switch (walk_state->opcode) {
+ case AML_NOTIFY_OP: /* Notify (notify_object, notify_value) */
+
+ /* The first operand is a namespace node */
+
+ node = (struct acpi_namespace_node *)operand[0];
+
+ /* Second value is the notify value */
+
+ value = (u32) operand[1]->integer.value;
+
+ /* Are notifies allowed on this object? */
+
+ if (!acpi_ev_is_notify_object(node)) {
+ ACPI_ERROR((AE_INFO,
+ "Unexpected notify object type [%s]",
+ acpi_ut_get_type_name(node->type)));
+
+ status = AE_AML_OPERAND_TYPE;
+ break;
+ }
+#ifdef ACPI_GPE_NOTIFY_CHECK
+ /*
+ * GPE method wake/notify check. Here, we want to ensure that we
+ * don't receive any "DeviceWake" Notifies from a GPE _Lxx or _Exx
+ * GPE method during system runtime. If we do, the GPE is marked
+ * as "wake-only" and disabled.
+ *
+ * 1) Is the Notify() value == device_wake?
+ * 2) Is this a GPE deferred method? (An _Lxx or _Exx method)
+ * 3) Did the original GPE happen at system runtime?
+ * (versus during wake)
+ *
+ * If all three cases are true, this is a wake-only GPE that should
+ * be disabled at runtime.
+ */
+ if (value == 2) { /* device_wake */
+ status =
+ acpi_ev_check_for_wake_only_gpe(walk_state->
+ gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+
+				/* AE_WAKE_ONLY_GPE is the only expected error; it means ignore this notify */
+
+				return_ACPI_STATUS(AE_OK);
+ }
+ }
+#endif
+
+ /*
+ * Dispatch the notify to the appropriate handler
+ * NOTE: the request is queued for execution after this method
+ * completes. The notify handlers are NOT invoked synchronously
+ * from this thread -- because handlers may in turn run other
+ * control methods.
+ */
+ status = acpi_ev_queue_notify_request(node, value);
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ walk_state->opcode));
+ status = AE_AML_BAD_OPCODE;
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_opcode_2A_2T_1R
+ *
+ * PARAMETERS: walk_state - Current walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Execute a dyadic operator (2 operands) with 2 output targets
+ * and one implicit return value.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state)
+{
+ union acpi_operand_object **operand = &walk_state->operands[0];
+ union acpi_operand_object *return_desc1 = NULL;
+ union acpi_operand_object *return_desc2 = NULL;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_2T_1R,
+ acpi_ps_get_opcode_name(walk_state->opcode));
+
+ /* Execute the opcode */
+
+ switch (walk_state->opcode) {
+ case AML_DIVIDE_OP:
+
+		/* Divide (Dividend, Divisor, remainder_result, quotient_result) */
+
+ return_desc1 =
+ acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!return_desc1) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ return_desc2 =
+ acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!return_desc2) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ /* Quotient to return_desc1, remainder to return_desc2 */
+
+ status = acpi_ut_divide(operand[0]->integer.value,
+ operand[1]->integer.value,
+ &return_desc1->integer.value,
+ &return_desc2->integer.value);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ walk_state->opcode));
+ status = AE_AML_BAD_OPCODE;
+ goto cleanup;
+ }
+
+ /* Store the results to the target reference operands */
+
+ status = acpi_ex_store(return_desc2, operand[2], walk_state);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ status = acpi_ex_store(return_desc1, operand[3], walk_state);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ cleanup:
+ /*
+	 * The remainder (return_desc2) is not saved as the implicit return value,
+	 * so remove our local reference to it. Only the quotient (return_desc1)
+	 * is passed back through walk_state->result_obj.
+ */
+ acpi_ut_remove_reference(return_desc2);
+
+ if (ACPI_FAILURE(status)) {
+
+ /* Delete the return object */
+
+ acpi_ut_remove_reference(return_desc1);
+ }
+
+	/* Save return object (the quotient) on success */
+
+ else {
+ walk_state->result_obj = return_desc1;
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_opcode_2A_1T_1R
+ *
+ * PARAMETERS: walk_state - Current walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Execute opcode with two arguments, one target, and a return
+ * value.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
+{
+ union acpi_operand_object **operand = &walk_state->operands[0];
+ union acpi_operand_object *return_desc = NULL;
+ acpi_integer index;
+ acpi_status status = AE_OK;
+ acpi_size length;
+
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_1T_1R,
+ acpi_ps_get_opcode_name(walk_state->opcode));
+
+ /* Execute the opcode */
+
+ if (walk_state->op_info->flags & AML_MATH) {
+
+ /* All simple math opcodes (add, etc.) */
+
+ return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!return_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ return_desc->integer.value =
+ acpi_ex_do_math_op(walk_state->opcode,
+ operand[0]->integer.value,
+ operand[1]->integer.value);
+ goto store_result_to_target;
+ }
+
+ switch (walk_state->opcode) {
+	case AML_MOD_OP:	/* Mod (Dividend, Divisor, remainder_result) (ACPI 2.0) */
+
+ return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!return_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ /* return_desc will contain the remainder */
+
+ status = acpi_ut_divide(operand[0]->integer.value,
+ operand[1]->integer.value,
+ NULL, &return_desc->integer.value);
+ break;
+
+ case AML_CONCAT_OP: /* Concatenate (Data1, Data2, Result) */
+
+ status = acpi_ex_do_concatenate(operand[0], operand[1],
+ &return_desc, walk_state);
+ break;
+
+ case AML_TO_STRING_OP: /* to_string (Buffer, Length, Result) (ACPI 2.0) */
+
+ /*
+ * Input object is guaranteed to be a buffer at this point (it may have
+ * been converted.) Copy the raw buffer data to a new object of
+ * type String.
+ */
+
+ /*
+ * Get the length of the new string. It is the smallest of:
+ * 1) Length of the input buffer
+ * 2) Max length as specified in the to_string operator
+ * 3) Length of input buffer up to a zero byte (null terminator)
+ *
+ * NOTE: A length of zero is ok, and will create a zero-length, null
+ * terminated string.
+ */
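+		/*
+		 * Illustrative example: for the input Buffer {0x41, 0x42, 0x43,
+		 * 0x00, 0x44} and a maximum length of 10, the resulting String
+		 * is "ABC" with a length of 3.
+		 */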
+ length = 0;
+ while ((length < operand[0]->buffer.length) &&
+ (length < operand[1]->integer.value) &&
+ (operand[0]->buffer.pointer[length])) {
+ length++;
+ }
+
+ /* Allocate a new string object */
+
+ return_desc = acpi_ut_create_string_object(length);
+ if (!return_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ /*
+ * Copy the raw buffer data with no transform.
+ * (NULL terminated already)
+ */
+ ACPI_MEMCPY(return_desc->string.pointer,
+ operand[0]->buffer.pointer, length);
+ break;
+
+ case AML_CONCAT_RES_OP:
+
+ /* concatenate_res_template (Buffer, Buffer, Result) (ACPI 2.0) */
+
+ status = acpi_ex_concat_template(operand[0], operand[1],
+ &return_desc, walk_state);
+ break;
+
+	case AML_INDEX_OP:	/* Index (Source, Index, Result) */
+
+ /* Create the internal return object */
+
+ return_desc =
+ acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_REFERENCE);
+ if (!return_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ /* Initialize the Index reference object */
+
+ index = operand[1]->integer.value;
+ return_desc->reference.value = (u32) index;
+ return_desc->reference.class = ACPI_REFCLASS_INDEX;
+
+ /*
+ * At this point, the Source operand is a String, Buffer, or Package.
+ * Verify that the index is within range.
+ */
+ switch (ACPI_GET_OBJECT_TYPE(operand[0])) {
+ case ACPI_TYPE_STRING:
+
+ if (index >= operand[0]->string.length) {
+ status = AE_AML_STRING_LIMIT;
+ }
+
+ return_desc->reference.target_type =
+ ACPI_TYPE_BUFFER_FIELD;
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ if (index >= operand[0]->buffer.length) {
+ status = AE_AML_BUFFER_LIMIT;
+ }
+
+ return_desc->reference.target_type =
+ ACPI_TYPE_BUFFER_FIELD;
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+
+ if (index >= operand[0]->package.count) {
+ status = AE_AML_PACKAGE_LIMIT;
+ }
+
+ return_desc->reference.target_type = ACPI_TYPE_PACKAGE;
+ return_desc->reference.where =
+ &operand[0]->package.elements[index];
+ break;
+
+ default:
+
+ status = AE_AML_INTERNAL;
+ goto cleanup;
+ }
+
+ /* Failure means that the Index was beyond the end of the object */
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Index (%X%8.8X) is beyond end of object",
+ ACPI_FORMAT_UINT64(index)));
+ goto cleanup;
+ }
+
+ /*
+ * Save the target object and add a reference to it for the life
+ * of the index
+ */
+ return_desc->reference.object = operand[0];
+ acpi_ut_add_reference(operand[0]);
+
+ /* Store the reference to the Target */
+
+ status = acpi_ex_store(return_desc, operand[2], walk_state);
+
+ /* Return the reference */
+
+ walk_state->result_obj = return_desc;
+ goto cleanup;
+
+ default:
+
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ walk_state->opcode));
+ status = AE_AML_BAD_OPCODE;
+ break;
+ }
+
+ store_result_to_target:
+
+ if (ACPI_SUCCESS(status)) {
+ /*
+ * Store the result of the operation (which is now in return_desc) into
+ * the Target descriptor.
+ */
+ status = acpi_ex_store(return_desc, operand[2], walk_state);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ if (!walk_state->result_obj) {
+ walk_state->result_obj = return_desc;
+ }
+ }
+
+ cleanup:
+
+ /* Delete return object on error */
+
+ if (ACPI_FAILURE(status)) {
+ acpi_ut_remove_reference(return_desc);
+ walk_state->result_obj = NULL;
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_opcode_2A_0T_1R
+ *
+ * PARAMETERS: walk_state - Current walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Execute opcode with 2 arguments, no target, and a return value
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
+{
+ union acpi_operand_object **operand = &walk_state->operands[0];
+ union acpi_operand_object *return_desc = NULL;
+ acpi_status status = AE_OK;
+ u8 logical_result = FALSE;
+
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_0T_1R,
+ acpi_ps_get_opcode_name(walk_state->opcode));
+
+ /* Create the internal return object */
+
+ return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!return_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ /* Execute the Opcode */
+
+ if (walk_state->op_info->flags & AML_LOGICAL_NUMERIC) {
+
+ /* logical_op (Operand0, Operand1) */
+
+ status = acpi_ex_do_logical_numeric_op(walk_state->opcode,
+ operand[0]->integer.
+ value,
+ operand[1]->integer.
+ value, &logical_result);
+ goto store_logical_result;
+ } else if (walk_state->op_info->flags & AML_LOGICAL) {
+
+ /* logical_op (Operand0, Operand1) */
+
+ status = acpi_ex_do_logical_op(walk_state->opcode, operand[0],
+ operand[1], &logical_result);
+ goto store_logical_result;
+ }
+
+ switch (walk_state->opcode) {
+ case AML_ACQUIRE_OP: /* Acquire (mutex_object, Timeout) */
+
+ status =
+ acpi_ex_acquire_mutex(operand[1], operand[0], walk_state);
+ if (status == AE_TIME) {
+ logical_result = TRUE; /* TRUE = Acquire timed out */
+ status = AE_OK;
+ }
+ break;
+
+ case AML_WAIT_OP: /* Wait (event_object, Timeout) */
+
+ status = acpi_ex_system_wait_event(operand[1], operand[0]);
+ if (status == AE_TIME) {
+ logical_result = TRUE; /* TRUE, Wait timed out */
+ status = AE_OK;
+ }
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ walk_state->opcode));
+ status = AE_AML_BAD_OPCODE;
+ goto cleanup;
+ }
+
+ store_logical_result:
+ /*
+	 * Set the return value according to logical_result: logical TRUE is
+	 * all ones; the default is FALSE (zero).
+ */
+ if (logical_result) {
+ return_desc->integer.value = ACPI_INTEGER_MAX;
+ }
+
+ cleanup:
+
+ /* Delete return object on error */
+
+ if (ACPI_FAILURE(status)) {
+ acpi_ut_remove_reference(return_desc);
+ }
+
+ /* Save return object on success */
+
+ else {
+ walk_state->result_obj = return_desc;
+ }
+
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/executer/exoparg3.c b/drivers/acpi/executer/exoparg3.c
new file mode 100644
index 0000000..9cb4197
--- /dev/null
+++ b/drivers/acpi/executer/exoparg3.c
@@ -0,0 +1,272 @@
+
+/******************************************************************************
+ *
+ * Module Name: exoparg3 - AML execution - opcodes with 3 arguments
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acinterp.h>
+#include <acpi/acparser.h>
+#include <acpi/amlcode.h>
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exoparg3")
+
+/*!
+ * Naming convention for AML interpreter execution routines.
+ *
+ * The routines that begin execution of AML opcodes are named with a common
+ * convention based upon the number of arguments, the number of target operands,
+ * and whether or not a value is returned:
+ *
+ * AcpiExOpcode_xA_yT_zR
+ *
+ * Where:
+ *
+ * xA - ARGUMENTS: The number of arguments (input operands) that are
+ * required for this opcode type (1 through 6 args).
+ * yT - TARGETS: The number of targets (output operands) that are required
+ * for this opcode type (0, 1, or 2 targets).
+ * zR - RETURN VALUE: Indicates whether this opcode type returns a value
+ * as the function return (0 or 1).
+ *
+ * The AcpiExOpcode* functions are called via the Dispatcher component with
+ * fully resolved operands.
+!*/
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_opcode_3A_0T_0R
+ *
+ * PARAMETERS: walk_state - Current walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Execute Triadic operator (3 operands)
+ *
+ ******************************************************************************/
+acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
+{
+ union acpi_operand_object **operand = &walk_state->operands[0];
+ struct acpi_signal_fatal_info *fatal;
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_3A_0T_0R,
+ acpi_ps_get_opcode_name(walk_state->opcode));
+
+ switch (walk_state->opcode) {
+ case AML_FATAL_OP: /* Fatal (fatal_type fatal_code fatal_arg) */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "FatalOp: Type %X Code %X Arg %X <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n",
+ (u32) operand[0]->integer.value,
+ (u32) operand[1]->integer.value,
+ (u32) operand[2]->integer.value));
+
+ fatal = ACPI_ALLOCATE(sizeof(struct acpi_signal_fatal_info));
+ if (fatal) {
+ fatal->type = (u32) operand[0]->integer.value;
+ fatal->code = (u32) operand[1]->integer.value;
+ fatal->argument = (u32) operand[2]->integer.value;
+ }
+
+ /* Always signal the OS! */
+
+ status = acpi_os_signal(ACPI_SIGNAL_FATAL, fatal);
+
+ /* Might return while OS is shutting down, just continue */
+
+ ACPI_FREE(fatal);
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ walk_state->opcode));
+ status = AE_AML_BAD_OPCODE;
+ goto cleanup;
+ }
+
+ cleanup:
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_opcode_3A_1T_1R
+ *
+ * PARAMETERS: walk_state - Current walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Execute Triadic operator (3 operands) with one target and a
+ *              return value
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
+{
+ union acpi_operand_object **operand = &walk_state->operands[0];
+ union acpi_operand_object *return_desc = NULL;
+ char *buffer = NULL;
+ acpi_status status = AE_OK;
+ acpi_integer index;
+ acpi_size length;
+
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_3A_1T_1R,
+ acpi_ps_get_opcode_name(walk_state->opcode));
+
+ switch (walk_state->opcode) {
+ case AML_MID_OP: /* Mid (Source[0], Index[1], Length[2], Result[3]) */
+
+ /*
+ * Create the return object. The Source operand is guaranteed to be
+ * either a String or a Buffer, so just use its type.
+ */
+ return_desc =
+ acpi_ut_create_internal_object(ACPI_GET_OBJECT_TYPE
+ (operand[0]));
+ if (!return_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ /* Get the Integer values from the objects */
+
+ index = operand[1]->integer.value;
+ length = (acpi_size) operand[2]->integer.value;
+
+ /*
+ * If the index is beyond the length of the String/Buffer, or if the
+ * requested length is zero, return a zero-length String/Buffer
+ */
+ if (index >= operand[0]->string.length) {
+ length = 0;
+ }
+
+ /* Truncate request if larger than the actual String/Buffer */
+
+ else if ((index + length) > operand[0]->string.length) {
+ length = (acpi_size) operand[0]->string.length -
+ (acpi_size) index;
+ }
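+
+		/*
+		 * Illustrative example: for a 10-byte Buffer, Mid (Buffer, 8, 5)
+		 * is truncated to length 10 - 8 = 2, while Mid (Buffer, 12, 5)
+		 * produces a zero-length Buffer.
+		 */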
+
+ /* Strings always have a sub-pointer, not so for buffers */
+
+ switch (ACPI_GET_OBJECT_TYPE(operand[0])) {
+ case ACPI_TYPE_STRING:
+
+ /* Always allocate a new buffer for the String */
+
+ buffer = ACPI_ALLOCATE_ZEROED((acpi_size) length + 1);
+ if (!buffer) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ /* If the requested length is zero, don't allocate a buffer */
+
+ if (length > 0) {
+
+ /* Allocate a new buffer for the Buffer */
+
+ buffer = ACPI_ALLOCATE_ZEROED(length);
+ if (!buffer) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+ }
+ break;
+
+ default: /* Should not happen */
+
+ status = AE_AML_OPERAND_TYPE;
+ goto cleanup;
+ }
+
+ if (buffer) {
+
+ /* We have a buffer, copy the portion requested */
+
+ ACPI_MEMCPY(buffer, operand[0]->string.pointer + index,
+ length);
+ }
+
+ /* Set the length of the new String/Buffer */
+
+ return_desc->string.pointer = buffer;
+ return_desc->string.length = (u32) length;
+
+ /* Mark buffer initialized */
+
+ return_desc->buffer.flags |= AOPOBJ_DATA_VALID;
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ walk_state->opcode));
+ status = AE_AML_BAD_OPCODE;
+ goto cleanup;
+ }
+
+ /* Store the result in the target */
+
+ status = acpi_ex_store(return_desc, operand[3], walk_state);
+
+ cleanup:
+
+ /* Delete return object on error */
+
+ if (ACPI_FAILURE(status) || walk_state->result_obj) {
+ acpi_ut_remove_reference(return_desc);
+ walk_state->result_obj = NULL;
+ }
+
+ /* Set the return object and exit */
+
+ else {
+ walk_state->result_obj = return_desc;
+ }
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/executer/exoparg6.c b/drivers/acpi/executer/exoparg6.c
new file mode 100644
index 0000000..67d4873
--- /dev/null
+++ b/drivers/acpi/executer/exoparg6.c
@@ -0,0 +1,340 @@
+
+/******************************************************************************
+ *
+ * Module Name: exoparg6 - AML execution - opcodes with 6 arguments
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acinterp.h>
+#include <acpi/acparser.h>
+#include <acpi/amlcode.h>
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exoparg6")
+
+/*!
+ * Naming convention for AML interpreter execution routines.
+ *
+ * The routines that begin execution of AML opcodes are named with a common
+ * convention based upon the number of arguments, the number of target operands,
+ * and whether or not a value is returned:
+ *
+ * AcpiExOpcode_xA_yT_zR
+ *
+ * Where:
+ *
+ * xA - ARGUMENTS: The number of arguments (input operands) that are
+ * required for this opcode type (1 through 6 args).
+ * yT - TARGETS: The number of targets (output operands) that are required
+ * for this opcode type (0, 1, or 2 targets).
+ * zR - RETURN VALUE: Indicates whether this opcode type returns a value
+ * as the function return (0 or 1).
+ *
+ * The AcpiExOpcode* functions are called via the Dispatcher component with
+ * fully resolved operands.
+!*/
+/* Local prototypes */
+static u8
+acpi_ex_do_match(u32 match_op,
+ union acpi_operand_object *package_obj,
+ union acpi_operand_object *match_obj);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_do_match
+ *
+ * PARAMETERS: match_op - The AML match operand
+ * package_obj - Object from the target package
+ * match_obj - Object to be matched
+ *
+ * RETURN: TRUE if the match is successful, FALSE otherwise
+ *
+ * DESCRIPTION: Implements the low-level match for the ASL Match operator.
+ * Package elements will be implicitly converted to the type of
+ * the match object (Integer/Buffer/String).
+ *
+ ******************************************************************************/
+
+static u8
+acpi_ex_do_match(u32 match_op,
+ union acpi_operand_object *package_obj,
+ union acpi_operand_object *match_obj)
+{
+ u8 logical_result = TRUE;
+ acpi_status status;
+
+ /*
+ * Note: Since the package_obj/match_obj ordering is opposite to that of
+ * the standard logical operators, we have to reverse them when we call
+ * do_logical_op in order to make the implicit conversion rules work
+ * correctly. However, this means we have to flip the entire equation
+ * also. A bit ugly perhaps, but overall, better than fussing the
+ * parameters around at runtime, over and over again.
+ *
+ * Below, P[i] refers to the package element, M refers to the Match object.
+ */
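+	/*
+	 * Illustrative example: for MATCH_MLE with P[i] = 5 and M = 7, the code
+	 * evaluates LLess (M, P[i]) = LLess (7, 5) = FALSE and then inverts the
+	 * result, yielding TRUE, which matches (P[i] <= M).
+	 */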
+ switch (match_op) {
+ case MATCH_MTR:
+
+ /* Always true */
+
+ break;
+
+ case MATCH_MEQ:
+
+ /*
+ * True if equal: (P[i] == M)
+ * Change to: (M == P[i])
+ */
+ status =
+ acpi_ex_do_logical_op(AML_LEQUAL_OP, match_obj, package_obj,
+ &logical_result);
+ if (ACPI_FAILURE(status)) {
+ return (FALSE);
+ }
+ break;
+
+ case MATCH_MLE:
+
+ /*
+ * True if less than or equal: (P[i] <= M) (P[i] not_greater than M)
+ * Change to: (M >= P[i]) (M not_less than P[i])
+ */
+ status =
+ acpi_ex_do_logical_op(AML_LLESS_OP, match_obj, package_obj,
+ &logical_result);
+ if (ACPI_FAILURE(status)) {
+ return (FALSE);
+ }
+ logical_result = (u8) ! logical_result;
+ break;
+
+ case MATCH_MLT:
+
+ /*
+ * True if less than: (P[i] < M)
+ * Change to: (M > P[i])
+ */
+ status =
+ acpi_ex_do_logical_op(AML_LGREATER_OP, match_obj,
+ package_obj, &logical_result);
+ if (ACPI_FAILURE(status)) {
+ return (FALSE);
+ }
+ break;
+
+ case MATCH_MGE:
+
+ /*
+ * True if greater than or equal: (P[i] >= M) (P[i] not_less than M)
+ * Change to: (M <= P[i]) (M not_greater than P[i])
+ */
+ status =
+ acpi_ex_do_logical_op(AML_LGREATER_OP, match_obj,
+ package_obj, &logical_result);
+ if (ACPI_FAILURE(status)) {
+ return (FALSE);
+ }
+ logical_result = (u8) ! logical_result;
+ break;
+
+ case MATCH_MGT:
+
+ /*
+ * True if greater than: (P[i] > M)
+ * Change to: (M < P[i])
+ */
+ status =
+ acpi_ex_do_logical_op(AML_LLESS_OP, match_obj, package_obj,
+ &logical_result);
+ if (ACPI_FAILURE(status)) {
+ return (FALSE);
+ }
+ break;
+
+ default:
+
+ /* Undefined */
+
+ return (FALSE);
+ }
+
+	return (logical_result);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_opcode_6A_0T_1R
+ *
+ * PARAMETERS: walk_state - Current walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Execute opcode with 6 arguments, no target, and a return value
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
+{
+ union acpi_operand_object **operand = &walk_state->operands[0];
+ union acpi_operand_object *return_desc = NULL;
+ acpi_status status = AE_OK;
+ acpi_integer index;
+ union acpi_operand_object *this_element;
+
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_6A_0T_1R,
+ acpi_ps_get_opcode_name(walk_state->opcode));
+
+ switch (walk_state->opcode) {
+ case AML_MATCH_OP:
+ /*
+ * Match (search_pkg[0], match_op1[1], match_obj1[2],
+ * match_op2[3], match_obj2[4], start_index[5])
+ */
+
+ /* Validate both Match Term Operators (MTR, MEQ, etc.) */
+
+ if ((operand[1]->integer.value > MAX_MATCH_OPERATOR) ||
+ (operand[3]->integer.value > MAX_MATCH_OPERATOR)) {
+ ACPI_ERROR((AE_INFO, "Match operator out of range"));
+ status = AE_AML_OPERAND_VALUE;
+ goto cleanup;
+ }
+
+ /* Get the package start_index, validate against the package length */
+
+ index = operand[5]->integer.value;
+ if (index >= operand[0]->package.count) {
+ ACPI_ERROR((AE_INFO,
+ "Index (%X%8.8X) beyond package end (%X)",
+ ACPI_FORMAT_UINT64(index),
+ operand[0]->package.count));
+ status = AE_AML_PACKAGE_LIMIT;
+ goto cleanup;
+ }
+
+ /* Create an integer for the return value */
+
+ return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!return_desc) {
+ status = AE_NO_MEMORY;
+			goto cleanup;
+		}
+
+ /* Default return value if no match found */
+
+ return_desc->integer.value = ACPI_INTEGER_MAX;
+
+ /*
+ * Examine each element until a match is found. Both match conditions
+ * must be satisfied for a match to occur. Within the loop,
+ * "continue" signifies that the current element does not match
+ * and the next should be examined.
+ *
+ * Upon finding a match, the loop will terminate via "break" at
+		 * the bottom. If it terminates "normally", return_desc->integer.value
+		 * will still be ACPI_INTEGER_MAX (Ones), its initial value, indicating
+		 * that no match was found.
+ */
+ for (; index < operand[0]->package.count; index++) {
+
+ /* Get the current package element */
+
+ this_element = operand[0]->package.elements[index];
+
+ /* Treat any uninitialized (NULL) elements as non-matching */
+
+ if (!this_element) {
+ continue;
+ }
+
+ /*
+ * Both match conditions must be satisfied. Execution of a continue
+ * (proceed to next iteration of enclosing for loop) signifies a
+ * non-match.
+ */
+ if (!acpi_ex_do_match((u32) operand[1]->integer.value,
+ this_element, operand[2])) {
+ continue;
+ }
+
+ if (!acpi_ex_do_match((u32) operand[3]->integer.value,
+ this_element, operand[4])) {
+ continue;
+ }
+
+ /* Match found: Index is the return value */
+
+ return_desc->integer.value = index;
+ break;
+ }
+ break;
+
+ case AML_LOAD_TABLE_OP:
+
+ status = acpi_ex_load_table_op(walk_state, &return_desc);
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ walk_state->opcode));
+ status = AE_AML_BAD_OPCODE;
+ goto cleanup;
+ }
+
+ cleanup:
+
+ /* Delete return object on error */
+
+ if (ACPI_FAILURE(status)) {
+ acpi_ut_remove_reference(return_desc);
+ }
+
+ /* Save return object on success */
+
+ else {
+ walk_state->result_obj = return_desc;
+ }
+
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/executer/exprep.c b/drivers/acpi/executer/exprep.c
new file mode 100644
index 0000000..5d438c3
--- /dev/null
+++ b/drivers/acpi/executer/exprep.c
@@ -0,0 +1,589 @@
+
+/******************************************************************************
+ *
+ * Module Name: exprep - ACPI AML (p-code) execution - field prep utilities
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acinterp.h>
+#include <acpi/amlcode.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exprep")
+
+/* Local prototypes */
+static u32
+acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
+ u8 field_flags, u32 * return_byte_alignment);
+
+#ifdef ACPI_UNDER_DEVELOPMENT
+
+static u32
+acpi_ex_generate_access(u32 field_bit_offset,
+ u32 field_bit_length, u32 region_length);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_generate_access
+ *
+ * PARAMETERS: field_bit_offset - Start of field within parent region/buffer
+ * field_bit_length - Length of field in bits
+ * region_length - Length of parent in bytes
+ *
+ * RETURN:      Optimal access width in bytes (1, 2, 4, or 8), or zero if
+ *              the field cannot be accessed within the region
+ *
+ * DESCRIPTION: Generate an optimal access width for fields defined with the
+ * any_acc keyword.
+ *
+ * NOTE: Need to have the region_length in order to check for boundary
+ * conditions (end-of-region). However, the region_length is a deferred
+ * operation. Therefore, to complete this implementation, the generation
+ * of this access width must be deferred until the region length has
+ * been evaluated.
+ *
+ ******************************************************************************/
+
+static u32
+acpi_ex_generate_access(u32 field_bit_offset,
+ u32 field_bit_length, u32 region_length)
+{
+ u32 field_byte_length;
+ u32 field_byte_offset;
+ u32 field_byte_end_offset;
+ u32 access_byte_width;
+ u32 field_start_offset;
+ u32 field_end_offset;
+ u32 minimum_access_width = 0xFFFFFFFF;
+ u32 minimum_accesses = 0xFFFFFFFF;
+ u32 accesses;
+
+ ACPI_FUNCTION_TRACE(ex_generate_access);
+
+ /* Round Field start offset and length to "minimal" byte boundaries */
+
+ field_byte_offset = ACPI_DIV_8(ACPI_ROUND_DOWN(field_bit_offset, 8));
+ field_byte_end_offset = ACPI_DIV_8(ACPI_ROUND_UP(field_bit_length +
+ field_bit_offset, 8));
+ field_byte_length = field_byte_end_offset - field_byte_offset;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "Bit length %d, Bit offset %d\n",
+ field_bit_length, field_bit_offset));
+
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "Byte Length %d, Byte Offset %d, End Offset %d\n",
+ field_byte_length, field_byte_offset,
+ field_byte_end_offset));
+
+ /*
+ * Iterative search for the maximum access width that is both aligned
+ * and does not go beyond the end of the region
+ *
+ * Start at byte_acc and work upwards to qword_acc max. (1,2,4,8 bytes)
+ */
+ for (access_byte_width = 1; access_byte_width <= 8;
+ access_byte_width <<= 1) {
+ /*
+ * 1) Round end offset up to next access boundary and make sure that
+ * this does not go beyond the end of the parent region.
+ * 2) When the Access width is greater than the field_byte_length, we
+ * are done. (This does not optimize for the perfectly aligned
+ * case yet).
+ */
+ if (ACPI_ROUND_UP(field_byte_end_offset, access_byte_width) <=
+ region_length) {
+ field_start_offset =
+ ACPI_ROUND_DOWN(field_byte_offset,
+ access_byte_width) /
+ access_byte_width;
+
+ field_end_offset =
+ ACPI_ROUND_UP((field_byte_length +
+ field_byte_offset),
+ access_byte_width) /
+ access_byte_width;
+
+ accesses = field_end_offset - field_start_offset;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "AccessWidth %d end is within region\n",
+ access_byte_width));
+
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "Field Start %d, Field End %d -- requires %d accesses\n",
+ field_start_offset, field_end_offset,
+ accesses));
+
+ /* Single access is optimal */
+
+ if (accesses <= 1) {
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "Entire field can be accessed with one operation of size %d\n",
+ access_byte_width));
+ return_VALUE(access_byte_width);
+ }
+
+ /*
+ * Fits in the region, but requires more than one read/write.
+ * try the next wider access on next iteration
+ */
+ if (accesses < minimum_accesses) {
+ minimum_accesses = accesses;
+ minimum_access_width = access_byte_width;
+ }
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "AccessWidth %d end is NOT within region\n",
+ access_byte_width));
+ if (access_byte_width == 1) {
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "Field goes beyond end-of-region!\n"));
+
+ /* Field does not fit in the region at all */
+
+ return_VALUE(0);
+ }
+
+ /*
+ * This width goes beyond the end-of-region, back off to
+ * previous access
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "Backing off to previous optimal access width of %d\n",
+ minimum_access_width));
+ return_VALUE(minimum_access_width);
+ }
+ }
+
+ /*
+ * Could not read/write field with one operation,
+ * just use max access width
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "Cannot access field in one operation, using width 8\n"));
+ return_VALUE(8);
+}
+#endif /* ACPI_UNDER_DEVELOPMENT */
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_decode_field_access
+ *
+ * PARAMETERS: obj_desc - Field object
+ * field_flags - Encoded fieldflags (contains access bits)
+ * return_byte_alignment - Where the byte alignment is returned
+ *
+ * RETURN: Field granularity (8, 16, 32 or 64) and
+ *              byte_alignment (1, 2, 4, or 8)
+ *
+ * DESCRIPTION: Decode the access_type bits of a field definition.
+ *
+ ******************************************************************************/
+
+static u32
+acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
+ u8 field_flags, u32 * return_byte_alignment)
+{
+ u32 access;
+ u32 byte_alignment;
+ u32 bit_length;
+
+ ACPI_FUNCTION_TRACE(ex_decode_field_access);
+
+ access = (field_flags & AML_FIELD_ACCESS_TYPE_MASK);
+
+ switch (access) {
+ case AML_FIELD_ACCESS_ANY:
+
+#ifdef ACPI_UNDER_DEVELOPMENT
+ byte_alignment =
+ acpi_ex_generate_access(obj_desc->common_field.
+ start_field_bit_offset,
+ obj_desc->common_field.bit_length,
+ 0xFFFFFFFF
+ /* Temp until we pass region_length as parameter */
+ );
+ bit_length = byte_alignment * 8;
+#endif
+
+ byte_alignment = 1;
+ bit_length = 8;
+ break;
+
+ case AML_FIELD_ACCESS_BYTE:
+ case AML_FIELD_ACCESS_BUFFER: /* ACPI 2.0 (SMBus Buffer) */
+ byte_alignment = 1;
+ bit_length = 8;
+ break;
+
+ case AML_FIELD_ACCESS_WORD:
+ byte_alignment = 2;
+ bit_length = 16;
+ break;
+
+ case AML_FIELD_ACCESS_DWORD:
+ byte_alignment = 4;
+ bit_length = 32;
+ break;
+
+ case AML_FIELD_ACCESS_QWORD: /* ACPI 2.0 */
+ byte_alignment = 8;
+ bit_length = 64;
+ break;
+
+ default:
+ /* Invalid field access type */
+
+ ACPI_ERROR((AE_INFO, "Unknown field access type %X", access));
+ return_UINT32(0);
+ }
+
+ if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_BUFFER_FIELD) {
+ /*
+ * buffer_field access can be on any byte boundary, so the
+ * byte_alignment is always 1 byte -- regardless of any byte_alignment
+ * implied by the field access type.
+ */
+ byte_alignment = 1;
+ }
+
+ *return_byte_alignment = byte_alignment;
+ return_UINT32(bit_length);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_prep_common_field_object
+ *
+ * PARAMETERS: obj_desc - The field object
+ * field_flags - Access, lock_rule, and update_rule.
+ * The format of a field_flag is described
+ * in the ACPI specification
+ * field_attribute - Special attributes (not used)
+ * field_bit_position - Field start position
+ * field_bit_length - Field length in number of bits
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initialize the areas of the field object that are common
+ * to the various types of fields. Note: This is very "sensitive"
+ * code because we are solving the general case for field
+ * alignment.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc,
+ u8 field_flags,
+ u8 field_attribute,
+ u32 field_bit_position, u32 field_bit_length)
+{
+ u32 access_bit_width;
+ u32 byte_alignment;
+ u32 nearest_byte_address;
+
+ ACPI_FUNCTION_TRACE(ex_prep_common_field_object);
+
+ /*
+ * Note: the structure being initialized is the
+ * ACPI_COMMON_FIELD_INFO; No structure fields outside of the common
+ * area are initialized by this procedure.
+ */
+ obj_desc->common_field.field_flags = field_flags;
+ obj_desc->common_field.attribute = field_attribute;
+ obj_desc->common_field.bit_length = field_bit_length;
+
+ /*
+ * Decode the access type so we can compute offsets. The access type gives
+ * two pieces of information - the width of each field access and the
+ * necessary byte_alignment (address granularity) of the access.
+ *
+ * For any_acc, the access_bit_width is the largest width that is both
+ * necessary and possible in an attempt to access the whole field in one
+ * I/O operation. However, for any_acc, the byte_alignment is always one
+ * byte.
+ *
+ * For all Buffer Fields, the byte_alignment is always one byte.
+ *
+ * For all other access types (Byte, Word, Dword, Qword), the Bitwidth is
+ * the same (equivalent) as the byte_alignment.
+ */
+ access_bit_width = acpi_ex_decode_field_access(obj_desc, field_flags,
+ &byte_alignment);
+ if (!access_bit_width) {
+ return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
+ }
+
+ /* Setup width (access granularity) fields */
+
+ obj_desc->common_field.access_byte_width = (u8)
+ ACPI_DIV_8(access_bit_width); /* 1, 2, 4, 8 */
+
+ obj_desc->common_field.access_bit_width = (u8) access_bit_width;
+
+ /*
+ * base_byte_offset is the address of the start of the field within the
+ * region. It is the byte address of the first *datum* (field-width data
+ * unit) of the field. (i.e., the first datum that contains at least the
+ * first *bit* of the field.)
+ *
+	 * Note: byte_alignment is always either equal to the access_byte_width
+	 * or 1 (byte-aligned, as for any_acc and Buffer Fields), and it defines
+	 * the addressing granularity of the parent region or buffer.
+ */
+ nearest_byte_address =
+ ACPI_ROUND_BITS_DOWN_TO_BYTES(field_bit_position);
+ obj_desc->common_field.base_byte_offset = (u32)
+ ACPI_ROUND_DOWN(nearest_byte_address, byte_alignment);
+
+ /*
+ * start_field_bit_offset is the offset of the first bit of the field within
+ * a field datum.
+ */
+ obj_desc->common_field.start_field_bit_offset = (u8)
+ (field_bit_position -
+ ACPI_MUL_8(obj_desc->common_field.base_byte_offset));
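+
+	/*
+	 * Illustrative example: for field_bit_position 35 and byte_alignment 2,
+	 * nearest_byte_address = 35 / 8 = 4, base_byte_offset =
+	 * ROUND_DOWN (4, 2) = 4, and start_field_bit_offset = 35 - (4 * 8) = 3.
+	 */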
+
+ /*
+ * Does the entire field fit within a single field access element? (datum)
+ * (i.e., without crossing a datum boundary)
+ */
+ if ((obj_desc->common_field.start_field_bit_offset +
+ field_bit_length) <= (u16) access_bit_width) {
+ obj_desc->common.flags |= AOPOBJ_SINGLE_DATUM;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_prep_field_value
+ *
+ * PARAMETERS: Info - Contains all field creation info
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Construct a union acpi_operand_object of type def_field and
+ * connect it to the parent Node.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
+{
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object *second_desc = NULL;
+ u32 type;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ex_prep_field_value);
+
+ /* Parameter validation */
+
+ if (info->field_type != ACPI_TYPE_LOCAL_INDEX_FIELD) {
+ if (!info->region_node) {
+ ACPI_ERROR((AE_INFO, "Null RegionNode"));
+ return_ACPI_STATUS(AE_AML_NO_OPERAND);
+ }
+
+ type = acpi_ns_get_type(info->region_node);
+ if (type != ACPI_TYPE_REGION) {
+ ACPI_ERROR((AE_INFO,
+ "Needed Region, found type %X (%s)",
+ type, acpi_ut_get_type_name(type)));
+
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+ }
+
+ /* Allocate a new field object */
+
+ obj_desc = acpi_ut_create_internal_object(info->field_type);
+ if (!obj_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Initialize areas of the object that are common to all fields */
+
+ obj_desc->common_field.node = info->field_node;
+ status = acpi_ex_prep_common_field_object(obj_desc, info->field_flags,
+ info->attribute,
+ info->field_bit_position,
+ info->field_bit_length);
+ if (ACPI_FAILURE(status)) {
+ acpi_ut_delete_object_desc(obj_desc);
+ return_ACPI_STATUS(status);
+ }
+
+ /* Initialize areas of the object that are specific to the field type */
+
+ switch (info->field_type) {
+ case ACPI_TYPE_LOCAL_REGION_FIELD:
+
+ obj_desc->field.region_obj =
+ acpi_ns_get_attached_object(info->region_node);
+
+ /* An additional reference for the container */
+
+ acpi_ut_add_reference(obj_desc->field.region_obj);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n",
+ obj_desc->field.start_field_bit_offset,
+ obj_desc->field.base_byte_offset,
+ obj_desc->field.access_byte_width,
+ obj_desc->field.region_obj));
+ break;
+
+ case ACPI_TYPE_LOCAL_BANK_FIELD:
+
+ obj_desc->bank_field.value = info->bank_value;
+ obj_desc->bank_field.region_obj =
+ acpi_ns_get_attached_object(info->region_node);
+ obj_desc->bank_field.bank_obj =
+ acpi_ns_get_attached_object(info->register_node);
+
+ /* An additional reference for the attached objects */
+
+ acpi_ut_add_reference(obj_desc->bank_field.region_obj);
+ acpi_ut_add_reference(obj_desc->bank_field.bank_obj);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "Bank Field: BitOff %X, Off %X, Gran %X, Region %p, BankReg %p\n",
+ obj_desc->bank_field.start_field_bit_offset,
+ obj_desc->bank_field.base_byte_offset,
+ obj_desc->field.access_byte_width,
+ obj_desc->bank_field.region_obj,
+ obj_desc->bank_field.bank_obj));
+
+ /*
+ * Remember location in AML stream of the field unit
+ * opcode and operands -- since the bank_value
+ * operands must be evaluated.
+ */
+ second_desc = obj_desc->common.next_object;
+ second_desc->extra.aml_start =
+ ACPI_CAST_PTR(union acpi_parse_object,
+ info->data_register_node)->named.data;
+ second_desc->extra.aml_length =
+ ACPI_CAST_PTR(union acpi_parse_object,
+ info->data_register_node)->named.length;
+
+ break;
+
+ case ACPI_TYPE_LOCAL_INDEX_FIELD:
+
+ /* Get the Index and Data registers */
+
+ obj_desc->index_field.index_obj =
+ acpi_ns_get_attached_object(info->register_node);
+ obj_desc->index_field.data_obj =
+ acpi_ns_get_attached_object(info->data_register_node);
+
+ if (!obj_desc->index_field.data_obj
+ || !obj_desc->index_field.index_obj) {
+ ACPI_ERROR((AE_INFO,
+ "Null Index Object during field prep"));
+ acpi_ut_delete_object_desc(obj_desc);
+ return_ACPI_STATUS(AE_AML_INTERNAL);
+ }
+
+ /* An additional reference for the attached objects */
+
+ acpi_ut_add_reference(obj_desc->index_field.data_obj);
+ acpi_ut_add_reference(obj_desc->index_field.index_obj);
+
+ /*
+ * April 2006: Changed to match MS behavior
+ *
+ * The value written to the Index register is the byte offset of the
+ * target field in units of the granularity of the index_field
+ *
+ * Previously, the value was calculated as an index in terms of the
+ * width of the Data register, as below:
+ *
+ * obj_desc->index_field.Value = (u32)
+ * (Info->field_bit_position / ACPI_MUL_8 (
+ * obj_desc->Field.access_byte_width));
+ *
+ * February 2006: Tried value as a byte offset:
+ * obj_desc->index_field.Value = (u32)
+ * ACPI_DIV_8 (Info->field_bit_position);
+ */
+ obj_desc->index_field.value =
+ (u32) ACPI_ROUND_DOWN(ACPI_DIV_8(info->field_bit_position),
+ obj_desc->index_field.
+ access_byte_width);
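+
+		/*
+		 * Illustrative example: for field_bit_position 72 and an
+		 * access_byte_width of 2, Value = ROUND_DOWN (72 / 8, 2) =
+		 * ROUND_DOWN (9, 2) = 8.
+		 */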
+
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "IndexField: BitOff %X, Off %X, Value %X, Gran %X, Index %p, Data %p\n",
+ obj_desc->index_field.start_field_bit_offset,
+ obj_desc->index_field.base_byte_offset,
+ obj_desc->index_field.value,
+ obj_desc->field.access_byte_width,
+ obj_desc->index_field.index_obj,
+ obj_desc->index_field.data_obj));
+ break;
+
+ default:
+ /* No other types should get here */
+ break;
+ }
+
+ /*
+ * Store the constructed descriptor (obj_desc) into the parent Node,
+ * preserving the current type of that named_obj.
+ */
+ status = acpi_ns_attach_object(info->field_node, obj_desc,
+ acpi_ns_get_type(info->field_node));
+
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "Set NamedObj %p [%4.4s], ObjDesc %p\n",
+ info->field_node,
+ acpi_ut_get_node_name(info->field_node), obj_desc));
+
+ /* Remove local reference to the object */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/executer/exregion.c b/drivers/acpi/executer/exregion.c
new file mode 100644
index 0000000..7a41c40
--- /dev/null
+++ b/drivers/acpi/executer/exregion.c
@@ -0,0 +1,498 @@
+
+/******************************************************************************
+ *
+ * Module Name: exregion - ACPI default op_region (address space) handlers
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acinterp.h>
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exregion")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_system_memory_space_handler
+ *
+ * PARAMETERS: Function - Read or Write operation
+ * Address - Where in the space to read or write
+ * bit_width - Field width in bits (8, 16, or 32)
+ * Value - Pointer to in or out value
+ * handler_context - Pointer to Handler's context
+ * region_context - Pointer to context specific to the
+ * accessed region
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Handler for the System Memory address space (Op Region)
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ex_system_memory_space_handler(u32 function,
+ acpi_physical_address address,
+ u32 bit_width,
+ acpi_integer * value,
+ void *handler_context, void *region_context)
+{
+ acpi_status status = AE_OK;
+ void *logical_addr_ptr = NULL;
+ struct acpi_mem_space_context *mem_info = region_context;
+ u32 length;
+ acpi_size window_size;
+#ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED
+ u32 remainder;
+#endif
+
+ ACPI_FUNCTION_TRACE(ex_system_memory_space_handler);
+
+ /* Validate and translate the bit width */
+
+ switch (bit_width) {
+ case 8:
+ length = 1;
+ break;
+
+ case 16:
+ length = 2;
+ break;
+
+ case 32:
+ length = 4;
+ break;
+
+ case 64:
+ length = 8;
+ break;
+
+ default:
+ ACPI_ERROR((AE_INFO, "Invalid SystemMemory width %d",
+ bit_width));
+ return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
+ }
+
+#ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED
+ /*
+	 * Hardware does not support non-aligned data transfers; we must verify
+	 * the request.
+ */
+ (void)acpi_ut_short_divide((acpi_integer) address, length, NULL,
+ &remainder);
+ if (remainder != 0) {
+ return_ACPI_STATUS(AE_AML_ALIGNMENT);
+ }
+#endif
+
+ /*
+ * Does the request fit into the cached memory mapping?
+ * Is 1) Address below the current mapping? OR
+ * 2) Address beyond the current mapping?
+ */
+ if ((address < mem_info->mapped_physical_address) ||
+ (((acpi_integer) address + length) > ((acpi_integer)
+ mem_info->
+ mapped_physical_address +
+ mem_info->mapped_length))) {
+ /*
+ * The request cannot be resolved by the current memory mapping;
+ * Delete the existing mapping and create a new one.
+ */
+ if (mem_info->mapped_length) {
+
+ /* Valid mapping, delete it */
+
+ acpi_os_unmap_memory(mem_info->mapped_logical_address,
+ mem_info->mapped_length);
+ }
+
+ /*
+ * Don't attempt to map memory beyond the end of the region, and
+ * constrain the maximum mapping size to something reasonable.
+ */
+ window_size = (acpi_size)
+ ((mem_info->address + mem_info->length) - address);
+
+ if (window_size > ACPI_SYSMEM_REGION_WINDOW_SIZE) {
+ window_size = ACPI_SYSMEM_REGION_WINDOW_SIZE;
+ }
+
+ /* Create a new mapping starting at the address given */
+
+ mem_info->mapped_logical_address =
+ acpi_os_map_memory((acpi_physical_address) address, window_size);
+ if (!mem_info->mapped_logical_address) {
+ ACPI_ERROR((AE_INFO,
+ "Could not map memory at %8.8X%8.8X, size %X",
+ ACPI_FORMAT_NATIVE_UINT(address),
+ (u32) window_size));
+ mem_info->mapped_length = 0;
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Save the physical address and mapping size */
+
+ mem_info->mapped_physical_address = address;
+ mem_info->mapped_length = window_size;
+ }
+
+ /*
+ * Generate a logical pointer corresponding to the address we want to
+ * access
+ */
+ logical_addr_ptr = mem_info->mapped_logical_address +
+ ((acpi_integer) address -
+ (acpi_integer) mem_info->mapped_physical_address);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "System-Memory (width %d) R/W %d Address=%8.8X%8.8X\n",
+ bit_width, function,
+ ACPI_FORMAT_NATIVE_UINT(address)));
+
+ /*
+ * Perform the memory read or write
+ *
+ * Note: For machines that do not support non-aligned transfers, the target
+ * address was checked for alignment above. We do not attempt to break the
+ * transfer up into smaller (byte-size) chunks because the AML specifically
+ * asked for a transfer width that the hardware may require.
+ */
+ switch (function) {
+ case ACPI_READ:
+
+ *value = 0;
+ switch (bit_width) {
+ case 8:
+ *value = (acpi_integer) ACPI_GET8(logical_addr_ptr);
+ break;
+
+ case 16:
+ *value = (acpi_integer) ACPI_GET16(logical_addr_ptr);
+ break;
+
+ case 32:
+ *value = (acpi_integer) ACPI_GET32(logical_addr_ptr);
+ break;
+
+ case 64:
+ *value = (acpi_integer) ACPI_GET64(logical_addr_ptr);
+ break;
+
+ default:
+ /* bit_width was already validated */
+ break;
+ }
+ break;
+
+ case ACPI_WRITE:
+
+ switch (bit_width) {
+ case 8:
+ ACPI_SET8(logical_addr_ptr) = (u8) * value;
+ break;
+
+ case 16:
+ ACPI_SET16(logical_addr_ptr) = (u16) * value;
+ break;
+
+ case 32:
+ ACPI_SET32(logical_addr_ptr) = (u32) * value;
+ break;
+
+ case 64:
+ ACPI_SET64(logical_addr_ptr) = (u64) * value;
+ break;
+
+ default:
+ /* bit_width was already validated */
+ break;
+ }
+ break;
+
+ default:
+ status = AE_BAD_PARAMETER;
+ break;
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_system_io_space_handler
+ *
+ * PARAMETERS: Function - Read or Write operation
+ * Address - Where in the space to read or write
+ * bit_width - Field width in bits (8, 16, or 32)
+ * Value - Pointer to in or out value
+ * handler_context - Pointer to Handler's context
+ * region_context - Pointer to context specific to the
+ * accessed region
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Handler for the System IO address space (Op Region)
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_system_io_space_handler(u32 function,
+ acpi_physical_address address,
+ u32 bit_width,
+ acpi_integer * value,
+ void *handler_context, void *region_context)
+{
+ acpi_status status = AE_OK;
+ u32 value32;
+
+ ACPI_FUNCTION_TRACE(ex_system_io_space_handler);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "System-IO (width %d) R/W %d Address=%8.8X%8.8X\n",
+ bit_width, function,
+ ACPI_FORMAT_NATIVE_UINT(address)));
+
+ /* Decode the function parameter */
+
+ switch (function) {
+ case ACPI_READ:
+
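+		/* acpi_os_read_port returns at most 32 bits; zero-extend into Value */
+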
+ status = acpi_os_read_port((acpi_io_address) address,
+ &value32, bit_width);
+ *value = value32;
+ break;
+
+ case ACPI_WRITE:
+
+ status = acpi_os_write_port((acpi_io_address) address,
+ (u32) * value, bit_width);
+ break;
+
+ default:
+ status = AE_BAD_PARAMETER;
+ break;
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_pci_config_space_handler
+ *
+ * PARAMETERS: Function - Read or Write operation
+ * Address - Where in the space to read or write
+ * bit_width - Field width in bits (8, 16, or 32)
+ * Value - Pointer to in or out value
+ * handler_context - Pointer to Handler's context
+ * region_context - Pointer to context specific to the
+ * accessed region
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Handler for the PCI Config address space (Op Region)
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_pci_config_space_handler(u32 function,
+ acpi_physical_address address,
+ u32 bit_width,
+ acpi_integer * value,
+ void *handler_context, void *region_context)
+{
+ acpi_status status = AE_OK;
+ struct acpi_pci_id *pci_id;
+ u16 pci_register;
+ u32 value32;
+
+ ACPI_FUNCTION_TRACE(ex_pci_config_space_handler);
+
+ /*
+ * The arguments to acpi_os(Read|Write)pci_configuration are:
+ *
+ * pci_segment is the PCI bus segment range 0-31
+ * pci_bus is the PCI bus number range 0-255
+ * pci_device is the PCI device number range 0-31
+ * pci_function is the PCI device function number
+ * pci_register is the Config space register range 0-255 bytes
+ *
+	 * Value - input value for write, output value for read
+ *
+ */
+ pci_id = (struct acpi_pci_id *)region_context;
+ pci_register = (u16) (u32) address;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Pci-Config %d (%d) Seg(%04x) Bus(%04x) Dev(%04x) Func(%04x) Reg(%04x)\n",
+ function, bit_width, pci_id->segment, pci_id->bus,
+ pci_id->device, pci_id->function, pci_register));
+
+ switch (function) {
+ case ACPI_READ:
+
+ status = acpi_os_read_pci_configuration(pci_id, pci_register,
+ &value32, bit_width);
+ *value = value32;
+ break;
+
+ case ACPI_WRITE:
+
+ status = acpi_os_write_pci_configuration(pci_id, pci_register,
+ *value, bit_width);
+ break;
+
+ default:
+
+ status = AE_BAD_PARAMETER;
+ break;
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_cmos_space_handler
+ *
+ * PARAMETERS: Function - Read or Write operation
+ * Address - Where in the space to read or write
+ * bit_width - Field width in bits (8, 16, or 32)
+ * Value - Pointer to in or out value
+ * handler_context - Pointer to Handler's context
+ * region_context - Pointer to context specific to the
+ * accessed region
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Handler for the CMOS address space (Op Region)
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_cmos_space_handler(u32 function,
+ acpi_physical_address address,
+ u32 bit_width,
+ acpi_integer * value,
+ void *handler_context, void *region_context)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(ex_cmos_space_handler);
+
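+	/* This handler is currently a stub; no CMOS access is performed */
+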
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_pci_bar_space_handler
+ *
+ * PARAMETERS: Function - Read or Write operation
+ * Address - Where in the space to read or write
+ * bit_width - Field width in bits (8, 16, or 32)
+ * Value - Pointer to in or out value
+ * handler_context - Pointer to Handler's context
+ * region_context - Pointer to context specific to the
+ * accessed region
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Handler for the PCI bar_target address space (Op Region)
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_pci_bar_space_handler(u32 function,
+ acpi_physical_address address,
+ u32 bit_width,
+ acpi_integer * value,
+ void *handler_context, void *region_context)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(ex_pci_bar_space_handler);
+
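+	/* This handler is currently a stub; no pci_bar_target access is performed */
+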
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_data_table_space_handler
+ *
+ * PARAMETERS: Function - Read or Write operation
+ * Address - Where in the space to read or write
+ * bit_width - Field width in bits (8, 16, or 32)
+ * Value - Pointer to in or out value
+ * handler_context - Pointer to Handler's context
+ * region_context - Pointer to context specific to the
+ * accessed region
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Handler for the Data Table address space (Op Region)
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_data_table_space_handler(u32 function,
+ acpi_physical_address address,
+ u32 bit_width,
+ acpi_integer * value,
+ void *handler_context, void *region_context)
+{
+ ACPI_FUNCTION_TRACE(ex_data_table_space_handler);
+
+ /* Perform the memory read or write */
+
+ switch (function) {
+ case ACPI_READ:
+
+ ACPI_MEMCPY(ACPI_CAST_PTR(char, value),
+ ACPI_PHYSADDR_TO_PTR(address),
+ ACPI_DIV_8(bit_width));
+ break;
+
+ case ACPI_WRITE:
+ default:
+
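+		/* Data Table regions are read-only; writes are not supported */
+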
+ return_ACPI_STATUS(AE_SUPPORT);
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
diff --git a/drivers/acpi/executer/exresnte.c b/drivers/acpi/executer/exresnte.c
new file mode 100644
index 0000000..423ad36
--- /dev/null
+++ b/drivers/acpi/executer/exresnte.c
@@ -0,0 +1,277 @@
+
+/******************************************************************************
+ *
+ * Module Name: exresnte - AML Interpreter object resolution
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acdispat.h>
+#include <acpi/acinterp.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exresnte")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_resolve_node_to_value
+ *
+ * PARAMETERS: object_ptr - Pointer to a location that contains
+ * a pointer to a NS node, and will receive a
+ * pointer to the resolved object.
+ * walk_state - Current state. Valid only if executing AML
+ * code. NULL if simply resolving an object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Resolve a Namespace node to a valued object
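+ *              For example, an Integer node yields its attached integer
+ *              object (with an additional reference added), while a Field
+ *              node causes the field to be read and the result returned
+ *              instead.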
+ *
+ * Note: for some of the data types, the pointer attached to the Node
+ * can be either a pointer to an actual internal object or a pointer into the
+ * AML stream itself. These types are currently:
+ *
+ * ACPI_TYPE_INTEGER
+ * ACPI_TYPE_STRING
+ * ACPI_TYPE_BUFFER
+ * ACPI_TYPE_MUTEX
+ * ACPI_TYPE_PACKAGE
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
+ struct acpi_walk_state *walk_state)
+{
+ acpi_status status = AE_OK;
+ union acpi_operand_object *source_desc;
+ union acpi_operand_object *obj_desc = NULL;
+ struct acpi_namespace_node *node;
+ acpi_object_type entry_type;
+
+ ACPI_FUNCTION_TRACE(ex_resolve_node_to_value);
+
+ /*
+ * The stack pointer points to a struct acpi_namespace_node (Node). Get the
+ * object that is attached to the Node.
+ */
+ node = *object_ptr;
+ source_desc = acpi_ns_get_attached_object(node);
+ entry_type = acpi_ns_get_type((acpi_handle) node);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Entry=%p SourceDesc=%p [%s]\n",
+ node, source_desc,
+ acpi_ut_get_type_name(entry_type)));
+
+ if ((entry_type == ACPI_TYPE_LOCAL_ALIAS) ||
+ (entry_type == ACPI_TYPE_LOCAL_METHOD_ALIAS)) {
+
+ /* There is always exactly one level of indirection */
+
+ node = ACPI_CAST_PTR(struct acpi_namespace_node, node->object);
+ source_desc = acpi_ns_get_attached_object(node);
+ entry_type = acpi_ns_get_type((acpi_handle) node);
+ *object_ptr = node;
+ }
+
+ /*
+ * Several object types require no further processing:
+ * 1) Device/Thermal objects don't have a "real" subobject, return the Node
+ * 2) Method locals and arguments have a pseudo-Node
+ * 3) 10/2007: Added method type to assist with Package construction.
+ */
+ if ((entry_type == ACPI_TYPE_DEVICE) ||
+ (entry_type == ACPI_TYPE_THERMAL) ||
+ (entry_type == ACPI_TYPE_METHOD) ||
+ (node->flags & (ANOBJ_METHOD_ARG | ANOBJ_METHOD_LOCAL))) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ if (!source_desc) {
+ ACPI_ERROR((AE_INFO, "No object attached to node %p", node));
+ return_ACPI_STATUS(AE_AML_NO_OPERAND);
+ }
+
+ /*
+ * Action is based on the type of the Node, which indicates the type
+ * of the attached object or pointer
+ */
+ switch (entry_type) {
+ case ACPI_TYPE_PACKAGE:
+
+ if (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_PACKAGE) {
+ ACPI_ERROR((AE_INFO, "Object not a Package, type %s",
+ acpi_ut_get_object_type_name(source_desc)));
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ status = acpi_ds_get_package_arguments(source_desc);
+ if (ACPI_SUCCESS(status)) {
+
+ /* Return an additional reference to the object */
+
+ obj_desc = source_desc;
+ acpi_ut_add_reference(obj_desc);
+ }
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ if (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_BUFFER) {
+ ACPI_ERROR((AE_INFO, "Object not a Buffer, type %s",
+ acpi_ut_get_object_type_name(source_desc)));
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ status = acpi_ds_get_buffer_arguments(source_desc);
+ if (ACPI_SUCCESS(status)) {
+
+ /* Return an additional reference to the object */
+
+ obj_desc = source_desc;
+ acpi_ut_add_reference(obj_desc);
+ }
+ break;
+
+ case ACPI_TYPE_STRING:
+
+ if (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_STRING) {
+ ACPI_ERROR((AE_INFO, "Object not a String, type %s",
+ acpi_ut_get_object_type_name(source_desc)));
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ /* Return an additional reference to the object */
+
+ obj_desc = source_desc;
+ acpi_ut_add_reference(obj_desc);
+ break;
+
+ case ACPI_TYPE_INTEGER:
+
+ if (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_INTEGER) {
+			ACPI_ERROR((AE_INFO, "Object not an Integer, type %s",
+ acpi_ut_get_object_type_name(source_desc)));
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ /* Return an additional reference to the object */
+
+ obj_desc = source_desc;
+ acpi_ut_add_reference(obj_desc);
+ break;
+
+ case ACPI_TYPE_BUFFER_FIELD:
+ case ACPI_TYPE_LOCAL_REGION_FIELD:
+ case ACPI_TYPE_LOCAL_BANK_FIELD:
+ case ACPI_TYPE_LOCAL_INDEX_FIELD:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "FieldRead Node=%p SourceDesc=%p Type=%X\n",
+ node, source_desc, entry_type));
+
+ status =
+ acpi_ex_read_data_from_field(walk_state, source_desc,
+ &obj_desc);
+ break;
+
+ /* For these objects, just return the object attached to the Node */
+
+ case ACPI_TYPE_MUTEX:
+ case ACPI_TYPE_POWER:
+ case ACPI_TYPE_PROCESSOR:
+ case ACPI_TYPE_EVENT:
+ case ACPI_TYPE_REGION:
+
+ /* Return an additional reference to the object */
+
+ obj_desc = source_desc;
+ acpi_ut_add_reference(obj_desc);
+ break;
+
+ /* TYPE_ANY is untyped, and thus there is no object associated with it */
+
+ case ACPI_TYPE_ANY:
+
+ ACPI_ERROR((AE_INFO,
+ "Untyped entry %p, no attached object!", node));
+
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE); /* Cannot be AE_TYPE */
+
+ case ACPI_TYPE_LOCAL_REFERENCE:
+
+ switch (source_desc->reference.class) {
+ case ACPI_REFCLASS_TABLE: /* This is a ddb_handle */
+ case ACPI_REFCLASS_REFOF:
+ case ACPI_REFCLASS_INDEX:
+
+ /* Return an additional reference to the object */
+
+ obj_desc = source_desc;
+ acpi_ut_add_reference(obj_desc);
+ break;
+
+ default:
+ /* No named references are allowed here */
+
+ ACPI_ERROR((AE_INFO,
+ "Unsupported Reference type %X",
+ source_desc->reference.class));
+
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+ break;
+
+ default:
+
+ /* Default case is for unknown types */
+
+ ACPI_ERROR((AE_INFO,
+ "Node %p - Unknown object type %X",
+ node, entry_type));
+
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+
+ } /* switch (entry_type) */
+
+ /* Return the object descriptor */
+
+ *object_ptr = (void *)obj_desc;
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/executer/exresolv.c b/drivers/acpi/executer/exresolv.c
new file mode 100644
index 0000000..89571b9
--- /dev/null
+++ b/drivers/acpi/executer/exresolv.c
@@ -0,0 +1,550 @@
+
+/******************************************************************************
+ *
+ * Module Name: exresolv - AML Interpreter object resolution
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/amlcode.h>
+#include <acpi/acdispat.h>
+#include <acpi/acinterp.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exresolv")
+
+/* Local prototypes */
+static acpi_status
+acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
+ struct acpi_walk_state *walk_state);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_resolve_to_value
+ *
+ * PARAMETERS: **stack_ptr - Points to entry on obj_stack, which can
+ * be either an (union acpi_operand_object *)
+ * or an acpi_handle.
+ * walk_state - Current method state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Convert Reference objects to values
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_resolve_to_value(union acpi_operand_object **stack_ptr,
+ struct acpi_walk_state *walk_state)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_resolve_to_value, stack_ptr);
+
+ if (!stack_ptr || !*stack_ptr) {
+ ACPI_ERROR((AE_INFO, "Internal - null pointer"));
+ return_ACPI_STATUS(AE_AML_NO_OPERAND);
+ }
+
+ /*
+ * The entity pointed to by the stack_ptr can be either
+ * 1) A valid union acpi_operand_object, or
+ * 2) A struct acpi_namespace_node (named_obj)
+ */
+ if (ACPI_GET_DESCRIPTOR_TYPE(*stack_ptr) == ACPI_DESC_TYPE_OPERAND) {
+ status = acpi_ex_resolve_object_to_value(stack_ptr, walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ if (!*stack_ptr) {
+ ACPI_ERROR((AE_INFO, "Internal - null pointer"));
+ return_ACPI_STATUS(AE_AML_NO_OPERAND);
+ }
+ }
+
+ /*
+ * Object on the stack may have changed if acpi_ex_resolve_object_to_value()
+ * was called (i.e., we can't use an _else_ here.)
+ */
+ if (ACPI_GET_DESCRIPTOR_TYPE(*stack_ptr) == ACPI_DESC_TYPE_NAMED) {
+ status =
+ acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR
+ (struct acpi_namespace_node,
+ stack_ptr), walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Resolved object %p\n", *stack_ptr));
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_resolve_object_to_value
+ *
+ * PARAMETERS: stack_ptr - Pointer to an internal object
+ * walk_state - Current method state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Retrieve the value from an internal object. The Reference type
+ * uses the associated AML opcode to determine the value.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
+ struct acpi_walk_state *walk_state)
+{
+ acpi_status status = AE_OK;
+ union acpi_operand_object *stack_desc;
+ union acpi_operand_object *obj_desc = NULL;
+ u8 ref_type;
+
+ ACPI_FUNCTION_TRACE(ex_resolve_object_to_value);
+
+ stack_desc = *stack_ptr;
+
+	/* This is a union acpi_operand_object */
+
+ switch (ACPI_GET_OBJECT_TYPE(stack_desc)) {
+ case ACPI_TYPE_LOCAL_REFERENCE:
+
+ ref_type = stack_desc->reference.class;
+
+ switch (ref_type) {
+ case ACPI_REFCLASS_LOCAL:
+ case ACPI_REFCLASS_ARG:
+
+ /*
+ * Get the local from the method's state info
+ * Note: this increments the local's object reference count
+ */
+ status = acpi_ds_method_data_get_value(ref_type,
+ stack_desc->
+ reference.value,
+ walk_state,
+ &obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Arg/Local %X] ValueObj is %p\n",
+ stack_desc->reference.value,
+ obj_desc));
+
+ /*
+ * Now we can delete the original Reference Object and
+ * replace it with the resolved value
+ */
+ acpi_ut_remove_reference(stack_desc);
+ *stack_ptr = obj_desc;
+ break;
+
+ case ACPI_REFCLASS_INDEX:
+
+ switch (stack_desc->reference.target_type) {
+ case ACPI_TYPE_BUFFER_FIELD:
+
+ /* Just return - do not dereference */
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+
+ /* If method call or copy_object - do not dereference */
+
+ if ((walk_state->opcode ==
+ AML_INT_METHODCALL_OP)
+ || (walk_state->opcode == AML_COPY_OP)) {
+ break;
+ }
+
+ /* Otherwise, dereference the package_index to a package element */
+
+ obj_desc = *stack_desc->reference.where;
+ if (obj_desc) {
+ /*
+ * Valid object descriptor, copy pointer to return value
+ * (i.e., dereference the package index)
+ * Delete the ref object, increment the returned object
+ */
+ acpi_ut_remove_reference(stack_desc);
+ acpi_ut_add_reference(obj_desc);
+ *stack_ptr = obj_desc;
+ } else {
+ /*
+ * A NULL object descriptor means an uninitialized element of
+ * the package, can't dereference it
+ */
+ ACPI_ERROR((AE_INFO,
+ "Attempt to dereference an Index to NULL package element Idx=%p",
+ stack_desc));
+ status = AE_AML_UNINITIALIZED_ELEMENT;
+ }
+ break;
+
+ default:
+
+ /* Invalid reference object */
+
+ ACPI_ERROR((AE_INFO,
+ "Unknown TargetType %X in Index/Reference object %p",
+ stack_desc->reference.target_type,
+ stack_desc));
+ status = AE_AML_INTERNAL;
+ break;
+ }
+ break;
+
+ case ACPI_REFCLASS_REFOF:
+ case ACPI_REFCLASS_DEBUG:
+ case ACPI_REFCLASS_TABLE:
+
+ /* Just leave the object as-is, do not dereference */
+
+ break;
+
+ case ACPI_REFCLASS_NAME: /* Reference to a named object */
+
+ /* Dereference the name */
+
+ if ((stack_desc->reference.node->type ==
+ ACPI_TYPE_DEVICE)
+ || (stack_desc->reference.node->type ==
+ ACPI_TYPE_THERMAL)) {
+
+ /* These node types do not have 'real' subobjects */
+
+ *stack_ptr = (void *)stack_desc->reference.node;
+ } else {
+ /* Get the object pointed to by the namespace node */
+
+ *stack_ptr =
+ (stack_desc->reference.node)->object;
+ acpi_ut_add_reference(*stack_ptr);
+ }
+
+ acpi_ut_remove_reference(stack_desc);
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO,
+ "Unknown Reference type %X in %p", ref_type,
+ stack_desc));
+ status = AE_AML_INTERNAL;
+ break;
+ }
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ status = acpi_ds_get_buffer_arguments(stack_desc);
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+
+ status = acpi_ds_get_package_arguments(stack_desc);
+ break;
+
+ case ACPI_TYPE_BUFFER_FIELD:
+ case ACPI_TYPE_LOCAL_REGION_FIELD:
+ case ACPI_TYPE_LOCAL_BANK_FIELD:
+ case ACPI_TYPE_LOCAL_INDEX_FIELD:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "FieldRead SourceDesc=%p Type=%X\n",
+ stack_desc,
+ ACPI_GET_OBJECT_TYPE(stack_desc)));
+
+ status =
+ acpi_ex_read_data_from_field(walk_state, stack_desc,
+ &obj_desc);
+
+ /* Remove a reference to the original operand, then override */
+
+ acpi_ut_remove_reference(*stack_ptr);
+ *stack_ptr = (void *)obj_desc;
+ break;
+
+ default:
+ break;
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_resolve_multiple
+ *
+ * PARAMETERS: walk_state - Current state (contains AML opcode)
+ * Operand - Starting point for resolution
+ * return_type - Where the object type is returned
+ * return_desc - Where the resolved object is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Return the base object and type. Traverse a reference list if
+ * necessary to get to the base object.
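+ *              For example, an Index reference into a Package is followed to
+ *              the referenced package element, which may itself be another
+ *              reference that must be resolved in turn.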
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
+ union acpi_operand_object *operand,
+ acpi_object_type * return_type,
+ union acpi_operand_object **return_desc)
+{
+ union acpi_operand_object *obj_desc = (void *)operand;
+ struct acpi_namespace_node *node;
+ acpi_object_type type;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_ex_resolve_multiple);
+
+ /* Operand can be either a namespace node or an operand descriptor */
+
+ switch (ACPI_GET_DESCRIPTOR_TYPE(obj_desc)) {
+ case ACPI_DESC_TYPE_OPERAND:
+ type = obj_desc->common.type;
+ break;
+
+ case ACPI_DESC_TYPE_NAMED:
+ type = ((struct acpi_namespace_node *)obj_desc)->type;
+ obj_desc =
+ acpi_ns_get_attached_object((struct acpi_namespace_node *)
+ obj_desc);
+
+ /* If we had an Alias node, use the attached object for type info */
+
+ if (type == ACPI_TYPE_LOCAL_ALIAS) {
+ type = ((struct acpi_namespace_node *)obj_desc)->type;
+ obj_desc =
+ acpi_ns_get_attached_object((struct
+ acpi_namespace_node *)
+ obj_desc);
+ }
+ break;
+
+ default:
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ /* If type is anything other than a reference, we are done */
+
+ if (type != ACPI_TYPE_LOCAL_REFERENCE) {
+ goto exit;
+ }
+
+ /*
+ * For reference objects created via the ref_of, Index, or Load/load_table
+ * operators, we need to get to the base object (as per the ACPI
+ * specification of the object_type and size_of operators). This means
+ * traversing the list of possibly many nested references.
+ */
+ while (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_REFERENCE) {
+ switch (obj_desc->reference.class) {
+ case ACPI_REFCLASS_REFOF:
+ case ACPI_REFCLASS_NAME:
+
+ /* Dereference the reference pointer */
+
+ if (obj_desc->reference.class == ACPI_REFCLASS_REFOF) {
+ node = obj_desc->reference.object;
+ } else { /* AML_INT_NAMEPATH_OP */
+
+ node = obj_desc->reference.node;
+ }
+
+ /* All "References" point to a NS node */
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(node) !=
+ ACPI_DESC_TYPE_NAMED) {
+ ACPI_ERROR((AE_INFO, "Not a NS node %p [%s]",
+ node,
+ acpi_ut_get_descriptor_name(node)));
+ return_ACPI_STATUS(AE_AML_INTERNAL);
+ }
+
+ /* Get the attached object */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+
+ /* No object, use the NS node type */
+
+ type = acpi_ns_get_type(node);
+ goto exit;
+ }
+
+ /* Check for circular references */
+
+ if (obj_desc == operand) {
+ return_ACPI_STATUS(AE_AML_CIRCULAR_REFERENCE);
+ }
+ break;
+
+ case ACPI_REFCLASS_INDEX:
+
+ /* Get the type of this reference (index into another object) */
+
+ type = obj_desc->reference.target_type;
+ if (type != ACPI_TYPE_PACKAGE) {
+ goto exit;
+ }
+
+ /*
+ * The main object is a package, we want to get the type
+ * of the individual package element that is referenced by
+ * the index.
+ *
+ * This could of course in turn be another reference object.
+ */
+ obj_desc = *(obj_desc->reference.where);
+ if (!obj_desc) {
+
+ /* NULL package elements are allowed */
+
+ type = 0; /* Uninitialized */
+ goto exit;
+ }
+ break;
+
+ case ACPI_REFCLASS_TABLE:
+
+ type = ACPI_TYPE_DDB_HANDLE;
+ goto exit;
+
+ case ACPI_REFCLASS_LOCAL:
+ case ACPI_REFCLASS_ARG:
+
+ if (return_desc) {
+ status =
+ acpi_ds_method_data_get_value(obj_desc->
+ reference.
+ class,
+ obj_desc->
+ reference.
+ value,
+ walk_state,
+ &obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ acpi_ut_remove_reference(obj_desc);
+ } else {
+ status =
+ acpi_ds_method_data_get_node(obj_desc->
+ reference.
+ class,
+ obj_desc->
+ reference.
+ value,
+ walk_state,
+ &node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+ type = ACPI_TYPE_ANY;
+ goto exit;
+ }
+ }
+ break;
+
+ case ACPI_REFCLASS_DEBUG:
+
+ /* The Debug Object is of type "DebugObject" */
+
+ type = ACPI_TYPE_DEBUG_OBJECT;
+ goto exit;
+
+ default:
+
+ ACPI_ERROR((AE_INFO,
+ "Unknown Reference Class %2.2X",
+ obj_desc->reference.class));
+ return_ACPI_STATUS(AE_AML_INTERNAL);
+ }
+ }
+
+ /*
+ * Now we are guaranteed to have an object that has not been created
+ * via the ref_of or Index operators.
+ */
+ type = ACPI_GET_OBJECT_TYPE(obj_desc);
+
+ exit:
+ /* Convert internal types to external types */
+
+ switch (type) {
+ case ACPI_TYPE_LOCAL_REGION_FIELD:
+ case ACPI_TYPE_LOCAL_BANK_FIELD:
+ case ACPI_TYPE_LOCAL_INDEX_FIELD:
+
+ type = ACPI_TYPE_FIELD_UNIT;
+ break;
+
+ case ACPI_TYPE_LOCAL_SCOPE:
+
+ /* Per ACPI Specification, Scope is untyped */
+
+ type = ACPI_TYPE_ANY;
+ break;
+
+ default:
+ /* No change to Type required */
+ break;
+ }
+
+ *return_type = type;
+ if (return_desc) {
+ *return_desc = obj_desc;
+ }
+ return_ACPI_STATUS(AE_OK);
+}
diff --git a/drivers/acpi/executer/exresop.c b/drivers/acpi/executer/exresop.c
new file mode 100644
index 0000000..0bb8259
--- /dev/null
+++ b/drivers/acpi/executer/exresop.c
@@ -0,0 +1,700 @@
+
+/******************************************************************************
+ *
+ * Module Name: exresop - AML Interpreter operand/object resolution
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/amlcode.h>
+#include <acpi/acparser.h>
+#include <acpi/acinterp.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exresop")
+
+/* Local prototypes */
+static acpi_status
+acpi_ex_check_object_type(acpi_object_type type_needed,
+ acpi_object_type this_type, void *object);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_check_object_type
+ *
+ * PARAMETERS:  type_needed         - Object type needed
+ *              this_type           - Actual object type
+ *              Object              - Object pointer
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Check required type against actual type
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ex_check_object_type(acpi_object_type type_needed,
+ acpi_object_type this_type, void *object)
+{
+ ACPI_FUNCTION_ENTRY();
+
+ if (type_needed == ACPI_TYPE_ANY) {
+
+ /* All types OK, so we don't perform any typechecks */
+
+ return (AE_OK);
+ }
+
+ if (type_needed == ACPI_TYPE_LOCAL_REFERENCE) {
+ /*
+ * Allow the AML "Constant" opcodes (Zero, One, etc.) to be reference
+ * objects and thus allow them to be targets. (As per the ACPI
+ * specification, a store to a constant is a noop.)
+ */
+ if ((this_type == ACPI_TYPE_INTEGER) &&
+ (((union acpi_operand_object *)object)->common.
+ flags & AOPOBJ_AML_CONSTANT)) {
+ return (AE_OK);
+ }
+ }
+
+ if (type_needed != this_type) {
+ ACPI_ERROR((AE_INFO,
+ "Needed type [%s], found [%s] %p",
+ acpi_ut_get_type_name(type_needed),
+ acpi_ut_get_type_name(this_type), object));
+
+ return (AE_AML_OPERAND_TYPE);
+ }
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_resolve_operands
+ *
+ * PARAMETERS: Opcode - Opcode being interpreted
+ * stack_ptr - Pointer to the operand stack to be
+ * resolved
+ * walk_state - Current state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Convert multiple input operands to the types required by the
+ * target operator.
+ *
+ * Each 5-bit group in arg_types represents one required
+ * operand and indicates the required Type. The corresponding operand
+ *              will be converted to the required type if possible; otherwise we
+ * abort with an exception.
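+ *
+ *              For example, the type for the current operand occupies one
+ *              5-bit group of arg_types; GET_CURRENT_ARG_TYPE extracts the
+ *              current group and INCREMENT_ARG_LIST advances to the next
+ *              one, as in the resolution loop below.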
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_resolve_operands(u16 opcode,
+ union acpi_operand_object ** stack_ptr,
+ struct acpi_walk_state * walk_state)
+{
+ union acpi_operand_object *obj_desc;
+ acpi_status status = AE_OK;
+ u8 object_type;
+ u32 arg_types;
+ const struct acpi_opcode_info *op_info;
+ u32 this_arg_type;
+ acpi_object_type type_needed;
+ u16 target_op = 0;
+
+ ACPI_FUNCTION_TRACE_U32(ex_resolve_operands, opcode);
+
+ op_info = acpi_ps_get_opcode_info(opcode);
+ if (op_info->class == AML_CLASS_UNKNOWN) {
+ return_ACPI_STATUS(AE_AML_BAD_OPCODE);
+ }
+
+ arg_types = op_info->runtime_args;
+ if (arg_types == ARGI_INVALID_OPCODE) {
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode %X", opcode));
+
+ return_ACPI_STATUS(AE_AML_INTERNAL);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Opcode %X [%s] RequiredOperandTypes=%8.8X\n",
+ opcode, op_info->name, arg_types));
+
+ /*
+ * Normal exit is with (arg_types == 0) at end of argument list.
+	 * The function returns an exception from within the loop upon
+	 * finding an entry that is not (or cannot be converted to) the
+	 * required type, if the stack underflows, or upon finding a NULL
+	 * stack entry (which should not happen).
+ */
+ while (GET_CURRENT_ARG_TYPE(arg_types)) {
+ if (!stack_ptr || !*stack_ptr) {
+ ACPI_ERROR((AE_INFO, "Null stack entry at %p",
+ stack_ptr));
+
+ return_ACPI_STATUS(AE_AML_INTERNAL);
+ }
+
+ /* Extract useful items */
+
+ obj_desc = *stack_ptr;
+
+ /* Decode the descriptor type */
+
+ switch (ACPI_GET_DESCRIPTOR_TYPE(obj_desc)) {
+ case ACPI_DESC_TYPE_NAMED:
+
+ /* Namespace Node */
+
+ object_type =
+ ((struct acpi_namespace_node *)obj_desc)->type;
+
+ /*
+ * Resolve an alias object. The construction of these objects
+ * guarantees that there is only one level of alias indirection;
+ * thus, the attached object is always the aliased namespace node
+ */
+ if (object_type == ACPI_TYPE_LOCAL_ALIAS) {
+ obj_desc =
+ acpi_ns_get_attached_object((struct
+ acpi_namespace_node
+ *)obj_desc);
+ *stack_ptr = obj_desc;
+ object_type =
+ ((struct acpi_namespace_node *)obj_desc)->
+ type;
+ }
+ break;
+
+ case ACPI_DESC_TYPE_OPERAND:
+
+ /* ACPI internal object */
+
+ object_type = ACPI_GET_OBJECT_TYPE(obj_desc);
+
+ /* Check for bad acpi_object_type */
+
+ if (!acpi_ut_valid_object_type(object_type)) {
+ ACPI_ERROR((AE_INFO,
+ "Bad operand object type [%X]",
+ object_type));
+
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ if (object_type == (u8) ACPI_TYPE_LOCAL_REFERENCE) {
+
+ /* Validate the Reference */
+
+ switch (obj_desc->reference.class) {
+ case ACPI_REFCLASS_DEBUG:
+
+ target_op = AML_DEBUG_OP;
+
+ /*lint -fallthrough */
+
+ case ACPI_REFCLASS_ARG:
+ case ACPI_REFCLASS_LOCAL:
+ case ACPI_REFCLASS_INDEX:
+ case ACPI_REFCLASS_REFOF:
+ case ACPI_REFCLASS_TABLE: /* ddb_handle from LOAD_OP or LOAD_TABLE_OP */
+ case ACPI_REFCLASS_NAME: /* Reference to a named object */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Operand is a Reference, Class [%s] %2.2X\n",
+ acpi_ut_get_reference_name
+ (obj_desc),
+ obj_desc->reference.
+ class));
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO,
+ "Unknown Reference Class %2.2X in %p",
+ obj_desc->reference.class,
+ obj_desc));
+
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+ }
+ break;
+
+ default:
+
+ /* Invalid descriptor */
+
+ ACPI_ERROR((AE_INFO, "Invalid descriptor %p [%s]",
+ obj_desc,
+ acpi_ut_get_descriptor_name(obj_desc)));
+
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ /* Get one argument type, point to the next */
+
+ this_arg_type = GET_CURRENT_ARG_TYPE(arg_types);
+ INCREMENT_ARG_LIST(arg_types);
+
+ /*
+ * Handle cases where the object does not need to be
+ * resolved to a value
+ */
+ switch (this_arg_type) {
+ case ARGI_REF_OR_STRING: /* Can be a String or Reference */
+
+ if ((ACPI_GET_DESCRIPTOR_TYPE(obj_desc) ==
+ ACPI_DESC_TYPE_OPERAND)
+ && (ACPI_GET_OBJECT_TYPE(obj_desc) ==
+ ACPI_TYPE_STRING)) {
+ /*
+ * String found - the string references a named object and
+ * must be resolved to a node
+ */
+ goto next_operand;
+ }
+
+ /*
+ * Else not a string - fall through to the normal Reference
+ * case below
+ */
+ /*lint -fallthrough */
+
+ case ARGI_REFERENCE: /* References: */
+ case ARGI_INTEGER_REF:
+ case ARGI_OBJECT_REF:
+ case ARGI_DEVICE_REF:
+ case ARGI_TARGETREF: /* Allows implicit conversion rules before store */
+ case ARGI_FIXED_TARGET: /* No implicit conversion before store to target */
+ case ARGI_SIMPLE_TARGET: /* Name, Local, or Arg - no implicit conversion */
+
+ /*
+ * Need an operand of type ACPI_TYPE_LOCAL_REFERENCE
+ * A Namespace Node is OK as-is
+ */
+ if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) ==
+ ACPI_DESC_TYPE_NAMED) {
+ goto next_operand;
+ }
+
+ status =
+ acpi_ex_check_object_type(ACPI_TYPE_LOCAL_REFERENCE,
+ object_type, obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ goto next_operand;
+
+ case ARGI_DATAREFOBJ: /* Store operator only */
+
+ /*
+ * We don't want to resolve index_op reference objects during
+ * a store because this would be an implicit de_ref_of operation.
+ * Instead, we just want to store the reference object.
+ * -- All others must be resolved below.
+ */
+ if ((opcode == AML_STORE_OP) &&
+ (ACPI_GET_OBJECT_TYPE(*stack_ptr) ==
+ ACPI_TYPE_LOCAL_REFERENCE)
+ && ((*stack_ptr)->reference.class == ACPI_REFCLASS_INDEX)) {
+ goto next_operand;
+ }
+ break;
+
+ default:
+ /* All cases covered above */
+ break;
+ }
+
+ /*
+ * Resolve this object to a value
+ */
+ status = acpi_ex_resolve_to_value(stack_ptr, walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Get the resolved object */
+
+ obj_desc = *stack_ptr;
+
+ /*
+ * Check the resulting object (value) type
+ */
+ switch (this_arg_type) {
+ /*
+ * For the simple cases, only one type of resolved object
+ * is allowed
+ */
+ case ARGI_MUTEX:
+
+ /* Need an operand of type ACPI_TYPE_MUTEX */
+
+ type_needed = ACPI_TYPE_MUTEX;
+ break;
+
+ case ARGI_EVENT:
+
+ /* Need an operand of type ACPI_TYPE_EVENT */
+
+ type_needed = ACPI_TYPE_EVENT;
+ break;
+
+ case ARGI_PACKAGE: /* Package */
+
+ /* Need an operand of type ACPI_TYPE_PACKAGE */
+
+ type_needed = ACPI_TYPE_PACKAGE;
+ break;
+
+ case ARGI_ANYTYPE:
+
+ /* Any operand type will do */
+
+ type_needed = ACPI_TYPE_ANY;
+ break;
+
+ case ARGI_DDBHANDLE:
+
+			/* Need a ddb_handle - a Reference object of class Table */
+
+ type_needed = ACPI_TYPE_LOCAL_REFERENCE;
+ break;
+
+ /*
+ * The more complex cases allow multiple resolved object types
+ */
+ case ARGI_INTEGER:
+
+ /*
+ * Need an operand of type ACPI_TYPE_INTEGER,
+ * But we can implicitly convert from a STRING or BUFFER
+ * Aka - "Implicit Source Operand Conversion"
+ */
+ status =
+ acpi_ex_convert_to_integer(obj_desc, stack_ptr, 16);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_TYPE) {
+ ACPI_ERROR((AE_INFO,
+ "Needed [Integer/String/Buffer], found [%s] %p",
+ acpi_ut_get_object_type_name
+ (obj_desc), obj_desc));
+
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ return_ACPI_STATUS(status);
+ }
+
+ if (obj_desc != *stack_ptr) {
+ acpi_ut_remove_reference(obj_desc);
+ }
+ goto next_operand;
+
+ case ARGI_BUFFER:
+
+ /*
+ * Need an operand of type ACPI_TYPE_BUFFER,
+ * But we can implicitly convert from a STRING or INTEGER
+ * Aka - "Implicit Source Operand Conversion"
+ */
+ status = acpi_ex_convert_to_buffer(obj_desc, stack_ptr);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_TYPE) {
+ ACPI_ERROR((AE_INFO,
+ "Needed [Integer/String/Buffer], found [%s] %p",
+ acpi_ut_get_object_type_name
+ (obj_desc), obj_desc));
+
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ return_ACPI_STATUS(status);
+ }
+
+ if (obj_desc != *stack_ptr) {
+ acpi_ut_remove_reference(obj_desc);
+ }
+ goto next_operand;
+
+ case ARGI_STRING:
+
+ /*
+ * Need an operand of type ACPI_TYPE_STRING,
+ * But we can implicitly convert from a BUFFER or INTEGER
+ * Aka - "Implicit Source Operand Conversion"
+ */
+ status = acpi_ex_convert_to_string(obj_desc, stack_ptr,
+ ACPI_IMPLICIT_CONVERT_HEX);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_TYPE) {
+ ACPI_ERROR((AE_INFO,
+ "Needed [Integer/String/Buffer], found [%s] %p",
+ acpi_ut_get_object_type_name
+ (obj_desc), obj_desc));
+
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ return_ACPI_STATUS(status);
+ }
+
+ if (obj_desc != *stack_ptr) {
+ acpi_ut_remove_reference(obj_desc);
+ }
+ goto next_operand;
+
+ case ARGI_COMPUTEDATA:
+
+ /* Need an operand of type INTEGER, STRING or BUFFER */
+
+ switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+ case ACPI_TYPE_INTEGER:
+ case ACPI_TYPE_STRING:
+ case ACPI_TYPE_BUFFER:
+
+ /* Valid operand */
+ break;
+
+ default:
+ ACPI_ERROR((AE_INFO,
+ "Needed [Integer/String/Buffer], found [%s] %p",
+ acpi_ut_get_object_type_name
+ (obj_desc), obj_desc));
+
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+ goto next_operand;
+
+ case ARGI_BUFFER_OR_STRING:
+
+ /* Need an operand of type STRING or BUFFER */
+
+ switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+ case ACPI_TYPE_STRING:
+ case ACPI_TYPE_BUFFER:
+
+ /* Valid operand */
+ break;
+
+ case ACPI_TYPE_INTEGER:
+
+ /* Highest priority conversion is to type Buffer */
+
+ status =
+ acpi_ex_convert_to_buffer(obj_desc,
+ stack_ptr);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ if (obj_desc != *stack_ptr) {
+ acpi_ut_remove_reference(obj_desc);
+ }
+ break;
+
+ default:
+ ACPI_ERROR((AE_INFO,
+ "Needed [Integer/String/Buffer], found [%s] %p",
+ acpi_ut_get_object_type_name
+ (obj_desc), obj_desc));
+
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+ goto next_operand;
+
+ case ARGI_DATAOBJECT:
+ /*
+ * ARGI_DATAOBJECT is only used by the size_of operator.
+ * Need a buffer, string, package, or ref_of reference.
+ *
+ * The only reference allowed here is a direct reference to
+ * a namespace node.
+ */
+ switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+ case ACPI_TYPE_PACKAGE:
+ case ACPI_TYPE_STRING:
+ case ACPI_TYPE_BUFFER:
+ case ACPI_TYPE_LOCAL_REFERENCE:
+
+ /* Valid operand */
+ break;
+
+ default:
+ ACPI_ERROR((AE_INFO,
+ "Needed [Buffer/String/Package/Reference], found [%s] %p",
+ acpi_ut_get_object_type_name
+ (obj_desc), obj_desc));
+
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+ goto next_operand;
+
+ case ARGI_COMPLEXOBJ:
+
+ /* Need a buffer or package or (ACPI 2.0) String */
+
+ switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+ case ACPI_TYPE_PACKAGE:
+ case ACPI_TYPE_STRING:
+ case ACPI_TYPE_BUFFER:
+
+ /* Valid operand */
+ break;
+
+ default:
+ ACPI_ERROR((AE_INFO,
+ "Needed [Buffer/String/Package], found [%s] %p",
+ acpi_ut_get_object_type_name
+ (obj_desc), obj_desc));
+
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+ goto next_operand;
+
+ case ARGI_REGION_OR_BUFFER: /* Used by Load() only */
+
+ /* Need an operand of type REGION or a BUFFER (which could be a resolved region field) */
+
+ switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+ case ACPI_TYPE_BUFFER:
+ case ACPI_TYPE_REGION:
+
+ /* Valid operand */
+ break;
+
+ default:
+ ACPI_ERROR((AE_INFO,
+ "Needed [Region/Buffer], found [%s] %p",
+ acpi_ut_get_object_type_name
+ (obj_desc), obj_desc));
+
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+ goto next_operand;
+
+ case ARGI_DATAREFOBJ:
+
+ /* Used by the Store() operator only */
+
+ switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+ case ACPI_TYPE_INTEGER:
+ case ACPI_TYPE_PACKAGE:
+ case ACPI_TYPE_STRING:
+ case ACPI_TYPE_BUFFER:
+ case ACPI_TYPE_BUFFER_FIELD:
+ case ACPI_TYPE_LOCAL_REFERENCE:
+ case ACPI_TYPE_LOCAL_REGION_FIELD:
+ case ACPI_TYPE_LOCAL_BANK_FIELD:
+ case ACPI_TYPE_LOCAL_INDEX_FIELD:
+ case ACPI_TYPE_DDB_HANDLE:
+
+ /* Valid operand */
+ break;
+
+ default:
+
+ if (acpi_gbl_enable_interpreter_slack) {
+ /*
+ * Enable original behavior of Store(), allowing any and all
+ * objects as the source operand. The ACPI spec does not
+ * allow this, however.
+ */
+ break;
+ }
+
+ if (target_op == AML_DEBUG_OP) {
+
+ /* Allow store of any object to the Debug object */
+
+ break;
+ }
+
+ ACPI_ERROR((AE_INFO,
+				    "Needed [Integer/Buffer/String/Package/Ref/Ddb], found [%s] %p",
+ acpi_ut_get_object_type_name
+ (obj_desc), obj_desc));
+
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+ goto next_operand;
+
+ default:
+
+ /* Unknown type */
+
+ ACPI_ERROR((AE_INFO,
+ "Internal - Unknown ARGI (required operand) type %X",
+ this_arg_type));
+
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /*
+ * Make sure that the original object was resolved to the
+ * required object type (Simple cases only).
+ */
+ status = acpi_ex_check_object_type(type_needed,
+ ACPI_GET_OBJECT_TYPE
+ (*stack_ptr), *stack_ptr);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ next_operand:
+ /*
+ * If more operands needed, decrement stack_ptr to point
+ * to next operand on stack
+ */
+ if (GET_CURRENT_ARG_TYPE(arg_types)) {
+ stack_ptr--;
+ }
+ }
+
+ ACPI_DUMP_OPERANDS(walk_state->operands,
+ acpi_ps_get_opcode_name(opcode),
+ walk_state->num_operands);
+
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/executer/exstore.c b/drivers/acpi/executer/exstore.c
new file mode 100644
index 0000000..3318df4
--- /dev/null
+++ b/drivers/acpi/executer/exstore.c
@@ -0,0 +1,715 @@
+
+/******************************************************************************
+ *
+ * Module Name: exstore - AML Interpreter object store support
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acdispat.h>
+#include <acpi/acinterp.h>
+#include <acpi/amlcode.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exstore")
+
+/* Local prototypes */
+static void
+acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
+ u32 level, u32 index);
+
+static acpi_status
+acpi_ex_store_object_to_index(union acpi_operand_object *val_desc,
+ union acpi_operand_object *dest_desc,
+ struct acpi_walk_state *walk_state);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_do_debug_object
+ *
+ * PARAMETERS: source_desc - Value to be stored
+ * Level - Indentation level (used for packages)
+ * Index - Current package element, zero if not pkg
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Handles stores to the Debug Object.
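+ *              The object is dumped to the debug output stream; Package
+ *              elements and References are expanded recursively with
+ *              increasing indentation.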
+ *
+ ******************************************************************************/
+
+static void
+acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
+ u32 level, u32 index)
+{
+ u32 i;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc);
+
+ /* Print line header as long as we are not in the middle of an object display */
+
+ if (!((level > 0) && index == 0)) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[ACPI Debug] %*s",
+ level, " "));
+ }
+
+ /* Display index for package output only */
+
+ if (index > 0) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
+ "(%.2u) ", index - 1));
+ }
+
+ if (!source_desc) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[Null Object]\n"));
+ return_VOID;
+ }
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s ",
+ acpi_ut_get_object_type_name
+ (source_desc)));
+
+ if (!acpi_ut_valid_internal_object(source_desc)) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
+ "%p, Invalid Internal Object!\n",
+ source_desc));
+ return_VOID;
+ }
+ } else if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) ==
+ ACPI_DESC_TYPE_NAMED) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s: %p\n",
+ acpi_ut_get_type_name(((struct
+ acpi_namespace_node
+ *)source_desc)->
+ type),
+ source_desc));
+ return_VOID;
+ } else {
+ return_VOID;
+ }
+
+ /* source_desc is of type ACPI_DESC_TYPE_OPERAND */
+
+ switch (ACPI_GET_OBJECT_TYPE(source_desc)) {
+ case ACPI_TYPE_INTEGER:
+
+ /* Output correct integer width */
+
+ if (acpi_gbl_integer_byte_width == 4) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "0x%8.8X\n",
+ (u32) source_desc->integer.
+ value));
+ } else {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
+ "0x%8.8X%8.8X\n",
+ ACPI_FORMAT_UINT64(source_desc->
+ integer.
+ value)));
+ }
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[0x%.2X]\n",
+ (u32) source_desc->buffer.length));
+ ACPI_DUMP_BUFFER(source_desc->buffer.pointer,
+ (source_desc->buffer.length <
+ 256) ? source_desc->buffer.length : 256);
+ break;
+
+ case ACPI_TYPE_STRING:
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[0x%.2X] \"%s\"\n",
+ source_desc->string.length,
+ source_desc->string.pointer));
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
+ "[Contains 0x%.2X Elements]\n",
+ source_desc->package.count));
+
+ /* Output the entire contents of the package */
+
+ for (i = 0; i < source_desc->package.count; i++) {
+ acpi_ex_do_debug_object(source_desc->package.
+ elements[i], level + 4, i + 1);
+ }
+ break;
+
+ case ACPI_TYPE_LOCAL_REFERENCE:
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[%s] ",
+ acpi_ut_get_reference_name(source_desc)));
+
+ /* Decode the reference */
+
+ switch (source_desc->reference.class) {
+ case ACPI_REFCLASS_INDEX:
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "0x%X\n",
+ source_desc->reference.value));
+ break;
+
+ case ACPI_REFCLASS_TABLE:
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
+ "Table Index 0x%X\n",
+ source_desc->reference.value));
+ break;
+
+ default:
+ break;
+ }
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, " "));
+
+ /* Check for valid node first, then valid object */
+
+ if (source_desc->reference.node) {
+ if (ACPI_GET_DESCRIPTOR_TYPE
+ (source_desc->reference.node) !=
+ ACPI_DESC_TYPE_NAMED) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
+ " %p - Not a valid namespace node\n",
+ source_desc->reference.
+ node));
+ } else {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
+ "Node %p [%4.4s] ",
+ source_desc->reference.
+ node,
+ (source_desc->reference.
+ node)->name.ascii));
+
+ switch ((source_desc->reference.node)->type) {
+
+ /* These types have no attached object */
+
+ case ACPI_TYPE_DEVICE:
+ acpi_os_printf("Device\n");
+ break;
+
+ case ACPI_TYPE_THERMAL:
+ acpi_os_printf("Thermal Zone\n");
+ break;
+
+ default:
+ acpi_ex_do_debug_object((source_desc->
+ reference.
+ node)->object,
+ level + 4, 0);
+ break;
+ }
+ }
+ } else if (source_desc->reference.object) {
+ if (ACPI_GET_DESCRIPTOR_TYPE
+ (source_desc->reference.object) ==
+ ACPI_DESC_TYPE_NAMED) {
+ acpi_ex_do_debug_object(((struct
+ acpi_namespace_node *)
+ source_desc->reference.
+ object)->object,
+ level + 4, 0);
+ } else {
+ acpi_ex_do_debug_object(source_desc->reference.
+ object, level + 4, 0);
+ }
+ }
+ break;
+
+ default:
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%p\n",
+ source_desc));
+ break;
+ }
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "\n"));
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_store
+ *
+ * PARAMETERS: *source_desc - Value to be stored
+ * *dest_desc - Where to store it. Must be an NS node
+ * or a union acpi_operand_object of type
+ * Reference.
+ * walk_state - Current walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Store the value described by source_desc into the location
+ * described by dest_desc. Called by various interpreter
+ * functions to store the result of an operation into
+ * the destination operand -- not just the actual "Store"
+ * ASL operator.
+ *
+ ******************************************************************************/
+
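+/*
+ * Illustrative ASL forms that reach the reference cases handled below
+ * (object names are examples only):
+ *
+ *     Store (5, Local0)            // ACPI_REFCLASS_LOCAL (or _ARG for ArgX)
+ *     Store (5, Index (BUF0, 3))   // ACPI_REFCLASS_INDEX
+ *     Store (5, Debug)             // ACPI_REFCLASS_DEBUG
+ *
+ * ACPI_REFCLASS_REFOF covers stores through a reference obtained via RefOf.
+ */
+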
+acpi_status
+acpi_ex_store(union acpi_operand_object *source_desc,
+ union acpi_operand_object *dest_desc,
+ struct acpi_walk_state *walk_state)
+{
+ acpi_status status = AE_OK;
+ union acpi_operand_object *ref_desc = dest_desc;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_store, dest_desc);
+
+ /* Validate parameters */
+
+ if (!source_desc || !dest_desc) {
+ ACPI_ERROR((AE_INFO, "Null parameter"));
+ return_ACPI_STATUS(AE_AML_NO_OPERAND);
+ }
+
+ /* dest_desc can be either a namespace node or an ACPI object */
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(dest_desc) == ACPI_DESC_TYPE_NAMED) {
+ /*
+ * Dest is a namespace node,
+ * Storing an object into a Named node.
+ */
+ status = acpi_ex_store_object_to_node(source_desc,
+ (struct
+ acpi_namespace_node *)
+ dest_desc, walk_state,
+ ACPI_IMPLICIT_CONVERSION);
+
+ return_ACPI_STATUS(status);
+ }
+
+ /* Destination object must be a Reference or a Constant object */
+
+ switch (ACPI_GET_OBJECT_TYPE(dest_desc)) {
+ case ACPI_TYPE_LOCAL_REFERENCE:
+ break;
+
+ case ACPI_TYPE_INTEGER:
+
+ /* Allow stores to Constants -- a Noop as per ACPI spec */
+
+ if (dest_desc->common.flags & AOPOBJ_AML_CONSTANT) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*lint -fallthrough */
+
+ default:
+
+ /* Destination is not a Reference object */
+
+ ACPI_ERROR((AE_INFO,
+ "Target is not a Reference or Constant object - %s [%p]",
+ acpi_ut_get_object_type_name(dest_desc),
+ dest_desc));
+
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ /*
+ * Examine the Reference class. These cases are handled:
+ *
+ * 1) Store to Name (Change the object associated with a name)
+ * 2) Store to an indexed area of a Buffer or Package
+ * 3) Store to a Method Local or Arg
+ * 4) Store to the debug object
+ */
+ switch (ref_desc->reference.class) {
+ case ACPI_REFCLASS_REFOF:
+
+ /* Storing an object into a Name "container" */
+
+ status = acpi_ex_store_object_to_node(source_desc,
+ ref_desc->reference.
+ object, walk_state,
+ ACPI_IMPLICIT_CONVERSION);
+ break;
+
+ case ACPI_REFCLASS_INDEX:
+
+ /* Storing to an Index (pointer into a package or buffer) */
+
+ status =
+ acpi_ex_store_object_to_index(source_desc, ref_desc,
+ walk_state);
+ break;
+
+ case ACPI_REFCLASS_LOCAL:
+ case ACPI_REFCLASS_ARG:
+
+ /* Store to a method local/arg */
+
+ status =
+ acpi_ds_store_object_to_local(ref_desc->reference.class,
+ ref_desc->reference.value,
+ source_desc, walk_state);
+ break;
+
+ case ACPI_REFCLASS_DEBUG:
+
+ /*
+ * Storing to the Debug object causes the value stored to be
+ * displayed and otherwise has no effect -- see ACPI Specification
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "**** Write to Debug Object: Object %p %s ****:\n\n",
+ source_desc,
+ acpi_ut_get_object_type_name(source_desc)));
+
+ acpi_ex_do_debug_object(source_desc, 0, 0);
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO, "Unknown Reference Class %2.2X",
+ ref_desc->reference.class));
+ ACPI_DUMP_ENTRY(ref_desc, ACPI_LV_INFO);
+
+ status = AE_AML_INTERNAL;
+ break;
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_store_object_to_index
+ *
+ * PARAMETERS: *source_desc - Value to be stored
+ * *index_desc - Indexed location (Package or Buffer element) that receives the value
+ * walk_state - Current walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Store the object to indexed Buffer or Package element
+ *
+ ******************************************************************************/
+
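+/*
+ * Illustrative ASL (BUF0 and PKG0 are example names): with BUF0 a Buffer
+ * and PKG0 a Package,
+ *
+ *     Store (0x1234, Index (BUF0, 2))   // writes the low byte 0x34 to BUF0[2]
+ *     Store (Local0, Index (PKG0, 1))   // replaces element 1 with a copy
+ *
+ * Only the buffer case truncates the source to one byte; package stores
+ * copy the whole source object without implicit conversion.
+ */
+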
+static acpi_status
+acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
+ union acpi_operand_object *index_desc,
+ struct acpi_walk_state *walk_state)
+{
+ acpi_status status = AE_OK;
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object *new_desc;
+ u8 value = 0;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(ex_store_object_to_index);
+
+ /*
+ * Destination must be a reference pointer, and
+ * must point to either a buffer or a package
+ */
+ switch (index_desc->reference.target_type) {
+ case ACPI_TYPE_PACKAGE:
+ /*
+ * Storing to a package element. Copy the object and replace
+ * any existing object with the new object. No implicit
+ * conversion is performed.
+ *
+ * The object at *(index_desc->Reference.Where) is the
+ * element within the package that is to be modified.
+ * The parent package object is at index_desc->Reference.Object
+ */
+ obj_desc = *(index_desc->reference.where);
+
+ if (ACPI_GET_OBJECT_TYPE(source_desc) ==
+ ACPI_TYPE_LOCAL_REFERENCE
+ && source_desc->reference.class == ACPI_REFCLASS_TABLE) {
+
+ /* This is a DDBHandle, just add a reference to it */
+
+ acpi_ut_add_reference(source_desc);
+ new_desc = source_desc;
+ } else {
+ /* Normal object, copy it */
+
+ status =
+ acpi_ut_copy_iobject_to_iobject(source_desc,
+ &new_desc,
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ if (obj_desc) {
+
+ /* Decrement reference count by the ref count of the parent package */
+
+ for (i = 0; i < ((union acpi_operand_object *)
+ index_desc->reference.object)->common.
+ reference_count; i++) {
+ acpi_ut_remove_reference(obj_desc);
+ }
+ }
+
+ *(index_desc->reference.where) = new_desc;
+
+ /* Increment ref count by (the ref count of the parent package - 1) */
+
+ for (i = 1; i < ((union acpi_operand_object *)
+ index_desc->reference.object)->common.
+ reference_count; i++) {
+ acpi_ut_add_reference(new_desc);
+ }
+
+ break;
+
+ case ACPI_TYPE_BUFFER_FIELD:
+
+ /*
+ * Store into a Buffer or String (not actually a real buffer_field)
+ * at a location defined by an Index.
+ *
+ * The first 8-bit element of the source object is written to the
+ * 8-bit Buffer location defined by the Index destination object,
+ * according to the ACPI 2.0 specification.
+ */
+
+ /*
+ * Make sure the target is a Buffer or String. An error should
+ * not happen here, since the reference_object was constructed
+ * by the INDEX_OP code.
+ */
+ obj_desc = index_desc->reference.object;
+ if ((ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_BUFFER) &&
+ (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_STRING)) {
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ /*
+ * The assignment of the individual elements will be slightly
+ * different for each source type.
+ */
+ switch (ACPI_GET_OBJECT_TYPE(source_desc)) {
+ case ACPI_TYPE_INTEGER:
+
+ /* Use the least-significant byte of the integer */
+
+ value = (u8) (source_desc->integer.value);
+ break;
+
+ case ACPI_TYPE_BUFFER:
+ case ACPI_TYPE_STRING:
+
+ /* Note: Takes advantage of common string/buffer fields */
+
+ value = source_desc->buffer.pointer[0];
+ break;
+
+ default:
+
+ /* All other types are invalid */
+
+ ACPI_ERROR((AE_INFO,
+ "Source must be Integer/Buffer/String type, not %s",
+ acpi_ut_get_object_type_name(source_desc)));
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ /* Store the source value into the target buffer byte */
+
+ obj_desc->buffer.pointer[index_desc->reference.value] = value;
+ break;
+
+ default:
+ ACPI_ERROR((AE_INFO, "Target is not a Package or BufferField"));
+ status = AE_AML_OPERAND_TYPE;
+ break;
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_store_object_to_node
+ *
+ * PARAMETERS: source_desc - Value to be stored
+ * Node - Named object to receive the value
+ * walk_state - Current walk state
+ * implicit_conversion - Perform implicit conversion (yes/no)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Store the object to the named object.
+ *
+ * The Assignment of an object to a named object is handled here
+ * The value passed in will replace the current value (if any)
+ * with the input value.
+ *
+ * When storing into an object the data is converted to the
+ * target object type then stored in the object. This means
+ * that the target object type (for an initialized target) will
+ * not be changed by a store operation.
+ *
+ * Assumes parameters are already validated.
+ *
+ ******************************************************************************/
+
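+/*
+ * Illustrative ASL (NUM1 is an example name): if NUM1 was declared as
+ * Name (NUM1, 0x1111), i.e. an Integer, then
+ *
+ *     Store ("ABCD", NUM1)       // implicit conversion: NUM1 becomes 0xABCD
+ *     CopyObject ("ABCD", NUM1)  // no conversion: NUM1 becomes the String "ABCD"
+ *
+ * This sketch assumes the usual spec-defined conversions; exact results
+ * also depend on the integer width of the executing table.
+ */
+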
+acpi_status
+acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
+ struct acpi_namespace_node *node,
+ struct acpi_walk_state *walk_state,
+ u8 implicit_conversion)
+{
+ acpi_status status = AE_OK;
+ union acpi_operand_object *target_desc;
+ union acpi_operand_object *new_desc;
+ acpi_object_type target_type;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_store_object_to_node, source_desc);
+
+ /* Get current type of the node, and object attached to Node */
+
+ target_type = acpi_ns_get_type(node);
+ target_desc = acpi_ns_get_attached_object(node);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Storing %p(%s) into node %p(%s)\n",
+ source_desc,
+ acpi_ut_get_object_type_name(source_desc), node,
+ acpi_ut_get_type_name(target_type)));
+
+ /*
+ * Resolve the source object to an actual value
+ * (If it is a reference object)
+ */
+ status = acpi_ex_resolve_object(&source_desc, target_type, walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* If no implicit conversion, drop into the default case below */
+
+ if ((!implicit_conversion) ||
+ ((walk_state->opcode == AML_COPY_OP) &&
+ (target_type != ACPI_TYPE_LOCAL_REGION_FIELD) &&
+ (target_type != ACPI_TYPE_LOCAL_BANK_FIELD) &&
+ (target_type != ACPI_TYPE_LOCAL_INDEX_FIELD))) {
+ /*
+ * Force execution of default (no implicit conversion). Note:
+ * copy_object does not perform an implicit conversion, as per the ACPI
+ * spec -- except in case of region/bank/index fields -- because these
+ * objects must retain their original type permanently.
+ */
+ target_type = ACPI_TYPE_ANY;
+ }
+
+ /* Do the actual store operation */
+
+ switch (target_type) {
+ case ACPI_TYPE_BUFFER_FIELD:
+ case ACPI_TYPE_LOCAL_REGION_FIELD:
+ case ACPI_TYPE_LOCAL_BANK_FIELD:
+ case ACPI_TYPE_LOCAL_INDEX_FIELD:
+
+ /* For fields, copy the source data to the target field. */
+
+ status = acpi_ex_write_data_to_field(source_desc, target_desc,
+ &walk_state->result_obj);
+ break;
+
+ case ACPI_TYPE_INTEGER:
+ case ACPI_TYPE_STRING:
+ case ACPI_TYPE_BUFFER:
+
+ /*
+ * These target types are all of type Integer/String/Buffer, and
+ * therefore support implicit conversion before the store.
+ *
+ * Copy and/or convert the source object to a new target object
+ */
+ status =
+ acpi_ex_store_object_to_object(source_desc, target_desc,
+ &new_desc, walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ if (new_desc != target_desc) {
+ /*
+ * Store new_desc as the new value of the Name, and set
+ * the Name's type to that of the value being stored in it.
+ * source_desc reference count is incremented by attach_object.
+ *
+ * Note: This may change the type of the node if an explicit store
+ * has been performed such that the node/object type has been
+ * changed.
+ */
+ status =
+ acpi_ns_attach_object(node, new_desc,
+ new_desc->common.type);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Store %s into %s via Convert/Attach\n",
+ acpi_ut_get_object_type_name
+ (source_desc),
+ acpi_ut_get_object_type_name
+ (new_desc)));
+ }
+ break;
+
+ default:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Storing %s (%p) directly into node (%p) with no implicit conversion\n",
+ acpi_ut_get_object_type_name(source_desc),
+ source_desc, node));
+
+ /* No conversions for all other types. Just attach the source object */
+
+ status = acpi_ns_attach_object(node, source_desc,
+ ACPI_GET_OBJECT_TYPE
+ (source_desc));
+ break;
+ }
+
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/executer/exstoren.c b/drivers/acpi/executer/exstoren.c
new file mode 100644
index 0000000..eef61a0
--- /dev/null
+++ b/drivers/acpi/executer/exstoren.c
@@ -0,0 +1,303 @@
+
+/******************************************************************************
+ *
+ * Module Name: exstoren - AML Interpreter object store support,
+ * Store to Node (namespace object)
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acinterp.h>
+#include <acpi/amlcode.h>
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exstoren")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_resolve_object
+ *
+ * PARAMETERS: source_desc_ptr - Pointer to the source object
+ * target_type - Current type of the target
+ * walk_state - Current walk state
+ *
+ * RETURN: Status, resolved object in source_desc_ptr.
+ *
+ * DESCRIPTION: Resolve an object. If the object is a reference, dereference
+ * it and return the actual object in the source_desc_ptr.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr,
+ acpi_object_type target_type,
+ struct acpi_walk_state *walk_state)
+{
+ union acpi_operand_object *source_desc = *source_desc_ptr;
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(ex_resolve_object);
+
+ /* Ensure we have a Target that can be stored to */
+
+ switch (target_type) {
+ case ACPI_TYPE_BUFFER_FIELD:
+ case ACPI_TYPE_LOCAL_REGION_FIELD:
+ case ACPI_TYPE_LOCAL_BANK_FIELD:
+ case ACPI_TYPE_LOCAL_INDEX_FIELD:
+ /*
+ * These cases all require only Integers or values that
+ * can be converted to Integers (Strings or Buffers)
+ */
+
+ case ACPI_TYPE_INTEGER:
+ case ACPI_TYPE_STRING:
+ case ACPI_TYPE_BUFFER:
+
+ /*
+ * Stores into a Field/Region or into an Integer/Buffer/String
+ * are all essentially the same. This case handles the
+ * "interchangeable" types Integer, String, and Buffer.
+ */
+ if (ACPI_GET_OBJECT_TYPE(source_desc) ==
+ ACPI_TYPE_LOCAL_REFERENCE) {
+
+ /* Resolve a reference object first */
+
+ status =
+ acpi_ex_resolve_to_value(source_desc_ptr,
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ break;
+ }
+ }
+
+ /* For copy_object, no further validation necessary */
+
+ if (walk_state->opcode == AML_COPY_OP) {
+ break;
+ }
+
+ /* Must have an Integer, Buffer, or String */
+
+ if ((ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_INTEGER) &&
+ (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_BUFFER) &&
+ (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_STRING) &&
+ !((ACPI_GET_OBJECT_TYPE(source_desc) ==
+ ACPI_TYPE_LOCAL_REFERENCE)
+ && (source_desc->reference.class ==
+ ACPI_REFCLASS_TABLE))) {
+
+ /* Conversion successful but still not a valid type */
+
+ ACPI_ERROR((AE_INFO,
+ "Cannot assign type %s to %s (must be type Int/Str/Buf)",
+ acpi_ut_get_object_type_name(source_desc),
+ acpi_ut_get_type_name(target_type)));
+ status = AE_AML_OPERAND_TYPE;
+ }
+ break;
+
+ case ACPI_TYPE_LOCAL_ALIAS:
+ case ACPI_TYPE_LOCAL_METHOD_ALIAS:
+
+ /*
+ * All aliases should have been resolved earlier, during the
+ * operand resolution phase.
+ */
+ ACPI_ERROR((AE_INFO, "Store into an unresolved Alias object"));
+ status = AE_AML_INTERNAL;
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+ default:
+
+ /*
+ * All other types than Alias and the various Fields come here,
+ * including the untyped case - ACPI_TYPE_ANY.
+ */
+ break;
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_store_object_to_object
+ *
+ * PARAMETERS: source_desc - Object to store
+ * dest_desc - Object to receive a copy of the source
+ * new_desc - New object if dest_desc is obsoleted
+ * walk_state - Current walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: "Store" an object to another object. This may include
+ * converting the source type to the target type (implicit
+ * conversion), and a copy of the value of the source to
+ * the target.
+ *
+ * The Assignment of an object to another (not named) object
+ * is handled here.
+ * The Source passed in will replace the current value (if any)
+ * with the input value.
+ *
+ * When storing into an object the data is converted to the
+ * target object type then stored in the object. This means
+ * that the target object type (for an initialized target) will
+ * not be changed by a store operation.
+ *
+ * This module allows destination types of Number, String,
+ * Buffer, and Package.
+ *
+ * Assumes parameters are already validated. NOTE: source_desc
+ * resolution (from a reference object) must be performed by
+ * the caller if necessary.
+ *
+ ******************************************************************************/
+
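+/*
+ * Sketch of the behavior implemented below: if dest_desc is an initialized
+ * Integer and source_desc is the String "1234", the string is first
+ * converted to a temporary Integer (0x1234) and that value is copied into
+ * dest_desc, which is returned in *new_desc. If dest_desc is NULL (an
+ * uninitialized target), the source is simply copied into a new object.
+ */
+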
+acpi_status
+acpi_ex_store_object_to_object(union acpi_operand_object *source_desc,
+ union acpi_operand_object *dest_desc,
+ union acpi_operand_object **new_desc,
+ struct acpi_walk_state *walk_state)
+{
+ union acpi_operand_object *actual_src_desc;
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_store_object_to_object, source_desc);
+
+ actual_src_desc = source_desc;
+ if (!dest_desc) {
+ /*
+ * There is no destination object (An uninitialized node or
+ * package element), so we can simply copy the source object
+ * creating a new destination object
+ */
+ status =
+ acpi_ut_copy_iobject_to_iobject(actual_src_desc, new_desc,
+ walk_state);
+ return_ACPI_STATUS(status);
+ }
+
+ if (ACPI_GET_OBJECT_TYPE(source_desc) !=
+ ACPI_GET_OBJECT_TYPE(dest_desc)) {
+ /*
+ * The source type does not match the type of the destination.
+ * Perform the "implicit conversion" of the source to the current type
+ * of the target as per the ACPI specification.
+ *
+ * If no conversion performed, actual_src_desc = source_desc.
+ * Otherwise, actual_src_desc is a temporary object to hold the
+ * converted object.
+ */
+ status =
+ acpi_ex_convert_to_target_type(ACPI_GET_OBJECT_TYPE
+ (dest_desc), source_desc,
+ &actual_src_desc,
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ if (source_desc == actual_src_desc) {
+ /*
+ * No conversion was performed. Return the source_desc as the
+ * new object.
+ */
+ *new_desc = source_desc;
+ return_ACPI_STATUS(AE_OK);
+ }
+ }
+
+ /*
+ * We now have two objects of identical types, and we can perform a
+ * copy of the *value* of the source object.
+ */
+ switch (ACPI_GET_OBJECT_TYPE(dest_desc)) {
+ case ACPI_TYPE_INTEGER:
+
+ dest_desc->integer.value = actual_src_desc->integer.value;
+
+ /* Truncate value if we are executing from a 32-bit ACPI table */
+
+ acpi_ex_truncate_for32bit_table(dest_desc);
+ break;
+
+ case ACPI_TYPE_STRING:
+
+ status =
+ acpi_ex_store_string_to_string(actual_src_desc, dest_desc);
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ status =
+ acpi_ex_store_buffer_to_buffer(actual_src_desc, dest_desc);
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+
+ status =
+ acpi_ut_copy_iobject_to_iobject(actual_src_desc, &dest_desc,
+ walk_state);
+ break;
+
+ default:
+ /*
+ * All other types come here.
+ */
+ ACPI_WARNING((AE_INFO, "Store into type %s not implemented",
+ acpi_ut_get_object_type_name(dest_desc)));
+
+ status = AE_NOT_IMPLEMENTED;
+ break;
+ }
+
+ if (actual_src_desc != source_desc) {
+
+ /* Delete the intermediate (temporary) source object */
+
+ acpi_ut_remove_reference(actual_src_desc);
+ }
+
+ *new_desc = dest_desc;
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/executer/exstorob.c b/drivers/acpi/executer/exstorob.c
new file mode 100644
index 0000000..9a75ff0
--- /dev/null
+++ b/drivers/acpi/executer/exstorob.c
@@ -0,0 +1,208 @@
+
+/******************************************************************************
+ *
+ * Module Name: exstorob - AML Interpreter object store support, store to object
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acinterp.h>
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exstorob")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_store_buffer_to_buffer
+ *
+ * PARAMETERS: source_desc - Source object to copy
+ * target_desc - Destination object of the copy
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Copy a buffer object to another buffer object.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
+ union acpi_operand_object *target_desc)
+{
+ u32 length;
+ u8 *buffer;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_store_buffer_to_buffer, source_desc);
+
+ /* We know that source_desc is a buffer by now */
+
+ buffer = ACPI_CAST_PTR(u8, source_desc->buffer.pointer);
+ length = source_desc->buffer.length;
+
+ /*
+ * If target is a buffer of length zero or is a static buffer,
+ * allocate a new buffer of the proper length
+ */
+ if ((target_desc->buffer.length == 0) ||
+ (target_desc->common.flags & AOPOBJ_STATIC_POINTER)) {
+ target_desc->buffer.pointer = ACPI_ALLOCATE(length);
+ if (!target_desc->buffer.pointer) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ target_desc->buffer.length = length;
+ }
+
+ /* Copy source buffer to target buffer */
+
+ if (length <= target_desc->buffer.length) {
+
+ /* Clear existing buffer and copy in the new one */
+
+ ACPI_MEMSET(target_desc->buffer.pointer, 0,
+ target_desc->buffer.length);
+ ACPI_MEMCPY(target_desc->buffer.pointer, buffer, length);
+
+#ifdef ACPI_OBSOLETE_BEHAVIOR
+ /*
+ * NOTE: ACPI versions up to 3.0 specified that the buffer must be
+ * truncated if the string is smaller than the buffer. However, "other"
+ * implementations of ACPI never did this and thus became the de facto
+ * standard. ACPI 3.0_a changes this behavior such that the buffer
+ * is no longer truncated.
+ */
+
+ /*
+ * OBSOLETE BEHAVIOR:
+ * If the original source was a string, we must truncate the buffer,
+ * according to the ACPI spec. Integer-to-Buffer and Buffer-to-Buffer
+ * copy must not truncate the original buffer.
+ */
+ if (original_src_type == ACPI_TYPE_STRING) {
+
+ /* Set the new length of the target */
+
+ target_desc->buffer.length = length;
+ }
+#endif
+ } else {
+ /* Truncate the source, copy only what will fit */
+
+ ACPI_MEMCPY(target_desc->buffer.pointer, buffer,
+ target_desc->buffer.length);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Truncating source buffer from %X to %X\n",
+ length, target_desc->buffer.length));
+ }
+
+ /* Copy flags */
+
+ target_desc->buffer.flags = source_desc->buffer.flags;
+ target_desc->common.flags &= ~AOPOBJ_STATIC_POINTER;
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_store_string_to_string
+ *
+ * PARAMETERS: source_desc - Source object to copy
+ * target_desc - Destination object of the copy
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Copy a String object to another String object
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
+ union acpi_operand_object *target_desc)
+{
+ u32 length;
+ u8 *buffer;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_store_string_to_string, source_desc);
+
+ /* We know that source_desc is a string by now */
+
+ buffer = ACPI_CAST_PTR(u8, source_desc->string.pointer);
+ length = source_desc->string.length;
+
+ /*
+ * Replace existing string value if it will fit and the string
+ * pointer is not a static pointer (part of an ACPI table)
+ */
+ if ((length < target_desc->string.length) &&
+ (!(target_desc->common.flags & AOPOBJ_STATIC_POINTER))) {
+ /*
+ * String will fit in existing non-static buffer.
+ * Clear old string and copy in the new one
+ */
+ ACPI_MEMSET(target_desc->string.pointer, 0,
+ (acpi_size) target_desc->string.length + 1);
+ ACPI_MEMCPY(target_desc->string.pointer, buffer, length);
+ } else {
+ /*
+ * Free the current buffer, then allocate a new buffer
+ * large enough to hold the value
+ */
+ if (target_desc->string.pointer &&
+ (!(target_desc->common.flags & AOPOBJ_STATIC_POINTER))) {
+
+ /* Only free if not a pointer into the DSDT */
+
+ ACPI_FREE(target_desc->string.pointer);
+ }
+
+ target_desc->string.pointer = ACPI_ALLOCATE_ZEROED((acpi_size)
+ length + 1);
+ if (!target_desc->string.pointer) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ target_desc->common.flags &= ~AOPOBJ_STATIC_POINTER;
+ ACPI_MEMCPY(target_desc->string.pointer, buffer, length);
+ }
+
+ /* Set the new target length */
+
+ target_desc->string.length = length;
+ return_ACPI_STATUS(AE_OK);
+}
diff --git a/drivers/acpi/executer/exsystem.c b/drivers/acpi/executer/exsystem.c
new file mode 100644
index 0000000..68990f1
--- /dev/null
+++ b/drivers/acpi/executer/exsystem.c
@@ -0,0 +1,302 @@
+
+/******************************************************************************
+ *
+ * Module Name: exsystem - Interface to OS services
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acinterp.h>
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exsystem")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_system_wait_semaphore
+ *
+ * PARAMETERS: Semaphore - Semaphore to wait on
+ * Timeout - Max time to wait
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Implements a semaphore wait with a check to see if the
+ * semaphore is available immediately. If it is not, the
+ * interpreter is released before waiting.
+ *
+ ******************************************************************************/
+acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ex_system_wait_semaphore);
+
+ status = acpi_os_wait_semaphore(semaphore, 1, ACPI_DO_NOT_WAIT);
+ if (ACPI_SUCCESS(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ if (status == AE_TIME) {
+
+ /* We must wait, so unlock the interpreter */
+
+ acpi_ex_relinquish_interpreter();
+
+ status = acpi_os_wait_semaphore(semaphore, 1, timeout);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "*** Thread awake after blocking, %s\n",
+ acpi_format_exception(status)));
+
+ /* Reacquire the interpreter */
+
+ acpi_ex_reacquire_interpreter();
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_system_wait_mutex
+ *
+ * PARAMETERS: Mutex - Mutex to wait on
+ * Timeout - Max time to wait
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Implements a mutex wait with a check to see if the
+ * mutex is available immediately. If it is not, the
+ * interpreter is released before waiting.
+ *
+ ******************************************************************************/
+
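+/*
+ * Illustrative ASL that ends up here: Acquire (MTX0, 0x03E8) requests the
+ * example mutex MTX0 with a 1000 ms timeout. If the mutex is not available
+ * immediately, the interpreter lock is dropped for the duration of the wait
+ * so that other control methods can continue to execute.
+ */
+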
+acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ex_system_wait_mutex);
+
+ status = acpi_os_acquire_mutex(mutex, ACPI_DO_NOT_WAIT);
+ if (ACPI_SUCCESS(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ if (status == AE_TIME) {
+
+ /* We must wait, so unlock the interpreter */
+
+ acpi_ex_relinquish_interpreter();
+
+ status = acpi_os_acquire_mutex(mutex, timeout);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "*** Thread awake after blocking, %s\n",
+ acpi_format_exception(status)));
+
+ /* Reacquire the interpreter */
+
+ acpi_ex_reacquire_interpreter();
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_system_do_stall
+ *
+ * PARAMETERS: how_long - The amount of time to stall,
+ * in microseconds
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Suspend running thread for specified amount of time.
+ * Note: ACPI specification requires that Stall() does not
+ * relinquish the processor, and delays longer than 100 usec
+ * should use Sleep() instead. We allow stalls up to 255 usec
+ * for compatibility with other interpreters and existing BIOSs.
+ *
+ ******************************************************************************/
+
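+/*
+ * Illustrative ASL: Stall (100) busy-waits for 100 microseconds without
+ * releasing the interpreter, whereas Sleep (100) (handled by
+ * acpi_ex_system_do_suspend below) yields the processor for 100
+ * milliseconds. A Stall argument greater than 255 is rejected here with
+ * AE_AML_OPERAND_VALUE.
+ */
+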
+acpi_status acpi_ex_system_do_stall(u32 how_long)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (how_long > 255) { /* 255 microseconds */
+ /*
+ * Longer than 255 usec, this is an error
+ *
+ * (ACPI specifies 100 usec as max, but this gives some slack in
+ * order to support existing BIOSs)
+ */
+ ACPI_ERROR((AE_INFO, "Time parameter is too large (%d)",
+ how_long));
+ status = AE_AML_OPERAND_VALUE;
+ } else {
+ acpi_os_stall(how_long);
+ }
+
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_system_do_suspend
+ *
+ * PARAMETERS: how_long - The amount of time to suspend,
+ * in milliseconds
+ *
+ * RETURN: Status (currently always AE_OK)
+ *
+ * DESCRIPTION: Suspend running thread for specified amount of time.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_system_do_suspend(acpi_integer how_long)
+{
+ ACPI_FUNCTION_ENTRY();
+
+ /* Since this thread will sleep, we must release the interpreter */
+
+ acpi_ex_relinquish_interpreter();
+
+ acpi_os_sleep(how_long);
+
+ /* And now we must get the interpreter again */
+
+ acpi_ex_reacquire_interpreter();
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_system_signal_event
+ *
+ * PARAMETERS: obj_desc - The object descriptor for this op
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Provides an access point to perform synchronization operations
+ * within the AML.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_system_signal_event(union acpi_operand_object * obj_desc)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(ex_system_signal_event);
+
+ if (obj_desc) {
+ status =
+ acpi_os_signal_semaphore(obj_desc->event.os_semaphore, 1);
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_system_wait_event
+ *
+ * PARAMETERS: time_desc - The 'time to delay' object descriptor
+ * obj_desc - The object descriptor for this op
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Provides an access point to perform synchronization operations
+ * within the AML. This operation is a request to wait for an
+ * event.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_system_wait_event(union acpi_operand_object *time_desc,
+ union acpi_operand_object *obj_desc)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(ex_system_wait_event);
+
+ if (obj_desc) {
+ status =
+ acpi_ex_system_wait_semaphore(obj_desc->event.os_semaphore,
+ (u16) time_desc->integer.
+ value);
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_system_reset_event
+ *
+ * PARAMETERS: obj_desc - The object descriptor for this op
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Reset an event to a known state.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_system_reset_event(union acpi_operand_object *obj_desc)
+{
+ acpi_status status = AE_OK;
+ acpi_semaphore temp_semaphore;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /*
+ * We are going to simply delete the existing semaphore and
+ * create a new one!
+ */
+ status =
+ acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0, &temp_semaphore);
+ if (ACPI_SUCCESS(status)) {
+ (void)acpi_os_delete_semaphore(obj_desc->event.os_semaphore);
+ obj_desc->event.os_semaphore = temp_semaphore;
+ }
+
+ return (status);
+}
diff --git a/drivers/acpi/executer/exutils.c b/drivers/acpi/executer/exutils.c
new file mode 100644
index 0000000..86c0388
--- /dev/null
+++ b/drivers/acpi/executer/exutils.c
@@ -0,0 +1,420 @@
+
+/******************************************************************************
+ *
+ * Module Name: exutils - interpreter/scanner utilities
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+/*
+ * DEFINE_AML_GLOBALS is tested in amlcode.h
+ * to determine whether certain global names should be "defined" or only
+ * "declared" in the current compilation. This enhances maintainability
+ * by enabling a single header file to embody all knowledge of the names
+ * in question.
+ *
+ * Exactly one module of any executable should #define DEFINE_AML_GLOBALS
+ * before #including the header files which use this convention. The
+ * names in question will be defined and initialized in that module,
+ * and declared as extern in all other modules which #include those
+ * header files.
+ */
+
+#define DEFINE_AML_GLOBALS
+
+#include <acpi/acpi.h>
+#include <acpi/acinterp.h>
+#include <acpi/amlcode.h>
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exutils")
+
+/* Local prototypes */
+static u32 acpi_ex_digits_needed(acpi_integer value, u32 base);
+
+#ifndef ACPI_NO_METHOD_EXECUTION
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_enter_interpreter
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Enter the interpreter execution region. Failure to enter
+ * the interpreter region is a fatal system error. Used in
+ * conjunction with exit_interpreter.
+ *
+ ******************************************************************************/
+
+void acpi_ex_enter_interpreter(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ex_enter_interpreter);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Could not acquire AML Interpreter mutex"));
+ }
+
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_reacquire_interpreter
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Reacquire the interpreter execution region from within the
+ * interpreter code. Failure to enter the interpreter region is a
+ * fatal system error. Used in conjunction with
+ * relinquish_interpreter
+ *
+ ******************************************************************************/
+
+void acpi_ex_reacquire_interpreter(void)
+{
+ ACPI_FUNCTION_TRACE(ex_reacquire_interpreter);
+
+ /*
+ * If the global serialized flag is set, do not release the interpreter,
+ * since it was not actually released by acpi_ex_relinquish_interpreter.
+ * This forces the interpreter to be single threaded.
+ */
+ if (!acpi_gbl_all_methods_serialized) {
+ acpi_ex_enter_interpreter();
+ }
+
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_exit_interpreter
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Exit the interpreter execution region. This is the top level
+ * routine used to exit the interpreter when all processing has
+ * been completed.
+ *
+ ******************************************************************************/
+
+void acpi_ex_exit_interpreter(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ex_exit_interpreter);
+
+ status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Could not release AML Interpreter mutex"));
+ }
+
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_relinquish_interpreter
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Exit the interpreter execution region, from within the
+ * interpreter - before attempting an operation that will possibly
+ * block the running thread.
+ *
+ * Cases where the interpreter is unlocked internally
+ * 1) Method to be blocked on a Sleep() AML opcode
+ * 2) Method to be blocked on an Acquire() AML opcode
+ * 3) Method to be blocked on a Wait() AML opcode
+ * 4) Method to be blocked to acquire the global lock
+ * 5) Method to be blocked waiting to execute a serialized control method
+ * that is currently executing
+ * 6) About to invoke a user-installed opregion handler
+ *
+ ******************************************************************************/
+
+void acpi_ex_relinquish_interpreter(void)
+{
+ ACPI_FUNCTION_TRACE(ex_relinquish_interpreter);
+
+ /*
+ * If the global serialized flag is set, do not release the interpreter.
+ * This forces the interpreter to be single threaded.
+ */
+ if (!acpi_gbl_all_methods_serialized) {
+ acpi_ex_exit_interpreter();
+ }
+
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_truncate_for32bit_table
+ *
+ * PARAMETERS: obj_desc - Object to be truncated
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Truncate an ACPI Integer to 32 bits if the execution mode is
+ * 32-bit, as determined by the revision of the DSDT.
+ *
+ ******************************************************************************/
+
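+/*
+ * Background for the width check below: acpi_gbl_integer_byte_width is 4
+ * when the table revision is less than 2 (32-bit integers) and 8 otherwise
+ * (64-bit integers). For example, in 32-bit mode the value
+ * 0x1122334455667788 would be truncated to 0x55667788 here.
+ */
+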
+void acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc)
+{
+
+ ACPI_FUNCTION_ENTRY();
+
+ /*
+ * Object must be a valid number and we must be executing
+ * a control method. NS node could be there for AML_INT_NAMEPATH_OP.
+ */
+ if ((!obj_desc) ||
+ (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) ||
+ (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER)) {
+ return;
+ }
+
+ if (acpi_gbl_integer_byte_width == 4) {
+ /*
+ * We are running a method that exists in a 32-bit ACPI table.
+ * Truncate the value to 32 bits by zeroing out the upper 32-bit field
+ */
+ obj_desc->integer.value &= (acpi_integer) ACPI_UINT32_MAX;
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_acquire_global_lock
+ *
+ * PARAMETERS: field_flags - Flags with Lock rule:
+ * always_lock or never_lock
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Obtain the ACPI hardware Global Lock, only if the field
+ * flags specify that it is to be obtained before field access.
+ *
+ ******************************************************************************/
+
+void acpi_ex_acquire_global_lock(u32 field_flags)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ex_acquire_global_lock);
+
+ /* Only use the lock if the always_lock bit is set */
+
+ if (!(field_flags & AML_FIELD_LOCK_RULE_MASK)) {
+ return_VOID;
+ }
+
+ /* Attempt to get the global lock, wait forever */
+
+ status = acpi_ex_acquire_mutex_object(ACPI_WAIT_FOREVER,
+ acpi_gbl_global_lock_mutex,
+ acpi_os_get_thread_id());
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not acquire Global Lock"));
+ }
+
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_release_global_lock
+ *
+ * PARAMETERS: field_flags - Flags with Lock rule:
+ * always_lock or never_lock
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Release the ACPI hardware Global Lock
+ *
+ ******************************************************************************/
+
+void acpi_ex_release_global_lock(u32 field_flags)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ex_release_global_lock);
+
+ /* Only use the lock if the always_lock bit is set */
+
+ if (!(field_flags & AML_FIELD_LOCK_RULE_MASK)) {
+ return_VOID;
+ }
+
+ /* Release the global lock */
+
+ status = acpi_ex_release_mutex_object(acpi_gbl_global_lock_mutex);
+ if (ACPI_FAILURE(status)) {
+
+ /* Report the error, but there isn't much else we can do */
+
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not release Global Lock"));
+ }
+
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_digits_needed
+ *
+ * PARAMETERS: Value - Value to be represented
+ * Base - Base of representation
+ *
+ * RETURN: The number of digits.
+ *
+ * DESCRIPTION: Calculate the number of digits needed to represent the Value
+ * in the given Base (Radix)
+ *
+ ******************************************************************************/
+
+static u32 acpi_ex_digits_needed(acpi_integer value, u32 base)
+{
+ u32 num_digits;
+ acpi_integer current_value;
+
+ ACPI_FUNCTION_TRACE(ex_digits_needed);
+
+ /* acpi_integer is unsigned, so we don't worry about a '-' prefix */
+
+ if (value == 0) {
+ return_UINT32(1);
+ }
+
+ current_value = value;
+ num_digits = 0;
+
+ /* Count the digits in the requested base */
+
+ while (current_value) {
+ (void)acpi_ut_short_divide(current_value, base, &current_value,
+ NULL);
+ num_digits++;
+ }
+
+ return_UINT32(num_digits);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_eisa_id_to_string
+ *
+ * PARAMETERS: numeric_id - EISA ID to be converted
+ * out_string - Where to put the converted string (8 bytes)
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Convert a numeric EISA ID to string representation
+ *
+ ******************************************************************************/
+
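+/*
+ * Worked example (value believed correct, shown for illustration): the
+ * compressed ID 0x0B0CD041 -- i.e. EISAID ("PNP0C0B"), the PnP ID of an
+ * ACPI fan -- byte-swaps to 0x41D00C0B; the three 5-bit fields decode to
+ * 'P', 'N', 'P' and the four hex nibbles to "0C0B", giving "PNP0C0B".
+ */
+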
+void acpi_ex_eisa_id_to_string(u32 numeric_id, char *out_string)
+{
+ u32 eisa_id;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* Swap ID to big-endian to get contiguous bits */
+
+ eisa_id = acpi_ut_dword_byte_swap(numeric_id);
+
+ out_string[0] = (char)('@' + (((unsigned long)eisa_id >> 26) & 0x1f));
+ out_string[1] = (char)('@' + ((eisa_id >> 21) & 0x1f));
+ out_string[2] = (char)('@' + ((eisa_id >> 16) & 0x1f));
+ out_string[3] = acpi_ut_hex_to_ascii_char((acpi_integer) eisa_id, 12);
+ out_string[4] = acpi_ut_hex_to_ascii_char((acpi_integer) eisa_id, 8);
+ out_string[5] = acpi_ut_hex_to_ascii_char((acpi_integer) eisa_id, 4);
+ out_string[6] = acpi_ut_hex_to_ascii_char((acpi_integer) eisa_id, 0);
+ out_string[7] = 0;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_unsigned_integer_to_string
+ *
+ * PARAMETERS: Value - Value to be converted
+ * out_string - Where to put the converted string
+ *
+ * RETURN: None. The converted string is returned in out_string.
+ *
+ * DESCRIPTION: Convert a number to string representation. Assumes string
+ * buffer is large enough to hold the string.
+ *
+ ******************************************************************************/
+
+void acpi_ex_unsigned_integer_to_string(acpi_integer value, char *out_string)
+{
+ u32 count;
+ u32 digits_needed;
+ u32 remainder;
+
+ ACPI_FUNCTION_ENTRY();
+
+ digits_needed = acpi_ex_digits_needed(value, 10);
+ out_string[digits_needed] = 0;
+
+ for (count = digits_needed; count > 0; count--) {
+ (void)acpi_ut_short_divide(value, 10, &value, &remainder);
+ out_string[count - 1] = (char)('0' + remainder);
+ }
+}
+
+#endif
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
new file mode 100644
index 0000000..eaaee16
--- /dev/null
+++ b/drivers/acpi/fan.c
@@ -0,0 +1,373 @@
+/*
+ * acpi_fan.c - ACPI Fan Driver ($Revision: 29 $)
+ *
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/uaccess.h>
+#include <linux/thermal.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#define ACPI_FAN_CLASS "fan"
+#define ACPI_FAN_FILE_STATE "state"
+
+#define _COMPONENT ACPI_FAN_COMPONENT
+ACPI_MODULE_NAME("fan");
+
+MODULE_AUTHOR("Paul Diefenbaugh");
+MODULE_DESCRIPTION("ACPI Fan Driver");
+MODULE_LICENSE("GPL");
+
+static int acpi_fan_add(struct acpi_device *device);
+static int acpi_fan_remove(struct acpi_device *device, int type);
+static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state);
+static int acpi_fan_resume(struct acpi_device *device);
+
+static const struct acpi_device_id fan_device_ids[] = {
+ {"PNP0C0B", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, fan_device_ids);
+
+static struct acpi_driver acpi_fan_driver = {
+ .name = "fan",
+ .class = ACPI_FAN_CLASS,
+ .ids = fan_device_ids,
+ .ops = {
+ .add = acpi_fan_add,
+ .remove = acpi_fan_remove,
+ .suspend = acpi_fan_suspend,
+ .resume = acpi_fan_resume,
+ },
+};
+
+/* thermal cooling device callbacks */
+static int fan_get_max_state(struct thermal_cooling_device *cdev, char *buf)
+{
+ /* An ACPI fan device supports only two states: ON/OFF */
+ return sprintf(buf, "1\n");
+}
+
+static int fan_get_cur_state(struct thermal_cooling_device *cdev, char *buf)
+{
+ struct acpi_device *device = cdev->devdata;
+ int state;
+ int result;
+
+ if (!device)
+ return -EINVAL;
+
+ result = acpi_bus_get_power(device->handle, &state);
+ if (result)
+ return result;
+
+ return sprintf(buf, "%s\n", state == ACPI_STATE_D3 ? "0" :
+ (state == ACPI_STATE_D0 ? "1" : "unknown"));
+}
+
+static int
+fan_set_cur_state(struct thermal_cooling_device *cdev, unsigned int state)
+{
+ struct acpi_device *device = cdev->devdata;
+ int result;
+
+ if (!device || (state != 0 && state != 1))
+ return -EINVAL;
+
+ result = acpi_bus_set_power(device->handle,
+ state ? ACPI_STATE_D0 : ACPI_STATE_D3);
+
+ return result;
+}
+
+static struct thermal_cooling_device_ops fan_cooling_ops = {
+ .get_max_state = fan_get_max_state,
+ .get_cur_state = fan_get_cur_state,
+ .set_cur_state = fan_set_cur_state,
+};
+
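+/*
+ * Once registered (see acpi_fan_add below), the fan is exposed through the
+ * generic thermal sysfs class. A typical interaction, assuming the fan
+ * registered as cooling_device0 (the number varies per system):
+ *
+ *     # cat /sys/class/thermal/cooling_device0/max_state        -> 1
+ *     # echo 0 > /sys/class/thermal/cooling_device0/cur_state   (fan off, D3)
+ *     # echo 1 > /sys/class/thermal/cooling_device0/cur_state   (fan on, D0)
+ */
+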
+/* --------------------------------------------------------------------------
+ FS Interface (/proc)
+ -------------------------------------------------------------------------- */
+#ifdef CONFIG_ACPI_PROCFS
+
+static struct proc_dir_entry *acpi_fan_dir;
+
+static int acpi_fan_read_state(struct seq_file *seq, void *offset)
+{
+ struct acpi_device *device = seq->private;
+ int state = 0;
+
+
+ if (device) {
+ if (acpi_bus_get_power(device->handle, &state))
+ seq_printf(seq, "status: ERROR\n");
+ else
+ seq_printf(seq, "status: %s\n",
+ !state ? "on" : "off");
+ }
+ return 0;
+}
+
+static int acpi_fan_state_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_fan_read_state, PDE(inode)->data);
+}
+
+static ssize_t
+acpi_fan_write_state(struct file *file, const char __user * buffer,
+ size_t count, loff_t * ppos)
+{
+ int result = 0;
+ struct seq_file *m = file->private_data;
+ struct acpi_device *device = m->private;
+ char state_string[3] = { '\0' };
+
+ if (count > sizeof(state_string) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(state_string, buffer, count))
+ return -EFAULT;
+
+ state_string[count] = '\0';
+ if ((state_string[0] < '0') || (state_string[0] > '3'))
+ return -EINVAL;
+ if (state_string[1] == '\n')
+ state_string[1] = '\0';
+ if (state_string[1] != '\0')
+ return -EINVAL;
+
+ result = acpi_bus_set_power(device->handle,
+ simple_strtoul(state_string, NULL, 0));
+ if (result)
+ return result;
+
+ return count;
+}
+
+static const struct file_operations acpi_fan_state_ops = {
+ .open = acpi_fan_state_open_fs,
+ .read = seq_read,
+ .write = acpi_fan_write_state,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int acpi_fan_add_fs(struct acpi_device *device)
+{
+ struct proc_dir_entry *entry = NULL;
+
+ if (!device)
+ return -EINVAL;
+
+ if (!acpi_device_dir(device)) {
+ acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
+ acpi_fan_dir);
+ if (!acpi_device_dir(device))
+ return -ENODEV;
+ acpi_device_dir(device)->owner = THIS_MODULE;
+ }
+
+ /* 'state' [R/W] */
+ entry = proc_create_data(ACPI_FAN_FILE_STATE,
+ S_IFREG | S_IRUGO | S_IWUSR,
+ acpi_device_dir(device),
+ &acpi_fan_state_ops,
+ device);
+ if (!entry)
+ return -ENODEV;
+ return 0;
+}
+
+static int acpi_fan_remove_fs(struct acpi_device *device)
+{
+
+ if (acpi_device_dir(device)) {
+ remove_proc_entry(ACPI_FAN_FILE_STATE, acpi_device_dir(device));
+ remove_proc_entry(acpi_device_bid(device), acpi_fan_dir);
+ acpi_device_dir(device) = NULL;
+ }
+
+ return 0;
+}
+#else
+static int acpi_fan_add_fs(struct acpi_device *device)
+{
+ return 0;
+}
+
+static int acpi_fan_remove_fs(struct acpi_device *device)
+{
+ return 0;
+}
+#endif
+/* --------------------------------------------------------------------------
+ Driver Interface
+ -------------------------------------------------------------------------- */
+
+static int acpi_fan_add(struct acpi_device *device)
+{
+ int result = 0;
+ int state = 0;
+ struct thermal_cooling_device *cdev;
+
+ if (!device)
+ return -EINVAL;
+
+ strcpy(acpi_device_name(device), "Fan");
+ strcpy(acpi_device_class(device), ACPI_FAN_CLASS);
+
+ result = acpi_bus_get_power(device->handle, &state);
+ if (result) {
+ printk(KERN_ERR PREFIX "Reading power state\n");
+ goto end;
+ }
+
+ device->flags.force_power_state = 1;
+ acpi_bus_set_power(device->handle, state);
+ device->flags.force_power_state = 0;
+
+ cdev = thermal_cooling_device_register("Fan", device,
+ &fan_cooling_ops);
+ if (IS_ERR(cdev)) {
+ result = PTR_ERR(cdev);
+ goto end;
+ }
+
+ dev_info(&device->dev, "registered as cooling_device%d\n", cdev->id);
+
+ device->driver_data = cdev;
+ result = sysfs_create_link(&device->dev.kobj,
+ &cdev->device.kobj,
+ "thermal_cooling");
+ if (result)
+ dev_err(&device->dev, "Failed to create sysfs link "
+ "'thermal_cooling'\n");
+
+ result = sysfs_create_link(&cdev->device.kobj,
+ &device->dev.kobj,
+ "device");
+ if (result)
+ dev_err(&device->dev, "Failed to create sysfs link "
+ "'device'\n");
+
+ result = acpi_fan_add_fs(device);
+ if (result)
+ goto end;
+
+ printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
+ acpi_device_name(device), acpi_device_bid(device),
+ !device->power.state ? "on" : "off");
+
+ end:
+ return result;
+}
+
+static int acpi_fan_remove(struct acpi_device *device, int type)
+{
+ struct thermal_cooling_device *cdev = acpi_driver_data(device);
+
+ if (!device || !cdev)
+ return -EINVAL;
+
+ acpi_fan_remove_fs(device);
+ sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
+ sysfs_remove_link(&cdev->device.kobj, "device");
+ thermal_cooling_device_unregister(cdev);
+
+ return 0;
+}
+
+static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state)
+{
+ if (!device)
+ return -EINVAL;
+
+ acpi_bus_set_power(device->handle, ACPI_STATE_D0);
+
+ return AE_OK;
+}
+
+static int acpi_fan_resume(struct acpi_device *device)
+{
+ int result = 0;
+ int power_state = 0;
+
+ if (!device)
+ return -EINVAL;
+
+ result = acpi_bus_get_power(device->handle, &power_state);
+ if (result) {
+ printk(KERN_ERR PREFIX
+ "Error reading fan power state\n");
+ return result;
+ }
+
+ device->flags.force_power_state = 1;
+ acpi_bus_set_power(device->handle, power_state);
+ device->flags.force_power_state = 0;
+
+ return result;
+}
+
+static int __init acpi_fan_init(void)
+{
+ int result = 0;
+
+#ifdef CONFIG_ACPI_PROCFS
+ acpi_fan_dir = proc_mkdir(ACPI_FAN_CLASS, acpi_root_dir);
+ if (!acpi_fan_dir)
+ return -ENODEV;
+ acpi_fan_dir->owner = THIS_MODULE;
+#endif
+
+ result = acpi_bus_register_driver(&acpi_fan_driver);
+ if (result < 0) {
+ remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void __exit acpi_fan_exit(void)
+{
+
+ acpi_bus_unregister_driver(&acpi_fan_driver);
+
+ remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir);
+
+ return;
+}
+
+module_init(acpi_fan_init);
+module_exit(acpi_fan_exit);
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
new file mode 100644
index 0000000..adec3d1
--- /dev/null
+++ b/drivers/acpi/glue.c
@@ -0,0 +1,302 @@
+/*
+ * Link physical devices with ACPI devices support
+ *
+ * Copyright (c) 2005 David Shaohua Li <shaohua.li@intel.com>
+ * Copyright (c) 2005 Intel Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/rwsem.h>
+#include <linux/acpi.h>
+
+#define ACPI_GLUE_DEBUG 0
+#if ACPI_GLUE_DEBUG
+#define DBG(x...) printk(PREFIX x)
+#else
+#define DBG(x...) do { } while(0)
+#endif
+static LIST_HEAD(bus_type_list);
+static DECLARE_RWSEM(bus_type_sem);
+
+int register_acpi_bus_type(struct acpi_bus_type *type)
+{
+ if (acpi_disabled)
+ return -ENODEV;
+ if (type && type->bus && type->find_device) {
+ down_write(&bus_type_sem);
+ list_add_tail(&type->list, &bus_type_list);
+ up_write(&bus_type_sem);
+ printk(KERN_INFO PREFIX "bus type %s registered\n",
+ type->bus->name);
+ return 0;
+ }
+ return -ENODEV;
+}
+
+int unregister_acpi_bus_type(struct acpi_bus_type *type)
+{
+ if (acpi_disabled)
+ return 0;
+ if (type) {
+ down_write(&bus_type_sem);
+ list_del_init(&type->list);
+ up_write(&bus_type_sem);
+ printk(KERN_INFO PREFIX "ACPI bus type %s unregistered\n",
+ type->bus->name);
+ return 0;
+ }
+ return -ENODEV;
+}
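+
+/*
+ * Editor's note: illustrative sketch, not part of the original patch.
+ * A bus driver registers itself with the ACPI glue layer by providing at
+ * least .bus and .find_device; all names below are hypothetical.
+ */
+static struct bus_type example_bus_type;
+
+static int example_find_device(struct device *dev, acpi_handle *handle)
+{
+ /* map 'dev' to its ACPI handle here; return -ENODEV if it has none */
+ return -ENODEV;
+}
+
+static struct acpi_bus_type example_acpi_bus = {
+ .bus = &example_bus_type,
+ .find_device = example_find_device,
+};
+/* register_acpi_bus_type(&example_acpi_bus) at bus init,
+ * unregister_acpi_bus_type(&example_acpi_bus) at bus exit. */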
+
+static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
+{
+ struct acpi_bus_type *tmp, *ret = NULL;
+
+ down_read(&bus_type_sem);
+ list_for_each_entry(tmp, &bus_type_list, list) {
+ if (tmp->bus == type) {
+ ret = tmp;
+ break;
+ }
+ }
+ up_read(&bus_type_sem);
+ return ret;
+}
+
+static int acpi_find_bridge_device(struct device *dev, acpi_handle * handle)
+{
+ struct acpi_bus_type *tmp;
+ int ret = -ENODEV;
+
+ down_read(&bus_type_sem);
+ list_for_each_entry(tmp, &bus_type_list, list) {
+ if (tmp->find_bridge && !tmp->find_bridge(dev, handle)) {
+ ret = 0;
+ break;
+ }
+ }
+ up_read(&bus_type_sem);
+ return ret;
+}
+
+/* Get a device's handle by its address (_ADR) under its parent */
+struct acpi_find_child {
+ acpi_handle handle;
+ acpi_integer address;
+};
+
+static acpi_status
+do_acpi_find_child(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+ acpi_status status;
+ struct acpi_device_info *info;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_find_child *find = context;
+
+ status = acpi_get_object_info(handle, &buffer);
+ if (ACPI_SUCCESS(status)) {
+ info = buffer.pointer;
+ if (info->address == find->address)
+ find->handle = handle;
+ kfree(buffer.pointer);
+ }
+ return AE_OK;
+}
+
+acpi_handle acpi_get_child(acpi_handle parent, acpi_integer address)
+{
+ struct acpi_find_child find = { NULL, address };
+
+ if (!parent)
+ return NULL;
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, parent,
+ 1, do_acpi_find_child, &find, NULL);
+ return find.handle;
+}
+
+EXPORT_SYMBOL(acpi_get_child);
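+
+/*
+ * Editor's note: illustrative usage sketch, not part of the original patch;
+ * the helper name and the chosen slot/function are hypothetical. A caller
+ * that knows a child's _ADR (for a PCI device, (slot << 16) | function)
+ * can look up the child's handle like this.
+ */
+static acpi_handle example_find_child_by_adr(acpi_handle parent)
+{
+ /* device 2, function 0 under 'parent'; returns NULL if not found */
+ return acpi_get_child(parent, (acpi_integer)((2 << 16) | 0));
+}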
+
+/* Link ACPI devices with physical devices */
+static void acpi_glue_data_handler(acpi_handle handle,
+ u32 function, void *context)
+{
+ /* we provide an empty handler */
+}
+
+/* Note: a successful call increases the reference count by one */
+struct device *acpi_get_physical_device(acpi_handle handle)
+{
+ acpi_status status;
+ struct device *dev;
+
+ status = acpi_get_data(handle, acpi_glue_data_handler, (void **)&dev);
+ if (ACPI_SUCCESS(status))
+ return get_device(dev);
+ return NULL;
+}
+
+EXPORT_SYMBOL(acpi_get_physical_device);
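+
+/*
+ * Editor's note: illustrative sketch, not part of the original patch; the
+ * helper name is hypothetical. Because a successful
+ * acpi_get_physical_device() takes a reference on the struct device, every
+ * caller must balance it with put_device(), as shown here.
+ */
+static void example_use_physical_device(acpi_handle handle)
+{
+ struct device *dev = acpi_get_physical_device(handle);
+
+ if (!dev)
+ return;
+ dev_info(dev, "physical device found for this ACPI handle\n");
+ put_device(dev); /* drop the reference taken above */
+}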
+
+/* ToDo: When a PCI bridge is found, return the PCI device behind the bridge.
+ * This should work in general, but it did not on a Lenovo T61 for the
+ * graphics card. It must be fixed once the PCI device is bound and the
+ * kernel device struct is attached to the ACPI device.
+ * Note: a successful call increases the reference count by one;
+ * call put_device(dev) on the returned device when done.
+ */
+struct device *acpi_get_physical_pci_device(acpi_handle handle)
+{
+ struct device *dev;
+ long long device_id;
+ acpi_status status;
+
+ status =
+ acpi_evaluate_integer(handle, "_ADR", NULL, &device_id);
+
+ if (ACPI_FAILURE(status))
+ return NULL;
+
+ /* We need to attempt to determine whether the _ADR refers to a
+ PCI device or not. There's no terribly good way to do this,
+ so the best we can hope for is to assume that there'll never
+ be a device in the host bridge */
+ if (device_id >= 0x10000) {
+ /* It looks like a PCI device. Does it exist? */
+ dev = acpi_get_physical_device(handle);
+ } else {
+ /* It doesn't look like a PCI device. Does its parent
+ exist? */
+ acpi_handle phandle;
+ if (acpi_get_parent(handle, &phandle))
+ return NULL;
+ dev = acpi_get_physical_device(phandle);
+ }
+ if (!dev)
+ return NULL;
+ return dev;
+}
+EXPORT_SYMBOL(acpi_get_physical_pci_device);
+
+static int acpi_bind_one(struct device *dev, acpi_handle handle)
+{
+ struct acpi_device *acpi_dev;
+ acpi_status status;
+
+ if (dev->archdata.acpi_handle) {
+ dev_warn(dev, "Drivers changed 'acpi_handle'\n");
+ return -EINVAL;
+ }
+ get_device(dev);
+ status = acpi_attach_data(handle, acpi_glue_data_handler, dev);
+ if (ACPI_FAILURE(status)) {
+ put_device(dev);
+ return -EINVAL;
+ }
+ dev->archdata.acpi_handle = handle;
+
+ status = acpi_bus_get_device(handle, &acpi_dev);
+ if (!ACPI_FAILURE(status)) {
+ int ret;
+
+ ret = sysfs_create_link(&dev->kobj, &acpi_dev->dev.kobj,
+ "firmware_node");
+ ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
+ "physical_node");
+ if (acpi_dev->wakeup.flags.valid) {
+ device_set_wakeup_capable(dev, true);
+ device_set_wakeup_enable(dev,
+ acpi_dev->wakeup.state.enabled);
+ }
+ }
+
+ return 0;
+}
+
+static int acpi_unbind_one(struct device *dev)
+{
+ if (!dev->archdata.acpi_handle)
+ return 0;
+ if (dev == acpi_get_physical_device(dev->archdata.acpi_handle)) {
+ struct acpi_device *acpi_dev;
+
+ /* acpi_get_physical_device() increased the refcount by one */
+ put_device(dev);
+
+ if (!acpi_bus_get_device(dev->archdata.acpi_handle,
+ &acpi_dev)) {
+ sysfs_remove_link(&dev->kobj, "firmware_node");
+ sysfs_remove_link(&acpi_dev->dev.kobj, "physical_node");
+ }
+
+ acpi_detach_data(dev->archdata.acpi_handle,
+ acpi_glue_data_handler);
+ dev->archdata.acpi_handle = NULL;
+ /* acpi_bind_one() increased the refcount by one */
+ put_device(dev);
+ } else {
+ dev_err(dev, "Oops, 'acpi_handle' corrupt\n");
+ }
+ return 0;
+}
+
+static int acpi_platform_notify(struct device *dev)
+{
+ struct acpi_bus_type *type;
+ acpi_handle handle;
+ int ret = -EINVAL;
+
+ if (!dev->bus || !dev->parent) {
+ /* bridge devices generally have no bus or parent */
+ ret = acpi_find_bridge_device(dev, &handle);
+ goto end;
+ }
+ type = acpi_get_bus_type(dev->bus);
+ if (!type) {
+ DBG("No ACPI bus support for %s\n", dev->bus_id);
+ ret = -EINVAL;
+ goto end;
+ }
+ if ((ret = type->find_device(dev, &handle)) != 0)
+ DBG("Can't get handler for %s\n", dev->bus_id);
+ end:
+ if (!ret)
+ acpi_bind_one(dev, handle);
+
+#if ACPI_GLUE_DEBUG
+ if (!ret) {
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ acpi_get_name(dev->archdata.acpi_handle,
+ ACPI_FULL_PATHNAME, &buffer);
+ DBG("Device %s -> %s\n", dev->bus_id, (char *)buffer.pointer);
+ kfree(buffer.pointer);
+ } else
+ DBG("Device %s -> No ACPI support\n", dev->bus_id);
+#endif
+
+ return ret;
+}
+
+static int acpi_platform_notify_remove(struct device *dev)
+{
+ acpi_unbind_one(dev);
+ return 0;
+}
+
+static int __init init_acpi_device_notify(void)
+{
+ if (acpi_disabled)
+ return 0;
+ if (platform_notify || platform_notify_remove) {
+ printk(KERN_ERR PREFIX "Can't use platform_notify\n");
+ return 0;
+ }
+ platform_notify = acpi_platform_notify;
+ platform_notify_remove = acpi_platform_notify_remove;
+ return 0;
+}
+
+arch_initcall(init_acpi_device_notify);
diff --git a/drivers/acpi/hardware/Makefile b/drivers/acpi/hardware/Makefile
new file mode 100644
index 0000000..438ad37
--- /dev/null
+++ b/drivers/acpi/hardware/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for all Linux ACPI interpreter subdirectories
+#
+
+obj-y := hwacpi.o hwgpe.o hwregs.o hwsleep.o
+
+obj-$(ACPI_FUTURE_USAGE) += hwtimer.o
+
+EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/hardware/hwacpi.c b/drivers/acpi/hardware/hwacpi.c
new file mode 100644
index 0000000..816894e
--- /dev/null
+++ b/drivers/acpi/hardware/hwacpi.c
@@ -0,0 +1,184 @@
+
+/******************************************************************************
+ *
+ * Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+
+#define _COMPONENT ACPI_HARDWARE
+ACPI_MODULE_NAME("hwacpi")
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_set_mode
+ *
+ * PARAMETERS: Mode - SYS_MODE_ACPI or SYS_MODE_LEGACY
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Transitions the system into the requested mode.
+ *
+ ******************************************************************************/
+acpi_status acpi_hw_set_mode(u32 mode)
+{
+
+ acpi_status status;
+ u32 retry;
+
+ ACPI_FUNCTION_TRACE(hw_set_mode);
+
+ /*
+ * ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
+ * system does not support mode transition.
+ */
+ if (!acpi_gbl_FADT.smi_command) {
+ ACPI_ERROR((AE_INFO,
+ "No SMI_CMD in FADT, mode transition failed"));
+ return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
+ }
+
+ /*
+ * ACPI 2.0 clarified the meaning of ACPI_ENABLE and ACPI_DISABLE
+ * in FADT: If it is zero, enabling or disabling is not supported.
+ * As old systems may have used zero for mode transition,
+ * we only conclude that these transitions are unsupported
+ * when both values are zero.
+ */
+ if (!acpi_gbl_FADT.acpi_enable && !acpi_gbl_FADT.acpi_disable) {
+ ACPI_ERROR((AE_INFO,
+ "No ACPI mode transition supported in this system (enable/disable both zero)"));
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ switch (mode) {
+ case ACPI_SYS_MODE_ACPI:
+
+ /* BIOS should have disabled ALL fixed and GP events */
+
+ status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
+ (u32) acpi_gbl_FADT.acpi_enable, 8);
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Attempting to enable ACPI mode\n"));
+ break;
+
+ case ACPI_SYS_MODE_LEGACY:
+
+ /*
+ * BIOS should clear all fixed status bits and restore fixed event
+ * enable bits to default
+ */
+ status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
+ (u32) acpi_gbl_FADT.acpi_disable,
+ 8);
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Attempting to enable Legacy (non-ACPI) mode\n"));
+ break;
+
+ default:
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not write ACPI mode change"));
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Some hardware takes a LONG time to switch modes. Give them 3 sec to
+ * do so, but allow faster systems to proceed more quickly.
+ */
+ retry = 3000;
+ while (retry) {
+ if (acpi_hw_get_mode() == mode) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Mode %X successfully enabled\n",
+ mode));
+ return_ACPI_STATUS(AE_OK);
+ }
+ acpi_os_stall(1000);
+ retry--;
+ }
+
+ ACPI_ERROR((AE_INFO, "Hardware did not change modes"));
+ return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_hw_get_mode
+ *
+ * PARAMETERS: none
+ *
+ * RETURN: SYS_MODE_ACPI or SYS_MODE_LEGACY
+ *
+ * DESCRIPTION: Return current operating state of system. Determined by
+ * querying the SCI_EN bit.
+ *
+ ******************************************************************************/
+
+u32 acpi_hw_get_mode(void)
+{
+ acpi_status status;
+ u32 value;
+
+ ACPI_FUNCTION_TRACE(hw_get_mode);
+
+ /*
+ * ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
+ * system does not support mode transition.
+ */
+ if (!acpi_gbl_FADT.smi_command) {
+ return_UINT32(ACPI_SYS_MODE_ACPI);
+ }
+
+ status = acpi_get_register(ACPI_BITREG_SCI_ENABLE, &value);
+ if (ACPI_FAILURE(status)) {
+ return_UINT32(ACPI_SYS_MODE_LEGACY);
+ }
+
+ if (value) {
+ return_UINT32(ACPI_SYS_MODE_ACPI);
+ } else {
+ return_UINT32(ACPI_SYS_MODE_LEGACY);
+ }
+}
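+
+/*
+ * Editor's note: illustrative usage sketch, not part of the original patch;
+ * the function name is hypothetical. Putting the two routines above together,
+ * entering ACPI mode amounts to checking the current mode and requesting a
+ * transition only when needed (acpi_hw_set_mode() then polls
+ * acpi_hw_get_mode() for up to 3 seconds).
+ */
+static acpi_status example_enter_acpi_mode(void)
+{
+ if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
+ return (AE_OK); /* already in ACPI mode */
+ }
+
+ return (acpi_hw_set_mode(ACPI_SYS_MODE_ACPI));
+}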
diff --git a/drivers/acpi/hardware/hwgpe.c b/drivers/acpi/hardware/hwgpe.c
new file mode 100644
index 0000000..0b80db9
--- /dev/null
+++ b/drivers/acpi/hardware/hwgpe.c
@@ -0,0 +1,477 @@
+
+/******************************************************************************
+ *
+ * Module Name: hwgpe - Low level GPE enable/disable/clear functions
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acevents.h>
+
+#define _COMPONENT ACPI_HARDWARE
+ACPI_MODULE_NAME("hwgpe")
+
+/* Local prototypes */
+static acpi_status
+acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block);
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_low_disable_gpe
+ *
+ * PARAMETERS: gpe_event_info - Info block for the GPE to be disabled
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Disable a single GPE in the enable register.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
+{
+ struct acpi_gpe_register_info *gpe_register_info;
+ acpi_status status;
+ u32 enable_mask;
+
+ /* Get the info block for the entire GPE register */
+
+ gpe_register_info = gpe_event_info->register_info;
+ if (!gpe_register_info) {
+ return (AE_NOT_EXIST);
+ }
+
+ /* Get current value of the enable register that contains this GPE */
+
+ status = acpi_hw_low_level_read(ACPI_GPE_REGISTER_WIDTH, &enable_mask,
+ &gpe_register_info->enable_address);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Clear just the bit that corresponds to this GPE */
+
+ ACPI_CLEAR_BIT(enable_mask,
+ ((u32) 1 <<
+ (gpe_event_info->gpe_number -
+ gpe_register_info->base_gpe_number)));
+
+ /* Write the updated enable mask */
+
+ status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, enable_mask,
+ &gpe_register_info->enable_address);
+
+ return (status);
+}
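+
+/*
+ * Editor's note: illustrative helper, not part of the original patch; the
+ * name is hypothetical. It restates the mask arithmetic used above: e.g.
+ * GPE 0x14 in a register block whose base GPE number is 0x10 maps to
+ * bit 4 of that block's enable register.
+ */
+static u32 example_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info)
+{
+ return ((u32) 1 << (gpe_event_info->gpe_number -
+ gpe_event_info->register_info->base_gpe_number));
+}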
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_write_gpe_enable_reg
+ *
+ * PARAMETERS: gpe_event_info - Info block for the GPE to be enabled
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Write a GPE enable register. Note: The bit for this GPE must
+ * already be cleared or set in the parent register
+ * enable_for_run mask.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info * gpe_event_info)
+{
+ struct acpi_gpe_register_info *gpe_register_info;
+ acpi_status status;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* Get the info block for the entire GPE register */
+
+ gpe_register_info = gpe_event_info->register_info;
+ if (!gpe_register_info) {
+ return (AE_NOT_EXIST);
+ }
+
+ /* Write the entire GPE (runtime) enable register */
+
+ status = acpi_hw_low_level_write(8, gpe_register_info->enable_for_run,
+ &gpe_register_info->enable_address);
+
+ return (status);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_clear_gpe
+ *
+ * PARAMETERS: gpe_event_info - Info block for the GPE to be cleared
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Clear the status bit for a single GPE.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
+{
+ acpi_status status;
+ u8 register_bit;
+
+ ACPI_FUNCTION_ENTRY();
+
+ register_bit = (u8)
+ (1 <<
+ (gpe_event_info->gpe_number -
+ gpe_event_info->register_info->base_gpe_number));
+
+ /*
+ * Write a one to the appropriate bit in the status register to
+ * clear this GPE.
+ */
+ status = acpi_hw_low_level_write(8, register_bit,
+ &gpe_event_info->register_info->
+ status_address);
+
+ return (status);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_get_gpe_status
+ *
+ * PARAMETERS: gpe_event_info - Info block for the GPE to be queried
+ * event_status - Where the GPE status is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Return the status of a single GPE.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
+ acpi_event_status * event_status)
+{
+ u32 in_byte;
+ u8 register_bit;
+ struct acpi_gpe_register_info *gpe_register_info;
+ acpi_status status;
+ acpi_event_status local_event_status = 0;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (!event_status) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /* Get the info block for the entire GPE register */
+
+ gpe_register_info = gpe_event_info->register_info;
+
+ /* Get the register bitmask for this GPE */
+
+ register_bit = (u8)
+ (1 <<
+ (gpe_event_info->gpe_number -
+ gpe_event_info->register_info->base_gpe_number));
+
+ /* GPE currently enabled? (enabled for runtime?) */
+
+ if (register_bit & gpe_register_info->enable_for_run) {
+ local_event_status |= ACPI_EVENT_FLAG_ENABLED;
+ }
+
+ /* GPE enabled for wake? */
+
+ if (register_bit & gpe_register_info->enable_for_wake) {
+ local_event_status |= ACPI_EVENT_FLAG_WAKE_ENABLED;
+ }
+
+ /* GPE currently active (status bit == 1)? */
+
+ status =
+ acpi_hw_low_level_read(8, &in_byte,
+ &gpe_register_info->status_address);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+
+ if (register_bit & in_byte) {
+ local_event_status |= ACPI_EVENT_FLAG_SET;
+ }
+
+ /* Set return value */
+
+ (*event_status) = local_event_status;
+
+ unlock_and_exit:
+ return (status);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_disable_gpe_block
+ *
+ * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
+ * gpe_block - Gpe Block info
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Disable all GPEs within a single GPE block
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info,
+ struct acpi_gpe_block_info * gpe_block)
+{
+ u32 i;
+ acpi_status status;
+
+ /* Examine each GPE Register within the block */
+
+ for (i = 0; i < gpe_block->register_count; i++) {
+
+ /* Disable all GPEs in this register */
+
+ status = acpi_hw_low_level_write(8, 0x00,
+ &gpe_block->register_info[i].
+ enable_address);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+
+ return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_clear_gpe_block
+ *
+ * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
+ * gpe_block - Gpe Block info
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Clear status bits for all GPEs within a single GPE block
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info,
+ struct acpi_gpe_block_info * gpe_block)
+{
+ u32 i;
+ acpi_status status;
+
+ /* Examine each GPE Register within the block */
+
+ for (i = 0; i < gpe_block->register_count; i++) {
+
+ /* Clear status on all GPEs in this register */
+
+ status = acpi_hw_low_level_write(8, 0xFF,
+ &gpe_block->register_info[i].
+ status_address);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+
+ return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_enable_runtime_gpe_block
+ *
+ * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
+ * gpe_block - Gpe Block info
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Enable all "runtime" GPEs within a single GPE block. Includes
+ * combination wake/run GPEs.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info,
+ struct acpi_gpe_block_info * gpe_block)
+{
+ u32 i;
+ acpi_status status;
+
+ /* NOTE: assumes that all GPEs are currently disabled */
+
+ /* Examine each GPE Register within the block */
+
+ for (i = 0; i < gpe_block->register_count; i++) {
+ if (!gpe_block->register_info[i].enable_for_run) {
+ continue;
+ }
+
+ /* Enable all "runtime" GPEs in this register */
+
+ status =
+ acpi_hw_low_level_write(8,
+ gpe_block->register_info[i].
+ enable_for_run,
+ &gpe_block->register_info[i].
+ enable_address);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+
+ return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_enable_wakeup_gpe_block
+ *
+ * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
+ * gpe_block - Gpe Block info
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Enable all "wake" GPEs within a single GPE block. Includes
+ * combination wake/run GPEs.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block)
+{
+ u32 i;
+ acpi_status status;
+
+ /* Examine each GPE Register within the block */
+
+ for (i = 0; i < gpe_block->register_count; i++) {
+ if (!gpe_block->register_info[i].enable_for_wake) {
+ continue;
+ }
+
+ /* Enable all "wake" GPEs in this register */
+
+ status = acpi_hw_low_level_write(8,
+ gpe_block->register_info[i].
+ enable_for_wake,
+ &gpe_block->register_info[i].
+ enable_address);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+
+ return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_disable_all_gpes
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Disable and clear all GPEs in all GPE blocks
+ *
+ ******************************************************************************/
+
+acpi_status acpi_hw_disable_all_gpes(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(hw_disable_all_gpes);
+
+ status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block);
+ status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block);
+ return_ACPI_STATUS(status);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_enable_all_runtime_gpes
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks
+ *
+ ******************************************************************************/
+
+acpi_status acpi_hw_enable_all_runtime_gpes(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(hw_enable_all_runtime_gpes);
+
+ status = acpi_ev_walk_gpe_list(acpi_hw_enable_runtime_gpe_block);
+ return_ACPI_STATUS(status);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_enable_all_wakeup_gpes
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Enable all "wakeup" GPEs, in all GPE blocks
+ *
+ ******************************************************************************/
+
+acpi_status acpi_hw_enable_all_wakeup_gpes(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(hw_enable_all_wakeup_gpes);
+
+ status = acpi_ev_walk_gpe_list(acpi_hw_enable_wakeup_gpe_block);
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/hardware/hwregs.c b/drivers/acpi/hardware/hwregs.c
new file mode 100644
index 0000000..ddf792a
--- /dev/null
+++ b/drivers/acpi/hardware/hwregs.c
@@ -0,0 +1,857 @@
+
+/*******************************************************************************
+ *
+ * Module Name: hwregs - Read/write access functions for the various ACPI
+ * control and status registers.
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acevents.h>
+
+#define _COMPONENT ACPI_HARDWARE
+ACPI_MODULE_NAME("hwregs")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_hw_clear_acpi_status
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Clears all fixed and general purpose status bits
+ * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
+ *
+ ******************************************************************************/
+acpi_status acpi_hw_clear_acpi_status(void)
+{
+ acpi_status status;
+ acpi_cpu_flags lock_flags = 0;
+
+ ACPI_FUNCTION_TRACE(hw_clear_acpi_status);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_IO, "About to write %04X to %04X\n",
+ ACPI_BITMASK_ALL_FIXED_STATUS,
+ (u16) acpi_gbl_FADT.xpm1a_event_block.address));
+
+ lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
+
+ status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
+ ACPI_BITMASK_ALL_FIXED_STATUS);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+
+ /* Clear the fixed events */
+
+ if (acpi_gbl_FADT.xpm1b_event_block.address) {
+ status =
+ acpi_hw_low_level_write(16, ACPI_BITMASK_ALL_FIXED_STATUS,
+ &acpi_gbl_FADT.xpm1b_event_block);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+ }
+
+ /* Clear the GPE Bits in all GPE registers in all GPE blocks */
+
+ status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block);
+
+ unlock_and_exit:
+ acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_sleep_type_data
+ *
+ * PARAMETERS: sleep_state - Numeric sleep state
+ * *sleep_type_a - Where SLP_TYPa is returned
+ * *sleep_type_b - Where SLP_TYPb is returned
+ *
+ * RETURN: Status - ACPI status
+ *
+ * DESCRIPTION: Obtain the SLP_TYPa and SLP_TYPb values for the requested sleep
+ * state.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_get_sleep_type_data(u8 sleep_state, u8 * sleep_type_a, u8 * sleep_type_b)
+{
+ acpi_status status = AE_OK;
+ struct acpi_evaluate_info *info;
+
+ ACPI_FUNCTION_TRACE(acpi_get_sleep_type_data);
+
+ /* Validate parameters */
+
+ if ((sleep_state > ACPI_S_STATES_MAX) || !sleep_type_a || !sleep_type_b) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Allocate the evaluation information block */
+
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ info->pathname =
+ ACPI_CAST_PTR(char, acpi_gbl_sleep_state_names[sleep_state]);
+
+ /* Evaluate the namespace object containing the values for this state */
+
+ status = acpi_ns_evaluate(info);
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "%s while evaluating SleepState [%s]\n",
+ acpi_format_exception(status),
+ info->pathname));
+
+ goto cleanup;
+ }
+
+ /* Must have a return object */
+
+ if (!info->return_object) {
+ ACPI_ERROR((AE_INFO, "No Sleep State object returned from [%s]",
+ info->pathname));
+ status = AE_NOT_EXIST;
+ }
+
+ /* It must be of type Package */
+
+ else if (ACPI_GET_OBJECT_TYPE(info->return_object) != ACPI_TYPE_PACKAGE) {
+ ACPI_ERROR((AE_INFO,
+ "Sleep State return object is not a Package"));
+ status = AE_AML_OPERAND_TYPE;
+ }
+
+ /*
+ * The package must have at least two elements. NOTE (March 2005): This
+ * goes against the current ACPI spec which defines this object as a
+ * package with one encoded DWORD element. However, existing practice
+ * by BIOS vendors seems to be to have 2 or more elements, at least
+ * one per sleep type (A/B).
+ */
+ else if (info->return_object->package.count < 2) {
+ ACPI_ERROR((AE_INFO,
+ "Sleep State return package does not have at least two elements"));
+ status = AE_AML_NO_OPERAND;
+ }
+
+ /* The first two elements must both be of type Integer */
+
+ else if ((ACPI_GET_OBJECT_TYPE(info->return_object->package.elements[0])
+ != ACPI_TYPE_INTEGER) ||
+ (ACPI_GET_OBJECT_TYPE(info->return_object->package.elements[1])
+ != ACPI_TYPE_INTEGER)) {
+ ACPI_ERROR((AE_INFO,
+ "Sleep State return package elements are not both Integers (%s, %s)",
+ acpi_ut_get_object_type_name(info->return_object->
+ package.elements[0]),
+ acpi_ut_get_object_type_name(info->return_object->
+ package.elements[1])));
+ status = AE_AML_OPERAND_TYPE;
+ } else {
+ /* Valid _Sx_ package size, type, and value */
+
+ *sleep_type_a = (u8)
+ (info->return_object->package.elements[0])->integer.value;
+ *sleep_type_b = (u8)
+ (info->return_object->package.elements[1])->integer.value;
+ }
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "While evaluating SleepState [%s], bad Sleep object %p type %s",
+ info->pathname, info->return_object,
+ acpi_ut_get_object_type_name(info->
+ return_object)));
+ }
+
+ acpi_ut_remove_reference(info->return_object);
+
+ cleanup:
+ ACPI_FREE(info);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_sleep_type_data)
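+
+/*
+ * Editor's note: illustrative usage sketch, not part of the original patch;
+ * the helper name is hypothetical. Sleep entry code fetches SLP_TYPa and
+ * SLP_TYPb for the target state (here S3) before programming PM1_CONTROL.
+ */
+static acpi_status example_get_s3_sleep_types(u8 *type_a, u8 *type_b)
+{
+ return (acpi_get_sleep_type_data(ACPI_STATE_S3, type_a, type_b));
+}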
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_hw_get_bit_register_info
+ *
+ * PARAMETERS: register_id - Index of ACPI Register to access
+ *
+ * RETURN: The bit register info struct, containing the access bitmask
+ *
+ * DESCRIPTION: Map register_id into its bit register info structure.
+ *
+ ******************************************************************************/
+struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id)
+{
+ ACPI_FUNCTION_ENTRY();
+
+ if (register_id > ACPI_BITREG_MAX) {
+ ACPI_ERROR((AE_INFO, "Invalid BitRegister ID: %X",
+ register_id));
+ return (NULL);
+ }
+
+ return (&acpi_gbl_bit_register_info[register_id]);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_register
+ *
+ * PARAMETERS: register_id - ID of ACPI bit_register to access
+ * return_value - Value that was read from the register
+ *
+ * RETURN: Status and the value read from the specified Register. The
+ * returned value is normalized to bit 0 (shifted all the way right).
+ *
+ * DESCRIPTION: ACPI bit_register read function.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_get_register_unlocked(u32 register_id, u32 * return_value)
+{
+ u32 register_value = 0;
+ struct acpi_bit_register_info *bit_reg_info;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_get_register);
+
+ /* Get the info structure corresponding to the requested ACPI Register */
+
+ bit_reg_info = acpi_hw_get_bit_register_info(register_id);
+ if (!bit_reg_info) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Read from the register */
+
+ status = acpi_hw_register_read(bit_reg_info->parent_register,
+ &register_value);
+
+ if (ACPI_SUCCESS(status)) {
+
+ /* Normalize the value that was read */
+
+ register_value =
+ ((register_value & bit_reg_info->access_bit_mask)
+ >> bit_reg_info->bit_position);
+
+ *return_value = register_value;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_IO, "Read value %8.8X register %X\n",
+ register_value,
+ bit_reg_info->parent_register));
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+acpi_status acpi_get_register(u32 register_id, u32 * return_value)
+{
+ acpi_status status;
+ acpi_cpu_flags flags;
+ flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
+ status = acpi_get_register_unlocked(register_id, return_value);
+ acpi_os_release_lock(acpi_gbl_hardware_lock, flags);
+ return status;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_register)
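+
+/*
+ * Editor's note: illustrative usage sketch, not part of the original patch;
+ * the helper name is hypothetical. Because the returned value is normalized
+ * to bit 0, a caller checking SCI_EN (as acpi_hw_get_mode() does) only tests
+ * for zero vs. non-zero.
+ */
+static u32 example_read_sci_en(void)
+{
+ u32 sci_en = 0;
+
+ if (ACPI_FAILURE(acpi_get_register(ACPI_BITREG_SCI_ENABLE, &sci_en))) {
+ return (0);
+ }
+ return (sci_en); /* 1 while the system is in ACPI mode */
+}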
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_set_register
+ *
+ * PARAMETERS: register_id - ID of ACPI bit_register to access
+ * Value - (only used on write) value to write to the
+ * Register, NOT pre-normalized to the bit pos
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: ACPI Bit Register write function.
+ *
+ ******************************************************************************/
+acpi_status acpi_set_register(u32 register_id, u32 value)
+{
+ u32 register_value = 0;
+ struct acpi_bit_register_info *bit_reg_info;
+ acpi_status status;
+ acpi_cpu_flags lock_flags;
+
+ ACPI_FUNCTION_TRACE_U32(acpi_set_register, register_id);
+
+ /* Get the info structure corresponding to the requested ACPI Register */
+
+ bit_reg_info = acpi_hw_get_bit_register_info(register_id);
+ if (!bit_reg_info) {
+ ACPI_ERROR((AE_INFO, "Bad ACPI HW RegisterId: %X",
+ register_id));
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
+
+ /* Always do a register read first so we can insert the new bits */
+
+ status = acpi_hw_register_read(bit_reg_info->parent_register,
+ &register_value);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+
+ /*
+ * Decode the Register ID
+ * Register ID = [Register block ID] | [bit ID]
+ *
+ * Use the bit ID to locate the exact Register offset, and the access
+ * mask to determine which bits to read-modify-write.
+ */
+ switch (bit_reg_info->parent_register) {
+ case ACPI_REGISTER_PM1_STATUS:
+
+ /*
+ * Status Registers are different from the rest. Clear by
+ * writing 1, and writing 0 has no effect. So, the only relevant
+ * information is the single bit we're interested in, all others should
+ * be written as 0 so they will be left unchanged.
+ */
+ value = ACPI_REGISTER_PREPARE_BITS(value,
+ bit_reg_info->bit_position,
+ bit_reg_info->
+ access_bit_mask);
+ if (value) {
+ status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
+ (u16) value);
+ register_value = 0;
+ }
+ break;
+
+ case ACPI_REGISTER_PM1_ENABLE:
+
+ ACPI_REGISTER_INSERT_VALUE(register_value,
+ bit_reg_info->bit_position,
+ bit_reg_info->access_bit_mask,
+ value);
+
+ status = acpi_hw_register_write(ACPI_REGISTER_PM1_ENABLE,
+ (u16) register_value);
+ break;
+
+ case ACPI_REGISTER_PM1_CONTROL:
+
+ /*
+ * Write the PM1 Control register.
+ * Note that at this level, the fact that there are actually TWO
+ * registers (A and B - and B may not exist) is abstracted.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_IO, "PM1 control: Read %X\n",
+ register_value));
+
+ ACPI_REGISTER_INSERT_VALUE(register_value,
+ bit_reg_info->bit_position,
+ bit_reg_info->access_bit_mask,
+ value);
+
+ status = acpi_hw_register_write(ACPI_REGISTER_PM1_CONTROL,
+ (u16) register_value);
+ break;
+
+ case ACPI_REGISTER_PM2_CONTROL:
+
+ status = acpi_hw_register_read(ACPI_REGISTER_PM2_CONTROL,
+ &register_value);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_IO,
+ "PM2 control: Read %X from %8.8X%8.8X\n",
+ register_value,
+ ACPI_FORMAT_UINT64(acpi_gbl_FADT.
+ xpm2_control_block.
+ address)));
+
+ ACPI_REGISTER_INSERT_VALUE(register_value,
+ bit_reg_info->bit_position,
+ bit_reg_info->access_bit_mask,
+ value);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_IO,
+ "About to write %4.4X to %8.8X%8.8X\n",
+ register_value,
+ ACPI_FORMAT_UINT64(acpi_gbl_FADT.
+ xpm2_control_block.
+ address)));
+
+ status = acpi_hw_register_write(ACPI_REGISTER_PM2_CONTROL,
+ (u8) (register_value));
+ break;
+
+ default:
+ break;
+ }
+
+ unlock_and_exit:
+
+ acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
+
+ /* Normalize the value that was read */
+
+ ACPI_DEBUG_EXEC(register_value =
+ ((register_value & bit_reg_info->access_bit_mask) >>
+ bit_reg_info->bit_position));
+
+ ACPI_DEBUG_PRINT((ACPI_DB_IO,
+ "Set bits: %8.8X actual %8.8X register %X\n", value,
+ register_value, bit_reg_info->parent_register));
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_set_register)
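+
+/*
+ * Editor's note: illustrative usage sketch, not part of the original patch;
+ * the helper name is hypothetical. Callers pass an un-shifted value and the
+ * function positions it under the register's access mask. Writing 1 to a
+ * status bit clears it, so clearing the power-button status looks like this.
+ */
+static acpi_status example_clear_power_button_status(void)
+{
+ return (acpi_set_register(ACPI_BITREG_POWER_BUTTON_STATUS, 1));
+}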
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_register_read
+ *
+ * PARAMETERS: register_id - ACPI Register ID
+ * return_value - Where the register value is returned
+ *
+ * RETURN: Status and the value read.
+ *
+ * DESCRIPTION: Read from the specified ACPI register
+ *
+ ******************************************************************************/
+acpi_status
+acpi_hw_register_read(u32 register_id, u32 * return_value)
+{
+ u32 value1 = 0;
+ u32 value2 = 0;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(hw_register_read);
+
+ switch (register_id) {
+ case ACPI_REGISTER_PM1_STATUS: /* 16-bit access */
+
+ status =
+ acpi_hw_low_level_read(16, &value1,
+ &acpi_gbl_FADT.xpm1a_event_block);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /* PM1B is optional */
+
+ status =
+ acpi_hw_low_level_read(16, &value2,
+ &acpi_gbl_FADT.xpm1b_event_block);
+ value1 |= value2;
+ break;
+
+ case ACPI_REGISTER_PM1_ENABLE: /* 16-bit access */
+
+ status =
+ acpi_hw_low_level_read(16, &value1, &acpi_gbl_xpm1a_enable);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /* PM1B is optional */
+
+ status =
+ acpi_hw_low_level_read(16, &value2, &acpi_gbl_xpm1b_enable);
+ value1 |= value2;
+ break;
+
+ case ACPI_REGISTER_PM1_CONTROL: /* 16-bit access */
+
+ status =
+ acpi_hw_low_level_read(16, &value1,
+ &acpi_gbl_FADT.xpm1a_control_block);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ status =
+ acpi_hw_low_level_read(16, &value2,
+ &acpi_gbl_FADT.xpm1b_control_block);
+ value1 |= value2;
+ break;
+
+ case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */
+
+ status =
+ acpi_hw_low_level_read(8, &value1,
+ &acpi_gbl_FADT.xpm2_control_block);
+ break;
+
+ case ACPI_REGISTER_PM_TIMER: /* 32-bit access */
+
+ status =
+ acpi_hw_low_level_read(32, &value1,
+ &acpi_gbl_FADT.xpm_timer_block);
+ break;
+
+ case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */
+
+ status =
+ acpi_os_read_port(acpi_gbl_FADT.smi_command, &value1, 8);
+ break;
+
+ default:
+ ACPI_ERROR((AE_INFO, "Unknown Register ID: %X", register_id));
+ status = AE_BAD_PARAMETER;
+ break;
+ }
+
+ exit:
+
+ if (ACPI_SUCCESS(status)) {
+ *return_value = value1;
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_register_write
+ *
+ * PARAMETERS: register_id - ACPI Register ID
+ * Value - The value to write
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Write to the specified ACPI register
+ *
+ * NOTE: In accordance with the ACPI specification, this function automatically
+ * preserves the value of the following bits, meaning that these bits cannot be
+ * changed via this interface:
+ *
+ * PM1_CONTROL[0] = SCI_EN
+ * PM1_CONTROL[9]
+ * PM1_STATUS[11]
+ *
+ * ACPI References:
+ * 1) Hardware Ignored Bits: When software writes to a register with ignored
+ * bit fields, it preserves the ignored bit fields
+ * 2) SCI_EN: OSPM always preserves this bit position
+ *
+ ******************************************************************************/
+
+acpi_status acpi_hw_register_write(u32 register_id, u32 value)
+{
+ acpi_status status;
+ u32 read_value;
+
+ ACPI_FUNCTION_TRACE(hw_register_write);
+
+ switch (register_id) {
+ case ACPI_REGISTER_PM1_STATUS: /* 16-bit access */
+
+ /* Perform a read first to preserve certain bits (per ACPI spec) */
+
+ status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS,
+ &read_value);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /* Insert the bits to be preserved */
+
+ ACPI_INSERT_BITS(value, ACPI_PM1_STATUS_PRESERVED_BITS,
+ read_value);
+
+ /* Now we can write the data */
+
+ status =
+ acpi_hw_low_level_write(16, value,
+ &acpi_gbl_FADT.xpm1a_event_block);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /* PM1B is optional */
+
+ status =
+ acpi_hw_low_level_write(16, value,
+ &acpi_gbl_FADT.xpm1b_event_block);
+ break;
+
+ case ACPI_REGISTER_PM1_ENABLE: /* 16-bit access */
+
+ status =
+ acpi_hw_low_level_write(16, value, &acpi_gbl_xpm1a_enable);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /* PM1B is optional */
+
+ status =
+ acpi_hw_low_level_write(16, value, &acpi_gbl_xpm1b_enable);
+ break;
+
+ case ACPI_REGISTER_PM1_CONTROL: /* 16-bit access */
+
+ /*
+ * Perform a read first to preserve certain bits (per ACPI spec)
+ */
+ status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL,
+ &read_value);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /* Insert the bits to be preserved */
+
+ ACPI_INSERT_BITS(value, ACPI_PM1_CONTROL_PRESERVED_BITS,
+ read_value);
+
+ /* Now we can write the data */
+
+ status =
+ acpi_hw_low_level_write(16, value,
+ &acpi_gbl_FADT.xpm1a_control_block);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ status =
+ acpi_hw_low_level_write(16, value,
+ &acpi_gbl_FADT.xpm1b_control_block);
+ break;
+
+ case ACPI_REGISTER_PM1A_CONTROL: /* 16-bit access */
+
+ status =
+ acpi_hw_low_level_write(16, value,
+ &acpi_gbl_FADT.xpm1a_control_block);
+ break;
+
+ case ACPI_REGISTER_PM1B_CONTROL: /* 16-bit access */
+
+ status =
+ acpi_hw_low_level_write(16, value,
+ &acpi_gbl_FADT.xpm1b_control_block);
+ break;
+
+ case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */
+
+ status =
+ acpi_hw_low_level_write(8, value,
+ &acpi_gbl_FADT.xpm2_control_block);
+ break;
+
+ case ACPI_REGISTER_PM_TIMER: /* 32-bit access */
+
+ status =
+ acpi_hw_low_level_write(32, value,
+ &acpi_gbl_FADT.xpm_timer_block);
+ break;
+
+ case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */
+
+ /* SMI_CMD is currently always in IO space */
+
+ status =
+ acpi_os_write_port(acpi_gbl_FADT.smi_command, value, 8);
+ break;
+
+ default:
+ status = AE_BAD_PARAMETER;
+ break;
+ }
+
+ exit:
+ return_ACPI_STATUS(status);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_low_level_read
+ *
+ * PARAMETERS: Width - 8, 16, or 32
+ * Value - Where the value is returned
+ * Reg - GAS register structure
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Read from either memory or IO space.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_hw_low_level_read(u32 width, u32 * value, struct acpi_generic_address *reg)
+{
+ u64 address;
+ acpi_status status;
+
+ ACPI_FUNCTION_NAME(hw_low_level_read);
+
+ /*
+ * Must have a valid pointer to a GAS structure, and
+ * a non-zero address within. However, don't return an error
+ * because the PM1A/B code must not fail if B isn't present.
+ */
+ if (!reg) {
+ return (AE_OK);
+ }
+
+ /* Get a local copy of the address. Handles possible alignment issues */
+
+ ACPI_MOVE_64_TO_64(&address, &reg->address);
+ if (!address) {
+ return (AE_OK);
+ }
+ *value = 0;
+
+ /*
+ * Two address spaces supported: Memory or IO.
+ * PCI_Config is not supported here because the GAS struct is insufficient
+ */
+ switch (reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+
+ status = acpi_os_read_memory((acpi_physical_address) address,
+ value, width);
+ break;
+
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+
+ status =
+ acpi_os_read_port((acpi_io_address) address, value, width);
+ break;
+
+ default:
+ ACPI_ERROR((AE_INFO,
+ "Unsupported address space: %X", reg->space_id));
+ return (AE_BAD_PARAMETER);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_IO,
+ "Read: %8.8X width %2d from %8.8X%8.8X (%s)\n",
+ *value, width, ACPI_FORMAT_UINT64(address),
+ acpi_ut_get_region_name(reg->space_id)));
+
+ return (status);
+}
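+
+/*
+ * Editor's note: illustrative usage sketch, not part of the original patch;
+ * the helper name is hypothetical. The PM timer read performed by
+ * acpi_hw_register_read(ACPI_REGISTER_PM_TIMER) above reduces to a single
+ * 32-bit GAS read like this.
+ */
+static acpi_status example_read_pm_timer(u32 *ticks)
+{
+ return (acpi_hw_low_level_read(32, ticks, &acpi_gbl_FADT.xpm_timer_block));
+}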
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_low_level_write
+ *
+ * PARAMETERS: Width - 8, 16, or 32
+ * Value - To be written
+ * Reg - GAS register structure
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Write to either memory or IO space.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address * reg)
+{
+ u64 address;
+ acpi_status status;
+
+ ACPI_FUNCTION_NAME(hw_low_level_write);
+
+ /*
+ * Must have a valid pointer to a GAS structure, and
+ * a non-zero address within. However, don't return an error
+ * because the PM1A/B code must not fail if B isn't present.
+ */
+ if (!reg) {
+ return (AE_OK);
+ }
+
+ /* Get a local copy of the address. Handles possible alignment issues */
+
+ ACPI_MOVE_64_TO_64(&address, &reg->address);
+ if (!address) {
+ return (AE_OK);
+ }
+
+ /*
+ * Two address spaces supported: Memory or IO.
+ * PCI_Config is not supported here because the GAS struct is insufficient
+ */
+ switch (reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+
+ status = acpi_os_write_memory((acpi_physical_address) address,
+ value, width);
+ break;
+
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+
+ status = acpi_os_write_port((acpi_io_address) address, value,
+ width);
+ break;
+
+ default:
+ ACPI_ERROR((AE_INFO,
+ "Unsupported address space: %X", reg->space_id));
+ return (AE_BAD_PARAMETER);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_IO,
+ "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n",
+ value, width, ACPI_FORMAT_UINT64(address),
+ acpi_ut_get_region_name(reg->space_id)));
+
+ return (status);
+}
diff --git a/drivers/acpi/hardware/hwsleep.c b/drivers/acpi/hardware/hwsleep.c
new file mode 100644
index 0000000..25dccdf
--- /dev/null
+++ b/drivers/acpi/hardware/hwsleep.c
@@ -0,0 +1,643 @@
+
+/******************************************************************************
+ *
+ * Name: hwsleep.c - ACPI Hardware Sleep/Wake Interface
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/actables.h>
+
+#define _COMPONENT ACPI_HARDWARE
+ACPI_MODULE_NAME("hwsleep")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_set_firmware_waking_vector
+ *
+ * PARAMETERS: physical_address - Physical address of ACPI real mode
+ * entry point.
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Access function for the firmware_waking_vector field in FACS
+ *
+ ******************************************************************************/
+acpi_status
+acpi_set_firmware_waking_vector(acpi_physical_address physical_address)
+{
+ struct acpi_table_facs *facs;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);
+
+ /* Get the FACS */
+
+ status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
+ ACPI_CAST_INDIRECT_PTR(struct
+ acpi_table_header,
+ &facs));
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * According to the ACPI specification 2.0c and later, the 64-bit
+ * waking vector should be cleared and the 32-bit waking vector should
+ * be used, unless we want the wake-up code to be called by the BIOS in
+ * Protected Mode. Some systems (for example HP dv5-1004nr) are known
+ * to fail to resume if the 64-bit vector is used.
+ */
+ if (facs->version >= 1)
+ facs->xfirmware_waking_vector = 0;
+
+ facs->firmware_waking_vector = (u32)physical_address;
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
+
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_firmware_waking_vector
+ *
+ * PARAMETERS: *physical_address - Where the contents of
+ * the firmware_waking_vector field of
+ * the FACS will be returned.
+ *
+ * RETURN: Status, vector
+ *
+ * DESCRIPTION: Access function for the firmware_waking_vector field in FACS
+ *
+ ******************************************************************************/
+#ifdef ACPI_FUTURE_USAGE
+acpi_status
+acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
+{
+ struct acpi_table_facs *facs;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_get_firmware_waking_vector);
+
+ if (!physical_address) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Get the FACS */
+
+ status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
+ ACPI_CAST_INDIRECT_PTR(struct
+ acpi_table_header,
+ &facs));
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Get the vector */
+ *physical_address = (acpi_physical_address)facs->firmware_waking_vector;
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_firmware_waking_vector)
+#endif
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_enter_sleep_state_prep
+ *
+ * PARAMETERS: sleep_state - Which sleep state to enter
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Prepare to enter a system sleep state (see ACPI 2.0 spec p 231)
+ * This function must execute with interrupts enabled.
+ * We break sleeping into 2 stages so that OSPM can handle
+ * various OS-specific tasks between the two steps.
+ *
+ ******************************************************************************/
+acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
+{
+ acpi_status status;
+ struct acpi_object_list arg_list;
+ union acpi_object arg;
+
+ ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_prep);
+
+ /*
+ * _PSW methods could be run here to enable wake-on keyboard, LAN, etc.
+ */
+ status = acpi_get_sleep_type_data(sleep_state,
+ &acpi_gbl_sleep_type_a,
+ &acpi_gbl_sleep_type_b);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Setup parameter object */
+
+ arg_list.count = 1;
+ arg_list.pointer = &arg;
+
+ arg.type = ACPI_TYPE_INTEGER;
+ arg.integer.value = sleep_state;
+
+ /* Run the _PTS method */
+
+ status = acpi_evaluate_object(NULL, METHOD_NAME__PTS, &arg_list, NULL);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Setup the argument to _SST */
+
+ switch (sleep_state) {
+ case ACPI_STATE_S0:
+ arg.integer.value = ACPI_SST_WORKING;
+ break;
+
+ case ACPI_STATE_S1:
+ case ACPI_STATE_S2:
+ case ACPI_STATE_S3:
+ arg.integer.value = ACPI_SST_SLEEPING;
+ break;
+
+ case ACPI_STATE_S4:
+ arg.integer.value = ACPI_SST_SLEEP_CONTEXT;
+ break;
+
+ default:
+ arg.integer.value = ACPI_SST_INDICATOR_OFF; /* Default is off */
+ break;
+ }
+
+ /*
+ * Set the system indicators to show the desired sleep state.
+ * _SST is an optional method (return no error if not found)
+ */
+ status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "While executing method _SST"));
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_enter_sleep_state
+ *
+ * PARAMETERS: sleep_state - Which sleep state to enter
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Enter a system sleep state (see ACPI 2.0 spec p 231)
+ * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
+ *
+ ******************************************************************************/
+acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
+{
+ u32 PM1Acontrol;
+ u32 PM1Bcontrol;
+ struct acpi_bit_register_info *sleep_type_reg_info;
+ struct acpi_bit_register_info *sleep_enable_reg_info;
+ u32 in_value;
+ struct acpi_object_list arg_list;
+ union acpi_object arg;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_enter_sleep_state);
+
+ if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
+ (acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
+ ACPI_ERROR((AE_INFO, "Sleep values out of range: A=%X B=%X",
+ acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b));
+ return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
+ }
+
+ sleep_type_reg_info =
+ acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_TYPE_A);
+ sleep_enable_reg_info =
+ acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_ENABLE);
+
+ /* Clear wake status */
+
+ status = acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Clear all fixed and general purpose status bits */
+
+ status = acpi_hw_clear_acpi_status();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * 1) Disable/Clear all GPEs
+ * 2) Enable all wakeup GPEs
+ */
+ status = acpi_hw_disable_all_gpes();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ acpi_gbl_system_awake_and_running = FALSE;
+
+ status = acpi_hw_enable_all_wakeup_gpes();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Execute the _GTS method */
+
+ arg_list.count = 1;
+ arg_list.pointer = &arg;
+ arg.type = ACPI_TYPE_INTEGER;
+ arg.integer.value = sleep_state;
+
+ status = acpi_evaluate_object(NULL, METHOD_NAME__GTS, &arg_list, NULL);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Get current value of PM1A control */
+
+ status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL, &PM1Acontrol);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+ "Entering sleep state [S%d]\n", sleep_state));
+
+ /* Clear SLP_EN and SLP_TYP fields */
+
+ PM1Acontrol &= ~(sleep_type_reg_info->access_bit_mask |
+ sleep_enable_reg_info->access_bit_mask);
+ PM1Bcontrol = PM1Acontrol;
+
+ /* Insert SLP_TYP bits */
+
+ PM1Acontrol |=
+ (acpi_gbl_sleep_type_a << sleep_type_reg_info->bit_position);
+ PM1Bcontrol |=
+ (acpi_gbl_sleep_type_b << sleep_type_reg_info->bit_position);
+
+ /*
+	 * We split the writes of SLP_TYP and SLP_EN to work around
+ * poorly implemented hardware.
+ */
+
+ /* Write #1: fill in SLP_TYP data */
+
+ status = acpi_hw_register_write(ACPI_REGISTER_PM1A_CONTROL,
+ PM1Acontrol);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_hw_register_write(ACPI_REGISTER_PM1B_CONTROL,
+ PM1Bcontrol);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Insert SLP_ENABLE bit */
+
+ PM1Acontrol |= sleep_enable_reg_info->access_bit_mask;
+ PM1Bcontrol |= sleep_enable_reg_info->access_bit_mask;
+
+ /* Write #2: SLP_TYP + SLP_EN */
+
+ ACPI_FLUSH_CPU_CACHE();
+
+ status = acpi_hw_register_write(ACPI_REGISTER_PM1A_CONTROL,
+ PM1Acontrol);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_hw_register_write(ACPI_REGISTER_PM1B_CONTROL,
+ PM1Bcontrol);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ if (sleep_state > ACPI_STATE_S3) {
+ /*
+ * We wanted to sleep > S3, but it didn't happen (by virtue of the
+ * fact that we are still executing!)
+ *
+ * Wait ten seconds, then try again. This is to get S4/S5 to work on
+ * all machines.
+ *
+ * We wait so long to allow chipsets that poll this reg very slowly to
+ * still read the right value. Ideally, this block would go
+ * away entirely.
+ */
+ acpi_os_stall(10000000);
+
+ status = acpi_hw_register_write(ACPI_REGISTER_PM1_CONTROL,
+ sleep_enable_reg_info->
+ access_bit_mask);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /* Wait until we enter sleep state */
+
+ do {
+ status = acpi_get_register_unlocked(ACPI_BITREG_WAKE_STATUS,
+ &in_value);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Spin until we wake */
+
+ } while (!in_value);
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
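
The split SLP_TYP/SLP_EN write sequence above can be illustrated with plain bit arithmetic. The sketch below is standalone: the SLP_TYP/SLP_EN bit positions follow the ACPI fixed-hardware PM1 control layout (SLP_TYP at bits 10-12, SLP_EN at bit 13), while the starting register value and the SLP_TYPa value are made up for the example.

#include <stdint.h>
#include <stdio.h>

#define SLP_TYP_POS     10
#define SLP_TYP_MASK    (0x7u << SLP_TYP_POS)
#define SLP_EN_MASK     (1u << 13)

int main(void)
{
        uint32_t pm1a_control = 0x0001;         /* e.g. only SCI_EN set */
        uint32_t sleep_type_a = 5;              /* hypothetical \_S3 SLP_TYPa */

        /* Write #1: clear SLP_TYP and SLP_EN, insert the new SLP_TYP */
        pm1a_control &= ~(SLP_TYP_MASK | SLP_EN_MASK);
        pm1a_control |= (sleep_type_a << SLP_TYP_POS);
        printf("write #1: 0x%04x (SLP_TYP only)\n", (unsigned)pm1a_control);

        /* Write #2: same value with SLP_EN set - this write enters sleep */
        pm1a_control |= SLP_EN_MASK;
        printf("write #2: 0x%04x (SLP_TYP + SLP_EN)\n", (unsigned)pm1a_control);
        return 0;
}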
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_enter_sleep_state_s4bios
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Perform an S4 BIOS request.
+ * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
+ *
+ ******************************************************************************/
+acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
+{
+ u32 in_value;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_s4bios);
+
+ status = acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_hw_clear_acpi_status();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * 1) Disable/Clear all GPEs
+ * 2) Enable all wakeup GPEs
+ */
+ status = acpi_hw_disable_all_gpes();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ acpi_gbl_system_awake_and_running = FALSE;
+
+ status = acpi_hw_enable_all_wakeup_gpes();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ ACPI_FLUSH_CPU_CACHE();
+
+ status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
+ (u32) acpi_gbl_FADT.S4bios_request, 8);
+
+ do {
+ acpi_os_stall(1000);
+ status = acpi_get_register(ACPI_BITREG_WAKE_STATUS, &in_value);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ } while (!in_value);
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_leave_sleep_state_prep
+ *
+ * PARAMETERS: sleep_state - Which sleep state we are exiting
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Perform the first stage of OS-independent ACPI cleanup after a
+ * sleep.
+ * Called with interrupts DISABLED.
+ *
+ ******************************************************************************/
+acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
+{
+ struct acpi_object_list arg_list;
+ union acpi_object arg;
+ acpi_status status;
+ struct acpi_bit_register_info *sleep_type_reg_info;
+ struct acpi_bit_register_info *sleep_enable_reg_info;
+ u32 PM1Acontrol;
+ u32 PM1Bcontrol;
+
+ ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep);
+
+ /*
+ * Set SLP_TYPE and SLP_EN to state S0.
+ * This is unclear from the ACPI Spec, but it is required
+ * by some machines.
+ */
+ status = acpi_get_sleep_type_data(ACPI_STATE_S0,
+ &acpi_gbl_sleep_type_a,
+ &acpi_gbl_sleep_type_b);
+ if (ACPI_SUCCESS(status)) {
+ sleep_type_reg_info =
+ acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_TYPE_A);
+ sleep_enable_reg_info =
+ acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_ENABLE);
+
+ /* Get current value of PM1A control */
+
+ status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL,
+ &PM1Acontrol);
+ if (ACPI_SUCCESS(status)) {
+
+ /* Clear SLP_EN and SLP_TYP fields */
+
+ PM1Acontrol &= ~(sleep_type_reg_info->access_bit_mask |
+ sleep_enable_reg_info->
+ access_bit_mask);
+ PM1Bcontrol = PM1Acontrol;
+
+ /* Insert SLP_TYP bits */
+
+ PM1Acontrol |=
+ (acpi_gbl_sleep_type_a << sleep_type_reg_info->
+ bit_position);
+ PM1Bcontrol |=
+ (acpi_gbl_sleep_type_b << sleep_type_reg_info->
+ bit_position);
+
+ /* Just ignore any errors */
+
+ (void)acpi_hw_register_write(ACPI_REGISTER_PM1A_CONTROL,
+ PM1Acontrol);
+ (void)acpi_hw_register_write(ACPI_REGISTER_PM1B_CONTROL,
+ PM1Bcontrol);
+ }
+ }
+
+ /* Execute the _BFS method */
+
+ arg_list.count = 1;
+ arg_list.pointer = &arg;
+ arg.type = ACPI_TYPE_INTEGER;
+ arg.integer.value = sleep_state;
+
+ status = acpi_evaluate_object(NULL, METHOD_NAME__BFS, &arg_list, NULL);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ ACPI_EXCEPTION((AE_INFO, status, "During Method _BFS"));
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_leave_sleep_state
+ *
+ * PARAMETERS: sleep_state - Which sleep state we just exited
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Perform OS-independent ACPI cleanup after a sleep
+ * Called with interrupts ENABLED.
+ *
+ ******************************************************************************/
+acpi_status acpi_leave_sleep_state(u8 sleep_state)
+{
+ struct acpi_object_list arg_list;
+ union acpi_object arg;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_leave_sleep_state);
+
+ /* Ensure enter_sleep_state_prep -> enter_sleep_state ordering */
+
+ acpi_gbl_sleep_type_a = ACPI_SLEEP_TYPE_INVALID;
+
+ /* Setup parameter object */
+
+ arg_list.count = 1;
+ arg_list.pointer = &arg;
+ arg.type = ACPI_TYPE_INTEGER;
+
+ /* Ignore any errors from these methods */
+
+ arg.integer.value = ACPI_SST_WAKING;
+ status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ ACPI_EXCEPTION((AE_INFO, status, "During Method _SST"));
+ }
+
+ /*
+	 * GPEs must be enabled before _WAK is called, as GPEs
+	 * may fire during its execution
+ *
+ * Restore the GPEs:
+ * 1) Disable/Clear all GPEs
+ * 2) Enable all runtime GPEs
+ */
+ status = acpi_hw_disable_all_gpes();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ status = acpi_hw_enable_all_runtime_gpes();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ arg.integer.value = sleep_state;
+ status = acpi_evaluate_object(NULL, METHOD_NAME__WAK, &arg_list, NULL);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ ACPI_EXCEPTION((AE_INFO, status, "During Method _WAK"));
+ }
+ /* TBD: _WAK "sometimes" returns stuff - do we want to look at it? */
+
+ /*
+ * Some BIOSes assume that WAK_STS will be cleared on resume and use
+ * it to determine whether the system is rebooting or resuming. Clear
+ * it for compatibility.
+ */
+ acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1);
+
+ acpi_gbl_system_awake_and_running = TRUE;
+
+ /* Enable power button */
+
+ (void)
+ acpi_set_register(acpi_gbl_fixed_event_info
+ [ACPI_EVENT_POWER_BUTTON].enable_register_id, 1);
+
+ (void)
+ acpi_set_register(acpi_gbl_fixed_event_info
+ [ACPI_EVENT_POWER_BUTTON].status_register_id, 1);
+
+ arg.integer.value = ACPI_SST_WORKING;
+ status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ ACPI_EXCEPTION((AE_INFO, status, "During Method _SST"));
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_leave_sleep_state)
diff --git a/drivers/acpi/hardware/hwtimer.c b/drivers/acpi/hardware/hwtimer.c
new file mode 100644
index 0000000..b53d575
--- /dev/null
+++ b/drivers/acpi/hardware/hwtimer.c
@@ -0,0 +1,187 @@
+
+/******************************************************************************
+ *
+ * Name: hwtimer.c - ACPI Power Management Timer Interface
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+
+#define _COMPONENT ACPI_HARDWARE
+ACPI_MODULE_NAME("hwtimer")
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_get_timer_resolution
+ *
+ * PARAMETERS: Resolution - Where the resolution is returned
+ *
+ * RETURN: Status and timer resolution
+ *
+ * DESCRIPTION: Obtains resolution of the ACPI PM Timer (24 or 32 bits).
+ *
+ ******************************************************************************/
+acpi_status acpi_get_timer_resolution(u32 * resolution)
+{
+ ACPI_FUNCTION_TRACE(acpi_get_timer_resolution);
+
+ if (!resolution) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ if ((acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) == 0) {
+ *resolution = 24;
+ } else {
+ *resolution = 32;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_timer_resolution)
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_get_timer
+ *
+ * PARAMETERS: Ticks - Where the timer value is returned
+ *
+ * RETURN: Status and current timer value (ticks)
+ *
+ * DESCRIPTION: Obtains current value of ACPI PM Timer (in ticks).
+ *
+ ******************************************************************************/
+acpi_status acpi_get_timer(u32 * ticks)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_get_timer);
+
+ if (!ticks) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status =
+ acpi_hw_low_level_read(32, ticks, &acpi_gbl_FADT.xpm_timer_block);
+
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_timer)
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_get_timer_duration
+ *
+ * PARAMETERS: start_ticks - Starting timestamp
+ * end_ticks - End timestamp
+ * time_elapsed - Where the elapsed time is returned
+ *
+ * RETURN: Status and time_elapsed
+ *
+ * DESCRIPTION: Computes the time elapsed (in microseconds) between two
+ * PM Timer time stamps, taking into account the possibility of
+ * rollovers, the timer resolution, and timer frequency.
+ *
+ * The PM Timer's clock ticks at roughly 3.6 times per
+ * _microsecond_, and its clock continues through Cx state
+ * transitions (unlike many CPU timestamp counters) -- making it
+ * a versatile and accurate timer.
+ *
+ * Note that this function accommodates only a single timer
+ * rollover. Thus for 24-bit timers, this function should only
+ * be used for calculating durations less than ~4.6 seconds
+ * (~20 minutes for 32-bit timers) -- calculations below:
+ *
+ * 2**24 Ticks / 3,600,000 Ticks/Sec = 4.66 sec
+ * 2**32 Ticks / 3,600,000 Ticks/Sec = 1193 sec or 19.88 minutes
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
+{
+ acpi_status status;
+ u32 delta_ticks;
+ acpi_integer quotient;
+
+ ACPI_FUNCTION_TRACE(acpi_get_timer_duration);
+
+ if (!time_elapsed) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /*
+ * Compute Tick Delta:
+ * Handle (max one) timer rollovers on 24-bit versus 32-bit timers.
+ */
+ if (start_ticks < end_ticks) {
+ delta_ticks = end_ticks - start_ticks;
+ } else if (start_ticks > end_ticks) {
+ if ((acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) == 0) {
+
+ /* 24-bit Timer */
+
+ delta_ticks =
+ (((0x00FFFFFF - start_ticks) +
+ end_ticks) & 0x00FFFFFF);
+ } else {
+ /* 32-bit Timer */
+
+ delta_ticks = (0xFFFFFFFF - start_ticks) + end_ticks;
+ }
+ } else { /* start_ticks == end_ticks */
+
+ *time_elapsed = 0;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Compute Duration (Requires a 64-bit multiply and divide):
+ *
+ * time_elapsed = (delta_ticks * 1000000) / PM_TIMER_FREQUENCY;
+ */
+ status = acpi_ut_short_divide(((u64) delta_ticks) * 1000000,
+ PM_TIMER_FREQUENCY, &quotient, NULL);
+
+ *time_elapsed = (u32) quotient;
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_timer_duration)
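
The rollover arithmetic described in the header comment above can be checked with a standalone calculation. The sketch below covers only the 24-bit timer case and uses the nominal 3.579545 MHz PM timer frequency; the function name and the sample tick values are illustrative.

#include <stdint.h>
#include <stdio.h>

#define PM_TIMER_FREQUENCY      3579545u  /* PM timer runs at 3.579545 MHz */

/*
 * Elapsed microseconds between two 24-bit PM timer samples.  At most one
 * rollover is accounted for, matching the limit described above.
 */
static uint32_t pm_timer_elapsed_us(uint32_t start_ticks, uint32_t end_ticks)
{
        uint32_t delta_ticks;

        if (start_ticks <= end_ticks)
                delta_ticks = end_ticks - start_ticks;
        else    /* the 24-bit counter wrapped exactly once */
                delta_ticks = ((0x00FFFFFF - start_ticks) + end_ticks)
                              & 0x00FFFFFF;

        return (uint32_t)(((uint64_t)delta_ticks * 1000000u)
                          / PM_TIMER_FREQUENCY);
}

int main(void)
{
        /* end < start, so the counter wrapped: roughly 0.28 s elapsed */
        printf("%u us\n",
               (unsigned)pm_timer_elapsed_us(0x00FFF000, 0x000F0000));
        return 0;
}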
diff --git a/drivers/acpi/namespace/Makefile b/drivers/acpi/namespace/Makefile
new file mode 100644
index 0000000..371a2da
--- /dev/null
+++ b/drivers/acpi/namespace/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for all Linux ACPI interpreter subdirectories
+#
+
+obj-y := nsaccess.o nsload.o nssearch.o nsxfeval.o \
+ nsalloc.o nseval.o nsnames.o nsutils.o nsxfname.o \
+ nsdump.o nsinit.o nsobject.o nswalk.o nsxfobj.o \
+ nsparse.o nspredef.o
+
+obj-$(ACPI_FUTURE_USAGE) += nsdumpdv.o
+
+EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/namespace/nsaccess.c b/drivers/acpi/namespace/nsaccess.c
new file mode 100644
index 0000000..c39a7f6
--- /dev/null
+++ b/drivers/acpi/namespace/nsaccess.c
@@ -0,0 +1,674 @@
+/*******************************************************************************
+ *
+ * Module Name: nsaccess - Top-level functions for accessing ACPI namespace
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/amlcode.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acdispat.h>
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsaccess")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_root_initialize
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Allocate and initialize the default root named objects
+ *
+ * MUTEX: Locks namespace for entire execution
+ *
+ ******************************************************************************/
+acpi_status acpi_ns_root_initialize(void)
+{
+ acpi_status status;
+ const struct acpi_predefined_names *init_val = NULL;
+ struct acpi_namespace_node *new_node;
+ union acpi_operand_object *obj_desc;
+ acpi_string val = NULL;
+
+ ACPI_FUNCTION_TRACE(ns_root_initialize);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * The global root ptr is initially NULL, so a non-NULL value indicates
+ * that acpi_ns_root_initialize() has already been called; just return.
+ */
+ if (acpi_gbl_root_node) {
+ status = AE_OK;
+ goto unlock_and_exit;
+ }
+
+ /*
+ * Tell the rest of the subsystem that the root is initialized
+ * (This is OK because the namespace is locked)
+ */
+ acpi_gbl_root_node = &acpi_gbl_root_node_struct;
+
+ /* Enter the pre-defined names in the name table */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Entering predefined entries into namespace\n"));
+
+ for (init_val = acpi_gbl_pre_defined_names; init_val->name; init_val++) {
+
+ /* _OSI is optional for now, will be permanent later */
+
+ if (!ACPI_STRCMP(init_val->name, "_OSI")
+ && !acpi_gbl_create_osi_method) {
+ continue;
+ }
+
+ status = acpi_ns_lookup(NULL, init_val->name, init_val->type,
+ ACPI_IMODE_LOAD_PASS2,
+ ACPI_NS_NO_UPSEARCH, NULL, &new_node);
+
+ if (ACPI_FAILURE(status) || (!new_node)) { /* Must be on same line for code converter */
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not create predefined name %s",
+ init_val->name));
+ }
+
+ /*
+ * Name entered successfully.
+ * If entry in pre_defined_names[] specifies an
+ * initial value, create the initial value.
+ */
+ if (init_val->val) {
+ status = acpi_os_predefined_override(init_val, &val);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Could not override predefined %s",
+ init_val->name));
+ }
+
+ if (!val) {
+ val = init_val->val;
+ }
+
+ /*
+ * Entry requests an initial value, allocate a
+ * descriptor for it.
+ */
+ obj_desc =
+ acpi_ut_create_internal_object(init_val->type);
+ if (!obj_desc) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
+
+ /*
+ * Convert value string from table entry to
+ * internal representation. Only types actually
+ * used for initial values are implemented here.
+ */
+ switch (init_val->type) {
+ case ACPI_TYPE_METHOD:
+ obj_desc->method.param_count =
+ (u8) ACPI_TO_INTEGER(val);
+ obj_desc->common.flags |= AOPOBJ_DATA_VALID;
+
+#if defined (ACPI_ASL_COMPILER)
+
+ /* Save the parameter count for the i_aSL compiler */
+
+ new_node->value = obj_desc->method.param_count;
+#else
+ /* Mark this as a very SPECIAL method */
+
+ obj_desc->method.method_flags =
+ AML_METHOD_INTERNAL_ONLY;
+
+#ifndef ACPI_DUMP_APP
+ obj_desc->method.implementation =
+ acpi_ut_osi_implementation;
+#endif
+#endif
+ break;
+
+ case ACPI_TYPE_INTEGER:
+
+ obj_desc->integer.value = ACPI_TO_INTEGER(val);
+ break;
+
+ case ACPI_TYPE_STRING:
+
+ /*
+ * Build an object around the static string
+ */
+ obj_desc->string.length =
+ (u32) ACPI_STRLEN(val);
+ obj_desc->string.pointer = val;
+ obj_desc->common.flags |= AOPOBJ_STATIC_POINTER;
+ break;
+
+ case ACPI_TYPE_MUTEX:
+
+ obj_desc->mutex.node = new_node;
+ obj_desc->mutex.sync_level =
+ (u8) (ACPI_TO_INTEGER(val) - 1);
+
+ /* Create a mutex */
+
+ status =
+ acpi_os_create_mutex(&obj_desc->mutex.
+ os_mutex);
+ if (ACPI_FAILURE(status)) {
+ acpi_ut_remove_reference(obj_desc);
+ goto unlock_and_exit;
+ }
+
+ /* Special case for ACPI Global Lock */
+
+ if (ACPI_STRCMP(init_val->name, "_GL_") == 0) {
+ acpi_gbl_global_lock_mutex = obj_desc;
+
+ /* Create additional counting semaphore for global lock */
+
+ status =
+ acpi_os_create_semaphore(1, 0,
+ &acpi_gbl_global_lock_semaphore);
+ if (ACPI_FAILURE(status)) {
+ acpi_ut_remove_reference
+ (obj_desc);
+ goto unlock_and_exit;
+ }
+ }
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO,
+ "Unsupported initial type value %X",
+ init_val->type));
+ acpi_ut_remove_reference(obj_desc);
+ obj_desc = NULL;
+ continue;
+ }
+
+ /* Store pointer to value descriptor in the Node */
+
+ status = acpi_ns_attach_object(new_node, obj_desc,
+ ACPI_GET_OBJECT_TYPE
+ (obj_desc));
+
+ /* Remove local reference to the object */
+
+ acpi_ut_remove_reference(obj_desc);
+ }
+ }
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
+ /* Save a handle to "_GPE", it is always present */
+
+ if (ACPI_SUCCESS(status)) {
+ status = acpi_ns_get_node(NULL, "\\_GPE", ACPI_NS_NO_UPSEARCH,
+ &acpi_gbl_fadt_gpe_device);
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_lookup
+ *
+ * PARAMETERS: scope_info - Current scope info block
+ * Pathname - Search pathname, in internal format
+ * (as represented in the AML stream)
+ * Type - Type associated with name
+ * interpreter_mode - IMODE_LOAD_PASS2 => add name if not found
+ * Flags - Flags describing the search restrictions
+ * walk_state - Current state of the walk
+ * return_node - Where the Node is placed (if found
+ * or created successfully)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Find or enter the passed name in the name space.
+ * Log an error if name not found in Exec mode.
+ *
+ * MUTEX: Assumes namespace is locked.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_lookup(union acpi_generic_state *scope_info,
+ char *pathname,
+ acpi_object_type type,
+ acpi_interpreter_mode interpreter_mode,
+ u32 flags,
+ struct acpi_walk_state *walk_state,
+ struct acpi_namespace_node **return_node)
+{
+ acpi_status status;
+ char *path = pathname;
+ struct acpi_namespace_node *prefix_node;
+ struct acpi_namespace_node *current_node = NULL;
+ struct acpi_namespace_node *this_node = NULL;
+ u32 num_segments;
+ u32 num_carats;
+ acpi_name simple_name;
+ acpi_object_type type_to_check_for;
+ acpi_object_type this_search_type;
+ u32 search_parent_flag = ACPI_NS_SEARCH_PARENT;
+ u32 local_flags;
+
+ ACPI_FUNCTION_TRACE(ns_lookup);
+
+ if (!return_node) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ local_flags = flags & ~(ACPI_NS_ERROR_IF_FOUND | ACPI_NS_SEARCH_PARENT);
+ *return_node = ACPI_ENTRY_NOT_FOUND;
+ acpi_gbl_ns_lookup_count++;
+
+ if (!acpi_gbl_root_node) {
+ return_ACPI_STATUS(AE_NO_NAMESPACE);
+ }
+
+ /*
+ * Get the prefix scope.
+ * A null scope means use the root scope
+ */
+ if ((!scope_info) || (!scope_info->scope.node)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "Null scope prefix, using root node (%p)\n",
+ acpi_gbl_root_node));
+
+ prefix_node = acpi_gbl_root_node;
+ } else {
+ prefix_node = scope_info->scope.node;
+ if (ACPI_GET_DESCRIPTOR_TYPE(prefix_node) !=
+ ACPI_DESC_TYPE_NAMED) {
+ ACPI_ERROR((AE_INFO, "%p is not a namespace node [%s]",
+ prefix_node,
+ acpi_ut_get_descriptor_name(prefix_node)));
+ return_ACPI_STATUS(AE_AML_INTERNAL);
+ }
+
+ if (!(flags & ACPI_NS_PREFIX_IS_SCOPE)) {
+ /*
+			 * This node might not be an actual "scope" node (such as a
+			 * Device/Method, etc.). It could be a Package or other object
+			 * node. Back up the tree to find the containing scope node.
+ */
+ while (!acpi_ns_opens_scope(prefix_node->type) &&
+ prefix_node->type != ACPI_TYPE_ANY) {
+ prefix_node =
+ acpi_ns_get_parent_node(prefix_node);
+ }
+ }
+ }
+
+	/* Save type. TBD: may no longer be necessary */
+
+ type_to_check_for = type;
+
+ /*
+ * Begin examination of the actual pathname
+ */
+ if (!pathname) {
+
+ /* A Null name_path is allowed and refers to the root */
+
+ num_segments = 0;
+ this_node = acpi_gbl_root_node;
+ path = "";
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "Null Pathname (Zero segments), Flags=%X\n",
+ flags));
+ } else {
+ /*
+ * Name pointer is valid (and must be in internal name format)
+ *
+ * Check for scope prefixes:
+ *
+ * As represented in the AML stream, a namepath consists of an
+ * optional scope prefix followed by a name segment part.
+ *
+ * If present, the scope prefix is either a Root Prefix (in
+ * which case the name is fully qualified), or one or more
+ * Parent Prefixes (in which case the name's scope is relative
+ * to the current scope).
+ */
+ if (*path == (u8) AML_ROOT_PREFIX) {
+
+ /* Pathname is fully qualified, start from the root */
+
+ this_node = acpi_gbl_root_node;
+ search_parent_flag = ACPI_NS_NO_UPSEARCH;
+
+ /* Point to name segment part */
+
+ path++;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "Path is absolute from root [%p]\n",
+ this_node));
+ } else {
+ /* Pathname is relative to current scope, start there */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "Searching relative to prefix scope [%4.4s] (%p)\n",
+ acpi_ut_get_node_name(prefix_node),
+ prefix_node));
+
+ /*
+ * Handle multiple Parent Prefixes (carat) by just getting
+ * the parent node for each prefix instance.
+ */
+ this_node = prefix_node;
+ num_carats = 0;
+ while (*path == (u8) AML_PARENT_PREFIX) {
+
+ /* Name is fully qualified, no search rules apply */
+
+ search_parent_flag = ACPI_NS_NO_UPSEARCH;
+ /*
+ * Point past this prefix to the name segment
+ * part or the next Parent Prefix
+ */
+ path++;
+
+ /* Backup to the parent node */
+
+ num_carats++;
+ this_node = acpi_ns_get_parent_node(this_node);
+ if (!this_node) {
+
+ /* Current scope has no parent scope */
+
+ ACPI_ERROR((AE_INFO,
+ "ACPI path has too many parent prefixes (^) - reached beyond root node"));
+ return_ACPI_STATUS(AE_NOT_FOUND);
+ }
+ }
+
+ if (search_parent_flag == ACPI_NS_NO_UPSEARCH) {
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "Search scope is [%4.4s], path has %d carat(s)\n",
+ acpi_ut_get_node_name
+ (this_node), num_carats));
+ }
+ }
+
+ /*
+ * Determine the number of ACPI name segments in this pathname.
+ *
+ * The segment part consists of either:
+ * - A Null name segment (0)
+ * - A dual_name_prefix followed by two 4-byte name segments
+ * - A multi_name_prefix followed by a byte indicating the
+ * number of segments and the segments themselves.
+ * - A single 4-byte name segment
+ *
+ * Examine the name prefix opcode, if any, to determine the number of
+ * segments.
+ */
+ switch (*path) {
+ case 0:
+ /*
+ * Null name after a root or parent prefixes. We already
+ * have the correct target node and there are no name segments.
+ */
+ num_segments = 0;
+ type = this_node->type;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "Prefix-only Pathname (Zero name segments), Flags=%X\n",
+ flags));
+ break;
+
+ case AML_DUAL_NAME_PREFIX:
+
+ /* More than one name_seg, search rules do not apply */
+
+ search_parent_flag = ACPI_NS_NO_UPSEARCH;
+
+ /* Two segments, point to first name segment */
+
+ num_segments = 2;
+ path++;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "Dual Pathname (2 segments, Flags=%X)\n",
+ flags));
+ break;
+
+ case AML_MULTI_NAME_PREFIX_OP:
+
+ /* More than one name_seg, search rules do not apply */
+
+ search_parent_flag = ACPI_NS_NO_UPSEARCH;
+
+ /* Extract segment count, point to first name segment */
+
+ path++;
+ num_segments = (u32) (u8) * path;
+ path++;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "Multi Pathname (%d Segments, Flags=%X)\n",
+ num_segments, flags));
+ break;
+
+ default:
+ /*
+ * Not a Null name, no Dual or Multi prefix, hence there is
+ * only one name segment and Pathname is already pointing to it.
+ */
+ num_segments = 1;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "Simple Pathname (1 segment, Flags=%X)\n",
+ flags));
+ break;
+ }
+
+ ACPI_DEBUG_EXEC(acpi_ns_print_pathname(num_segments, path));
+ }
+
+ /*
+ * Search namespace for each segment of the name. Loop through and
+ * verify (or add to the namespace) each name segment.
+ *
+ * The object type is significant only at the last name
+ * segment. (We don't care about the types along the path, only
+ * the type of the final target object.)
+ */
+ this_search_type = ACPI_TYPE_ANY;
+ current_node = this_node;
+ while (num_segments && current_node) {
+ num_segments--;
+ if (!num_segments) {
+ /*
+ * This is the last segment, enable typechecking
+ */
+ this_search_type = type;
+
+ /*
+ * Only allow automatic parent search (search rules) if the caller
+ * requested it AND we have a single, non-fully-qualified name_seg
+ */
+ if ((search_parent_flag != ACPI_NS_NO_UPSEARCH) &&
+ (flags & ACPI_NS_SEARCH_PARENT)) {
+ local_flags |= ACPI_NS_SEARCH_PARENT;
+ }
+
+ /* Set error flag according to caller */
+
+ if (flags & ACPI_NS_ERROR_IF_FOUND) {
+ local_flags |= ACPI_NS_ERROR_IF_FOUND;
+ }
+ }
+
+ /* Extract one ACPI name from the front of the pathname */
+
+ ACPI_MOVE_32_TO_32(&simple_name, path);
+
+ /* Try to find the single (4 character) ACPI name */
+
+ status =
+ acpi_ns_search_and_enter(simple_name, walk_state,
+ current_node, interpreter_mode,
+ this_search_type, local_flags,
+ &this_node);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_NOT_FOUND) {
+
+ /* Name not found in ACPI namespace */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "Name [%4.4s] not found in scope [%4.4s] %p\n",
+ (char *)&simple_name,
+ (char *)&current_node->name,
+ current_node));
+ }
+
+ *return_node = this_node;
+ return_ACPI_STATUS(status);
+ }
+
+ /* More segments to follow? */
+
+ if (num_segments > 0) {
+ /*
+ * If we have an alias to an object that opens a scope (such as a
+ * device or processor), we need to dereference the alias here so that
+ * we can access any children of the original node (via the remaining
+ * segments).
+ */
+ if (this_node->type == ACPI_TYPE_LOCAL_ALIAS) {
+ if (acpi_ns_opens_scope
+ (((struct acpi_namespace_node *)this_node->
+ object)->type)) {
+ this_node =
+ (struct acpi_namespace_node *)
+ this_node->object;
+ }
+ }
+ }
+
+ /* Special handling for the last segment (num_segments == 0) */
+
+ else {
+ /*
+ * Sanity typecheck of the target object:
+ *
+ * If 1) This is the last segment (num_segments == 0)
+ * 2) And we are looking for a specific type
+ * (Not checking for TYPE_ANY)
+ * 3) Which is not an alias
+ * 4) Which is not a local type (TYPE_SCOPE)
+ * 5) And the type of target object is known (not TYPE_ANY)
+ * 6) And target object does not match what we are looking for
+ *
+ * Then we have a type mismatch. Just warn and ignore it.
+ */
+ if ((type_to_check_for != ACPI_TYPE_ANY) &&
+ (type_to_check_for != ACPI_TYPE_LOCAL_ALIAS) &&
+ (type_to_check_for != ACPI_TYPE_LOCAL_METHOD_ALIAS)
+ && (type_to_check_for != ACPI_TYPE_LOCAL_SCOPE)
+ && (this_node->type != ACPI_TYPE_ANY)
+ && (this_node->type != type_to_check_for)) {
+
+ /* Complain about a type mismatch */
+
+ ACPI_WARNING((AE_INFO,
+ "NsLookup: Type mismatch on %4.4s (%s), searching for (%s)",
+ ACPI_CAST_PTR(char, &simple_name),
+ acpi_ut_get_type_name(this_node->
+ type),
+ acpi_ut_get_type_name
+ (type_to_check_for)));
+ }
+
+ /*
+ * If this is the last name segment and we are not looking for a
+ * specific type, but the type of found object is known, use that type
+ * to (later) see if it opens a scope.
+ */
+ if (type == ACPI_TYPE_ANY) {
+ type = this_node->type;
+ }
+ }
+
+ /* Point to next name segment and make this node current */
+
+ path += ACPI_NAME_SIZE;
+ current_node = this_node;
+ }
+
+ /*
+ * Always check if we need to open a new scope
+ */
+ if (!(flags & ACPI_NS_DONT_OPEN_SCOPE) && (walk_state)) {
+ /*
+ * If entry is a type which opens a scope, push the new scope on the
+ * scope stack.
+ */
+ if (acpi_ns_opens_scope(type)) {
+ status =
+ acpi_ds_scope_stack_push(this_node, type,
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+ }
+
+ *return_node = this_node;
+ return_ACPI_STATUS(AE_OK);
+}
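
The prefix handling at the top of acpi_ns_lookup follows the AML namepath encoding: an optional root ('\', 0x5C) or run of parent ('^', 0x5E) prefixes, then either a null, dual (0x2E), multi (0x2F), or single name-segment part. Below is a self-contained sketch of just the segment-count logic, with no namespace involved; the function name and sample path are illustrative.

#include <stdio.h>

/* AML namepath encoding bytes (ACPI specification name-space encoding) */
#define ROOT_PREFIX             0x5C    /* '\' */
#define PARENT_PREFIX           0x5E    /* '^' */
#define DUAL_NAME_PREFIX        0x2E
#define MULTI_NAME_PREFIX       0x2F

/*
 * Count the 4-byte name segments in an internal (AML-format) namepath,
 * mirroring the prefix parsing at the top of acpi_ns_lookup.  Sketch only:
 * nothing is searched and no validation is done.
 */
static unsigned count_name_segments(const unsigned char *path)
{
        /* Skip the scope prefix: one root char, or any number of parents */
        if (*path == ROOT_PREFIX)
                path++;
        else
                while (*path == PARENT_PREFIX)
                        path++;

        switch (*path) {
        case 0:                  return 0;        /* prefix-only path */
        case DUAL_NAME_PREFIX:   return 2;
        case MULTI_NAME_PREFIX:  return path[1];  /* count byte follows */
        default:                 return 1;        /* one 4-char segment */
        }
}

int main(void)
{
        /* "\_SB_PCI0": root prefix, dual-name prefix, then two segments */
        const unsigned char path[] = { ROOT_PREFIX, DUAL_NAME_PREFIX,
                                       '_', 'S', 'B', '_',
                                       'P', 'C', 'I', '0', 0 };

        printf("segments: %u\n", count_name_segments(path));
        return 0;
}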
diff --git a/drivers/acpi/namespace/nsalloc.c b/drivers/acpi/namespace/nsalloc.c
new file mode 100644
index 0000000..3a1740a
--- /dev/null
+++ b/drivers/acpi/namespace/nsalloc.c
@@ -0,0 +1,496 @@
+/*******************************************************************************
+ *
+ * Module Name: nsalloc - Namespace allocation and deletion utilities
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsalloc")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_create_node
+ *
+ * PARAMETERS: Name - Name of the new node (4 char ACPI name)
+ *
+ * RETURN: New namespace node (Null on failure)
+ *
+ * DESCRIPTION: Create a namespace node
+ *
+ ******************************************************************************/
+struct acpi_namespace_node *acpi_ns_create_node(u32 name)
+{
+ struct acpi_namespace_node *node;
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+ u32 temp;
+#endif
+
+ ACPI_FUNCTION_TRACE(ns_create_node);
+
+ node = acpi_os_acquire_object(acpi_gbl_namespace_cache);
+ if (!node) {
+ return_PTR(NULL);
+ }
+
+ ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_allocated++);
+
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+ temp =
+ acpi_gbl_ns_node_list->total_allocated -
+ acpi_gbl_ns_node_list->total_freed;
+ if (temp > acpi_gbl_ns_node_list->max_occupied) {
+ acpi_gbl_ns_node_list->max_occupied = temp;
+ }
+#endif
+
+ node->name.integer = name;
+ ACPI_SET_DESCRIPTOR_TYPE(node, ACPI_DESC_TYPE_NAMED);
+ return_PTR(node);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_delete_node
+ *
+ * PARAMETERS: Node - Node to be deleted
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Delete a namespace node
+ *
+ ******************************************************************************/
+
+void acpi_ns_delete_node(struct acpi_namespace_node *node)
+{
+ struct acpi_namespace_node *parent_node;
+ struct acpi_namespace_node *prev_node;
+ struct acpi_namespace_node *next_node;
+
+ ACPI_FUNCTION_TRACE_PTR(ns_delete_node, node);
+
+ parent_node = acpi_ns_get_parent_node(node);
+
+ prev_node = NULL;
+ next_node = parent_node->child;
+
+ /* Find the node that is the previous peer in the parent's child list */
+
+ while (next_node != node) {
+ prev_node = next_node;
+ next_node = prev_node->peer;
+ }
+
+ if (prev_node) {
+
+ /* Node is not first child, unlink it */
+
+ prev_node->peer = next_node->peer;
+ if (next_node->flags & ANOBJ_END_OF_PEER_LIST) {
+ prev_node->flags |= ANOBJ_END_OF_PEER_LIST;
+ }
+ } else {
+ /* Node is first child (has no previous peer) */
+
+ if (next_node->flags & ANOBJ_END_OF_PEER_LIST) {
+
+ /* No peers at all */
+
+ parent_node->child = NULL;
+ } else { /* Link peer list to parent */
+
+ parent_node->child = next_node->peer;
+ }
+ }
+
+ ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_freed++);
+
+ /*
+ * Detach an object if there is one, then delete the node
+ */
+ acpi_ns_detach_object(node);
+ (void)acpi_os_release_object(acpi_gbl_namespace_cache, node);
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_install_node
+ *
+ * PARAMETERS: walk_state - Current state of the walk
+ * parent_node - The parent of the new Node
+ * Node - The new Node to install
+ * Type - ACPI object type of the new Node
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Initialize a new namespace node and install it amongst
+ * its peers.
+ *
+ * Note: Current namespace lookup is linear search. This appears
+ * to be sufficient as namespace searches consume only a small
+ * fraction of the execution time of the ACPI subsystem.
+ *
+ ******************************************************************************/
+
+void acpi_ns_install_node(struct acpi_walk_state *walk_state, struct acpi_namespace_node *parent_node, /* Parent */
+ struct acpi_namespace_node *node, /* New Child */
+ acpi_object_type type)
+{
+ acpi_owner_id owner_id = 0;
+ struct acpi_namespace_node *child_node;
+
+ ACPI_FUNCTION_TRACE(ns_install_node);
+
+ /*
+ * Get the owner ID from the Walk state
+ * The owner ID is used to track table deletion and
+ * deletion of objects created by methods
+ */
+ if (walk_state) {
+ owner_id = walk_state->owner_id;
+ }
+
+ /* Link the new entry into the parent and existing children */
+
+ child_node = parent_node->child;
+ if (!child_node) {
+ parent_node->child = node;
+ node->flags |= ANOBJ_END_OF_PEER_LIST;
+ node->peer = parent_node;
+ } else {
+ while (!(child_node->flags & ANOBJ_END_OF_PEER_LIST)) {
+ child_node = child_node->peer;
+ }
+
+ child_node->peer = node;
+
+ /* Clear end-of-list flag */
+
+ child_node->flags &= ~ANOBJ_END_OF_PEER_LIST;
+ node->flags |= ANOBJ_END_OF_PEER_LIST;
+ node->peer = parent_node;
+ }
+
+ /* Init the new entry */
+
+ node->owner_id = owner_id;
+ node->type = (u8) type;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "%4.4s (%s) [Node %p Owner %X] added to %4.4s (%s) [Node %p]\n",
+ acpi_ut_get_node_name(node),
+ acpi_ut_get_type_name(node->type), node, owner_id,
+ acpi_ut_get_node_name(parent_node),
+ acpi_ut_get_type_name(parent_node->type),
+ parent_node));
+
+ return_VOID;
+}
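
The child/peer linkage maintained by acpi_ns_install_node is a singly linked sibling ("peer") list whose tail carries an end-of-list flag and points back at the parent. A simplified, self-contained model of that linkage is sketched below; the node layout and names are stand-ins, not the real struct acpi_namespace_node.

#include <stdio.h>

#define END_OF_PEER_LIST        0x01

struct node {
        const char  *name;
        struct node *child;     /* first child, or NULL */
        struct node *peer;      /* next sibling, or the parent if flagged */
        unsigned     flags;
};

static void install_node(struct node *parent, struct node *node)
{
        struct node *last = parent->child;

        node->flags |= END_OF_PEER_LIST;
        node->peer = parent;                    /* tail points at the parent */

        if (!last) {
                parent->child = node;           /* first (and only) child */
                return;
        }

        while (!(last->flags & END_OF_PEER_LIST))
                last = last->peer;              /* walk to the current tail */

        last->flags &= ~END_OF_PEER_LIST;       /* old tail is no longer last */
        last->peer = node;
}

int main(void)
{
        struct node root = { "ROOT", NULL, NULL, 0 };
        struct node sb   = { "_SB_", NULL, NULL, 0 };
        struct node gpe  = { "_GPE", NULL, NULL, 0 };
        struct node *n;

        install_node(&root, &sb);
        install_node(&root, &gpe);

        for (n = root.child; ; n = n->peer) {   /* stop at the flagged tail */
                printf("%s\n", n->name);
                if (n->flags & END_OF_PEER_LIST)
                        break;
        }
        return 0;
}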
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_delete_children
+ *
+ * PARAMETERS:  parent_node     - Delete this object's children
+ *
+ * RETURN: None.
+ *
+ * DESCRIPTION: Delete all children of the parent object. In other words,
+ * deletes a "scope".
+ *
+ ******************************************************************************/
+
+void acpi_ns_delete_children(struct acpi_namespace_node *parent_node)
+{
+ struct acpi_namespace_node *child_node;
+ struct acpi_namespace_node *next_node;
+ u8 flags;
+
+ ACPI_FUNCTION_TRACE_PTR(ns_delete_children, parent_node);
+
+ if (!parent_node) {
+ return_VOID;
+ }
+
+ /* If no children, all done! */
+
+ child_node = parent_node->child;
+ if (!child_node) {
+ return_VOID;
+ }
+
+ /*
+ * Deallocate all children at this level
+ */
+ do {
+
+ /* Get the things we need */
+
+ next_node = child_node->peer;
+ flags = child_node->flags;
+
+ /* Grandchildren should have all been deleted already */
+
+ if (child_node->child) {
+ ACPI_ERROR((AE_INFO, "Found a grandchild! P=%p C=%p",
+ parent_node, child_node));
+ }
+
+ /* Now we can free this child object */
+
+ ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_freed++);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+ "Object %p, Remaining %X\n", child_node,
+ acpi_gbl_current_node_count));
+
+ /*
+ * Detach an object if there is one, then free the child node
+ */
+ acpi_ns_detach_object(child_node);
+
+ /* Now we can delete the node */
+
+ (void)acpi_os_release_object(acpi_gbl_namespace_cache,
+ child_node);
+
+ /* And move on to the next child in the list */
+
+ child_node = next_node;
+
+ } while (!(flags & ANOBJ_END_OF_PEER_LIST));
+
+ /* Clear the parent's child pointer */
+
+ parent_node->child = NULL;
+
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_delete_namespace_subtree
+ *
+ * PARAMETERS: parent_node - Root of the subtree to be deleted
+ *
+ * RETURN: None.
+ *
+ * DESCRIPTION: Delete a subtree of the namespace. This includes all objects
+ * stored within the subtree.
+ *
+ ******************************************************************************/
+
+void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
+{
+ struct acpi_namespace_node *child_node = NULL;
+ u32 level = 1;
+
+ ACPI_FUNCTION_TRACE(ns_delete_namespace_subtree);
+
+ if (!parent_node) {
+ return_VOID;
+ }
+
+ /*
+ * Traverse the tree of objects until we bubble back up
+ * to where we started.
+ */
+ while (level > 0) {
+
+ /* Get the next node in this scope (NULL if none) */
+
+ child_node =
+ acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
+ child_node);
+ if (child_node) {
+
+ /* Found a child node - detach any attached object */
+
+ acpi_ns_detach_object(child_node);
+
+ /* Check if this node has any children */
+
+ if (acpi_ns_get_next_node
+ (ACPI_TYPE_ANY, child_node, NULL)) {
+ /*
+ * There is at least one child of this node,
+ * visit the node
+ */
+ level++;
+ parent_node = child_node;
+ child_node = NULL;
+ }
+ } else {
+ /*
+ * No more children of this parent node.
+ * Move up to the grandparent.
+ */
+ level--;
+
+ /*
+ * Now delete all of the children of this parent
+ * all at the same time.
+ */
+ acpi_ns_delete_children(parent_node);
+
+ /* New "last child" is this parent node */
+
+ child_node = parent_node;
+
+ /* Move up the tree to the grandparent */
+
+ parent_node = acpi_ns_get_parent_node(parent_node);
+ }
+ }
+
+ return_VOID;
+}
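
acpi_ns_delete_namespace_subtree walks the subtree without recursion, tracking only a level counter and deleting each parent's child list on the way back up. Below is a self-contained model of that walk over a plain parent/child/sibling tree; the node layout and helpers are illustrative, not the real namespace structures.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct node {
        char name[5];
        struct node *parent, *child, *sibling;
};

static struct node *add_child(struct node *parent, const char *name)
{
        struct node *n = calloc(1, sizeof(*n));

        strncpy(n->name, name, 4);
        n->parent = parent;
        n->sibling = parent->child;             /* prepend to the child list */
        parent->child = n;
        return n;
}

/* Child of 'parent' that follows 'prev' (NULL means return the first one) */
static struct node *next_child(struct node *parent, struct node *prev)
{
        return prev ? prev->sibling : parent->child;
}

static void delete_children(struct node *parent)
{
        struct node *c = parent->child, *next;

        while (c) {
                next = c->sibling;
                printf("free %s\n", c->name);
                free(c);
                c = next;
        }
        parent->child = NULL;
}

/* Descend when a child has children; free a level only after bubbling up */
static void delete_subtree(struct node *parent)
{
        struct node *child = NULL;
        int level = 1;

        while (level > 0) {
                child = next_child(parent, child);
                if (child && child->child) {
                        level++;                /* descend into this child */
                        parent = child;
                        child = NULL;
                } else if (!child) {
                        level--;                /* this level fully visited */
                        delete_children(parent);
                        child = parent;         /* resume one level up */
                        parent = parent->parent;
                }
        }
}

int main(void)
{
        struct node root = { "ROOT", NULL, NULL, NULL };
        struct node *sb = add_child(&root, "_SB_");

        add_child(sb, "PCI0");
        add_child(&root, "_TZ_");

        delete_subtree(&root);
        return 0;
}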
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_delete_namespace_by_owner
+ *
+ * PARAMETERS: owner_id - All nodes with this owner will be deleted
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Delete entries within the namespace that are owned by a
+ * specific ID. Used to delete entire ACPI tables. All
+ * reference counts are updated.
+ *
+ * MUTEX: Locks namespace during deletion walk.
+ *
+ ******************************************************************************/
+
+void acpi_ns_delete_namespace_by_owner(acpi_owner_id owner_id)
+{
+ struct acpi_namespace_node *child_node;
+ struct acpi_namespace_node *deletion_node;
+ struct acpi_namespace_node *parent_node;
+ u32 level;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE_U32(ns_delete_namespace_by_owner, owner_id);
+
+ if (owner_id == 0) {
+ return_VOID;
+ }
+
+ /* Lock namespace for possible update */
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
+
+ deletion_node = NULL;
+ parent_node = acpi_gbl_root_node;
+ child_node = NULL;
+ level = 1;
+
+ /*
+ * Traverse the tree of nodes until we bubble back up
+ * to where we started.
+ */
+ while (level > 0) {
+ /*
+ * Get the next child of this parent node. When child_node is NULL,
+ * the first child of the parent is returned
+ */
+ child_node =
+ acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
+ child_node);
+
+ if (deletion_node) {
+ acpi_ns_delete_children(deletion_node);
+ acpi_ns_delete_node(deletion_node);
+ deletion_node = NULL;
+ }
+
+ if (child_node) {
+ if (child_node->owner_id == owner_id) {
+
+ /* Found a matching child node - detach any attached object */
+
+ acpi_ns_detach_object(child_node);
+ }
+
+ /* Check if this node has any children */
+
+ if (acpi_ns_get_next_node
+ (ACPI_TYPE_ANY, child_node, NULL)) {
+ /*
+ * There is at least one child of this node,
+ * visit the node
+ */
+ level++;
+ parent_node = child_node;
+ child_node = NULL;
+ } else if (child_node->owner_id == owner_id) {
+ deletion_node = child_node;
+ }
+ } else {
+ /*
+ * No more children of this parent node.
+ * Move up to the grandparent.
+ */
+ level--;
+ if (level != 0) {
+ if (parent_node->owner_id == owner_id) {
+ deletion_node = parent_node;
+ }
+ }
+
+ /* New "last child" is this parent node */
+
+ child_node = parent_node;
+
+ /* Move up the tree to the grandparent */
+
+ parent_node = acpi_ns_get_parent_node(parent_node);
+ }
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_VOID;
+}
diff --git a/drivers/acpi/namespace/nsdump.c b/drivers/acpi/namespace/nsdump.c
new file mode 100644
index 0000000..cc0ae39
--- /dev/null
+++ b/drivers/acpi/namespace/nsdump.c
@@ -0,0 +1,708 @@
+/******************************************************************************
+ *
+ * Module Name: nsdump - table dumping routines for debug
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsdump")
+
+/* Local prototypes */
+#ifdef ACPI_OBSOLETE_FUNCTIONS
+void acpi_ns_dump_root_devices(void);
+
+static acpi_status
+acpi_ns_dump_one_device(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
+#endif
+
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_print_pathname
+ *
+ * PARAMETERS: num_segments - Number of ACPI name segments
+ * Pathname - The compressed (internal) path
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print an object's full namespace pathname
+ *
+ ******************************************************************************/
+
+void acpi_ns_print_pathname(u32 num_segments, char *pathname)
+{
+ u32 i;
+
+ ACPI_FUNCTION_NAME(ns_print_pathname);
+
+ if (!(acpi_dbg_level & ACPI_LV_NAMES)
+ || !(acpi_dbg_layer & ACPI_NAMESPACE)) {
+ return;
+ }
+
+ /* Print the entire name */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "["));
+
+ while (num_segments) {
+ for (i = 0; i < 4; i++) {
+ ACPI_IS_PRINT(pathname[i]) ?
+ acpi_os_printf("%c", pathname[i]) :
+ acpi_os_printf("?");
+ }
+
+ pathname += ACPI_NAME_SIZE;
+ num_segments--;
+ if (num_segments) {
+ acpi_os_printf(".");
+ }
+ }
+
+ acpi_os_printf("]\n");
+}
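+
+/*
+ * Example (illustrative sketch): the "compressed" pathname handed to the
+ * routine above is a flat array of 4-character name segments with no
+ * separators. The buffer below is hypothetical and only shows the layout.
+ */
+static void example_print_internal_path(void)
+{
+        /* Two segments, "_SB_" followed by "PCI0", stored back to back */
+
+        char internal_path[] = "_SB_PCI0";
+
+        /* Emits "[_SB_.PCI0]" when the NAMES debug level and layer are set */
+
+        acpi_ns_print_pathname(2, internal_path);
+}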
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_dump_pathname
+ *
+ * PARAMETERS: Handle - Object
+ * Msg - Prefix message
+ * Level - Desired debug level
+ * Component - Caller's component ID
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print an object's full namespace pathname
+ * Manages allocation/freeing of a pathname buffer
+ *
+ ******************************************************************************/
+
+void
+acpi_ns_dump_pathname(acpi_handle handle, char *msg, u32 level, u32 component)
+{
+
+ ACPI_FUNCTION_TRACE(ns_dump_pathname);
+
+ /* Do this only if the requested debug level and component are enabled */
+
+ if (!(acpi_dbg_level & level) || !(acpi_dbg_layer & component)) {
+ return_VOID;
+ }
+
+ /* Convert handle to a full pathname and print it (with supplied message) */
+
+ acpi_ns_print_node_pathname(handle, msg);
+ acpi_os_printf("\n");
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_dump_one_object
+ *
+ * PARAMETERS: obj_handle - Node to be dumped
+ * Level - Nesting level of the handle
+ * Context - Passed into walk_namespace
+ * return_value - Not used
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Dump a single Node
+ * This procedure is a user_function called by acpi_ns_walk_namespace.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_dump_one_object(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ struct acpi_walk_info *info = (struct acpi_walk_info *)context;
+ struct acpi_namespace_node *this_node;
+ union acpi_operand_object *obj_desc = NULL;
+ acpi_object_type obj_type;
+ acpi_object_type type;
+ u32 bytes_to_dump;
+ u32 dbg_level;
+ u32 i;
+
+ ACPI_FUNCTION_NAME(ns_dump_one_object);
+
+ /* Is output enabled? */
+
+ if (!(acpi_dbg_level & info->debug_level)) {
+ return (AE_OK);
+ }
+
+ if (!obj_handle) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Null object handle\n"));
+ return (AE_OK);
+ }
+
+ this_node = acpi_ns_map_handle_to_node(obj_handle);
+ type = this_node->type;
+
+ /* Check if the owner matches */
+
+ if ((info->owner_id != ACPI_OWNER_ID_MAX) &&
+ (info->owner_id != this_node->owner_id)) {
+ return (AE_OK);
+ }
+
+ if (!(info->display_type & ACPI_DISPLAY_SHORT)) {
+
+ /* Indent the object according to the level */
+
+ acpi_os_printf("%2d%*s", (u32) level - 1, (int)level * 2, " ");
+
+ /* Check the node type and name */
+
+ if (type > ACPI_TYPE_LOCAL_MAX) {
+ ACPI_WARNING((AE_INFO, "Invalid ACPI Object Type %08X",
+ type));
+ }
+
+ if (!acpi_ut_valid_acpi_name(this_node->name.integer)) {
+ this_node->name.integer =
+ acpi_ut_repair_name(this_node->name.ascii);
+
+ ACPI_WARNING((AE_INFO, "Invalid ACPI Name %08X",
+ this_node->name.integer));
+ }
+
+ acpi_os_printf("%4.4s", acpi_ut_get_node_name(this_node));
+ }
+
+ /*
+ * Now we can print out the pertinent information
+ */
+ acpi_os_printf(" %-12s %p %2.2X ",
+ acpi_ut_get_type_name(type), this_node,
+ this_node->owner_id);
+
+ dbg_level = acpi_dbg_level;
+ acpi_dbg_level = 0;
+ obj_desc = acpi_ns_get_attached_object(this_node);
+ acpi_dbg_level = dbg_level;
+
+ /* Temp nodes are those nodes created by a control method */
+
+ if (this_node->flags & ANOBJ_TEMPORARY) {
+ acpi_os_printf("(T) ");
+ }
+
+ switch (info->display_type & ACPI_DISPLAY_MASK) {
+ case ACPI_DISPLAY_SUMMARY:
+
+ if (!obj_desc) {
+
+ /* No attached object, we are done */
+
+ acpi_os_printf("\n");
+ return (AE_OK);
+ }
+
+ switch (type) {
+ case ACPI_TYPE_PROCESSOR:
+
+ acpi_os_printf("ID %X Len %.4X Addr %p\n",
+ obj_desc->processor.proc_id,
+ obj_desc->processor.length,
+ ACPI_CAST_PTR(void,
+ obj_desc->processor.
+ address));
+ break;
+
+ case ACPI_TYPE_DEVICE:
+
+ acpi_os_printf("Notify Object: %p\n", obj_desc);
+ break;
+
+ case ACPI_TYPE_METHOD:
+
+ acpi_os_printf("Args %X Len %.4X Aml %p\n",
+ (u32) obj_desc->method.param_count,
+ obj_desc->method.aml_length,
+ obj_desc->method.aml_start);
+ break;
+
+ case ACPI_TYPE_INTEGER:
+
+ acpi_os_printf("= %8.8X%8.8X\n",
+ ACPI_FORMAT_UINT64(obj_desc->integer.
+ value));
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+
+ if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
+ acpi_os_printf("Elements %.2X\n",
+ obj_desc->package.count);
+ } else {
+ acpi_os_printf("[Length not yet evaluated]\n");
+ }
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
+ acpi_os_printf("Len %.2X",
+ obj_desc->buffer.length);
+
+ /* Dump some of the buffer */
+
+ if (obj_desc->buffer.length > 0) {
+ acpi_os_printf(" =");
+ for (i = 0;
+ (i < obj_desc->buffer.length
+ && i < 12); i++) {
+ acpi_os_printf(" %.2hX",
+ obj_desc->buffer.
+ pointer[i]);
+ }
+ }
+ acpi_os_printf("\n");
+ } else {
+ acpi_os_printf("[Length not yet evaluated]\n");
+ }
+ break;
+
+ case ACPI_TYPE_STRING:
+
+ acpi_os_printf("Len %.2X ", obj_desc->string.length);
+ acpi_ut_print_string(obj_desc->string.pointer, 32);
+ acpi_os_printf("\n");
+ break;
+
+ case ACPI_TYPE_REGION:
+
+ acpi_os_printf("[%s]",
+ acpi_ut_get_region_name(obj_desc->region.
+ space_id));
+ if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
+ acpi_os_printf(" Addr %8.8X%8.8X Len %.4X\n",
+ ACPI_FORMAT_NATIVE_UINT
+ (obj_desc->region.address),
+ obj_desc->region.length);
+ } else {
+ acpi_os_printf
+ (" [Address/Length not yet evaluated]\n");
+ }
+ break;
+
+ case ACPI_TYPE_LOCAL_REFERENCE:
+
+ acpi_os_printf("[%s]\n",
+ acpi_ut_get_reference_name(obj_desc));
+ break;
+
+ case ACPI_TYPE_BUFFER_FIELD:
+
+ if (obj_desc->buffer_field.buffer_obj &&
+ obj_desc->buffer_field.buffer_obj->buffer.node) {
+ acpi_os_printf("Buf [%4.4s]",
+ acpi_ut_get_node_name(obj_desc->
+ buffer_field.
+ buffer_obj->
+ buffer.
+ node));
+ }
+ break;
+
+ case ACPI_TYPE_LOCAL_REGION_FIELD:
+
+ acpi_os_printf("Rgn [%4.4s]",
+ acpi_ut_get_node_name(obj_desc->
+ common_field.
+ region_obj->region.
+ node));
+ break;
+
+ case ACPI_TYPE_LOCAL_BANK_FIELD:
+
+ acpi_os_printf("Rgn [%4.4s] Bnk [%4.4s]",
+ acpi_ut_get_node_name(obj_desc->
+ common_field.
+ region_obj->region.
+ node),
+ acpi_ut_get_node_name(obj_desc->
+ bank_field.
+ bank_obj->
+ common_field.
+ node));
+ break;
+
+ case ACPI_TYPE_LOCAL_INDEX_FIELD:
+
+ acpi_os_printf("Idx [%4.4s] Dat [%4.4s]",
+ acpi_ut_get_node_name(obj_desc->
+ index_field.
+ index_obj->
+ common_field.node),
+ acpi_ut_get_node_name(obj_desc->
+ index_field.
+ data_obj->
+ common_field.
+ node));
+ break;
+
+ case ACPI_TYPE_LOCAL_ALIAS:
+ case ACPI_TYPE_LOCAL_METHOD_ALIAS:
+
+ acpi_os_printf("Target %4.4s (%p)\n",
+ acpi_ut_get_node_name(obj_desc),
+ obj_desc);
+ break;
+
+ default:
+
+ acpi_os_printf("Object %p\n", obj_desc);
+ break;
+ }
+
+ /* Common field handling */
+
+ switch (type) {
+ case ACPI_TYPE_BUFFER_FIELD:
+ case ACPI_TYPE_LOCAL_REGION_FIELD:
+ case ACPI_TYPE_LOCAL_BANK_FIELD:
+ case ACPI_TYPE_LOCAL_INDEX_FIELD:
+
+ acpi_os_printf(" Off %.3X Len %.2X Acc %.2hd\n",
+ (obj_desc->common_field.
+ base_byte_offset * 8)
+ +
+ obj_desc->common_field.
+ start_field_bit_offset,
+ obj_desc->common_field.bit_length,
+ obj_desc->common_field.
+ access_byte_width);
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ case ACPI_DISPLAY_OBJECTS:
+
+ acpi_os_printf("O:%p", obj_desc);
+ if (!obj_desc) {
+
+ /* No attached object, we are done */
+
+ acpi_os_printf("\n");
+ return (AE_OK);
+ }
+
+ acpi_os_printf("(R%d)", obj_desc->common.reference_count);
+
+ switch (type) {
+ case ACPI_TYPE_METHOD:
+
+ /* Name is a Method and its AML offset/length are set */
+
+ acpi_os_printf(" M:%p-%X\n", obj_desc->method.aml_start,
+ obj_desc->method.aml_length);
+ break;
+
+ case ACPI_TYPE_INTEGER:
+
+ acpi_os_printf(" I:%8.8X8.8%X\n",
+ ACPI_FORMAT_UINT64(obj_desc->integer.
+ value));
+ break;
+
+ case ACPI_TYPE_STRING:
+
+ acpi_os_printf(" S:%p-%X\n", obj_desc->string.pointer,
+ obj_desc->string.length);
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ acpi_os_printf(" B:%p-%X\n", obj_desc->buffer.pointer,
+ obj_desc->buffer.length);
+ break;
+
+ default:
+
+ acpi_os_printf("\n");
+ break;
+ }
+ break;
+
+ default:
+ acpi_os_printf("\n");
+ break;
+ }
+
+ /* If debug turned off, done */
+
+ if (!(acpi_dbg_level & ACPI_LV_VALUES)) {
+ return (AE_OK);
+ }
+
+ /* If there is an attached object, display it */
+
+ dbg_level = acpi_dbg_level;
+ acpi_dbg_level = 0;
+ obj_desc = acpi_ns_get_attached_object(this_node);
+ acpi_dbg_level = dbg_level;
+
+ /* Dump attached objects */
+
+ while (obj_desc) {
+ obj_type = ACPI_TYPE_INVALID;
+ acpi_os_printf("Attached Object %p: ", obj_desc);
+
+ /* Decode the type of attached object and dump the contents */
+
+ switch (ACPI_GET_DESCRIPTOR_TYPE(obj_desc)) {
+ case ACPI_DESC_TYPE_NAMED:
+
+ acpi_os_printf("(Ptr to Node)\n");
+ bytes_to_dump = sizeof(struct acpi_namespace_node);
+ ACPI_DUMP_BUFFER(obj_desc, bytes_to_dump);
+ break;
+
+ case ACPI_DESC_TYPE_OPERAND:
+
+ obj_type = ACPI_GET_OBJECT_TYPE(obj_desc);
+
+ if (obj_type > ACPI_TYPE_LOCAL_MAX) {
+ acpi_os_printf
+ ("(Pointer to ACPI Object type %.2X [UNKNOWN])\n",
+ obj_type);
+ bytes_to_dump = 32;
+ } else {
+ acpi_os_printf
+ ("(Pointer to ACPI Object type %.2X [%s])\n",
+ obj_type, acpi_ut_get_type_name(obj_type));
+ bytes_to_dump =
+ sizeof(union acpi_operand_object);
+ }
+
+ ACPI_DUMP_BUFFER(obj_desc, bytes_to_dump);
+ break;
+
+ default:
+
+ break;
+ }
+
+ /* If value is NOT an internal object, we are done */
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) !=
+ ACPI_DESC_TYPE_OPERAND) {
+ goto cleanup;
+ }
+
+ /*
+ * Valid object, get the pointer to next level, if any
+ */
+ switch (obj_type) {
+ case ACPI_TYPE_BUFFER:
+ case ACPI_TYPE_STRING:
+ /*
+ * NOTE: takes advantage of common fields between string/buffer
+ */
+ bytes_to_dump = obj_desc->string.length;
+ obj_desc = (void *)obj_desc->string.pointer;
+ acpi_os_printf("(Buffer/String pointer %p length %X)\n",
+ obj_desc, bytes_to_dump);
+ ACPI_DUMP_BUFFER(obj_desc, bytes_to_dump);
+ goto cleanup;
+
+ case ACPI_TYPE_BUFFER_FIELD:
+ obj_desc =
+ (union acpi_operand_object *)obj_desc->buffer_field.
+ buffer_obj;
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+ obj_desc = (void *)obj_desc->package.elements;
+ break;
+
+ case ACPI_TYPE_METHOD:
+ obj_desc = (void *)obj_desc->method.aml_start;
+ break;
+
+ case ACPI_TYPE_LOCAL_REGION_FIELD:
+ obj_desc = (void *)obj_desc->field.region_obj;
+ break;
+
+ case ACPI_TYPE_LOCAL_BANK_FIELD:
+ obj_desc = (void *)obj_desc->bank_field.region_obj;
+ break;
+
+ case ACPI_TYPE_LOCAL_INDEX_FIELD:
+ obj_desc = (void *)obj_desc->index_field.index_obj;
+ break;
+
+ default:
+ goto cleanup;
+ }
+
+ obj_type = ACPI_TYPE_INVALID; /* Terminate loop after next pass */
+ }
+
+ cleanup:
+ acpi_os_printf("\n");
+ return (AE_OK);
+}
+
+#ifdef ACPI_FUTURE_USAGE
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_dump_objects
+ *
+ * PARAMETERS: Type - Object type to be dumped
+ * display_type - 0 or ACPI_DISPLAY_SUMMARY
+ * max_depth - Maximum depth of dump. Use ACPI_UINT32_MAX
+ * for an effectively unlimited depth.
+ * owner_id - Dump only objects owned by this ID. Use
+ * ACPI_OWNER_ID_MAX to match all owners.
+ * start_handle - Where in namespace to start/end search
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Dump typed objects within the loaded namespace.
+ * Uses acpi_ns_walk_namespace in conjunction with acpi_ns_dump_one_object.
+ *
+ ******************************************************************************/
+
+void
+acpi_ns_dump_objects(acpi_object_type type,
+ u8 display_type,
+ u32 max_depth,
+ acpi_owner_id owner_id, acpi_handle start_handle)
+{
+ struct acpi_walk_info info;
+
+ ACPI_FUNCTION_ENTRY();
+
+ info.debug_level = ACPI_LV_TABLES;
+ info.owner_id = owner_id;
+ info.display_type = display_type;
+
+ (void)acpi_ns_walk_namespace(type, start_handle, max_depth,
+ ACPI_NS_WALK_NO_UNLOCK |
+ ACPI_NS_WALK_TEMP_NODES,
+ acpi_ns_dump_one_object, (void *)&info,
+ NULL);
+}
+#endif /* ACPI_FUTURE_USAGE */
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_dump_entry
+ *
+ * PARAMETERS: Handle - Node to be dumped
+ * debug_level - Output level
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Dump a single Node
+ *
+ ******************************************************************************/
+
+void acpi_ns_dump_entry(acpi_handle handle, u32 debug_level)
+{
+ struct acpi_walk_info info;
+
+ ACPI_FUNCTION_ENTRY();
+
+ info.debug_level = debug_level;
+ info.owner_id = ACPI_OWNER_ID_MAX;
+ info.display_type = ACPI_DISPLAY_SUMMARY;
+
+ (void)acpi_ns_dump_one_object(handle, 1, &info, NULL);
+}
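+
+/*
+ * Example (illustrative sketch): acpi_ns_dump_entry is the convenient way for
+ * other debug code to dump a single node. The helper name is hypothetical;
+ * any valid namespace handle and ACPI_LV_* level may be passed.
+ */
+static void example_dump_single_node(acpi_handle node_handle)
+{
+        /* One summary line for this node, gated on ACPI_LV_TABLES */
+
+        acpi_ns_dump_entry(node_handle, ACPI_LV_TABLES);
+}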
+
+#ifdef ACPI_ASL_COMPILER
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_dump_tables
+ *
+ * PARAMETERS: search_base - Root of subtree to be dumped, or
+ * ACPI_NS_ALL to dump the entire namespace
+ * max_depth - Maximum depth of dump. Use ACPI_UINT32_MAX
+ * for an effectively unlimited depth.
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Dump the name space, or a portion of it.
+ *
+ ******************************************************************************/
+
+void acpi_ns_dump_tables(acpi_handle search_base, u32 max_depth)
+{
+ acpi_handle search_handle = search_base;
+
+ ACPI_FUNCTION_TRACE(ns_dump_tables);
+
+ if (!acpi_gbl_root_node) {
+ /*
+ * If the name space has not been initialized,
+ * there is nothing to dump.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
+ "namespace not initialized!\n"));
+ return_VOID;
+ }
+
+ if (ACPI_NS_ALL == search_base) {
+
+ /* Entire namespace */
+
+ search_handle = acpi_gbl_root_node;
+ ACPI_DEBUG_PRINT((ACPI_DB_TABLES, "\\\n"));
+ }
+
+ acpi_ns_dump_objects(ACPI_TYPE_ANY, ACPI_DISPLAY_OBJECTS, max_depth,
+ ACPI_OWNER_ID_MAX, search_handle);
+ return_VOID;
+}
+#endif /* ACPI_ASL_COMPILER */
+#endif /* defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) */
diff --git a/drivers/acpi/namespace/nsdumpdv.c b/drivers/acpi/namespace/nsdumpdv.c
new file mode 100644
index 0000000..428f50f
--- /dev/null
+++ b/drivers/acpi/namespace/nsdumpdv.c
@@ -0,0 +1,140 @@
+/******************************************************************************
+ *
+ * Module Name: nsdumpdv - device-oriented namespace dump routines for debug
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+
+/* TBD: This entire module is apparently obsolete and should be removed */
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsdumpdv")
+#ifdef ACPI_OBSOLETE_FUNCTIONS
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+#include <acpi/acnamesp.h>
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_dump_one_device
+ *
+ * PARAMETERS: Handle - Node to be dumped
+ * Level - Nesting level of the handle
+ * Context - Passed into walk_namespace
+ * return_value - Not used
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Dump a single Node that represents a device
+ * This procedure is a user_function called by acpi_ns_walk_namespace.
+ *
+ ******************************************************************************/
+static acpi_status
+acpi_ns_dump_one_device(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ struct acpi_buffer buffer;
+ struct acpi_device_info *info;
+ acpi_status status;
+ u32 i;
+
+ ACPI_FUNCTION_NAME(ns_dump_one_device);
+
+ status =
+ acpi_ns_dump_one_object(obj_handle, level, context, return_value);
+
+ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+ status = acpi_get_object_info(obj_handle, &buffer);
+ if (ACPI_SUCCESS(status)) {
+ info = buffer.pointer;
+ for (i = 0; i < level; i++) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TABLES, " "));
+ }
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TABLES,
+ " HID: %s, ADR: %8.8X%8.8X, Status: %X\n",
+ info->hardware_id.value,
+ ACPI_FORMAT_UINT64(info->address),
+ info->current_status));
+ ACPI_FREE(info);
+ }
+
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_dump_root_devices
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Dump all objects of type "device"
+ *
+ ******************************************************************************/
+
+void acpi_ns_dump_root_devices(void)
+{
+ acpi_handle sys_bus_handle;
+ acpi_status status;
+
+ ACPI_FUNCTION_NAME(ns_dump_root_devices);
+
+ /* Only dump the table if tracing is enabled */
+
+ if (!(ACPI_LV_TABLES & acpi_dbg_level)) {
+ return;
+ }
+
+ status = acpi_get_handle(NULL, ACPI_NS_SYSTEM_BUS, &sys_bus_handle);
+ if (ACPI_FAILURE(status)) {
+ return;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
+ "Display of all devices in the namespace:\n"));
+
+ status = acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, sys_bus_handle,
+ ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
+ acpi_ns_dump_one_device, NULL, NULL);
+}
+
+#endif
+#endif
diff --git a/drivers/acpi/namespace/nseval.c b/drivers/acpi/namespace/nseval.c
new file mode 100644
index 0000000..4cdf03a
--- /dev/null
+++ b/drivers/acpi/namespace/nseval.c
@@ -0,0 +1,319 @@
+/*******************************************************************************
+ *
+ * Module Name: nseval - Object evaluation, includes control method execution
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acparser.h>
+#include <acpi/acinterp.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("nseval")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_evaluate
+ *
+ * PARAMETERS: Info - Evaluation info block, contains:
+ * prefix_node - Prefix or Method/Object Node to execute
+ * Pathname - Name of method to execute, If NULL, the
+ * Node is the object to execute
+ * Parameters - List of parameters to pass to the method,
+ * terminated by NULL. Params itself may be
+ * NULL if no parameters are being passed.
+ * return_object - Where to put method's return value (if
+ * any). If NULL, no value is returned.
+ * parameter_type - Type of Parameter list
+ * Flags - ACPI_IGNORE_RETURN_VALUE to delete the return value
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Execute a control method or return the current value of an
+ * ACPI namespace object.
+ *
+ * MUTEX: Locks interpreter
+ *
+ ******************************************************************************/
+acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info)
+{
+ acpi_status status;
+ struct acpi_namespace_node *node;
+
+ ACPI_FUNCTION_TRACE(ns_evaluate);
+
+ if (!info) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Initialize the return value to an invalid object */
+
+ info->return_object = NULL;
+
+ /*
+ * Get the actual namespace node for the target object. Handles these cases:
+ *
+ * 1) Null node, Pathname (absolute path)
+ * 2) Node, Pathname (path relative to Node)
+ * 3) Node, Null Pathname
+ */
+ status = acpi_ns_get_node(info->prefix_node, info->pathname,
+ ACPI_NS_NO_UPSEARCH, &info->resolved_node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * For a method alias, we must grab the actual method node so that proper
+ * scoping context will be established before execution.
+ */
+ if (acpi_ns_get_type(info->resolved_node) ==
+ ACPI_TYPE_LOCAL_METHOD_ALIAS) {
+ info->resolved_node =
+ ACPI_CAST_PTR(struct acpi_namespace_node,
+ info->resolved_node->object);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "%s [%p] Value %p\n", info->pathname,
+ info->resolved_node,
+ acpi_ns_get_attached_object(info->resolved_node)));
+
+ node = info->resolved_node;
+
+ /*
+ * Two major cases here:
+ *
+ * 1) The object is a control method -- execute it
+ * 2) The object is not a method -- just return its current value
+ */
+ if (acpi_ns_get_type(info->resolved_node) == ACPI_TYPE_METHOD) {
+ /*
+ * 1) Object is a control method - execute it
+ */
+
+ /* Verify that there is a method object associated with this node */
+
+ info->obj_desc =
+ acpi_ns_get_attached_object(info->resolved_node);
+ if (!info->obj_desc) {
+ ACPI_ERROR((AE_INFO,
+ "Control method has no attached sub-object"));
+ return_ACPI_STATUS(AE_NULL_OBJECT);
+ }
+
+ /*
+ * Calculate the number of arguments being passed to the method
+ */
+
+ info->param_count = 0;
+ if (info->parameters) {
+ while (info->parameters[info->param_count])
+ info->param_count++;
+ }
+
+ /*
+ * Warning if too few or too many arguments have been passed by the
+ * caller. We don't want to abort here with an error because an
+ * incorrect number of arguments may not cause the method to fail.
+ * However, the method will fail if there are too few arguments passed
+ * and the method attempts to use one of the missing ones.
+ */
+
+ if (info->param_count < info->obj_desc->method.param_count) {
+ ACPI_WARNING((AE_INFO,
+ "Insufficient arguments - "
+ "method [%4.4s] needs %d, found %d",
+ acpi_ut_get_node_name(info->resolved_node),
+ info->obj_desc->method.param_count,
+ info->param_count));
+ } else if (info->param_count >
+ info->obj_desc->method.param_count) {
+ ACPI_WARNING((AE_INFO,
+ "Excess arguments - "
+ "method [%4.4s] needs %d, found %d",
+ acpi_ut_get_node_name(info->
+ resolved_node),
+ info->obj_desc->method.param_count,
+ info->param_count));
+ }
+
+ ACPI_DUMP_PATHNAME(info->resolved_node, "Execute Method:",
+ ACPI_LV_INFO, _COMPONENT);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Method at AML address %p Length %X\n",
+ info->obj_desc->method.aml_start + 1,
+ info->obj_desc->method.aml_length - 1));
+
+ /*
+ * Any namespace deletion must acquire both the namespace and
+ * interpreter locks to ensure that no thread is using the portion of
+ * the namespace that is being deleted.
+ *
+ * Execute the method via the interpreter. The interpreter is locked
+ * here before calling into the AML parser
+ */
+ acpi_ex_enter_interpreter();
+ status = acpi_ps_execute_method(info);
+ acpi_ex_exit_interpreter();
+ } else {
+ /*
+ * 2) Object is not a method, return its current value
+ *
+ * Disallow certain object types. For these, "evaluation" is undefined.
+ */
+ switch (info->resolved_node->type) {
+ case ACPI_TYPE_DEVICE:
+ case ACPI_TYPE_EVENT:
+ case ACPI_TYPE_MUTEX:
+ case ACPI_TYPE_REGION:
+ case ACPI_TYPE_THERMAL:
+ case ACPI_TYPE_LOCAL_SCOPE:
+
+ ACPI_ERROR((AE_INFO,
+ "[%4.4s] Evaluation of object type [%s] is not supported",
+ info->resolved_node->name.ascii,
+ acpi_ut_get_type_name(info->resolved_node->
+ type)));
+
+ return_ACPI_STATUS(AE_TYPE);
+
+ default:
+ break;
+ }
+
+ /*
+ * Objects require additional resolution steps (e.g., the Node may be
+ * a field that must be read, etc.) -- we can't just grab the object
+ * out of the node.
+ *
+ * Use resolve_node_to_value() to get the associated value.
+ *
+ * NOTE: we can get away with passing in NULL for a walk state because
+ * resolved_node is guaranteed to not be a reference to either a method
+ * local or a method argument (because this interface is never called
+ * from a running method.)
+ *
+ * Even though we do not directly invoke the interpreter for object
+ * resolution, we must lock it because we could access an opregion.
+ * The opregion access code assumes that the interpreter is locked.
+ */
+ acpi_ex_enter_interpreter();
+
+ /* Note: the resolved value is returned through the node pointer argument */
+
+ status =
+ acpi_ex_resolve_node_to_value(&info->resolved_node, NULL);
+ acpi_ex_exit_interpreter();
+
+ /*
+ * If acpi_ex_resolve_node_to_value() succeeded, the return value was placed
+ * in resolved_node.
+ */
+ if (ACPI_SUCCESS(status)) {
+ status = AE_CTRL_RETURN_VALUE;
+ info->return_object =
+ ACPI_CAST_PTR(union acpi_operand_object,
+ info->resolved_node);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "Returning object %p [%s]\n",
+ info->return_object,
+ acpi_ut_get_object_type_name(info->
+ return_object)));
+ }
+ }
+
+ /* Validation of return values for ACPI-predefined methods and objects */
+
+ if ((status == AE_OK) || (status == AE_CTRL_RETURN_VALUE)) {
+ /*
+ * If this is the first evaluation, check the return value. This
+ * ensures that any warnings will only be emitted during the very
+ * first evaluation of the object.
+ */
+ if (!(node->flags & ANOBJ_EVALUATED)) {
+ /*
+ * Check for a predefined ACPI name. If found, validate the
+ * returned object.
+ *
+ * Note: Ignore return status for now, emit warnings if there are
+ * problems with the returned object. May change later to abort
+ * the method on invalid return object.
+ */
+ (void)acpi_ns_check_predefined_names(node,
+ info->
+ return_object);
+ }
+
+ /* Mark the node as having been evaluated */
+
+ node->flags |= ANOBJ_EVALUATED;
+ }
+
+ /* Check if there is a return value that must be dealt with */
+
+ if (status == AE_CTRL_RETURN_VALUE) {
+
+ /* If caller does not want the return value, delete it */
+
+ if (info->flags & ACPI_IGNORE_RETURN_VALUE) {
+ acpi_ut_remove_reference(info->return_object);
+ info->return_object = NULL;
+ }
+
+ /* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */
+
+ status = AE_OK;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "*** Completed evaluation of object %s ***\n",
+ info->pathname));
+
+ /*
+ * Namespace was unlocked by the handling acpi_ns* function, so we
+ * just return
+ */
+ return_ACPI_STATUS(status);
+}
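+
+/*
+ * Example (illustrative sketch): internal callers drive acpi_ns_evaluate
+ * through a heap-allocated acpi_evaluate_info block, much like the _INI
+ * execution in nsinit.c. The helper name and the "_EXA" method name are
+ * hypothetical; the info fields and flags are the real interface.
+ */
+static acpi_status example_evaluate_method(struct acpi_namespace_node *device_node)
+{
+        struct acpi_evaluate_info *info;
+        acpi_status status;
+
+        info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+        if (!info) {
+                return (AE_NO_MEMORY);
+        }
+
+        /* Evaluate _EXA under device_node, no arguments, discard any return */
+
+        info->prefix_node = device_node;
+        info->pathname = "_EXA";
+        info->parameters = NULL;
+        info->flags = ACPI_IGNORE_RETURN_VALUE;
+
+        status = acpi_ns_evaluate(info);
+
+        ACPI_FREE(info);
+        return (status);
+}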
diff --git a/drivers/acpi/namespace/nsinit.c b/drivers/acpi/namespace/nsinit.c
new file mode 100644
index 0000000..e4c5751
--- /dev/null
+++ b/drivers/acpi/namespace/nsinit.c
@@ -0,0 +1,592 @@
+/******************************************************************************
+ *
+ * Module Name: nsinit - namespace initialization
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acdispat.h>
+#include <acpi/acinterp.h>
+#include <linux/nmi.h>
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsinit")
+
+/* Local prototypes */
+static acpi_status
+acpi_ns_init_one_object(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
+
+static acpi_status
+acpi_ns_init_one_device(acpi_handle obj_handle,
+ u32 nesting_level, void *context, void **return_value);
+
+static acpi_status
+acpi_ns_find_ini_methods(acpi_handle obj_handle,
+ u32 nesting_level, void *context, void **return_value);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_initialize_objects
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Walk the entire namespace and perform any necessary
+ * initialization on the objects found therein
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ns_initialize_objects(void)
+{
+ acpi_status status;
+ struct acpi_init_walk_info info;
+
+ ACPI_FUNCTION_TRACE(ns_initialize_objects);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "**** Starting initialization of namespace objects ****\n"));
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+ "Completing Region/Field/Buffer/Package initialization:"));
+
+ /* Set all init info to zero */
+
+ ACPI_MEMSET(&info, 0, sizeof(struct acpi_init_walk_info));
+
+ /* Walk entire namespace from the supplied root */
+
+ status = acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, acpi_ns_init_one_object,
+ &info, NULL);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
+ }
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+ "\nInitialized %hd/%hd Regions %hd/%hd Fields %hd/%hd Buffers %hd/%hd Packages (%hd nodes)\n",
+ info.op_region_init, info.op_region_count,
+ info.field_init, info.field_count,
+ info.buffer_init, info.buffer_count,
+ info.package_init, info.package_count,
+ info.object_count));
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "%hd Control Methods found\n", info.method_count));
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "%hd Op Regions found\n", info.op_region_count));
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_initialize_devices
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: acpi_status
+ *
+ * DESCRIPTION: Walk the entire namespace and initialize all ACPI devices.
+ * This means running _INI on all present devices.
+ *
+ * Note: We install PCI config space handler on region access,
+ * not here.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ns_initialize_devices(void)
+{
+ acpi_status status;
+ struct acpi_device_walk_info info;
+
+ ACPI_FUNCTION_TRACE(ns_initialize_devices);
+
+ /* Init counters */
+
+ info.device_count = 0;
+ info.num_STA = 0;
+ info.num_INI = 0;
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+ "Initializing Device/Processor/Thermal objects by executing _INI methods:"));
+
+ /* Tree analysis: find all subtrees that contain _INI methods */
+
+ status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, FALSE,
+ acpi_ns_find_ini_methods, &info, NULL);
+ if (ACPI_FAILURE(status)) {
+ goto error_exit;
+ }
+
+ /* Allocate the evaluation information block */
+
+ info.evaluate_info =
+ ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info.evaluate_info) {
+ status = AE_NO_MEMORY;
+ goto error_exit;
+ }
+
+ /* Walk namespace to execute all _INIs on present devices */
+
+ status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, FALSE,
+ acpi_ns_init_one_device, &info, NULL);
+
+ ACPI_FREE(info.evaluate_info);
+ if (ACPI_FAILURE(status)) {
+ goto error_exit;
+ }
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+ "\nExecuted %hd _INI methods requiring %hd _STA executions (examined %hd objects)\n",
+ info.num_INI, info.num_STA, info.device_count));
+
+ return_ACPI_STATUS(status);
+
+ error_exit:
+ ACPI_EXCEPTION((AE_INFO, status, "During device initialization"));
+ return_ACPI_STATUS(status);
+}
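+
+/*
+ * Example (illustrative sketch): once the tables are loaded, the two routines
+ * above are normally run back to back -- deferred object initialization
+ * first, then the _STA/_INI device walk. The wrapper name is hypothetical;
+ * the ordering is the point.
+ */
+static acpi_status example_finish_namespace_init(void)
+{
+        acpi_status status;
+
+        /* Complete Region/Field/Buffer/Package initialization */
+
+        status = acpi_ns_initialize_objects();
+        if (ACPI_FAILURE(status)) {
+                return (status);
+        }
+
+        /* Execute _INI for all present Device/Processor/Thermal objects */
+
+        return (acpi_ns_initialize_devices());
+}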
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_init_one_object
+ *
+ * PARAMETERS: obj_handle - Node
+ * Level - Current nesting level
+ * Context - Points to a init info struct
+ * return_value - Not used
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Callback from acpi_walk_namespace. Invoked for every object
+ * within the namespace.
+ *
+ * Currently, the only objects that require initialization are:
+ * 1) Methods
+ * 2) Op Regions
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_init_one_object(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ acpi_object_type type;
+ acpi_status status = AE_OK;
+ struct acpi_init_walk_info *info =
+ (struct acpi_init_walk_info *)context;
+ struct acpi_namespace_node *node =
+ (struct acpi_namespace_node *)obj_handle;
+ union acpi_operand_object *obj_desc;
+
+ ACPI_FUNCTION_NAME(ns_init_one_object);
+
+ info->object_count++;
+
+ /* And even then, we are only interested in a few object types */
+
+ type = acpi_ns_get_type(obj_handle);
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+ return (AE_OK);
+ }
+
+ /* Increment counters for object types we are looking for */
+
+ switch (type) {
+ case ACPI_TYPE_REGION:
+ info->op_region_count++;
+ break;
+
+ case ACPI_TYPE_BUFFER_FIELD:
+ info->field_count++;
+ break;
+
+ case ACPI_TYPE_LOCAL_BANK_FIELD:
+ info->field_count++;
+ break;
+
+ case ACPI_TYPE_BUFFER:
+ info->buffer_count++;
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+ info->package_count++;
+ break;
+
+ default:
+
+ /* No init required, just exit now */
+ return (AE_OK);
+ }
+
+ /*
+ * If the object is already initialized, nothing else to do
+ */
+ if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
+ return (AE_OK);
+ }
+
+ /*
+ * Must lock the interpreter before executing AML code
+ */
+ acpi_ex_enter_interpreter();
+
+ /*
+ * Each of these types can contain executable AML code within the
+ * declaration.
+ */
+ switch (type) {
+ case ACPI_TYPE_REGION:
+
+ info->op_region_init++;
+ status = acpi_ds_get_region_arguments(obj_desc);
+ break;
+
+ case ACPI_TYPE_BUFFER_FIELD:
+
+ info->field_init++;
+ status = acpi_ds_get_buffer_field_arguments(obj_desc);
+ break;
+
+ case ACPI_TYPE_LOCAL_BANK_FIELD:
+
+ info->field_init++;
+ status = acpi_ds_get_bank_field_arguments(obj_desc);
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ info->buffer_init++;
+ status = acpi_ds_get_buffer_arguments(obj_desc);
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+
+ info->package_init++;
+ status = acpi_ds_get_package_arguments(obj_desc);
+ break;
+
+ default:
+ /* No other types can get here */
+ break;
+ }
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not execute arguments for [%4.4s] (%s)",
+ acpi_ut_get_node_name(node),
+ acpi_ut_get_type_name(type)));
+ }
+
+ /*
+ * Print a dot for each object unless we are going to print the entire
+ * pathname
+ */
+ if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
+ }
+
+ /*
+ * We ignore errors from above, and always return OK, since we don't want
+ * to abort the walk on any single error.
+ */
+ acpi_ex_exit_interpreter();
+ return (AE_OK);
+}
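+
+/*
+ * Example (illustrative ASL, hypothetical names): declarations like these are
+ * the reason for the deferred-argument execution above. Each one embeds
+ * executable AML in its declaration, so the final object cannot be built at
+ * table-load time:
+ *
+ *     OperationRegion (EREG, SystemMemory, Add (BAS0, 0x10), 0x100)
+ *     Name (BUF0, Buffer (Multiply (SIZ0, 2)) {})
+ *
+ * The acpi_ds_get_*_arguments calls above evaluate those embedded terms here,
+ * after the namespace is fully loaded.
+ */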
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_find_ini_methods
+ *
+ * PARAMETERS: acpi_walk_callback
+ *
+ * RETURN: acpi_status
+ *
+ * DESCRIPTION: Called during namespace walk. Finds objects named _INI under
+ * device/processor/thermal objects, and marks the entire subtree
+ * with a SUBTREE_HAS_INI flag. This flag is used during the
+ * subsequent device initialization walk to avoid entire subtrees
+ * that do not contain an _INI.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_find_ini_methods(acpi_handle obj_handle,
+ u32 nesting_level, void *context, void **return_value)
+{
+ struct acpi_device_walk_info *info =
+ ACPI_CAST_PTR(struct acpi_device_walk_info, context);
+ struct acpi_namespace_node *node;
+ struct acpi_namespace_node *parent_node;
+
+ /* Keep count of device/processor/thermal objects */
+
+ node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
+ if ((node->type == ACPI_TYPE_DEVICE) ||
+ (node->type == ACPI_TYPE_PROCESSOR) ||
+ (node->type == ACPI_TYPE_THERMAL)) {
+ info->device_count++;
+ return (AE_OK);
+ }
+
+ /* We are only looking for methods named _INI */
+
+ if (!ACPI_COMPARE_NAME(node->name.ascii, METHOD_NAME__INI)) {
+ return (AE_OK);
+ }
+
+ /*
+ * The only _INI methods that we care about are those that are
+ * present under Device, Processor, and Thermal objects.
+ */
+ parent_node = acpi_ns_get_parent_node(node);
+ switch (parent_node->type) {
+ case ACPI_TYPE_DEVICE:
+ case ACPI_TYPE_PROCESSOR:
+ case ACPI_TYPE_THERMAL:
+
+ /* Mark parent and bubble up the INI present flag to the root */
+
+ while (parent_node) {
+ parent_node->flags |= ANOBJ_SUBTREE_HAS_INI;
+ parent_node = acpi_ns_get_parent_node(parent_node);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_init_one_device
+ *
+ * PARAMETERS: acpi_walk_callback
+ *
+ * RETURN: acpi_status
+ *
+ * DESCRIPTION: This is called once per device soon after ACPI is enabled
+ * to initialize each device. It determines if the device is
+ * present, and if so, calls _INI.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_init_one_device(acpi_handle obj_handle,
+ u32 nesting_level, void *context, void **return_value)
+{
+ struct acpi_device_walk_info *walk_info =
+ ACPI_CAST_PTR(struct acpi_device_walk_info, context);
+ struct acpi_evaluate_info *info = walk_info->evaluate_info;
+ u32 flags;
+ acpi_status status;
+ struct acpi_namespace_node *device_node;
+
+ ACPI_FUNCTION_TRACE(ns_init_one_device);
+
+ /* We are interested in Devices, Processors and thermal_zones only */
+
+ device_node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
+ if ((device_node->type != ACPI_TYPE_DEVICE) &&
+ (device_node->type != ACPI_TYPE_PROCESSOR) &&
+ (device_node->type != ACPI_TYPE_THERMAL)) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Because of an earlier namespace analysis, all subtrees that contain an
+ * _INI method are tagged.
+ *
+ * If this device subtree does not contain any _INI methods, we
+ * can exit now and stop traversing this entire subtree.
+ */
+ if (!(device_node->flags & ANOBJ_SUBTREE_HAS_INI)) {
+ return_ACPI_STATUS(AE_CTRL_DEPTH);
+ }
+
+ /*
+ * Run _STA to determine if this device is present and functioning. We
+ * must know this information for two important reasons (from ACPI spec):
+ *
+ * 1) We can only run _INI if the device is present.
+ * 2) We must abort the device tree walk on this subtree if the device is
+ * not present and is not functional (we will not examine the children)
+ *
+ * The _STA method is not required to be present under the device; we
+ * assume the device is present if _STA does not exist.
+ */
+ ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+ (ACPI_TYPE_METHOD, device_node, METHOD_NAME__STA));
+
+ status = acpi_ut_execute_STA(device_node, &flags);
+ if (ACPI_FAILURE(status)) {
+
+ /* Ignore error and move on to next device */
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Flags == -1 means that _STA was not found. In this case, we assume that
+ * the device is both present and functional.
+ *
+ * From the ACPI spec, description of _STA:
+ *
+ * "If a device object (including the processor object) does not have an
+ * _STA object, then OSPM assumes that all of the above bits are set (in
+ * other words, the device is present, ..., and functioning)"
+ */
+ if (flags != ACPI_UINT32_MAX) {
+ walk_info->num_STA++;
+ }
+
+ /*
+ * Examine the PRESENT and FUNCTIONING status bits
+ *
+ * Note: ACPI spec does not seem to specify behavior for the present but
+ * not functioning case, so we assume functioning if present.
+ */
+ if (!(flags & ACPI_STA_DEVICE_PRESENT)) {
+
+ /* Device is not present, we must examine the Functioning bit */
+
+ if (flags & ACPI_STA_DEVICE_FUNCTIONING) {
+ /*
+ * Device is not present but is "functioning". In this case,
+ * we will not run _INI, but we continue to examine the children
+ * of this device.
+ *
+ * From the ACPI spec, description of _STA: (Note - no mention
+ * of whether to run _INI or not on the device in question)
+ *
+ * "_STA may return bit 0 clear (not present) with bit 3 set
+ * (device is functional). This case is used to indicate a valid
+ * device for which no device driver should be loaded (for example,
+ * a bridge device.) Children of this device may be present and
+ * valid. OSPM should continue enumeration below a device whose
+ * _STA returns this bit combination"
+ */
+ return_ACPI_STATUS(AE_OK);
+ } else {
+ /*
+ * Device is not present and is not functioning. We must abort the
+ * walk of this subtree immediately -- don't look at the children
+ * of such a device.
+ *
+ * From the ACPI spec, description of _INI:
+ *
+ * "If the _STA method indicates that the device is not present,
+ * OSPM will not run the _INI and will not examine the children
+ * of the device for _INI methods"
+ */
+ return_ACPI_STATUS(AE_CTRL_DEPTH);
+ }
+ }
+
+ /*
+ * The device is present or is assumed present if no _STA exists.
+ * Run the _INI if it exists (not required to exist)
+ *
+ * Note: We know there is an _INI within this subtree, but it may not be
+ * under this particular device, it may be lower in the branch.
+ */
+ ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+ (ACPI_TYPE_METHOD, device_node, METHOD_NAME__INI));
+
+ info->prefix_node = device_node;
+ info->pathname = METHOD_NAME__INI;
+ info->parameters = NULL;
+ info->flags = ACPI_IGNORE_RETURN_VALUE;
+
+ /*
+ * Some hardware relies on this being executed as atomically
+ * as possible (without an NMI being received in the middle of
+ * this) - so disable NMIs and initialize the device:
+ */
+ acpi_nmi_disable();
+ status = acpi_ns_evaluate(info);
+ acpi_nmi_enable();
+
+ if (ACPI_SUCCESS(status)) {
+ walk_info->num_INI++;
+
+ if ((acpi_dbg_level <= ACPI_LV_ALL_EXCEPTIONS) &&
+ (!(acpi_dbg_level & ACPI_LV_INFO))) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
+ }
+ }
+#ifdef ACPI_DEBUG_OUTPUT
+ else if (status != AE_NOT_FOUND) {
+
+ /* Ignore error and move on to next device */
+
+ char *scope_name =
+ acpi_ns_get_external_pathname(info->resolved_node);
+
+ ACPI_EXCEPTION((AE_INFO, status, "during %s._INI execution",
+ scope_name));
+ ACPI_FREE(scope_name);
+ }
+#endif
+
+ /* Ignore errors from above */
+
+ status = AE_OK;
+
+ /*
+ * The _INI method has been run if present; call the Global Initialization
+ * Handler for this device.
+ */
+ if (acpi_gbl_init_handler) {
+ status =
+ acpi_gbl_init_handler(device_node, ACPI_INIT_DEVICE_INI);
+ }
+
+ return_ACPI_STATUS(status);
+}
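+
+/*
+ * Example (illustrative sketch): the _STA decision above, restated as a small
+ * helper that returns the walk status a caller would propagate. The helper
+ * name is hypothetical; the ACPI_STA_* masks and the AE_CTRL_DEPTH semantics
+ * are the real ones.
+ */
+static acpi_status example_sta_to_walk_status(u32 sta_flags)
+{
+        /* Present, or absent but functioning: keep walking this subtree */
+
+        if (sta_flags &
+            (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING)) {
+                return (AE_OK);
+        }
+
+        /* Neither present nor functioning: prune the entire subtree */
+
+        return (AE_CTRL_DEPTH);
+}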
diff --git a/drivers/acpi/namespace/nsload.c b/drivers/acpi/namespace/nsload.c
new file mode 100644
index 0000000..a4a412b
--- /dev/null
+++ b/drivers/acpi/namespace/nsload.c
@@ -0,0 +1,314 @@
+/******************************************************************************
+ *
+ * Module Name: nsload - namespace loading/expanding/contracting procedures
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acdispat.h>
+#include <acpi/actables.h>
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsload")
+
+/* Local prototypes */
+#ifdef ACPI_FUTURE_IMPLEMENTATION
+acpi_status acpi_ns_unload_namespace(acpi_handle handle);
+
+static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle);
+#endif
+
+#ifndef ACPI_NO_METHOD_EXECUTION
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_load_table
+ *
+ * PARAMETERS: table_index - Index for table to be loaded
+ * Node - Owning NS node
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Load one ACPI table into the namespace
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ns_load_table);
+
+ /*
+ * Parse the table and load the namespace with all named
+ * objects found within. Control methods are NOT parsed
+ * at this time. In fact, the control methods cannot be
+ * parsed until the entire namespace is loaded, because
+ * if a control method makes a forward reference (call)
+ * to another control method, we can't continue parsing
+ * because we don't know how many arguments to parse next!
+ */
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* If table already loaded into namespace, just return */
+
+ if (acpi_tb_is_table_loaded(table_index)) {
+ status = AE_ALREADY_EXISTS;
+ goto unlock;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "**** Loading table into namespace ****\n"));
+
+ status = acpi_tb_allocate_owner_id(table_index);
+ if (ACPI_FAILURE(status)) {
+ goto unlock;
+ }
+
+ status = acpi_ns_parse_table(table_index, node);
+ if (ACPI_SUCCESS(status)) {
+ acpi_tb_set_table_loaded_flag(table_index, TRUE);
+ } else {
+ (void)acpi_tb_release_owner_id(table_index);
+ }
+
+ unlock:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Now we can parse the control methods. We always parse
+ * them here for a sanity check, and if configured for
+ * just-in-time parsing, we delete the control method
+ * parse trees.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "**** Begin Table Method Parsing and Object Initialization ****\n"));
+
+ status = acpi_ds_initialize_objects(table_index, node);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "**** Completed Table Method Parsing and Object Initialization ****\n"));
+
+ return_ACPI_STATUS(status);
+}
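+
+/*
+ * Example (illustrative sketch): the table manager loads a newly installed
+ * table under the namespace root roughly like this. The wrapper name is
+ * hypothetical; table_index comes from the root table list.
+ */
+static acpi_status example_load_installed_table(u32 table_index)
+{
+        /* Populate the namespace, then parse methods and init objects */
+
+        return (acpi_ns_load_table(table_index, acpi_gbl_root_node));
+}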
+
+#ifdef ACPI_OBSOLETE_FUNCTIONS
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_load_namespace
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Load the namespace from whatever the DSDT points to.
+ * (DSDT points to either the BIOS or a buffer.)
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ns_load_namespace(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_load_name_space);
+
+ /* There must be at least a DSDT installed */
+
+ if (acpi_gbl_DSDT == NULL) {
+ ACPI_ERROR((AE_INFO, "DSDT is not in memory"));
+ return_ACPI_STATUS(AE_NO_ACPI_TABLES);
+ }
+
+ /*
+ * Load the namespace. The DSDT is required,
+ * but the SSDT and PSDT tables are optional.
+ */
+ status = acpi_ns_load_table_by_type(ACPI_TABLE_ID_DSDT);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Ignore exceptions from these */
+
+ (void)acpi_ns_load_table_by_type(ACPI_TABLE_ID_SSDT);
+ (void)acpi_ns_load_table_by_type(ACPI_TABLE_ID_PSDT);
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+ "ACPI Namespace successfully loaded at root %p\n",
+ acpi_gbl_root_node));
+
+ return_ACPI_STATUS(status);
+}
+#endif
+
+#ifdef ACPI_FUTURE_IMPLEMENTATION
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_delete_subtree
+ *
+ * PARAMETERS: start_handle - Handle in namespace where search begins
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Walks the namespace starting at the given handle and deletes
+ * all objects, entries, and scopes in the entire subtree.
+ *
+ * Namespace/Interpreter should be locked or the subsystem should
+ * be in shutdown before this routine is called.
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle)
+{
+ acpi_status status;
+ acpi_handle child_handle;
+ acpi_handle parent_handle;
+ acpi_handle next_child_handle;
+ acpi_handle dummy;
+ u32 level;
+
+ ACPI_FUNCTION_TRACE(ns_delete_subtree);
+
+ parent_handle = start_handle;
+ child_handle = NULL;
+ level = 1;
+
+ /*
+ * Traverse the tree of objects until we bubble back up
+ * to where we started.
+ */
+ while (level > 0) {
+
+ /* Attempt to get the next object in this scope */
+
+ status = acpi_get_next_object(ACPI_TYPE_ANY, parent_handle,
+ child_handle, &next_child_handle);
+
+ child_handle = next_child_handle;
+
+ /* Did we get a new object? */
+
+ if (ACPI_SUCCESS(status)) {
+
+ /* Check if this object has any children */
+
+ if (ACPI_SUCCESS
+ (acpi_get_next_object
+ (ACPI_TYPE_ANY, child_handle, NULL, &dummy))) {
+ /*
+ * There is at least one child of this object,
+ * visit the object
+ */
+ level++;
+ parent_handle = child_handle;
+ child_handle = NULL;
+ }
+ } else {
+ /*
+ * No more children in this object, go back up to
+ * the object's parent
+ */
+ level--;
+
+ /* Delete all children now */
+
+ acpi_ns_delete_children(child_handle);
+
+ child_handle = parent_handle;
+ status = acpi_get_parent(parent_handle, &parent_handle);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+ }
+
+ /* Now delete the starting object, and we are done */
+
+ acpi_ns_delete_node(child_handle);
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_unload_namespace
+ *
+ * PARAMETERS: Handle - Root of namespace subtree to be deleted
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Shrinks the namespace, typically in response to an undocking
+ * event. Deletes an entire subtree starting from (and
+ * including) the given handle.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ns_unload_namespace(acpi_handle handle)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ns_unload_name_space);
+
+ /* Parameter validation */
+
+ if (!acpi_gbl_root_node) {
+ return_ACPI_STATUS(AE_NO_NAMESPACE);
+ }
+
+ if (!handle) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* This function does the real work */
+
+ status = acpi_ns_delete_subtree(handle);
+
+ return_ACPI_STATUS(status);
+}
+#endif
+#endif
diff --git a/drivers/acpi/namespace/nsnames.c b/drivers/acpi/namespace/nsnames.c
new file mode 100644
index 0000000..42a39a7
--- /dev/null
+++ b/drivers/acpi/namespace/nsnames.c
@@ -0,0 +1,264 @@
+/*******************************************************************************
+ *
+ * Module Name: nsnames - Name manipulation and search
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/amlcode.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsnames")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_build_external_path
+ *
+ * PARAMETERS: Node - NS node whose pathname is needed
+ * Size - Size of the pathname
+ * *name_buffer - Where to return the pathname
+ *
+ * RETURN: Status
+ * Places the pathname into the name_buffer, in external format
+ * (name segments separated by path separators)
+ *
+ * DESCRIPTION: Generate a full pathname
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ns_build_external_path(struct acpi_namespace_node *node,
+ acpi_size size, char *name_buffer)
+{
+ acpi_size index;
+ struct acpi_namespace_node *parent_node;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* Special case for root */
+
+ index = size - 1;
+ if (index < ACPI_NAME_SIZE) {
+ name_buffer[0] = AML_ROOT_PREFIX;
+ name_buffer[1] = 0;
+ return (AE_OK);
+ }
+
+ /* Store terminator byte, then build name backwards */
+
+ parent_node = node;
+ name_buffer[index] = 0;
+
+ while ((index > ACPI_NAME_SIZE) && (parent_node != acpi_gbl_root_node)) {
+ index -= ACPI_NAME_SIZE;
+
+ /* Put the name into the buffer */
+
+ ACPI_MOVE_32_TO_32((name_buffer + index), &parent_node->name);
+ parent_node = acpi_ns_get_parent_node(parent_node);
+
+ /* Prefix name with the path separator */
+
+ index--;
+ name_buffer[index] = ACPI_PATH_SEPARATOR;
+ }
+
+ /* Overwrite final separator with the root prefix character */
+
+ name_buffer[index] = AML_ROOT_PREFIX;
+
+ if (index != 0) {
+ ACPI_ERROR((AE_INFO,
+ "Could not construct external pathname; index=%X, size=%X, Path=%s",
+ (u32) index, (u32) size, &name_buffer[size]));
+
+ return (AE_BAD_PARAMETER);
+ }
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_get_external_pathname
+ *
+ * PARAMETERS: Node - Namespace node whose pathname is needed
+ *
+ * RETURN: Pointer to storage containing the fully qualified name of
+ * the node, in external format (name segments separated by path
+ * separators).
+ *
+ * DESCRIPTION: Used for debug printing and error messages, for example in
+ * acpi_ns_search_one_scope().
+ *
+ ******************************************************************************/
+
+char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
+{
+ acpi_status status;
+ char *name_buffer;
+ acpi_size size;
+
+ ACPI_FUNCTION_TRACE_PTR(ns_get_external_pathname, node);
+
+ /* Calculate required buffer size based on depth below root */
+
+ size = acpi_ns_get_pathname_length(node);
+ if (!size) {
+ return_PTR(NULL);
+ }
+
+ /* Allocate a buffer to be returned to caller */
+
+ name_buffer = ACPI_ALLOCATE_ZEROED(size);
+ if (!name_buffer) {
+ ACPI_ERROR((AE_INFO, "Allocation failure"));
+ return_PTR(NULL);
+ }
+
+ /* Build the path in the allocated buffer */
+
+ status = acpi_ns_build_external_path(node, size, name_buffer);
+ if (ACPI_FAILURE(status)) {
+ ACPI_FREE(name_buffer);
+ return_PTR(NULL);
+ }
+
+ return_PTR(name_buffer);
+}
+
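+/*
+ * Illustrative usage sketch (hypothetical caller, not part of this file):
+ * the pathname is allocated here, so the caller owns it and must release
+ * it with ACPI_FREE, as the callers in nspredef.c and nssearch.c do.
+ *
+ *     pathname = acpi_ns_get_external_pathname(node);
+ *     if (pathname) {
+ *         ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "Node: %s\n", pathname));
+ *         ACPI_FREE(pathname);
+ *     }
+ */
+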
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_get_pathname_length
+ *
+ * PARAMETERS: Node - Namespace node
+ *
+ * RETURN: Length of path, including prefix
+ *
+ * DESCRIPTION: Get the length of the pathname string for this node
+ *
+ ******************************************************************************/
+
+acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
+{
+ acpi_size size;
+ struct acpi_namespace_node *next_node;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /*
+ * Compute length of pathname as 5 * number of name segments.
+ * Go back up the parent tree to the root
+ */
+ size = 0;
+ next_node = node;
+
+ while (next_node && (next_node != acpi_gbl_root_node)) {
+ if (ACPI_GET_DESCRIPTOR_TYPE(next_node) != ACPI_DESC_TYPE_NAMED) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid Namespace Node (%p) while traversing namespace",
+ next_node));
+ return 0;
+ }
+ size += ACPI_PATH_SEGMENT_LENGTH;
+ next_node = acpi_ns_get_parent_node(next_node);
+ }
+
+ if (!size) {
+ size = 1; /* Root node case */
+ }
+
+ return (size + 1); /* +1 for null string terminator */
+}
+
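+/*
+ * Worked example (illustrative; the path \_SB_.PCI0 is hypothetical):
+ * a node two levels below the root yields a length of
+ * (2 * ACPI_PATH_SEGMENT_LENGTH) + 1 = 11 bytes. acpi_ns_build_external_path
+ * then fills the buffer backwards from the terminator:
+ *
+ *     Index:  0   1..4   5   6..9   10
+ *     Char:   \   _SB_   .   PCI0   \0     ==> "\_SB_.PCI0"
+ */
+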
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_handle_to_pathname
+ *
+ * PARAMETERS: target_handle - Handle of named object whose name is
+ * to be found
+ * Buffer - Where the pathname is returned
+ *
+ * RETURN: Status, Buffer is filled with pathname if status is AE_OK
+ *
+ * DESCRIPTION: Build and return a full namespace pathname
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_handle_to_pathname(acpi_handle target_handle,
+ struct acpi_buffer * buffer)
+{
+ acpi_status status;
+ struct acpi_namespace_node *node;
+ acpi_size required_size;
+
+ ACPI_FUNCTION_TRACE_PTR(ns_handle_to_pathname, target_handle);
+
+ node = acpi_ns_map_handle_to_node(target_handle);
+ if (!node) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Determine size required for the caller buffer */
+
+ required_size = acpi_ns_get_pathname_length(node);
+ if (!required_size) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Validate/Allocate/Clear caller buffer */
+
+ status = acpi_ut_initialize_buffer(buffer, required_size);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Build the path in the caller buffer */
+
+ status =
+ acpi_ns_build_external_path(node, required_size, buffer->pointer);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%X]\n",
+ (char *)buffer->pointer, (u32) required_size));
+ return_ACPI_STATUS(AE_OK);
+}
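+
+/*
+ * Illustrative usage sketch (hypothetical caller, not part of this file),
+ * assuming the standard ACPI_ALLOCATE_LOCAL_BUFFER convention for
+ * struct acpi_buffer: acpi_ut_initialize_buffer() then allocates the
+ * buffer for the caller, who releases buffer.pointer with ACPI_FREE.
+ *
+ *     struct acpi_buffer buffer = { ACPI_ALLOCATE_LOCAL_BUFFER, NULL };
+ *
+ *     status = acpi_ns_handle_to_pathname(handle, &buffer);
+ *     if (ACPI_SUCCESS(status)) {
+ *         ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "%s\n", (char *)buffer.pointer));
+ *         ACPI_FREE(buffer.pointer);
+ *     }
+ */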
diff --git a/drivers/acpi/namespace/nsobject.c b/drivers/acpi/namespace/nsobject.c
new file mode 100644
index 0000000..15fe09e
--- /dev/null
+++ b/drivers/acpi/namespace/nsobject.c
@@ -0,0 +1,440 @@
+/*******************************************************************************
+ *
+ * Module Name: nsobject - Utilities for objects attached to namespace
+ * table entries
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsobject")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_attach_object
+ *
+ * PARAMETERS: Node - Parent Node
+ * Object - Object to be attached
+ * Type - Type of object, or ACPI_TYPE_ANY if not
+ * known
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Record the given object as the value associated with the
+ * name whose acpi_handle is passed. If Object is NULL
+ * and Type is ACPI_TYPE_ANY, set the name as having no value.
+ * Note: A future implementation may require that the Node->Flags
+ * field be passed as a parameter.
+ *
+ * MUTEX: Assumes namespace is locked
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ns_attach_object(struct acpi_namespace_node *node,
+ union acpi_operand_object *object, acpi_object_type type)
+{
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object *last_obj_desc;
+ acpi_object_type object_type = ACPI_TYPE_ANY;
+
+ ACPI_FUNCTION_TRACE(ns_attach_object);
+
+ /*
+ * Parameter validation
+ */
+ if (!node) {
+
+ /* Invalid handle */
+
+ ACPI_ERROR((AE_INFO, "Null NamedObj handle"));
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ if (!object && (ACPI_TYPE_ANY != type)) {
+
+ /* Null object */
+
+ ACPI_ERROR((AE_INFO,
+ "Null object, but type not ACPI_TYPE_ANY"));
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
+
+ /* Not a name handle */
+
+ ACPI_ERROR((AE_INFO, "Invalid handle %p [%s]",
+ node, acpi_ut_get_descriptor_name(node)));
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Check if this object is already attached */
+
+ if (node->object == object) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Obj %p already installed in NameObj %p\n",
+ object, node));
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* If null object, we will just install it */
+
+ if (!object) {
+ obj_desc = NULL;
+ object_type = ACPI_TYPE_ANY;
+ }
+
+ /*
+ * If the source object is a namespace Node with an attached object,
+ * we will use that (attached) object
+ */
+ else if ((ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED) &&
+ ((struct acpi_namespace_node *)object)->object) {
+ /*
+ * Value passed is a name handle and that name has a
+ * non-null value. Use that name's value and type.
+ */
+ obj_desc = ((struct acpi_namespace_node *)object)->object;
+ object_type = ((struct acpi_namespace_node *)object)->type;
+ }
+
+ /*
+ * Otherwise, we will use the parameter object, but we must type
+ * it first
+ */
+ else {
+ obj_desc = (union acpi_operand_object *)object;
+
+ /* Use the given type */
+
+ object_type = type;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Installing %p into Node %p [%4.4s]\n",
+ obj_desc, node, acpi_ut_get_node_name(node)));
+
+ /* Detach an existing attached object if present */
+
+ if (node->object) {
+ acpi_ns_detach_object(node);
+ }
+
+ if (obj_desc) {
+ /*
+ * Must increment the new value's reference count
+ * (if it is an internal object)
+ */
+ acpi_ut_add_reference(obj_desc);
+
+ /*
+ * Handle objects with multiple descriptors - walk
+ * to the end of the descriptor list
+ */
+ last_obj_desc = obj_desc;
+ while (last_obj_desc->common.next_object) {
+ last_obj_desc = last_obj_desc->common.next_object;
+ }
+
+ /* Install the object at the front of the object list */
+
+ last_obj_desc->common.next_object = node->object;
+ }
+
+ node->type = (u8) object_type;
+ node->object = obj_desc;
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_detach_object
+ *
+ * PARAMETERS: Node - A Namespace node whose object will be detached
+ *
+ * RETURN: None.
+ *
+ * DESCRIPTION: Detach/delete an object associated with a namespace node.
+ * If the object is an allocated object, it is freed.
+ * Otherwise, the field is simply cleared.
+ *
+ ******************************************************************************/
+
+void acpi_ns_detach_object(struct acpi_namespace_node *node)
+{
+ union acpi_operand_object *obj_desc;
+
+ ACPI_FUNCTION_TRACE(ns_detach_object);
+
+ obj_desc = node->object;
+
+ if (!obj_desc ||
+ (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_DATA)) {
+ return_VOID;
+ }
+
+ /* Clear the entry in all cases */
+
+ node->object = NULL;
+ if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_OPERAND) {
+ node->object = obj_desc->common.next_object;
+ if (node->object &&
+ (ACPI_GET_OBJECT_TYPE(node->object) !=
+ ACPI_TYPE_LOCAL_DATA)) {
+ node->object = node->object->common.next_object;
+ }
+ }
+
+ /* Reset the node type to untyped */
+
+ node->type = ACPI_TYPE_ANY;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "Node %p [%4.4s] Object %p\n",
+ node, acpi_ut_get_node_name(node), obj_desc));
+
+ /* Remove one reference on the object (and all subobjects) */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_get_attached_object
+ *
+ * PARAMETERS: Node - Namespace node
+ *
+ * RETURN: Current value of the object field from the Node whose
+ * handle is passed
+ *
+ * DESCRIPTION: Obtain the object attached to a namespace node.
+ *
+ ******************************************************************************/
+
+union acpi_operand_object *acpi_ns_get_attached_object(struct
+ acpi_namespace_node
+ *node)
+{
+ ACPI_FUNCTION_TRACE_PTR(ns_get_attached_object, node);
+
+ if (!node) {
+ ACPI_WARNING((AE_INFO, "Null Node ptr"));
+ return_PTR(NULL);
+ }
+
+ if (!node->object ||
+ ((ACPI_GET_DESCRIPTOR_TYPE(node->object) != ACPI_DESC_TYPE_OPERAND)
+ && (ACPI_GET_DESCRIPTOR_TYPE(node->object) !=
+ ACPI_DESC_TYPE_NAMED))
+ || (ACPI_GET_OBJECT_TYPE(node->object) == ACPI_TYPE_LOCAL_DATA)) {
+ return_PTR(NULL);
+ }
+
+ return_PTR(node->object);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_get_secondary_object
+ *
+ * PARAMETERS: Node - Namespace node
+ *
+ * RETURN: Current value of the object field from the Node whose
+ * handle is passed.
+ *
+ * DESCRIPTION: Obtain a secondary object associated with a namespace node.
+ *
+ ******************************************************************************/
+
+union acpi_operand_object *acpi_ns_get_secondary_object(union
+ acpi_operand_object
+ *obj_desc)
+{
+ ACPI_FUNCTION_TRACE_PTR(ns_get_secondary_object, obj_desc);
+
+ if ((!obj_desc) ||
+ (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_DATA) ||
+ (!obj_desc->common.next_object) ||
+ (ACPI_GET_OBJECT_TYPE(obj_desc->common.next_object) ==
+ ACPI_TYPE_LOCAL_DATA)) {
+ return_PTR(NULL);
+ }
+
+ return_PTR(obj_desc->common.next_object);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_attach_data
+ *
+ * PARAMETERS: Node - Namespace node
+ * Handler - Handler to be associated with the data
+ * Data - Data to be attached
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Low-level attach data. Create and attach a Data object.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_attach_data(struct acpi_namespace_node *node,
+ acpi_object_handler handler, void *data)
+{
+ union acpi_operand_object *prev_obj_desc;
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object *data_desc;
+
+ /* We only allow one attachment per handler */
+
+ prev_obj_desc = NULL;
+ obj_desc = node->object;
+ while (obj_desc) {
+ if ((ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_DATA) &&
+ (obj_desc->data.handler == handler)) {
+ return (AE_ALREADY_EXISTS);
+ }
+
+ prev_obj_desc = obj_desc;
+ obj_desc = obj_desc->common.next_object;
+ }
+
+ /* Create an internal object for the data */
+
+ data_desc = acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_DATA);
+ if (!data_desc) {
+ return (AE_NO_MEMORY);
+ }
+
+ data_desc->data.handler = handler;
+ data_desc->data.pointer = data;
+
+ /* Install the data object */
+
+ if (prev_obj_desc) {
+ prev_obj_desc->common.next_object = data_desc;
+ } else {
+ node->object = data_desc;
+ }
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_detach_data
+ *
+ * PARAMETERS: Node - Namespace node
+ * Handler - Handler associated with the data
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Low-level detach data. Delete the data node, but the caller
+ * is responsible for the actual data.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_detach_data(struct acpi_namespace_node * node,
+ acpi_object_handler handler)
+{
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object *prev_obj_desc;
+
+ prev_obj_desc = NULL;
+ obj_desc = node->object;
+ while (obj_desc) {
+ if ((ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_DATA) &&
+ (obj_desc->data.handler == handler)) {
+ if (prev_obj_desc) {
+ prev_obj_desc->common.next_object =
+ obj_desc->common.next_object;
+ } else {
+ node->object = obj_desc->common.next_object;
+ }
+
+ acpi_ut_remove_reference(obj_desc);
+ return (AE_OK);
+ }
+
+ prev_obj_desc = obj_desc;
+ obj_desc = obj_desc->common.next_object;
+ }
+
+ return (AE_NOT_FOUND);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_get_attached_data
+ *
+ * PARAMETERS: Node - Namespace node
+ * Handler - Handler associated with the data
+ * Data - Where the data is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Low level interface to obtain data previously associated with
+ * a namespace node.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_get_attached_data(struct acpi_namespace_node * node,
+ acpi_object_handler handler, void **data)
+{
+ union acpi_operand_object *obj_desc;
+
+ obj_desc = node->object;
+ while (obj_desc) {
+ if ((ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_DATA) &&
+ (obj_desc->data.handler == handler)) {
+ *data = obj_desc->data.pointer;
+ return (AE_OK);
+ }
+
+ obj_desc = obj_desc->common.next_object;
+ }
+
+ return (AE_NOT_FOUND);
+}
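+
+/*
+ * Illustrative usage sketch (my_handler and my_context are hypothetical
+ * names, not part of this file). The three low-level data routines above
+ * are used as a set; the attached data remains owned by the caller, since
+ * acpi_ns_detach_data deletes only the internal ACPI_TYPE_LOCAL_DATA
+ * wrapper object:
+ *
+ *     status = acpi_ns_attach_data(node, my_handler, my_context);
+ *     ...
+ *     status = acpi_ns_get_attached_data(node, my_handler, &data);
+ *     ...
+ *     status = acpi_ns_detach_data(node, my_handler);
+ */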
diff --git a/drivers/acpi/namespace/nsparse.c b/drivers/acpi/namespace/nsparse.c
new file mode 100644
index 0000000..a82271a
--- /dev/null
+++ b/drivers/acpi/namespace/nsparse.c
@@ -0,0 +1,203 @@
+/******************************************************************************
+ *
+ * Module Name: nsparse - namespace interface to AML parser
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acparser.h>
+#include <acpi/acdispat.h>
+#include <acpi/actables.h>
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsparse")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_one_complete_parse
+ *
+ * PARAMETERS: pass_number - 1 or 2
+ * table_index - Index of the table to be parsed
+ * start_node - Where to enter the table into the namespace
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Perform one complete parse of an ACPI/AML table.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ns_one_complete_parse(u32 pass_number,
+ u32 table_index,
+ struct acpi_namespace_node *start_node)
+{
+ union acpi_parse_object *parse_root;
+ acpi_status status;
+ u32 aml_length;
+ u8 *aml_start;
+ struct acpi_walk_state *walk_state;
+ struct acpi_table_header *table;
+ acpi_owner_id owner_id;
+
+ ACPI_FUNCTION_TRACE(ns_one_complete_parse);
+
+ status = acpi_tb_get_owner_id(table_index, &owner_id);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Create and init a Root Node */
+
+ parse_root = acpi_ps_create_scope_op();
+ if (!parse_root) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Create and initialize a new walk state */
+
+ walk_state = acpi_ds_create_walk_state(owner_id, NULL, NULL, NULL);
+ if (!walk_state) {
+ acpi_ps_free_op(parse_root);
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ status = acpi_get_table_by_index(table_index, &table);
+ if (ACPI_FAILURE(status)) {
+ acpi_ds_delete_walk_state(walk_state);
+ acpi_ps_free_op(parse_root);
+ return_ACPI_STATUS(status);
+ }
+
+ /* Table must consist of at least a complete header */
+
+ if (table->length < sizeof(struct acpi_table_header)) {
+ status = AE_BAD_HEADER;
+ } else {
+ aml_start = (u8 *) table + sizeof(struct acpi_table_header);
+ aml_length = table->length - sizeof(struct acpi_table_header);
+ status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL,
+ aml_start, aml_length, NULL,
+ (u8) pass_number);
+ }
+
+ if (ACPI_FAILURE(status)) {
+ acpi_ds_delete_walk_state(walk_state);
+ goto cleanup;
+ }
+
+ /* start_node is the default location to load the table */
+
+ if (start_node && start_node != acpi_gbl_root_node) {
+ status =
+ acpi_ds_scope_stack_push(start_node, ACPI_TYPE_METHOD,
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ acpi_ds_delete_walk_state(walk_state);
+ goto cleanup;
+ }
+ }
+
+ /* Parse the AML */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "*PARSE* pass %d parse\n",
+ (unsigned)pass_number));
+ status = acpi_ps_parse_aml(walk_state);
+
+ cleanup:
+ acpi_ps_delete_parse_tree(parse_root);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_parse_table
+ *
+ * PARAMETERS: table_index - Index of the ACPI table to be parsed
+ * start_node - Where to enter the table into the namespace
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Parse AML within an ACPI table and return a tree of ops
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ns_parse_table);
+
+ /*
+ * AML Parse, pass 1
+ *
+ * In this pass, we load most of the namespace. Control methods
+ * are not parsed until later. A parse tree is not created. Instead,
+ * each Parser Op subtree is deleted when it is finished. This saves
+ * a great deal of memory, and allows a small cache of parse objects
+ * to service the entire parse. The second pass of the parse then
+ * performs another complete parse of the AML.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 1\n"));
+ status =
+ acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1, table_index,
+ start_node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * AML Parse, pass 2
+ *
+ * In this pass, we resolve forward references and other things
+ * that could not be completed during the first pass.
+ * Another complete parse of the AML is performed, but the
+ * overhead of this is compensated for by the fact that the
+ * parse objects are all cached.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 2\n"));
+ status =
+ acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2, table_index,
+ start_node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ return_ACPI_STATUS(status);
+}
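+
+/*
+ * For example (illustrative): a definition block may reference a name
+ * before the object that defines it appears later in the same table.
+ * Pass 1 creates the namespace entries without building a parse tree;
+ * pass 2 re-parses the AML so that such forward references can be
+ * resolved once every name in the table exists.
+ */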
diff --git a/drivers/acpi/namespace/nspredef.c b/drivers/acpi/namespace/nspredef.c
new file mode 100644
index 0000000..0f17cf0
--- /dev/null
+++ b/drivers/acpi/namespace/nspredef.c
@@ -0,0 +1,900 @@
+/******************************************************************************
+ *
+ * Module Name: nspredef - Validation of ACPI predefined methods and objects
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acpredef.h>
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("nspredef")
+
+/*******************************************************************************
+ *
+ * This module validates predefined ACPI objects that appear in the namespace,
+ * at the time they are evaluated (via acpi_evaluate_object). The purpose of this
+ * validation is to detect problems with BIOS-exposed predefined ACPI objects
+ * before the results are returned to the ACPI-related drivers.
+ *
+ * There are several areas that are validated:
+ *
+ * 1) The number of input arguments as defined by the method/object in the
+ * ASL is validated against the ACPI specification.
+ * 2) The type of the return object (if any) is validated against the ACPI
+ * specification.
+ * 3) For returned package objects, the count of package elements is
+ * validated, as well as the type of each package element. Nested
+ * packages are supported.
+ *
+ * For any problems found, a warning message is issued.
+ *
+ ******************************************************************************/
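+/*
+ * For example (illustrative): _STA is defined by the ACPI specification to
+ * take no arguments and to return an Integer. A BIOS _STA declared with
+ * arguments triggers a warning from acpi_ns_check_parameter_count(); one
+ * that returns a Buffer triggers a warning from acpi_ns_check_object_type()
+ * and an AE_AML_OPERAND_TYPE status.
+ */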
+/* Local prototypes */
+static acpi_status
+acpi_ns_check_package(char *pathname,
+ union acpi_operand_object *return_object,
+ const union acpi_predefined_info *predefined);
+
+static acpi_status
+acpi_ns_check_package_elements(char *pathname,
+ union acpi_operand_object **elements,
+ u8 type1, u32 count1, u8 type2, u32 count2);
+
+static acpi_status
+acpi_ns_check_object_type(char *pathname,
+ union acpi_operand_object *return_object,
+ u32 expected_btypes, u32 package_index);
+
+static acpi_status
+acpi_ns_check_reference(char *pathname,
+ union acpi_operand_object *return_object);
+
+/*
+ * Names for the types that can be returned by the predefined objects.
+ * Used for warning messages. Must be in the same order as the ACPI_RTYPEs
+ */
+static const char *acpi_rtype_names[] = {
+ "/Integer",
+ "/String",
+ "/Buffer",
+ "/Package",
+ "/Reference",
+};
+
+#define ACPI_NOT_PACKAGE ACPI_UINT32_MAX
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_check_predefined_names
+ *
+ * PARAMETERS: Node - Namespace node for the method/object
+ * return_object - Object returned from the evaluation of this
+ * method/object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Check an ACPI name for a match in the predefined name list.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
+ union acpi_operand_object *return_object)
+{
+ acpi_status status = AE_OK;
+ const union acpi_predefined_info *predefined;
+ char *pathname;
+
+ /* Match the name for this method/object against the predefined list */
+
+ predefined = acpi_ns_check_for_predefined_name(node);
+ if (!predefined) {
+
+ /* Name was not one of the predefined names */
+
+ return (AE_OK);
+ }
+
+ /* Get the full pathname to the object, for use in error messages */
+
+ pathname = acpi_ns_get_external_pathname(node);
+ if (!pathname) {
+ pathname = ACPI_CAST_PTR(char, predefined->info.name);
+ }
+
+ /*
+ * Check that the parameter count for this method is in accordance
+ * with the ACPI specification.
+ */
+ acpi_ns_check_parameter_count(pathname, node, predefined);
+
+ /*
+ * If there is no return value, check if we require a return value for
+ * this predefined name. Either one return value is expected, or none,
+ * for both methods and other objects.
+ *
+ * Exit now if there is no return object. Warning if one was expected.
+ */
+ if (!return_object) {
+ if ((predefined->info.expected_btypes) &&
+ (!(predefined->info.expected_btypes & ACPI_RTYPE_NONE))) {
+ ACPI_ERROR((AE_INFO,
+ "%s: Missing expected return value",
+ pathname));
+
+ status = AE_AML_NO_RETURN_VALUE;
+ }
+ goto exit;
+ }
+
+ /*
+ * We have a return value, but if one wasn't expected, just exit; this is
+ * not a problem.
+ *
+ * For example, if "Implicit return value" is enabled, methods will
+ * always return a value
+ */
+ if (!predefined->info.expected_btypes) {
+ goto exit;
+ }
+
+ /*
+ * Check that the type of the return object is what is expected for
+ * this predefined name
+ */
+ status = acpi_ns_check_object_type(pathname, return_object,
+ predefined->info.expected_btypes,
+ ACPI_NOT_PACKAGE);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /* For returned Package objects, check the type of all sub-objects */
+
+ if (ACPI_GET_OBJECT_TYPE(return_object) == ACPI_TYPE_PACKAGE) {
+ status =
+ acpi_ns_check_package(pathname, return_object, predefined);
+ }
+
+ exit:
+ if (pathname) {
+ ACPI_FREE(pathname);
+ }
+
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_check_parameter_count
+ *
+ * PARAMETERS: Pathname - Full pathname to the node (for error msgs)
+ * Node - Namespace node for the method/object
+ * Predefined - Pointer to entry in predefined name table
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Check that the declared (in ASL/AML) parameter count for a
+ * predefined name is what is expected (i.e., what is defined in
+ * the ACPI specification for this predefined name.)
+ *
+ ******************************************************************************/
+
+void
+acpi_ns_check_parameter_count(char *pathname,
+ struct acpi_namespace_node *node,
+ const union acpi_predefined_info *predefined)
+{
+ u32 param_count;
+ u32 required_params_current;
+ u32 required_params_old;
+
+ /*
+ * Check that the ASL-defined parameter count is what is expected for
+ * this predefined name.
+ *
+ * Methods have 0-7 parameters. All other types have zero.
+ */
+ param_count = 0;
+ if (node->type == ACPI_TYPE_METHOD) {
+ param_count = node->object->method.param_count;
+ }
+
+ /* Validate parameter count - allow two different legal counts (_SCP) */
+
+ required_params_current = predefined->info.param_count & 0x0F;
+ required_params_old = predefined->info.param_count >> 4;
+
+ if ((param_count != required_params_current) &&
+ (param_count != required_params_old)) {
+ ACPI_WARNING((AE_INFO,
+ "%s: Parameter count mismatch - ASL declared %d, expected %d",
+ pathname, param_count, required_params_current));
+ }
+}
+
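+/*
+ * Worked example (illustrative): if predefined->info.param_count is 0x31,
+ * the low nibble (1) is the currently required argument count and the high
+ * nibble (3) is an older, still-legal count, so an ASL declaration with
+ * either 1 or 3 arguments passes the check above without a warning.
+ */
+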
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_check_for_predefined_name
+ *
+ * PARAMETERS: Node - Namespace node for the method/object
+ *
+ * RETURN: Pointer to entry in predefined table. NULL indicates not found.
+ *
+ * DESCRIPTION: Check an object name against the predefined object list.
+ *
+ ******************************************************************************/
+
+const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
+ acpi_namespace_node
+ *node)
+{
+ const union acpi_predefined_info *this_name;
+
+ /* Quick check for a predefined name, first character must be underscore */
+
+ if (node->name.ascii[0] != '_') {
+ return (NULL);
+ }
+
+ /* Search info table for a predefined method/object name */
+
+ this_name = predefined_names;
+ while (this_name->info.name[0]) {
+ if (ACPI_COMPARE_NAME(node->name.ascii, this_name->info.name)) {
+
+ /* Return pointer to this table entry */
+
+ return (this_name);
+ }
+
+ /*
+ * Skip next entry in the table if this name returns a Package
+ * (next entry contains the package info)
+ */
+ if (this_name->info.expected_btypes & ACPI_RTYPE_PACKAGE) {
+ this_name++;
+ }
+
+ this_name++;
+ }
+
+ return (NULL);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_check_package
+ *
+ * PARAMETERS: Pathname - Full pathname to the node (for error msgs)
+ * return_object - Object returned from the evaluation of a
+ * method or object
+ * Predefined - Pointer to entry in predefined name table
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Check a returned package object for the correct count and
+ * correct type of all sub-objects.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_check_package(char *pathname,
+ union acpi_operand_object *return_object,
+ const union acpi_predefined_info *predefined)
+{
+ const union acpi_predefined_info *package;
+ union acpi_operand_object *sub_package;
+ union acpi_operand_object **elements;
+ union acpi_operand_object **sub_elements;
+ acpi_status status;
+ u32 expected_count;
+ u32 count;
+ u32 i;
+ u32 j;
+
+ ACPI_FUNCTION_NAME(ns_check_package);
+
+ /* The package info for this name is in the next table entry */
+
+ package = predefined + 1;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "%s Validating return Package of Type %X, Count %X\n",
+ pathname, package->ret_info.type,
+ return_object->package.count));
+
+ /* Extract package count and elements array */
+
+ elements = return_object->package.elements;
+ count = return_object->package.count;
+
+ /* The package must have at least one element, else invalid */
+
+ if (!count) {
+ ACPI_WARNING((AE_INFO,
+ "%s: Return Package has no elements (empty)",
+ pathname));
+
+ return (AE_AML_OPERAND_VALUE);
+ }
+
+ /*
+ * Decode the type of the expected package contents
+ *
+ * PTYPE1 packages contain no subpackages
+ * PTYPE2 packages contain sub-packages
+ */
+ switch (package->ret_info.type) {
+ case ACPI_PTYPE1_FIXED:
+
+ /*
+ * The package count is fixed and there are no sub-packages
+ *
+ * If package is too small, exit.
+ * If package is larger than expected, issue warning but continue
+ */
+ expected_count =
+ package->ret_info.count1 + package->ret_info.count2;
+ if (count < expected_count) {
+ goto package_too_small;
+ } else if (count > expected_count) {
+ ACPI_WARNING((AE_INFO,
+ "%s: Return Package is larger than needed - "
+ "found %u, expected %u", pathname, count,
+ expected_count));
+ }
+
+ /* Validate all elements of the returned package */
+
+ status = acpi_ns_check_package_elements(pathname, elements,
+ package->ret_info.
+ object_type1,
+ package->ret_info.
+ count1,
+ package->ret_info.
+ object_type2,
+ package->ret_info.
+ count2);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
+
+ case ACPI_PTYPE1_VAR:
+
+ /*
+ * The package count is variable, there are no sub-packages, and all
+ * elements must be of the same type
+ */
+ for (i = 0; i < count; i++) {
+ status = acpi_ns_check_object_type(pathname, *elements,
+ package->ret_info.
+ object_type1, i);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ elements++;
+ }
+ break;
+
+ case ACPI_PTYPE1_OPTION:
+
+ /*
+ * The package count is variable, there are no sub-packages. There are
+ * a fixed number of required elements, and a variable number of
+ * optional elements.
+ *
+ * Check if package is at least as large as the minimum required
+ */
+ expected_count = package->ret_info3.count;
+ if (count < expected_count) {
+ goto package_too_small;
+ }
+
+ /* Variable number of sub-objects */
+
+ for (i = 0; i < count; i++) {
+ if (i < package->ret_info3.count) {
+
+ /* These are the required package elements (0, 1, or 2) */
+
+ status =
+ acpi_ns_check_object_type(pathname,
+ *elements,
+ package->
+ ret_info3.
+ object_type[i],
+ i);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ } else {
+ /* These are the optional package elements */
+
+ status =
+ acpi_ns_check_object_type(pathname,
+ *elements,
+ package->
+ ret_info3.
+ tail_object_type,
+ i);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+ elements++;
+ }
+ break;
+
+ case ACPI_PTYPE2_PKG_COUNT:
+
+ /* First element is the (Integer) count of sub-packages to follow */
+
+ status = acpi_ns_check_object_type(pathname, *elements,
+ ACPI_RTYPE_INTEGER, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /*
+ * Count cannot be larger than the parent package length, but allow it
+ * to be smaller. The >= accounts for the Integer above.
+ */
+ expected_count = (u32) (*elements)->integer.value;
+ if (expected_count >= count) {
+ goto package_too_small;
+ }
+
+ count = expected_count;
+ elements++;
+
+ /* Now we can walk the sub-packages */
+
+ /*lint -fallthrough */
+
+ case ACPI_PTYPE2:
+ case ACPI_PTYPE2_FIXED:
+ case ACPI_PTYPE2_MIN:
+ case ACPI_PTYPE2_COUNT:
+
+ /*
+ * These types all return a single package that consists of a variable
+ * number of sub-packages
+ */
+ for (i = 0; i < count; i++) {
+ sub_package = *elements;
+ sub_elements = sub_package->package.elements;
+
+ /* Each sub-object must be of type Package */
+
+ status =
+ acpi_ns_check_object_type(pathname, sub_package,
+ ACPI_RTYPE_PACKAGE, i);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Examine the different types of sub-packages */
+
+ switch (package->ret_info.type) {
+ case ACPI_PTYPE2:
+ case ACPI_PTYPE2_PKG_COUNT:
+
+ /* Each subpackage has a fixed number of elements */
+
+ expected_count =
+ package->ret_info.count1 +
+ package->ret_info.count2;
+ if (sub_package->package.count !=
+ expected_count) {
+ count = sub_package->package.count;
+ goto package_too_small;
+ }
+
+ status =
+ acpi_ns_check_package_elements(pathname,
+ sub_elements,
+ package->
+ ret_info.
+ object_type1,
+ package->
+ ret_info.
+ count1,
+ package->
+ ret_info.
+ object_type2,
+ package->
+ ret_info.
+ count2);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
+
+ case ACPI_PTYPE2_FIXED:
+
+ /* Each sub-package has a fixed length */
+
+ expected_count = package->ret_info2.count;
+ if (sub_package->package.count < expected_count) {
+ count = sub_package->package.count;
+ goto package_too_small;
+ }
+
+ /* Check the type of each sub-package element */
+
+ for (j = 0; j < expected_count; j++) {
+ status =
+ acpi_ns_check_object_type(pathname,
+ sub_elements
+ [j],
+ package->
+ ret_info2.
+ object_type
+ [j], j);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+ break;
+
+ case ACPI_PTYPE2_MIN:
+
+ /* Each sub-package has a variable but minimum length */
+
+ expected_count = package->ret_info.count1;
+ if (sub_package->package.count < expected_count) {
+ count = sub_package->package.count;
+ goto package_too_small;
+ }
+
+ /* Check the type of each sub-package element */
+
+ status =
+ acpi_ns_check_package_elements(pathname,
+ sub_elements,
+ package->
+ ret_info.
+ object_type1,
+ sub_package->
+ package.
+ count, 0, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
+
+ case ACPI_PTYPE2_COUNT:
+
+ /* First element is the (Integer) count of elements to follow */
+
+ status =
+ acpi_ns_check_object_type(pathname,
+ *sub_elements,
+ ACPI_RTYPE_INTEGER,
+ 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Make sure package is large enough for the Count */
+
+ expected_count =
+ (u32) (*sub_elements)->integer.value;
+ if (sub_package->package.count < expected_count) {
+ count = sub_package->package.count;
+ goto package_too_small;
+ }
+
+ /* Check the type of each sub-package element */
+
+ status =
+ acpi_ns_check_package_elements(pathname,
+ (sub_elements
+ + 1),
+ package->
+ ret_info.
+ object_type1,
+ (expected_count
+ - 1), 0, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ elements++;
+ }
+ break;
+
+ default:
+
+ /* Should not get here if predefined info table is correct */
+
+ ACPI_WARNING((AE_INFO,
+ "%s: Invalid internal return type in table entry: %X",
+ pathname, package->ret_info.type));
+
+ return (AE_AML_INTERNAL);
+ }
+
+ return (AE_OK);
+
+ package_too_small:
+
+ /* Error exit for the case with an incorrect package count */
+
+ ACPI_WARNING((AE_INFO, "%s: Return Package is too small - "
+ "found %u, expected %u", pathname, count,
+ expected_count));
+
+ return (AE_AML_OPERAND_VALUE);
+}
+
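+/*
+ * For example (illustrative): _BIF returns one flat Package of Integers and
+ * Strings, a PTYPE1-class layout, whereas _PRT returns a Package whose
+ * elements are themselves Packages, a PTYPE2-class layout in which each
+ * sub-package is validated individually by the code above.
+ */
+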
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_check_package_elements
+ *
+ * PARAMETERS: Pathname - Full pathname to the node (for error msgs)
+ * Elements - Pointer to the package elements array
+ * Type1 - Object type for first group
+ * Count1 - Count for first group
+ * Type2 - Object type for second group
+ * Count2 - Count for second group
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Check that all elements of a package are of the correct object
+ * type. Supports up to two groups of different object types.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_check_package_elements(char *pathname,
+ union acpi_operand_object **elements,
+ u8 type1, u32 count1, u8 type2, u32 count2)
+{
+ union acpi_operand_object **this_element = elements;
+ acpi_status status;
+ u32 i;
+
+ /*
+ * Up to two groups of package elements are supported by the data
+ * structure. All elements in each group must be of the same type.
+ * The second group can have a count of zero.
+ */
+ for (i = 0; i < count1; i++) {
+ status = acpi_ns_check_object_type(pathname, *this_element,
+ type1, i);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ this_element++;
+ }
+
+ for (i = 0; i < count2; i++) {
+ status = acpi_ns_check_object_type(pathname, *this_element,
+ type2, (i + count1));
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ this_element++;
+ }
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_check_object_type
+ *
+ * PARAMETERS: Pathname - Full pathname to the node (for error msgs)
+ * return_object - Object returned from the execution of this
+ * method/object
+ * expected_btypes - Bitmap of expected return type(s)
+ * package_index - Index of object within parent package (if
+ * applicable - ACPI_NOT_PACKAGE otherwise)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Check the type of the return object against the expected object
+ * type(s). Use of Btype allows multiple expected object types.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_check_object_type(char *pathname,
+ union acpi_operand_object *return_object,
+ u32 expected_btypes, u32 package_index)
+{
+ acpi_status status = AE_OK;
+ u32 return_btype;
+ char type_buffer[48]; /* Room for 5 types */
+ u32 this_rtype;
+ u32 i;
+ u32 j;
+
+ /*
+ * If we get a NULL return_object here, it is a NULL package element,
+ * and this is always an error.
+ */
+ if (!return_object) {
+ goto type_error_exit;
+ }
+
+ /* A Namespace node should not get here, but make sure */
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(return_object) == ACPI_DESC_TYPE_NAMED) {
+ ACPI_WARNING((AE_INFO,
+ "%s: Invalid return type - Found a Namespace node [%4.4s] type %s",
+ pathname, return_object->node.name.ascii,
+ acpi_ut_get_type_name(return_object->node.type)));
+ return (AE_AML_OPERAND_TYPE);
+ }
+
+ /*
+ * Convert the object type (ACPI_TYPE_xxx) to a bitmapped object type.
+ * The bitmapped type allows multiple possible return types.
+ *
+ * Note, the cases below must handle all of the possible types returned
+ * from all of the predefined names (including elements of returned
+ * packages)
+ */
+ switch (ACPI_GET_OBJECT_TYPE(return_object)) {
+ case ACPI_TYPE_INTEGER:
+ return_btype = ACPI_RTYPE_INTEGER;
+ break;
+
+ case ACPI_TYPE_BUFFER:
+ return_btype = ACPI_RTYPE_BUFFER;
+ break;
+
+ case ACPI_TYPE_STRING:
+ return_btype = ACPI_RTYPE_STRING;
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+ return_btype = ACPI_RTYPE_PACKAGE;
+ break;
+
+ case ACPI_TYPE_LOCAL_REFERENCE:
+ return_btype = ACPI_RTYPE_REFERENCE;
+ break;
+
+ default:
+ /* Not one of the supported objects, must be incorrect */
+
+ goto type_error_exit;
+ }
+
+ /* Is the object one of the expected types? */
+
+ if (!(return_btype & expected_btypes)) {
+ goto type_error_exit;
+ }
+
+ /* For reference objects, check that the reference type is correct */
+
+ if (ACPI_GET_OBJECT_TYPE(return_object) == ACPI_TYPE_LOCAL_REFERENCE) {
+ status = acpi_ns_check_reference(pathname, return_object);
+ }
+
+ return (status);
+
+ type_error_exit:
+
+ /* Create a string with all expected types for this predefined object */
+
+ j = 1;
+ type_buffer[0] = 0;
+ this_rtype = ACPI_RTYPE_INTEGER;
+
+ for (i = 0; i < ACPI_NUM_RTYPES; i++) {
+
+ /* If one of the expected types, concatenate the name of this type */
+
+ if (expected_btypes & this_rtype) {
+ ACPI_STRCAT(type_buffer, &acpi_rtype_names[i][j]);
+ j = 0; /* Use name separator from now on */
+ }
+ this_rtype <<= 1; /* Next Rtype */
+ }
+
+ if (package_index == ACPI_NOT_PACKAGE) {
+ ACPI_WARNING((AE_INFO,
+ "%s: Return type mismatch - found %s, expected %s",
+ pathname,
+ acpi_ut_get_object_type_name(return_object),
+ type_buffer));
+ } else {
+ ACPI_WARNING((AE_INFO,
+ "%s: Return Package type mismatch at index %u - "
+ "found %s, expected %s", pathname, package_index,
+ acpi_ut_get_object_type_name(return_object),
+ type_buffer));
+ }
+
+ return (AE_AML_OPERAND_TYPE);
+}
+
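+/*
+ * Worked example (illustrative): if expected_btypes is
+ * (ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER), the loop in type_error_exit
+ * builds "Integer/Buffer" from acpi_rtype_names; the leading '/' is
+ * skipped only for the first matching type.
+ */
+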
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_check_reference
+ *
+ * PARAMETERS: Pathname - Full pathname to the node (for error msgs)
+ * return_object - Object returned from the evaluation of a
+ * method or object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Check a returned reference object for the correct reference
+ * type. The only reference type that can be returned from a
+ * predefined method is a named reference. All others are invalid.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_check_reference(char *pathname,
+ union acpi_operand_object *return_object)
+{
+
+ /*
+ * Check the reference object for the correct reference type (opcode).
+ * The only type of reference that can be converted to a union acpi_object
+ * is a reference to a named object (reference class: NAME).
+ */
+ if (return_object->reference.class == ACPI_REFCLASS_NAME) {
+ return (AE_OK);
+ }
+
+ ACPI_WARNING((AE_INFO,
+ "%s: Return type mismatch - unexpected reference object type [%s] %2.2X",
+ pathname, acpi_ut_get_reference_name(return_object),
+ return_object->reference.class));
+
+ return (AE_AML_OPERAND_TYPE);
+}
diff --git a/drivers/acpi/namespace/nssearch.c b/drivers/acpi/namespace/nssearch.c
new file mode 100644
index 0000000..a9a80bf
--- /dev/null
+++ b/drivers/acpi/namespace/nssearch.c
@@ -0,0 +1,414 @@
+/*******************************************************************************
+ *
+ * Module Name: nssearch - Namespace search
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("nssearch")
+
+/* Local prototypes */
+static acpi_status
+acpi_ns_search_parent_tree(u32 target_name,
+ struct acpi_namespace_node *node,
+ acpi_object_type type,
+ struct acpi_namespace_node **return_node);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_search_one_scope
+ *
+ * PARAMETERS: target_name - Ascii ACPI name to search for
+ * parent_node - Starting node where search will begin
+ * Type - Object type to match
+ * return_node - Where the matched Named obj is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Search a single level of the namespace. Performs a
+ * simple search of the specified level, and does not add
+ * entries or search parents.
+ *
+ * Named object lists are built (and subsequently dumped) in the
+ * order in which the names are encountered during the namespace load.
+ *
+ * All namespace searching is linear in this implementation, but
+ * could be easily modified to support any improved search
+ * algorithm. However, the linear search was chosen for simplicity
+ * and because the trees are small and the other interpreter
+ * execution overhead is relatively high.
+ *
+ * Note: CPU execution analysis has shown that the AML interpreter spends
+ * a very small percentage of its time searching the namespace. Therefore,
+ * the linear search seems to be sufficient, as there would seem to be
+ * little value in improving the search.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_search_one_scope(u32 target_name,
+ struct acpi_namespace_node *parent_node,
+ acpi_object_type type,
+ struct acpi_namespace_node **return_node)
+{
+ struct acpi_namespace_node *node;
+
+ ACPI_FUNCTION_TRACE(ns_search_one_scope);
+
+#ifdef ACPI_DEBUG_OUTPUT
+ if (ACPI_LV_NAMES & acpi_dbg_level) {
+ char *scope_name;
+
+ scope_name = acpi_ns_get_external_pathname(parent_node);
+ if (scope_name) {
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "Searching %s (%p) For [%4.4s] (%s)\n",
+ scope_name, parent_node,
+ ACPI_CAST_PTR(char, &target_name),
+ acpi_ut_get_type_name(type)));
+
+ ACPI_FREE(scope_name);
+ }
+ }
+#endif
+
+ /*
+ * Search for name at this namespace level, which is to say that we
+ * must search for the name among the children of this object
+ */
+ node = parent_node->child;
+ while (node) {
+
+ /* Check for match against the name */
+
+ if (node->name.integer == target_name) {
+
+ /* Resolve a control method alias if any */
+
+ if (acpi_ns_get_type(node) ==
+ ACPI_TYPE_LOCAL_METHOD_ALIAS) {
+ node =
+ ACPI_CAST_PTR(struct acpi_namespace_node,
+ node->object);
+ }
+
+ /* Found matching entry */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "Name [%4.4s] (%s) %p found in scope [%4.4s] %p\n",
+ ACPI_CAST_PTR(char, &target_name),
+ acpi_ut_get_type_name(node->type),
+ node,
+ acpi_ut_get_node_name(parent_node),
+ parent_node));
+
+ *return_node = node;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * The last entry in the list points back to the parent,
+ * so a flag is used to indicate the end-of-list
+ */
+ if (node->flags & ANOBJ_END_OF_PEER_LIST) {
+
+ /* Searched entire list, we are done */
+
+ break;
+ }
+
+ /* Didn't match name, move on to the next peer object */
+
+ node = node->peer;
+ }
+
+ /* Searched entire namespace level, not found */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "Name [%4.4s] (%s) not found in search in scope [%4.4s] %p first child %p\n",
+ ACPI_CAST_PTR(char, &target_name),
+ acpi_ut_get_type_name(type),
+ acpi_ut_get_node_name(parent_node), parent_node,
+ parent_node->child));
+
+ return_ACPI_STATUS(AE_NOT_FOUND);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_search_parent_tree
+ *
+ * PARAMETERS: target_name - Ascii ACPI name to search for
+ * Node - Starting node where search will begin
+ * Type - Object type to match
+ * return_node - Where the matched Node is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Called when a name has not been found in the current namespace
+ * level. Before adding it or giving up, ACPI scope rules require
+ * searching enclosing scopes in cases identified by acpi_ns_local().
+ *
+ * "A name is located by finding the matching name in the current
+ * name space, and then in the parent name space. If the parent
+ * name space does not contain the name, the search continues
+ * recursively until either the name is found or the name space
+ * does not have a parent (the root of the name space). This
+ * indicates that the name is not found" (From ACPI Specification,
+ * section 5.3)
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_search_parent_tree(u32 target_name,
+ struct acpi_namespace_node *node,
+ acpi_object_type type,
+ struct acpi_namespace_node **return_node)
+{
+ acpi_status status;
+ struct acpi_namespace_node *parent_node;
+
+ ACPI_FUNCTION_TRACE(ns_search_parent_tree);
+
+ parent_node = acpi_ns_get_parent_node(node);
+
+ /*
+ * If there is no parent (i.e., we are at the root) or type is "local",
+ * we won't be searching the parent tree.
+ */
+ if (!parent_node) {
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "[%4.4s] has no parent\n",
+ ACPI_CAST_PTR(char, &target_name)));
+ return_ACPI_STATUS(AE_NOT_FOUND);
+ }
+
+ if (acpi_ns_local(type)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "[%4.4s] type [%s] must be local to this scope (no parent search)\n",
+ ACPI_CAST_PTR(char, &target_name),
+ acpi_ut_get_type_name(type)));
+ return_ACPI_STATUS(AE_NOT_FOUND);
+ }
+
+ /* Search the parent tree */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "Searching parent [%4.4s] for [%4.4s]\n",
+ acpi_ut_get_node_name(parent_node),
+ ACPI_CAST_PTR(char, &target_name)));
+
+ /*
+ * Search parents until target is found or we have backed up to the root
+ */
+ while (parent_node) {
+ /*
+ * Search parent scope. Use TYPE_ANY because we don't care about the
+ * object type at this point, we only care about the existence of
+ * the actual name we are searching for. Typechecking comes later.
+ */
+ status =
+ acpi_ns_search_one_scope(target_name, parent_node,
+ ACPI_TYPE_ANY, return_node);
+ if (ACPI_SUCCESS(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Not found here, go up another level (until we reach the root) */
+
+ parent_node = acpi_ns_get_parent_node(parent_node);
+ }
+
+ /* Not found in parent tree */
+
+ return_ACPI_STATUS(AE_NOT_FOUND);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_search_and_enter
+ *
+ * PARAMETERS: target_name - Ascii ACPI name to search for (4 chars)
+ * walk_state - Current state of the walk
+ * Node - Starting node where search will begin
+ * interpreter_mode - Add names only in ACPI_IMODE_LOAD_PASS_x.
+ * Otherwise, search only.
+ * Type - Object type to match
+ * Flags - Flags describing the search restrictions
+ * return_node - Where the Node is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Search for a name segment in a single namespace level,
+ * optionally adding it if it is not found. If the passed
+ * Type is not Any and the type previously stored in the
+ * entry was Any (i.e. unknown), update the stored type.
+ *
+ * In ACPI_IMODE_EXECUTE, search only.
+ * In other modes, search and add if not found.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_search_and_enter(u32 target_name,
+ struct acpi_walk_state *walk_state,
+ struct acpi_namespace_node *node,
+ acpi_interpreter_mode interpreter_mode,
+ acpi_object_type type,
+ u32 flags, struct acpi_namespace_node **return_node)
+{
+ acpi_status status;
+ struct acpi_namespace_node *new_node;
+
+ ACPI_FUNCTION_TRACE(ns_search_and_enter);
+
+ /* Parameter validation */
+
+ if (!node || !target_name || !return_node) {
+ ACPI_ERROR((AE_INFO,
+ "Null parameter: Node %p Name %X ReturnNode %p",
+ node, target_name, return_node));
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /*
+ * Name must consist of valid ACPI characters. We will repair the name if
+ * necessary because we don't want to abort because of this, but we want
+ * all namespace names to be printable. A warning message is appropriate.
+ *
+ * This issue came up because there are in fact machines that exhibit
+ * this problem, and we want to be able to enable ACPI support for them,
+ * even though there are a few bad names.
+ */
+ if (!acpi_ut_valid_acpi_name(target_name)) {
+ target_name =
+ acpi_ut_repair_name(ACPI_CAST_PTR(char, &target_name));
+
+ /* Report warning only if in strict mode or debug mode */
+
+ if (!acpi_gbl_enable_interpreter_slack) {
+ ACPI_WARNING((AE_INFO,
+ "Found bad character(s) in name, repaired: [%4.4s]\n",
+ ACPI_CAST_PTR(char, &target_name)));
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Found bad character(s) in name, repaired: [%4.4s]\n",
+ ACPI_CAST_PTR(char, &target_name)));
+ }
+ }
+
+ /* Try to find the name in the namespace level specified by the caller */
+
+ *return_node = ACPI_ENTRY_NOT_FOUND;
+ status = acpi_ns_search_one_scope(target_name, node, type, return_node);
+ if (status != AE_NOT_FOUND) {
+ /*
+ * If we found it AND the request specifies that a find is an error,
+ * return the error
+ */
+ if ((status == AE_OK) && (flags & ACPI_NS_ERROR_IF_FOUND)) {
+ status = AE_ALREADY_EXISTS;
+ }
+
+ /* Either found it or there was an error: finished either way */
+
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * The name was not found. If we are NOT performing the first pass
+ * (name entry) of loading the namespace, search the parent tree (all the
+ * way to the root if necessary.) We don't want to perform the parent
+ * search when the namespace is actually being loaded. We want to perform
+ * the search when namespace references are being resolved (load pass 2)
+ * and during the execution phase.
+ */
+ if ((interpreter_mode != ACPI_IMODE_LOAD_PASS1) &&
+ (flags & ACPI_NS_SEARCH_PARENT)) {
+ /*
+ * Not found at this level - search parent tree according to the
+ * ACPI specification
+ */
+ status =
+ acpi_ns_search_parent_tree(target_name, node, type,
+ return_node);
+ if (ACPI_SUCCESS(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /* In execute mode, just search, never add names. Exit now */
+
+ if (interpreter_mode == ACPI_IMODE_EXECUTE) {
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "%4.4s Not found in %p [Not adding]\n",
+ ACPI_CAST_PTR(char, &target_name), node));
+
+ return_ACPI_STATUS(AE_NOT_FOUND);
+ }
+
+ /* Create the new named object */
+
+ new_node = acpi_ns_create_node(target_name);
+ if (!new_node) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+#ifdef ACPI_ASL_COMPILER
+ /*
+ * Node is an object defined by an External() statement
+ */
+ if (flags & ACPI_NS_EXTERNAL) {
+ new_node->flags |= ANOBJ_IS_EXTERNAL;
+ }
+#endif
+
+ if (flags & ACPI_NS_TEMPORARY) {
+ new_node->flags |= ANOBJ_TEMPORARY;
+ }
+
+ /* Install the new object into the parent's list of children */
+
+ acpi_ns_install_node(walk_state, node, new_node, type);
+ *return_node = new_node;
+ return_ACPI_STATUS(AE_OK);
+}
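
The scope search above leans on one structural trick: the children of a scope form a singly linked peer list, and the last child, instead of a NULL terminator, carries an end-of-list flag while its peer pointer leads back to the parent (see acpi_ns_get_parent_node() in nsutils.c below). A minimal standalone sketch of that traversal, using simplified stand-in names and types rather than the real ACPICA structures:

#include <stddef.h>

#define DEMO_END_OF_PEER_LIST 0x01	/* stand-in for ANOBJ_END_OF_PEER_LIST */

struct demo_node {
	unsigned int name;		/* packed 4-character ACPI name */
	unsigned char flags;
	struct demo_node *peer;		/* next peer, or the parent when the end flag is set */
	struct demo_node *child;	/* first child of this scope */
};

/* Linear search of a single scope, mirroring acpi_ns_search_one_scope() */
static struct demo_node *demo_search_one_scope(struct demo_node *parent,
					       unsigned int target_name)
{
	struct demo_node *node = parent->child;

	while (node) {
		if (node->name == target_name)
			return node;		/* found in this scope */
		if (node->flags & DEMO_END_OF_PEER_LIST)
			break;			/* ->peer now points at the parent, stop */
		node = node->peer;
	}
	return NULL;				/* not found at this level */
}

The parent-tree search then simply repeats this per-scope search on each enclosing scope up to the root, which is why acpi_ns_search_parent_tree() needs no additional data structure.
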
diff --git a/drivers/acpi/namespace/nsutils.c b/drivers/acpi/namespace/nsutils.c
new file mode 100644
index 0000000..337fb04
--- /dev/null
+++ b/drivers/acpi/namespace/nsutils.c
@@ -0,0 +1,996 @@
+/******************************************************************************
+ *
+ * Module Name: nsutils - Utilities for accessing ACPI namespace, accessing
+ * parents and siblings and Scope manipulation
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+#include <acpi/amlcode.h>
+#include <acpi/actables.h>
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsutils")
+
+/* Local prototypes */
+static u8 acpi_ns_valid_path_separator(char sep);
+
+#ifdef ACPI_OBSOLETE_FUNCTIONS
+acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node *node_to_search);
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_report_error
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * internal_name - Name or path of the namespace node
+ * lookup_status - Exception code from NS lookup
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print warning message with full pathname
+ *
+ ******************************************************************************/
+
+void
+acpi_ns_report_error(const char *module_name,
+ u32 line_number,
+ const char *internal_name, acpi_status lookup_status)
+{
+ acpi_status status;
+ u32 bad_name;
+ char *name = NULL;
+
+ acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
+
+ if (lookup_status == AE_BAD_CHARACTER) {
+
+ /* There is a non-ascii character in the name */
+
+ ACPI_MOVE_32_TO_32(&bad_name, internal_name);
+ acpi_os_printf("[0x%4.4X] (NON-ASCII)", bad_name);
+ } else {
+ /* Convert path to external format */
+
+ status = acpi_ns_externalize_name(ACPI_UINT32_MAX,
+ internal_name, NULL, &name);
+
+ /* Print target name */
+
+ if (ACPI_SUCCESS(status)) {
+ acpi_os_printf("[%s]", name);
+ } else {
+ acpi_os_printf("[COULD NOT EXTERNALIZE NAME]");
+ }
+
+ if (name) {
+ ACPI_FREE(name);
+ }
+ }
+
+ acpi_os_printf(" Namespace lookup failure, %s\n",
+ acpi_format_exception(lookup_status));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_report_method_error
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * Message - Error message to use on failure
+ * prefix_node - Prefix relative to the path
+ * Path - Path to the node (optional)
+ * method_status - Execution status
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print warning message with full pathname
+ *
+ ******************************************************************************/
+
+void
+acpi_ns_report_method_error(const char *module_name,
+ u32 line_number,
+ const char *message,
+ struct acpi_namespace_node *prefix_node,
+ const char *path, acpi_status method_status)
+{
+ acpi_status status;
+ struct acpi_namespace_node *node = prefix_node;
+
+ acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
+
+ if (path) {
+ status =
+ acpi_ns_get_node(prefix_node, path, ACPI_NS_NO_UPSEARCH,
+ &node);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("[Could not get node by pathname]");
+ }
+ }
+
+ acpi_ns_print_node_pathname(node, message);
+ acpi_os_printf(", %s\n", acpi_format_exception(method_status));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_print_node_pathname
+ *
+ * PARAMETERS: Node - Object
+ * Message - Prefix message
+ *
+ * DESCRIPTION: Print an object's full namespace pathname
+ * Manages allocation/freeing of a pathname buffer
+ *
+ ******************************************************************************/
+
+void
+acpi_ns_print_node_pathname(struct acpi_namespace_node *node,
+ const char *message)
+{
+ struct acpi_buffer buffer;
+ acpi_status status;
+
+ if (!node) {
+ acpi_os_printf("[NULL NAME]");
+ return;
+ }
+
+ /* Convert handle to full pathname and print it (with supplied message) */
+
+ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+
+ status = acpi_ns_handle_to_pathname(node, &buffer);
+ if (ACPI_SUCCESS(status)) {
+ if (message) {
+ acpi_os_printf("%s ", message);
+ }
+
+ acpi_os_printf("[%s] (Node %p)", (char *)buffer.pointer, node);
+ ACPI_FREE(buffer.pointer);
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_valid_root_prefix
+ *
+ * PARAMETERS: Prefix - Character to be checked
+ *
+ * RETURN: TRUE if a valid prefix
+ *
+ * DESCRIPTION: Check if a character is a valid ACPI Root prefix
+ *
+ ******************************************************************************/
+
+u8 acpi_ns_valid_root_prefix(char prefix)
+{
+
+ return ((u8) (prefix == '\\'));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_valid_path_separator
+ *
+ * PARAMETERS: Sep - Character to be checked
+ *
+ * RETURN: TRUE if a valid path separator
+ *
+ * DESCRIPTION: Check if a character is a valid ACPI path separator
+ *
+ ******************************************************************************/
+
+static u8 acpi_ns_valid_path_separator(char sep)
+{
+
+ return ((u8) (sep == '.'));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_get_type
+ *
+ * PARAMETERS: Node - Parent Node to be examined
+ *
+ * RETURN: Type field from Node whose handle is passed
+ *
+ * DESCRIPTION: Return the type of a Namespace node
+ *
+ ******************************************************************************/
+
+acpi_object_type acpi_ns_get_type(struct acpi_namespace_node * node)
+{
+ ACPI_FUNCTION_TRACE(ns_get_type);
+
+ if (!node) {
+ ACPI_WARNING((AE_INFO, "Null Node parameter"));
+ return_UINT32(ACPI_TYPE_ANY);
+ }
+
+ return_UINT32((acpi_object_type) node->type);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_local
+ *
+ * PARAMETERS: Type - A namespace object type
+ *
+ * RETURN: LOCAL if names must be found locally in objects of the
+ * passed type, 0 if enclosing scopes should be searched
+ *
+ * DESCRIPTION: Returns scope rule for the given object type.
+ *
+ ******************************************************************************/
+
+u32 acpi_ns_local(acpi_object_type type)
+{
+ ACPI_FUNCTION_TRACE(ns_local);
+
+ if (!acpi_ut_valid_object_type(type)) {
+
+ /* Type code out of range */
+
+ ACPI_WARNING((AE_INFO, "Invalid Object Type %X", type));
+ return_UINT32(ACPI_NS_NORMAL);
+ }
+
+ return_UINT32((u32) acpi_gbl_ns_properties[type] & ACPI_NS_LOCAL);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_get_internal_name_length
+ *
+ * PARAMETERS: Info - Info struct initialized with the
+ * external name pointer.
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Calculate the length of the internal (AML) namestring
+ * corresponding to the external (ASL) namestring.
+ *
+ ******************************************************************************/
+
+void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info)
+{
+ const char *next_external_char;
+ u32 i;
+
+ ACPI_FUNCTION_ENTRY();
+
+ next_external_char = info->external_name;
+ info->num_carats = 0;
+ info->num_segments = 0;
+ info->fully_qualified = FALSE;
+
+ /*
+ * For the internal name, the required length is 4 bytes per segment, plus
+ * 1 each for root_prefix, multi_name_prefix_op, segment count, trailing null
+ * (which is not really needed, but there's no harm in putting it there)
+ *
+ * strlen() + 1 covers the first name_seg, which has no path separator
+ */
+ if (acpi_ns_valid_root_prefix(*next_external_char)) {
+ info->fully_qualified = TRUE;
+ next_external_char++;
+
+ /* Skip redundant root_prefix, like \\_SB.PCI0.SBRG.EC0 */
+
+ while (acpi_ns_valid_root_prefix(*next_external_char)) {
+ next_external_char++;
+ }
+ } else {
+ /*
+ * Handle Carat prefixes
+ */
+ while (*next_external_char == '^') {
+ info->num_carats++;
+ next_external_char++;
+ }
+ }
+
+ /*
+ * Determine the number of ACPI name "segments" by counting the number of
+ * path separators within the string. Start with one segment since the
+ * segment count is [(# separators) + 1], and zero separators is ok.
+ */
+ if (*next_external_char) {
+ info->num_segments = 1;
+ for (i = 0; next_external_char[i]; i++) {
+ if (acpi_ns_valid_path_separator(next_external_char[i])) {
+ info->num_segments++;
+ }
+ }
+ }
+
+ info->length = (ACPI_NAME_SIZE * info->num_segments) +
+ 4 + info->num_carats;
+
+ info->next_external_char = next_external_char;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_build_internal_name
+ *
+ * PARAMETERS: Info - Info struct fully initialized
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Construct the internal (AML) namestring
+ * corresponding to the external (ASL) namestring.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
+{
+ u32 num_segments = info->num_segments;
+ char *internal_name = info->internal_name;
+ const char *external_name = info->next_external_char;
+ char *result = NULL;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(ns_build_internal_name);
+
+ /* Setup the correct prefixes, counts, and pointers */
+
+ if (info->fully_qualified) {
+ internal_name[0] = '\\';
+
+ if (num_segments <= 1) {
+ result = &internal_name[1];
+ } else if (num_segments == 2) {
+ internal_name[1] = AML_DUAL_NAME_PREFIX;
+ result = &internal_name[2];
+ } else {
+ internal_name[1] = AML_MULTI_NAME_PREFIX_OP;
+ internal_name[2] = (char)num_segments;
+ result = &internal_name[3];
+ }
+ } else {
+ /*
+ * Not fully qualified.
+ * Handle Carats first, then append the name segments
+ */
+ i = 0;
+ if (info->num_carats) {
+ for (i = 0; i < info->num_carats; i++) {
+ internal_name[i] = '^';
+ }
+ }
+
+ if (num_segments <= 1) {
+ result = &internal_name[i];
+ } else if (num_segments == 2) {
+ internal_name[i] = AML_DUAL_NAME_PREFIX;
+ result = &internal_name[(acpi_size) i + 1];
+ } else {
+ internal_name[i] = AML_MULTI_NAME_PREFIX_OP;
+ internal_name[(acpi_size) i + 1] = (char)num_segments;
+ result = &internal_name[(acpi_size) i + 2];
+ }
+ }
+
+ /* Build the name (minus path separators) */
+
+ for (; num_segments; num_segments--) {
+ for (i = 0; i < ACPI_NAME_SIZE; i++) {
+ if (acpi_ns_valid_path_separator(*external_name) ||
+ (*external_name == 0)) {
+
+ /* Pad the segment with underscore(s) if segment is short */
+
+ result[i] = '_';
+ } else {
+ /* Convert the character to uppercase and save it */
+
+ result[i] =
+ (char)ACPI_TOUPPER((int)*external_name);
+ external_name++;
+ }
+ }
+
+ /* Now we must have a path separator, or the pathname is bad */
+
+ if (!acpi_ns_valid_path_separator(*external_name) &&
+ (*external_name != 0)) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Move on to the next segment */
+
+ external_name++;
+ result += ACPI_NAME_SIZE;
+ }
+
+ /* Terminate the string */
+
+ *result = 0;
+
+ if (info->fully_qualified) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Returning [%p] (abs) \"\\%s\"\n",
+ internal_name, internal_name));
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Returning [%p] (rel) \"%s\"\n",
+ internal_name, internal_name));
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_internalize_name
+ *
+ * PARAMETERS: *external_name - External representation of name
+ * **converted_name - Where to return the resulting
+ * internal representation of the name
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Convert an external representation (e.g. "\_PR_.CPU0")
+ * to internal form (e.g. 5c 2f 02 5f 50 52 5f 43 50 55 30)
+ *
+ *******************************************************************************/
+
+acpi_status
+acpi_ns_internalize_name(const char *external_name, char **converted_name)
+{
+ char *internal_name;
+ struct acpi_namestring_info info;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ns_internalize_name);
+
+ if ((!external_name) || (*external_name == 0) || (!converted_name)) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Get the length of the new internal name */
+
+ info.external_name = external_name;
+ acpi_ns_get_internal_name_length(&info);
+
+ /* We need a segment to store the internal name */
+
+ internal_name = ACPI_ALLOCATE_ZEROED(info.length);
+ if (!internal_name) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Build the name */
+
+ info.internal_name = internal_name;
+ status = acpi_ns_build_internal_name(&info);
+ if (ACPI_FAILURE(status)) {
+ ACPI_FREE(internal_name);
+ return_ACPI_STATUS(status);
+ }
+
+ *converted_name = internal_name;
+ return_ACPI_STATUS(AE_OK);
+}
+
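As a worked example of the encoding acpi_ns_build_internal_name() produces (an illustrative sketch, not code from this file): the external name "\_PR_.CPU0" has two name segments and no carats, so acpi_ns_get_internal_name_length() reserves ACPI_NAME_SIZE * 2 + 4 = 12 bytes, of which 11 are used. The builder emits the dual-name prefix for exactly two segments; the multi-name form shown in the header comments (0x2F followed by a segment count) is the equivalent AML encoding used for three or more segments.

/* Illustrative byte layout only -- not part of the driver */
static const unsigned char example_internal_name[] = {
	0x5C,			/* '\'  - root prefix (fully qualified path)   */
	0x2E,			/* AML dual-name prefix - exactly two segments */
	'_', 'P', 'R', '_',	/* first name_seg, padded to ACPI_NAME_SIZE    */
	'C', 'P', 'U', '0',	/* second name_seg                             */
	0x00			/* trailing null written by the builder        */
};

acpi_ns_externalize_name() below performs the inverse conversion, reinserting '.' separators between the 4-byte segments.
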
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_externalize_name
+ *
+ * PARAMETERS: internal_name_length - Length of the internal name below
+ * internal_name - Internal representation of name
+ * converted_name_length - Where the length is returned
+ * converted_name - Where the resulting external name
+ * is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Convert internal name (e.g. 5c 2f 02 5f 50 52 5f 43 50 55 30)
+ * to its external (printable) form (e.g. "\_PR_.CPU0")
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_externalize_name(u32 internal_name_length,
+ const char *internal_name,
+ u32 * converted_name_length, char **converted_name)
+{
+ u32 names_index = 0;
+ u32 num_segments = 0;
+ u32 required_length;
+ u32 prefix_length = 0;
+ u32 i = 0;
+ u32 j = 0;
+
+ ACPI_FUNCTION_TRACE(ns_externalize_name);
+
+ if (!internal_name_length || !internal_name || !converted_name) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /*
+ * Check for a prefix (one '\' | one or more '^').
+ */
+ switch (internal_name[0]) {
+ case '\\':
+ prefix_length = 1;
+ break;
+
+ case '^':
+ for (i = 0; i < internal_name_length; i++) {
+ if (internal_name[i] == '^') {
+ prefix_length = i + 1;
+ } else {
+ break;
+ }
+ }
+
+ if (i == internal_name_length) {
+ prefix_length = i;
+ }
+
+ break;
+
+ default:
+ break;
+ }
+
+ /*
+ * Check for object names. Note that there could be 0-255 of these
+ * 4-byte elements.
+ */
+ if (prefix_length < internal_name_length) {
+ switch (internal_name[prefix_length]) {
+ case AML_MULTI_NAME_PREFIX_OP:
+
+ /* <count> 4-byte names */
+
+ names_index = prefix_length + 2;
+ num_segments = (u8)
+ internal_name[(acpi_size) prefix_length + 1];
+ break;
+
+ case AML_DUAL_NAME_PREFIX:
+
+ /* Two 4-byte names */
+
+ names_index = prefix_length + 1;
+ num_segments = 2;
+ break;
+
+ case 0:
+
+ /* null_name */
+
+ names_index = 0;
+ num_segments = 0;
+ break;
+
+ default:
+
+ /* one 4-byte name */
+
+ names_index = prefix_length;
+ num_segments = 1;
+ break;
+ }
+ }
+
+ /*
+ * Calculate the length of converted_name, which equals the length
+ * of the prefix, length of all object names, length of any required
+ * punctuation ('.') between object names, plus the NULL terminator.
+ */
+ required_length = prefix_length + (4 * num_segments) +
+ ((num_segments > 0) ? (num_segments - 1) : 0) + 1;
+
+ /*
+ * Check to see if we're still in bounds. If not, there's a problem
+ * with internal_name (invalid format).
+ */
+ if (required_length > internal_name_length) {
+ ACPI_ERROR((AE_INFO, "Invalid internal name"));
+ return_ACPI_STATUS(AE_BAD_PATHNAME);
+ }
+
+ /*
+ * Build converted_name
+ */
+ *converted_name = ACPI_ALLOCATE_ZEROED(required_length);
+ if (!(*converted_name)) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ j = 0;
+
+ for (i = 0; i < prefix_length; i++) {
+ (*converted_name)[j++] = internal_name[i];
+ }
+
+ if (num_segments > 0) {
+ for (i = 0; i < num_segments; i++) {
+ if (i > 0) {
+ (*converted_name)[j++] = '.';
+ }
+
+ (*converted_name)[j++] = internal_name[names_index++];
+ (*converted_name)[j++] = internal_name[names_index++];
+ (*converted_name)[j++] = internal_name[names_index++];
+ (*converted_name)[j++] = internal_name[names_index++];
+ }
+ }
+
+ if (converted_name_length) {
+ *converted_name_length = (u32) required_length;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_map_handle_to_node
+ *
+ * PARAMETERS: Handle - Handle to be converted to an Node
+ *
+ * RETURN: A Name table entry pointer
+ *
+ * DESCRIPTION: Convert a namespace handle to a real Node
+ *
+ * Note: Real integer handles would allow for more verification
+ * and keep all pointers within this subsystem - however this introduces
+ * more (and perhaps unnecessary) overhead.
+ *
+ ******************************************************************************/
+
+struct acpi_namespace_node *acpi_ns_map_handle_to_node(acpi_handle handle)
+{
+
+ ACPI_FUNCTION_ENTRY();
+
+ /*
+ * Simple implementation
+ */
+ if ((!handle) || (handle == ACPI_ROOT_OBJECT)) {
+ return (acpi_gbl_root_node);
+ }
+
+ /* We can at least attempt to verify the handle */
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(handle) != ACPI_DESC_TYPE_NAMED) {
+ return (NULL);
+ }
+
+ return (ACPI_CAST_PTR(struct acpi_namespace_node, handle));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_convert_entry_to_handle
+ *
+ * PARAMETERS: Node - Node to be converted to a Handle
+ *
+ * RETURN: A user handle
+ *
+ * DESCRIPTION: Convert a real Node to a namespace handle
+ *
+ ******************************************************************************/
+
+acpi_handle acpi_ns_convert_entry_to_handle(struct acpi_namespace_node *node)
+{
+
+ /*
+ * Simple implementation for now;
+ */
+ return ((acpi_handle) node);
+
+/* Example future implementation ---------------------
+
+ if (!Node)
+ {
+ return (NULL);
+ }
+
+ if (Node == acpi_gbl_root_node)
+ {
+ return (ACPI_ROOT_OBJECT);
+ }
+
+ return ((acpi_handle) Node);
+------------------------------------------------------*/
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_terminate
+ *
+ * PARAMETERS: none
+ *
+ * RETURN: none
+ *
+ * DESCRIPTION: Free memory allocated for namespace and ACPI table storage.
+ *
+ ******************************************************************************/
+
+void acpi_ns_terminate(void)
+{
+ union acpi_operand_object *obj_desc;
+
+ ACPI_FUNCTION_TRACE(ns_terminate);
+
+ /*
+ * 1) Free the entire namespace -- all nodes and objects
+ *
+ * Delete all object descriptors attached to namespace nodes
+ */
+ acpi_ns_delete_namespace_subtree(acpi_gbl_root_node);
+
+ /* Detach any objects attached to the root */
+
+ obj_desc = acpi_ns_get_attached_object(acpi_gbl_root_node);
+ if (obj_desc) {
+ acpi_ns_detach_object(acpi_gbl_root_node);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace freed\n"));
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_opens_scope
+ *
+ * PARAMETERS: Type - A valid namespace type
+ *
+ * RETURN: NEWSCOPE if the passed type "opens a name scope" according
+ * to the ACPI specification, else 0
+ *
+ ******************************************************************************/
+
+u32 acpi_ns_opens_scope(acpi_object_type type)
+{
+ ACPI_FUNCTION_TRACE_STR(ns_opens_scope, acpi_ut_get_type_name(type));
+
+ if (!acpi_ut_valid_object_type(type)) {
+
+ /* type code out of range */
+
+ ACPI_WARNING((AE_INFO, "Invalid Object Type %X", type));
+ return_UINT32(ACPI_NS_NORMAL);
+ }
+
+ return_UINT32(((u32) acpi_gbl_ns_properties[type]) & ACPI_NS_NEWSCOPE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_get_node
+ *
+ * PARAMETERS: *Pathname - Name to be found, in external (ASL) format. The
+ * \ (backslash) and ^ (carat) prefixes, and the
+ * . (period) to separate segments are supported.
+ * prefix_node - Root of subtree to be searched, or NS_ALL for the
+ * root of the name space. If Name is fully
+ * qualified (first character is '\'), the passed value
+ * of Scope will not be accessed.
+ * Flags - Used to indicate whether to perform upsearch or
+ * not.
+ * return_node - Where the Node is returned
+ *
+ * DESCRIPTION: Look up a name relative to a given scope and return the
+ * corresponding Node. NOTE: Scope can be null.
+ *
+ * MUTEX: Locks namespace
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
+ const char *pathname,
+ u32 flags, struct acpi_namespace_node **return_node)
+{
+ union acpi_generic_state scope_info;
+ acpi_status status;
+ char *internal_path;
+
+ ACPI_FUNCTION_TRACE_PTR(ns_get_node, pathname);
+
+ if (!pathname) {
+ *return_node = prefix_node;
+ if (!prefix_node) {
+ *return_node = acpi_gbl_root_node;
+ }
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Convert path to internal representation */
+
+ status = acpi_ns_internalize_name(pathname, &internal_path);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Must lock namespace during lookup */
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ /* Setup lookup scope (search starting point) */
+
+ scope_info.scope.node = prefix_node;
+
+ /* Lookup the name in the namespace */
+
+ status = acpi_ns_lookup(&scope_info, internal_path, ACPI_TYPE_ANY,
+ ACPI_IMODE_EXECUTE,
+ (flags | ACPI_NS_DONT_OPEN_SCOPE), NULL,
+ return_node);
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "%s, %s\n",
+ pathname, acpi_format_exception(status)));
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
+ cleanup:
+ ACPI_FREE(internal_path);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_get_parent_node
+ *
+ * PARAMETERS: Node - Current table entry
+ *
+ * RETURN: Parent entry of the given entry
+ *
+ * DESCRIPTION: Obtain the parent entry for a given entry in the namespace.
+ *
+ ******************************************************************************/
+
+struct acpi_namespace_node *acpi_ns_get_parent_node(struct acpi_namespace_node
+ *node)
+{
+ ACPI_FUNCTION_ENTRY();
+
+ if (!node) {
+ return (NULL);
+ }
+
+ /*
+ * Walk to the end of this peer list. The last entry is marked with a flag
+ * and the peer pointer is really a pointer back to the parent. This saves
+ * putting a parent back pointer in each and every named object!
+ */
+ while (!(node->flags & ANOBJ_END_OF_PEER_LIST)) {
+ node = node->peer;
+ }
+
+ return (node->peer);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_get_next_valid_node
+ *
+ * PARAMETERS: Node - Current table entry
+ *
+ * RETURN: Next valid Node in the linked node list. NULL if no more valid
+ * nodes.
+ *
+ * DESCRIPTION: Find the next valid node within a name table.
+ * Useful for implementing NULL-end-of-list loops.
+ *
+ ******************************************************************************/
+
+struct acpi_namespace_node *acpi_ns_get_next_valid_node(struct
+ acpi_namespace_node
+ *node)
+{
+
+ /* If we are at the end of this peer list, return NULL */
+
+ if (node->flags & ANOBJ_END_OF_PEER_LIST) {
+ return NULL;
+ }
+
+ /* Otherwise just return the next peer */
+
+ return (node->peer);
+}
+
+#ifdef ACPI_OBSOLETE_FUNCTIONS
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_find_parent_name
+ *
+ * PARAMETERS: *child_node - Named Obj whose name is to be found
+ *
+ * RETURN: The ACPI name
+ *
+ * DESCRIPTION: Search for the given obj in its parent scope and return the
+ * name segment, or "????" if the parent name can't be found
+ * (which "should not happen").
+ *
+ ******************************************************************************/
+
+acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node * child_node)
+{
+ struct acpi_namespace_node *parent_node;
+
+ ACPI_FUNCTION_TRACE(ns_find_parent_name);
+
+ if (child_node) {
+
+ /* Valid entry. Get the parent Node */
+
+ parent_node = acpi_ns_get_parent_node(child_node);
+ if (parent_node) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Parent of %p [%4.4s] is %p [%4.4s]\n",
+ child_node,
+ acpi_ut_get_node_name(child_node),
+ parent_node,
+ acpi_ut_get_node_name(parent_node)));
+
+ if (parent_node->name.integer) {
+ return_VALUE((acpi_name) parent_node->name.
+ integer);
+ }
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Unable to find parent of %p (%4.4s)\n",
+ child_node,
+ acpi_ut_get_node_name(child_node)));
+ }
+
+ return_VALUE(ACPI_UNKNOWN_NAME);
+}
+#endif
diff --git a/drivers/acpi/namespace/nswalk.c b/drivers/acpi/namespace/nswalk.c
new file mode 100644
index 0000000..3c905ce
--- /dev/null
+++ b/drivers/acpi/namespace/nswalk.c
@@ -0,0 +1,295 @@
+/******************************************************************************
+ *
+ * Module Name: nswalk - Functions for walking the ACPI namespace
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("nswalk")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_get_next_node
+ *
+ * PARAMETERS: Type - Type of node to be searched for
+ * parent_node - Parent node whose children we are
+ * getting
+ * child_node - Previous child that was found.
+ * The NEXT child will be returned
+ *
+ * RETURN: struct acpi_namespace_node - Pointer to the NEXT child or NULL if
+ * none is found.
+ *
+ * DESCRIPTION: Return the next node of the given type within the parent's
+ * scope. If child_node is valid, the search continues from its
+ * next peer; otherwise it starts at the first child of
+ * parent_node.
+ *
+ ******************************************************************************/
+struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type, struct acpi_namespace_node
+ *parent_node, struct acpi_namespace_node
+ *child_node)
+{
+ struct acpi_namespace_node *next_node = NULL;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (!child_node) {
+
+ /* It's really the parent's _scope_ that we want */
+
+ next_node = parent_node->child;
+ }
+
+ else {
+ /* Start search at the NEXT node */
+
+ next_node = acpi_ns_get_next_valid_node(child_node);
+ }
+
+ /* If any type is OK, we are done */
+
+ if (type == ACPI_TYPE_ANY) {
+
+ /* next_node is NULL if we are at the end-of-list */
+
+ return (next_node);
+ }
+
+ /* Must search for the node -- but within this scope only */
+
+ while (next_node) {
+
+ /* If type matches, we are done */
+
+ if (next_node->type == type) {
+ return (next_node);
+ }
+
+ /* Otherwise, move on to the next node */
+
+ next_node = acpi_ns_get_next_valid_node(next_node);
+ }
+
+ /* Not found */
+
+ return (NULL);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_walk_namespace
+ *
+ * PARAMETERS: Type - acpi_object_type to search for
+ * start_node - Handle in namespace where search begins
+ * max_depth - Depth to which search is to reach
+ * Flags - Whether to unlock the NS before invoking
+ * the callback routine
+ * user_function - Called when an object of "Type" is found
+ * Context - Passed to user function
+ * return_value - Location where the return value from the
+ * user_function is put if terminated early
+ * RETURNS: Status
+ *
+ * DESCRIPTION: Performs a modified depth-first walk of the namespace tree,
+ * starting (and ending) at the node specified by start_handle.
+ * The user_function is called whenever a node that matches
+ * the type parameter is found. If the user function returns
+ * a non-zero value, the search is terminated immediately and this
+ * value is returned to the caller.
+ *
+ * The point of this procedure is to provide a generic namespace
+ * walk routine that can be called from multiple places to
+ * provide multiple services; the User Function can be tailored
+ * to each task, whether it is a print function, a compare
+ * function, etc.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_walk_namespace(acpi_object_type type,
+ acpi_handle start_node,
+ u32 max_depth,
+ u32 flags,
+ acpi_walk_callback user_function,
+ void *context, void **return_value)
+{
+ acpi_status status;
+ acpi_status mutex_status;
+ struct acpi_namespace_node *child_node;
+ struct acpi_namespace_node *parent_node;
+ acpi_object_type child_type;
+ u32 level;
+
+ ACPI_FUNCTION_TRACE(ns_walk_namespace);
+
+ /* Special case for the namespace Root Node */
+
+ if (start_node == ACPI_ROOT_OBJECT) {
+ start_node = acpi_gbl_root_node;
+ }
+
+ /* Null child means "get first node" */
+
+ parent_node = start_node;
+ child_node = NULL;
+ child_type = ACPI_TYPE_ANY;
+ level = 1;
+
+ /*
+ * Traverse the tree of nodes until we bubble back up to where we
+ * started. When Level is zero, the loop is done because we have
+ * bubbled up to (and passed) the original parent handle (start_entry)
+ */
+ while (level > 0) {
+
+ /* Get the next node in this scope. Null if not found */
+
+ status = AE_OK;
+ child_node =
+ acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
+ child_node);
+ if (child_node) {
+
+ /* Found next child, get the type if we are not searching for ANY */
+
+ if (type != ACPI_TYPE_ANY) {
+ child_type = child_node->type;
+ }
+
+ /*
+ * Ignore all temporary namespace nodes (created during control
+ * method execution) unless told otherwise. These temporary nodes
+ * can cause a race condition because they can be deleted during the
+ * execution of the user function (if the namespace is unlocked before
+ * invocation of the user function.) Only the debugger namespace dump
+ * will examine the temporary nodes.
+ */
+ if ((child_node->flags & ANOBJ_TEMPORARY) &&
+ !(flags & ACPI_NS_WALK_TEMP_NODES)) {
+ status = AE_CTRL_DEPTH;
+ }
+
+ /* Type must match requested type */
+
+ else if (child_type == type) {
+ /*
+ * Found a matching node, invoke the user callback function.
+ * Unlock the namespace if flag is set.
+ */
+ if (flags & ACPI_NS_WALK_UNLOCK) {
+ mutex_status =
+ acpi_ut_release_mutex
+ (ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(mutex_status)) {
+ return_ACPI_STATUS
+ (mutex_status);
+ }
+ }
+
+ status =
+ user_function(child_node, level, context,
+ return_value);
+
+ if (flags & ACPI_NS_WALK_UNLOCK) {
+ mutex_status =
+ acpi_ut_acquire_mutex
+ (ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(mutex_status)) {
+ return_ACPI_STATUS
+ (mutex_status);
+ }
+ }
+
+ switch (status) {
+ case AE_OK:
+ case AE_CTRL_DEPTH:
+
+ /* Just keep going */
+ break;
+
+ case AE_CTRL_TERMINATE:
+
+ /* Exit now, with OK status */
+
+ return_ACPI_STATUS(AE_OK);
+
+ default:
+
+ /* All others are valid exceptions */
+
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /*
+ * Depth first search: Attempt to go down another level in the
+ * namespace if we are allowed to. Don't go any further if we have
+ * reached the caller specified maximum depth or if the user
+ * function has specified that the maximum depth has been reached.
+ */
+ if ((level < max_depth) && (status != AE_CTRL_DEPTH)) {
+ if (acpi_ns_get_next_node
+ (ACPI_TYPE_ANY, child_node, NULL)) {
+
+ /* There is at least one child of this node, visit it */
+
+ level++;
+ parent_node = child_node;
+ child_node = NULL;
+ }
+ }
+ } else {
+ /*
+ * No more children of this node (acpi_ns_get_next_node failed), go
+ * back upwards in the namespace tree to the node's parent.
+ */
+ level--;
+ child_node = parent_node;
+ parent_node = acpi_ns_get_parent_node(parent_node);
+ }
+ }
+
+ /* Complete walk, not terminated by user function */
+
+ return_ACPI_STATUS(AE_OK);
+}
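
For context, a hedged usage sketch (not part of this file) of how a caller drives this walk through the public acpi_walk_namespace() wrapper defined in nsxfeval.c below: the callback matches the acpi_walk_callback signature, and returning AE_CTRL_TERMINATE from it ends the walk early with an overall AE_OK, while AE_CTRL_DEPTH would only prune the current subtree. The limit of 16 is arbitrary, chosen just for the example.

static acpi_status demo_count_devices(acpi_handle obj_handle,
				      u32 nesting_level,
				      void *context, void **return_value)
{
	u32 *count = context;

	(*count)++;
	if (*count >= 16)
		return AE_CTRL_TERMINATE;	/* stop the walk early */

	return AE_OK;				/* keep walking */
}

static void demo_walk(void)
{
	u32 count = 0;

	/* Visit every Device object below the namespace root */
	acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX, demo_count_devices,
			    &count, NULL);
}
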
diff --git a/drivers/acpi/namespace/nsxfeval.c b/drivers/acpi/namespace/nsxfeval.c
new file mode 100644
index 0000000..a085cc3
--- /dev/null
+++ b/drivers/acpi/namespace/nsxfeval.c
@@ -0,0 +1,811 @@
+/*******************************************************************************
+ *
+ * Module Name: nsxfeval - Public interfaces to the ACPI subsystem
+ * ACPI Object evaluation interfaces
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acinterp.h>
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsxfeval")
+
+/* Local prototypes */
+static void acpi_ns_resolve_references(struct acpi_evaluate_info *info);
+
+#ifdef ACPI_FUTURE_USAGE
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_evaluate_object_typed
+ *
+ * PARAMETERS: Handle - Object handle (optional)
+ * Pathname - Object pathname (optional)
+ * external_params - List of parameters to pass to method,
+ * terminated by NULL. May be NULL
+ * if no parameters are being passed.
+ * return_buffer - Where to put method's return value (if
+ * any). If NULL, no value is returned.
+ * return_type - Expected type of return object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Find and evaluate the given object, passing the given
+ * parameters if necessary. One of "Handle" or "Pathname" must
+ * be valid (non-null)
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_evaluate_object_typed(acpi_handle handle,
+ acpi_string pathname,
+ struct acpi_object_list *external_params,
+ struct acpi_buffer *return_buffer,
+ acpi_object_type return_type)
+{
+ acpi_status status;
+ u8 must_free = FALSE;
+
+ ACPI_FUNCTION_TRACE(acpi_evaluate_object_typed);
+
+ /* Return buffer must be valid */
+
+ if (!return_buffer) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ if (return_buffer->length == ACPI_ALLOCATE_BUFFER) {
+ must_free = TRUE;
+ }
+
+ /* Evaluate the object */
+
+ status =
+ acpi_evaluate_object(handle, pathname, external_params,
+ return_buffer);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Type ANY means "don't care" */
+
+ if (return_type == ACPI_TYPE_ANY) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ if (return_buffer->length == 0) {
+
+ /* Error because caller specifically asked for a return value */
+
+ ACPI_ERROR((AE_INFO, "No return value"));
+ return_ACPI_STATUS(AE_NULL_OBJECT);
+ }
+
+ /* Examine the object type returned from evaluate_object */
+
+ if (((union acpi_object *)return_buffer->pointer)->type == return_type) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Return object type does not match requested type */
+
+ ACPI_ERROR((AE_INFO,
+ "Incorrect return type [%s] requested [%s]",
+ acpi_ut_get_type_name(((union acpi_object *)return_buffer->
+ pointer)->type),
+ acpi_ut_get_type_name(return_type)));
+
+ if (must_free) {
+
+ /* Caller used ACPI_ALLOCATE_BUFFER, free the return buffer */
+
+ ACPI_FREE(return_buffer->pointer);
+ return_buffer->pointer = NULL;
+ }
+
+ return_buffer->length = 0;
+ return_ACPI_STATUS(AE_TYPE);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_evaluate_object_typed)
+#endif /* ACPI_FUTURE_USAGE */
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_evaluate_object
+ *
+ * PARAMETERS: Handle - Object handle (optional)
+ * Pathname - Object pathname (optional)
+ * external_params - List of parameters to pass to method,
+ * terminated by NULL. May be NULL
+ * if no parameters are being passed.
+ * return_buffer - Where to put method's return value (if
+ * any). If NULL, no value is returned.
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Find and evaluate the given object, passing the given
+ * parameters if necessary. One of "Handle" or "Pathname" must
+ * be valid (non-null)
+ *
+ ******************************************************************************/
+acpi_status
+acpi_evaluate_object(acpi_handle handle,
+ acpi_string pathname,
+ struct acpi_object_list *external_params,
+ struct acpi_buffer *return_buffer)
+{
+ acpi_status status;
+ struct acpi_evaluate_info *info;
+ acpi_size buffer_space_needed;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(acpi_evaluate_object);
+
+ /* Allocate and initialize the evaluation information block */
+
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ info->pathname = pathname;
+
+ /* Convert and validate the device handle */
+
+ info->prefix_node = acpi_ns_map_handle_to_node(handle);
+ if (!info->prefix_node) {
+ status = AE_BAD_PARAMETER;
+ goto cleanup;
+ }
+
+ /*
+ * If there are parameters to be passed to a control method, the external
+ * objects must all be converted to internal objects
+ */
+ if (external_params && external_params->count) {
+ /*
+ * Allocate a new parameter block for the internal objects
+ * Add 1 to count to allow for null terminated internal list
+ */
+ info->parameters = ACPI_ALLOCATE_ZEROED(((acpi_size)
+ external_params->
+ count +
+ 1) * sizeof(void *));
+ if (!info->parameters) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ /* Convert each external object in the list to an internal object */
+
+ for (i = 0; i < external_params->count; i++) {
+ status =
+ acpi_ut_copy_eobject_to_iobject(&external_params->
+ pointer[i],
+ &info->
+ parameters[i]);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+ }
+ info->parameters[external_params->count] = NULL;
+ }
+
+ /*
+ * Three major cases:
+ * 1) Fully qualified pathname
+ * 2) No handle, not fully qualified pathname (error)
+ * 3) Valid handle
+ */
+ if ((pathname) && (acpi_ns_valid_root_prefix(pathname[0]))) {
+
+ /* The path is fully qualified, just evaluate by name */
+
+ info->prefix_node = NULL;
+ status = acpi_ns_evaluate(info);
+ } else if (!handle) {
+ /*
+ * A handle is optional iff a fully qualified pathname is specified.
+ * Since we've already handled fully qualified names above, this is
+ * an error
+ */
+ if (!pathname) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Both Handle and Pathname are NULL"));
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Null Handle with relative pathname [%s]",
+ pathname));
+ }
+
+ status = AE_BAD_PARAMETER;
+ } else {
+ /* We have a namespace node and a possible relative path */
+
+ status = acpi_ns_evaluate(info);
+ }
+
+ /*
+ * If we are expecting a return value, and all went well above,
+ * copy the return value to an external object.
+ */
+ if (return_buffer) {
+ if (!info->return_object) {
+ return_buffer->length = 0;
+ } else {
+ if (ACPI_GET_DESCRIPTOR_TYPE(info->return_object) ==
+ ACPI_DESC_TYPE_NAMED) {
+ /*
+ * If we received a NS Node as a return object, this means that
+ * the object we are evaluating has nothing interesting to
+ * return (such as a mutex, etc.) We return an error because
+ * these types are essentially unsupported by this interface.
+ * We don't check up front because this makes it easier to add
+ * support for various types at a later date if necessary.
+ */
+ status = AE_TYPE;
+ info->return_object = NULL; /* No need to delete a NS Node */
+ return_buffer->length = 0;
+ }
+
+ if (ACPI_SUCCESS(status)) {
+
+ /* Dereference Index and ref_of references */
+
+ acpi_ns_resolve_references(info);
+
+ /* Get the size of the returned object */
+
+ status =
+ acpi_ut_get_object_size(info->return_object,
+ &buffer_space_needed);
+ if (ACPI_SUCCESS(status)) {
+
+ /* Validate/Allocate/Clear caller buffer */
+
+ status =
+ acpi_ut_initialize_buffer
+ (return_buffer,
+ buffer_space_needed);
+ if (ACPI_FAILURE(status)) {
+ /*
+ * Caller's buffer is too small or a new one can't
+ * be allocated
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Needed buffer size %X, %s\n",
+ (u32)
+ buffer_space_needed,
+ acpi_format_exception
+ (status)));
+ } else {
+ /* We have enough space for the object, build it */
+
+ status =
+ acpi_ut_copy_iobject_to_eobject
+ (info->return_object,
+ return_buffer);
+ }
+ }
+ }
+ }
+ }
+
+ if (info->return_object) {
+ /*
+ * Delete the internal return object. NOTE: Interpreter must be
+ * locked to avoid race condition.
+ */
+ acpi_ex_enter_interpreter();
+
+ /* Remove one reference on the return object (should delete it) */
+
+ acpi_ut_remove_reference(info->return_object);
+ acpi_ex_exit_interpreter();
+ }
+
+ cleanup:
+
+ /* Free the input parameter list (if we created one) */
+
+ if (info->parameters) {
+
+ /* Free the allocated parameter block */
+
+ acpi_ut_delete_internal_object_list(info->parameters);
+ }
+
+ ACPI_FREE(info);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_evaluate_object)
+
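A hedged usage sketch (not part of this file) of the interface above, following the common kernel pattern: let acpi_evaluate_object() allocate the return buffer via ACPI_ALLOCATE_BUFFER, check the returned union acpi_object, and free buffer.pointer afterwards. The "_STA" method name and the helper name are illustrative only.

#include <linux/slab.h>		/* kfree() for the ACPI_ALLOCATE_BUFFER result */

static acpi_status demo_read_integer(acpi_handle device,
				     unsigned long long *value)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;

	status = acpi_evaluate_object(device, "_STA", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	obj = buffer.pointer;
	if (obj && obj->type == ACPI_TYPE_INTEGER)
		*value = obj->integer.value;
	else
		status = AE_TYPE;

	kfree(buffer.pointer);	/* buffer was allocated on our behalf */
	return status;
}
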
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_resolve_references
+ *
+ * PARAMETERS: Info - Evaluation info block
+ *
+ * RETURN: Info->return_object is replaced with the dereferenced object
+ *
+ * DESCRIPTION: Dereference certain reference objects. Called before an
+ * internal return object is converted to an external union acpi_object.
+ *
+ * Performs an automatic dereference of Index and ref_of reference objects.
+ * These reference objects are not supported by the union acpi_object, so this is a
+ * last resort effort to return something useful. Also, provides compatibility
+ * with other ACPI implementations.
+ *
+ * NOTE: does not handle references within returned package objects or nested
+ * references, but this support could be added later if found to be necessary.
+ *
+ ******************************************************************************/
+static void acpi_ns_resolve_references(struct acpi_evaluate_info *info)
+{
+ union acpi_operand_object *obj_desc = NULL;
+ struct acpi_namespace_node *node;
+
+ /* We are interested in reference objects only */
+
+ if (ACPI_GET_OBJECT_TYPE(info->return_object) !=
+ ACPI_TYPE_LOCAL_REFERENCE) {
+ return;
+ }
+
+ /*
+ * Two types of references are supported - those created by Index and
+ * ref_of operators. A name reference (AML_NAMEPATH_OP) can be converted
+	 * to a union acpi_object, so it is not dereferenced here. A ddb_handle
+	 * (AML_LOAD_OP) cannot be dereferenced, nor can it be converted to
+	 * a union acpi_object.
+ */
+ switch (info->return_object->reference.class) {
+ case ACPI_REFCLASS_INDEX:
+
+ obj_desc = *(info->return_object->reference.where);
+ break;
+
+ case ACPI_REFCLASS_REFOF:
+
+ node = info->return_object->reference.object;
+ if (node) {
+ obj_desc = node->object;
+ }
+ break;
+
+ default:
+ return;
+ }
+
+ /* Replace the existing reference object */
+
+ if (obj_desc) {
+ acpi_ut_add_reference(obj_desc);
+ acpi_ut_remove_reference(info->return_object);
+ info->return_object = obj_desc;
+ }
+
+ return;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_walk_namespace
+ *
+ * PARAMETERS: Type - acpi_object_type to search for
+ * start_object - Handle in namespace where search begins
+ * max_depth - Depth to which search is to reach
+ * user_function - Called when an object of "Type" is found
+ * Context - Passed to user function
+ * return_value - Location where return value of
+ * user_function is put if terminated early
+ *
+ * RETURN:      Status. If the walk was terminated early, the value returned
+ *              by the user_function is placed in return_value.
+ *
+ * DESCRIPTION: Performs a modified depth-first walk of the namespace tree,
+ * starting (and ending) at the object specified by start_handle.
+ * The user_function is called whenever an object that matches
+ * the type parameter is found. If the user function returns
+ * a non-zero value, the search is terminated immediately and this
+ * value is returned to the caller.
+ *
+ * The point of this procedure is to provide a generic namespace
+ * walk routine that can be called from multiple places to
+ * provide multiple services; the User Function can be tailored
+ * to each task, whether it is a print function, a compare
+ * function, etc.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_walk_namespace(acpi_object_type type,
+ acpi_handle start_object,
+ u32 max_depth,
+ acpi_walk_callback user_function,
+ void *context, void **return_value)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_walk_namespace);
+
+ /* Parameter validation */
+
+ if ((type > ACPI_TYPE_LOCAL_MAX) || (!max_depth) || (!user_function)) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /*
+ * Lock the namespace around the walk.
+ * The namespace will be unlocked/locked around each call
+ * to the user function - since this function
+ * must be allowed to make Acpi calls itself.
+ */
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_ns_walk_namespace(type, start_object, max_depth,
+ ACPI_NS_WALK_UNLOCK,
+ user_function, context, return_value);
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_walk_namespace)
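+
+/*
+ * Illustrative usage sketch, not part of the original patch: a minimal walk
+ * that counts every Method object below the namespace root. The callback
+ * and helper names are hypothetical; a real caller would do its work
+ * (printing, matching, etc.) inside the callback.
+ */
+#if 0				/* example only */
+static acpi_status
+example_count_callback(acpi_handle obj_handle,
+		       u32 nesting_level, void *context, void **return_value)
+{
+	u32 *count = context;
+
+	(*count)++;
+	return (AE_OK);		/* AE_CTRL_TERMINATE would stop the walk */
+}
+
+static u32 example_count_methods(void)
+{
+	u32 count = 0;
+
+	(void)acpi_walk_namespace(ACPI_TYPE_METHOD, ACPI_ROOT_OBJECT,
+				  ACPI_UINT32_MAX, example_count_callback,
+				  &count, NULL);
+	return (count);
+}
+#endif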
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_get_device_callback
+ *
+ * PARAMETERS:  Standard acpi_walk_callback parameters (invoked via
+ *              acpi_get_devices/acpi_walk_namespace)
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Callback from acpi_walk_namespace used by acpi_get_devices.
+ *              Filters out devices that are not present; if the caller
+ *              specified a HID, it also filters on the HID/CID values.
+ *
+ ******************************************************************************/
+static acpi_status
+acpi_ns_get_device_callback(acpi_handle obj_handle,
+ u32 nesting_level,
+ void *context, void **return_value)
+{
+ struct acpi_get_devices_info *info = context;
+ acpi_status status;
+ struct acpi_namespace_node *node;
+ u32 flags;
+ struct acpica_device_id hid;
+ struct acpi_compatible_id_list *cid;
+ u32 i;
+ int found;
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ node = acpi_ns_map_handle_to_node(obj_handle);
+ status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ if (!node) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /* Run _STA to determine if device is present */
+
+ status = acpi_ut_execute_STA(node, &flags);
+ if (ACPI_FAILURE(status)) {
+ return (AE_CTRL_DEPTH);
+ }
+
+ if (!(flags & ACPI_STA_DEVICE_PRESENT) &&
+ !(flags & ACPI_STA_DEVICE_FUNCTIONING)) {
+ /*
+		 * The device is neither present nor functional, so don't
+		 * examine its children. See the ACPI spec, description of
+		 * _STA, for more information.
+ */
+ return (AE_CTRL_DEPTH);
+ }
+
+ /* Filter based on device HID & CID */
+
+ if (info->hid != NULL) {
+ status = acpi_ut_execute_HID(node, &hid);
+ if (status == AE_NOT_FOUND) {
+ return (AE_OK);
+ } else if (ACPI_FAILURE(status)) {
+ return (AE_CTRL_DEPTH);
+ }
+
+ if (ACPI_STRNCMP(hid.value, info->hid, sizeof(hid.value)) != 0) {
+
+ /* Get the list of Compatible IDs */
+
+ status = acpi_ut_execute_CID(node, &cid);
+ if (status == AE_NOT_FOUND) {
+ return (AE_OK);
+ } else if (ACPI_FAILURE(status)) {
+ return (AE_CTRL_DEPTH);
+ }
+
+ /* Walk the CID list */
+
+ found = 0;
+ for (i = 0; i < cid->count; i++) {
+ if (ACPI_STRNCMP(cid->id[i].value, info->hid,
+ sizeof(struct
+ acpi_compatible_id)) ==
+ 0) {
+ found = 1;
+ break;
+ }
+ }
+ ACPI_FREE(cid);
+ if (!found)
+ return (AE_OK);
+ }
+ }
+
+ status = info->user_function(obj_handle, nesting_level, info->context,
+ return_value);
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_devices
+ *
+ * PARAMETERS: HID - HID to search for. Can be NULL.
+ * user_function - Called when a matching object is found
+ * Context - Passed to user function
+ * return_value - Location where return value of
+ * user_function is put if terminated early
+ *
+ * RETURN:      Status. If the walk was terminated early, the value returned
+ *              by the user_function is placed in return_value.
+ *
+ * DESCRIPTION: Performs a modified depth-first walk of the namespace tree,
+ * starting (and ending) at the object specified by start_handle.
+ * The user_function is called whenever an object of type
+ * Device is found. If the user function returns
+ * a non-zero value, the search is terminated immediately and this
+ * value is returned to the caller.
+ *
+ * This is a wrapper for walk_namespace, but the callback performs
+ * additional filtering. Please see acpi_ns_get_device_callback.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_get_devices(const char *HID,
+ acpi_walk_callback user_function,
+ void *context, void **return_value)
+{
+ acpi_status status;
+ struct acpi_get_devices_info info;
+
+ ACPI_FUNCTION_TRACE(acpi_get_devices);
+
+ /* Parameter validation */
+
+ if (!user_function) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /*
+ * We're going to call their callback from OUR callback, so we need
+ * to know what it is, and their context parameter.
+ */
+ info.hid = HID;
+ info.context = context;
+ info.user_function = user_function;
+
+ /*
+ * Lock the namespace around the walk.
+ * The namespace will be unlocked/locked around each call
+ * to the user function - since this function
+ * must be allowed to make Acpi calls itself.
+ */
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
+ acpi_ns_get_device_callback, &info,
+ return_value);
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_devices)
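+
+/*
+ * Illustrative usage sketch, not part of the original patch: find the first
+ * device whose _HID or _CID matches "PNP0A03" (a PCI root bridge), letting
+ * acpi_get_devices() do the _STA/_HID/_CID filtering. The callback and
+ * helper names are hypothetical.
+ */
+#if 0				/* example only */
+static acpi_status
+example_match_callback(acpi_handle obj_handle,
+		       u32 nesting_level, void *context, void **return_value)
+{
+	*return_value = obj_handle;	/* hand the handle back to the caller */
+	return (AE_CTRL_TERMINATE);	/* the first match is enough */
+}
+
+static acpi_handle example_find_pci_root(void)
+{
+	acpi_handle handle = NULL;
+
+	(void)acpi_get_devices("PNP0A03", example_match_callback, NULL,
+			       (void **)&handle);
+	return (handle);
+}
+#endif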
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_attach_data
+ *
+ * PARAMETERS: obj_handle - Namespace node
+ * Handler - Handler for this attachment
+ * Data - Pointer to data to be attached
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Attach arbitrary data and handler to a namespace node.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_attach_data(acpi_handle obj_handle,
+ acpi_object_handler handler, void *data)
+{
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ /* Parameter validation */
+
+ if (!obj_handle || !handler || !data) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Convert and validate the handle */
+
+ node = acpi_ns_map_handle_to_node(obj_handle);
+ if (!node) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ status = acpi_ns_attach_data(node, handler, data);
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_attach_data)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_detach_data
+ *
+ * PARAMETERS: obj_handle - Namespace node handle
+ * Handler - Handler used in call to acpi_attach_data
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove data that was previously attached to a node.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_detach_data(acpi_handle obj_handle, acpi_object_handler handler)
+{
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ /* Parameter validation */
+
+ if (!obj_handle || !handler) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Convert and validate the handle */
+
+ node = acpi_ns_map_handle_to_node(obj_handle);
+ if (!node) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ status = acpi_ns_detach_data(node, handler);
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_detach_data)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_data
+ *
+ * PARAMETERS: obj_handle - Namespace node
+ * Handler - Handler used in call to attach_data
+ * Data - Where the data is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Retrieve data that was previously attached to a namespace node.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
+{
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ /* Parameter validation */
+
+ if (!obj_handle || !handler || !data) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Convert and validate the handle */
+
+ node = acpi_ns_map_handle_to_node(obj_handle);
+ if (!node) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ status = acpi_ns_get_attached_data(node, handler, data);
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_data)
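+
+/*
+ * Illustrative usage sketch, not part of the original patch: a round trip
+ * through acpi_attach_data()/acpi_get_data()/acpi_detach_data(). ACPICA
+ * invokes the handler if the namespace node is deleted while the data is
+ * still attached. All names here are hypothetical.
+ */
+#if 0				/* example only */
+static void example_data_handler(acpi_handle object, void *data)
+{
+	/* The node went away; release whatever "data" refers to here */
+}
+
+static acpi_status example_tag_node(acpi_handle handle, void *driver_data)
+{
+	acpi_status status;
+	void *stored;
+
+	status = acpi_attach_data(handle, example_data_handler, driver_data);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	/* Later: retrieve the pointer that was attached above */
+
+	status = acpi_get_data(handle, example_data_handler, &stored);
+	if (ACPI_SUCCESS(status) && stored == driver_data) {
+		status = acpi_detach_data(handle, example_data_handler);
+	}
+
+	return (status);
+}
+#endif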
diff --git a/drivers/acpi/namespace/nsxfname.c b/drivers/acpi/namespace/nsxfname.c
new file mode 100644
index 0000000..5efa4e7
--- /dev/null
+++ b/drivers/acpi/namespace/nsxfname.c
@@ -0,0 +1,359 @@
+/******************************************************************************
+ *
+ * Module Name: nsxfname - Public interfaces to the ACPI subsystem
+ * ACPI Namespace oriented interfaces
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsxfname")
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_get_handle
+ *
+ * PARAMETERS: Parent - Object to search under (search scope).
+ * Pathname - Pointer to an asciiz string containing the
+ * name
+ * ret_handle - Where the return handle is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This routine will search for a caller-specified name in the
+ *              namespace. The caller can restrict the search region by
+ *              specifying a non-NULL parent. The parent value is itself a
+ *              namespace handle.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_handle(acpi_handle parent,
+ acpi_string pathname, acpi_handle * ret_handle)
+{
+ acpi_status status;
+ struct acpi_namespace_node *node = NULL;
+ struct acpi_namespace_node *prefix_node = NULL;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* Parameter Validation */
+
+ if (!ret_handle || !pathname) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /* Convert a parent handle to a prefix node */
+
+ if (parent) {
+ prefix_node = acpi_ns_map_handle_to_node(parent);
+ if (!prefix_node) {
+ return (AE_BAD_PARAMETER);
+ }
+ }
+
+ /*
+ * Valid cases are:
+ * 1) Fully qualified pathname
+ * 2) Parent + Relative pathname
+ *
+ * Error for <null Parent + relative path>
+ */
+ if (acpi_ns_valid_root_prefix(pathname[0])) {
+
+ /* Pathname is fully qualified (starts with '\') */
+
+ /* Special case for root-only, since we can't search for it */
+
+ if (!ACPI_STRCMP(pathname, ACPI_NS_ROOT_PATH)) {
+ *ret_handle =
+ acpi_ns_convert_entry_to_handle(acpi_gbl_root_node);
+ return (AE_OK);
+ }
+ } else if (!prefix_node) {
+
+ /* Relative path with null prefix is disallowed */
+
+ return (AE_BAD_PARAMETER);
+ }
+
+ /* Find the Node and convert to a handle */
+
+ status =
+ acpi_ns_get_node(prefix_node, pathname, ACPI_NS_NO_UPSEARCH, &node);
+ if (ACPI_SUCCESS(status)) {
+ *ret_handle = acpi_ns_convert_entry_to_handle(node);
+ }
+
+ return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_handle)
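+
+/*
+ * Illustrative usage sketch, not part of the original patch: resolve a fully
+ * qualified pathname and then a relative pathname under it. The "PCI0"
+ * device name and the helper name are hypothetical.
+ */
+#if 0				/* example only */
+static acpi_status example_lookup(acpi_handle *pci0_out)
+{
+	acpi_handle sb_handle;
+	acpi_status status;
+
+	/* Case 1: fully qualified pathname, Parent may be NULL */
+
+	status = acpi_get_handle(NULL, "\\_SB", &sb_handle);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	/* Case 2: relative pathname, Parent must be non-NULL */
+
+	return (acpi_get_handle(sb_handle, "PCI0", pci0_out));
+}
+#endif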
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_get_name
+ *
+ * PARAMETERS: Handle - Handle to be converted to a pathname
+ * name_type - Full pathname or single segment
+ * Buffer - Buffer for returned path
+ *
+ * RETURN:      Status. Buffer contains the requested name on success.
+ *
+ * DESCRIPTION: This routine returns the fully qualified name associated with
+ *              the Handle parameter. This and acpi_get_handle are
+ *              complementary functions.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
+{
+ acpi_status status;
+ struct acpi_namespace_node *node;
+
+ /* Parameter validation */
+
+ if (name_type > ACPI_NAME_TYPE_MAX) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_validate_buffer(buffer);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ if (name_type == ACPI_FULL_PATHNAME) {
+
+ /* Get the full pathname (From the namespace root) */
+
+ status = acpi_ns_handle_to_pathname(handle, buffer);
+ return (status);
+ }
+
+ /*
+ * Wants the single segment ACPI name.
+ * Validate handle and convert to a namespace Node
+ */
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ node = acpi_ns_map_handle_to_node(handle);
+ if (!node) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Validate/Allocate/Clear caller buffer */
+
+ status = acpi_ut_initialize_buffer(buffer, ACPI_PATH_SEGMENT_LENGTH);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+
+ /* Just copy the ACPI name from the Node and zero terminate it */
+
+ ACPI_STRNCPY(buffer->pointer, acpi_ut_get_node_name(node),
+ ACPI_NAME_SIZE);
+ ((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0;
+ status = AE_OK;
+
+ unlock_and_exit:
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_name)
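+
+/*
+ * Illustrative usage sketch, not part of the original patch: print the full
+ * pathname of a handle, using ACPI_ALLOCATE_BUFFER so that
+ * acpi_ut_initialize_buffer() allocates the buffer and the caller only has
+ * to free it. The helper name is hypothetical.
+ */
+#if 0				/* example only */
+static void example_print_path(acpi_handle handle)
+{
+	struct acpi_buffer path = { ACPI_ALLOCATE_BUFFER, NULL };
+
+	if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_FULL_PATHNAME, &path))) {
+		acpi_os_printf("ACPI path: %s\n", (char *)path.pointer);
+		ACPI_FREE(path.pointer);
+	}
+}
+#endif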
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_get_object_info
+ *
+ * PARAMETERS: Handle - Object Handle
+ * Buffer - Where the info is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Returns information about an object as gleaned from the
+ * namespace node and possibly by running several standard
+ *              control methods (such as in the case of a device).
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_object_info(acpi_handle handle, struct acpi_buffer * buffer)
+{
+ acpi_status status;
+ struct acpi_namespace_node *node;
+ struct acpi_device_info *info;
+ struct acpi_device_info *return_info;
+ struct acpi_compatible_id_list *cid_list = NULL;
+ acpi_size size;
+
+ /* Parameter validation */
+
+ if (!handle || !buffer) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_validate_buffer(buffer);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_device_info));
+ if (!info) {
+ return (AE_NO_MEMORY);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ node = acpi_ns_map_handle_to_node(handle);
+ if (!node) {
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ status = AE_BAD_PARAMETER;
+ goto cleanup;
+ }
+
+ /* Init return structure */
+
+ size = sizeof(struct acpi_device_info);
+
+ info->type = node->type;
+ info->name = node->name.integer;
+ info->valid = 0;
+
+ if (node->type == ACPI_TYPE_METHOD) {
+ info->param_count = node->object->method.param_count;
+ }
+
+ status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ /* If not a device, we are all done */
+
+ if (info->type == ACPI_TYPE_DEVICE) {
+ /*
+ * Get extra info for ACPI Devices objects only:
+ * Run the Device _HID, _UID, _CID, _STA, _ADR and _sx_d methods.
+ *
+ * Note: none of these methods are required, so they may or may
+ * not be present for this device. The Info->Valid bitfield is used
+ * to indicate which methods were found and ran successfully.
+ */
+
+ /* Execute the Device._HID method */
+
+ status = acpi_ut_execute_HID(node, &info->hardware_id);
+ if (ACPI_SUCCESS(status)) {
+ info->valid |= ACPI_VALID_HID;
+ }
+
+ /* Execute the Device._UID method */
+
+ status = acpi_ut_execute_UID(node, &info->unique_id);
+ if (ACPI_SUCCESS(status)) {
+ info->valid |= ACPI_VALID_UID;
+ }
+
+ /* Execute the Device._CID method */
+
+ status = acpi_ut_execute_CID(node, &cid_list);
+ if (ACPI_SUCCESS(status)) {
+ size += cid_list->size;
+ info->valid |= ACPI_VALID_CID;
+ }
+
+ /* Execute the Device._STA method */
+
+ status = acpi_ut_execute_STA(node, &info->current_status);
+ if (ACPI_SUCCESS(status)) {
+ info->valid |= ACPI_VALID_STA;
+ }
+
+ /* Execute the Device._ADR method */
+
+ status = acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, node,
+ &info->address);
+ if (ACPI_SUCCESS(status)) {
+ info->valid |= ACPI_VALID_ADR;
+ }
+
+ /* Execute the Device._sx_d methods */
+
+ status = acpi_ut_execute_sxds(node, info->highest_dstates);
+ if (ACPI_SUCCESS(status)) {
+ info->valid |= ACPI_VALID_SXDS;
+ }
+ }
+
+ /* Validate/Allocate/Clear caller buffer */
+
+ status = acpi_ut_initialize_buffer(buffer, size);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ /* Populate the return buffer */
+
+ return_info = buffer->pointer;
+ ACPI_MEMCPY(return_info, info, sizeof(struct acpi_device_info));
+
+ if (cid_list) {
+ ACPI_MEMCPY(&return_info->compatibility_id, cid_list,
+ cid_list->size);
+ }
+
+ cleanup:
+ ACPI_FREE(info);
+ if (cid_list) {
+ ACPI_FREE(cid_list);
+ }
+ return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_object_info)
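+
+/*
+ * Illustrative usage sketch, not part of the original patch: fetch the
+ * acpi_device_info block for a handle and look at the _HID string, using
+ * the Valid bitfield to see which of the optional methods actually ran.
+ * The helper name is hypothetical.
+ */
+#if 0				/* example only */
+static void example_show_hid(acpi_handle handle)
+{
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct acpi_device_info *info;
+
+	if (ACPI_FAILURE(acpi_get_object_info(handle, &buffer))) {
+		return;
+	}
+
+	info = buffer.pointer;
+	if (info->valid & ACPI_VALID_HID) {
+		acpi_os_printf("HID: %s\n", info->hardware_id.value);
+	}
+
+	ACPI_FREE(buffer.pointer);
+}
+#endif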
diff --git a/drivers/acpi/namespace/nsxfobj.c b/drivers/acpi/namespace/nsxfobj.c
new file mode 100644
index 0000000..2b375ee
--- /dev/null
+++ b/drivers/acpi/namespace/nsxfobj.c
@@ -0,0 +1,286 @@
+/*******************************************************************************
+ *
+ * Module Name: nsxfobj - Public interfaces to the ACPI subsystem
+ * ACPI Object oriented interfaces
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsxfobj")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_id
+ *
+ * PARAMETERS: Handle - Handle of object whose id is desired
+ * ret_id - Where the id will be placed
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This routine returns the owner id associated with a handle
+ *
+ ******************************************************************************/
+acpi_status acpi_get_id(acpi_handle handle, acpi_owner_id * ret_id)
+{
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ /* Parameter Validation */
+
+ if (!ret_id) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+		/* We have a namespace node and a possible relative path */
+
+ node = acpi_ns_map_handle_to_node(handle);
+ if (!node) {
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return (AE_BAD_PARAMETER);
+ }
+
+ *ret_id = node->owner_id;
+
+ status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_id)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_type
+ *
+ * PARAMETERS: Handle - Handle of object whose type is desired
+ * ret_type - Where the type will be placed
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This routine returns the type associated with a particular handle
+ *
+ ******************************************************************************/
+acpi_status acpi_get_type(acpi_handle handle, acpi_object_type * ret_type)
+{
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ /* Parameter Validation */
+
+ if (!ret_type) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /*
+ * Special case for the predefined Root Node
+ * (return type ANY)
+ */
+ if (handle == ACPI_ROOT_OBJECT) {
+ *ret_type = ACPI_TYPE_ANY;
+ return (AE_OK);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Convert and validate the handle */
+
+ node = acpi_ns_map_handle_to_node(handle);
+ if (!node) {
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return (AE_BAD_PARAMETER);
+ }
+
+ *ret_type = node->type;
+
+ status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_type)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_parent
+ *
+ * PARAMETERS: Handle - Handle of object whose parent is desired
+ * ret_handle - Where the parent handle will be placed
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Returns a handle to the parent of the object represented by
+ * Handle.
+ *
+ ******************************************************************************/
+acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)
+{
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ if (!ret_handle) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /* Special case for the predefined Root Node (no parent) */
+
+ if (handle == ACPI_ROOT_OBJECT) {
+ return (AE_NULL_ENTRY);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Convert and validate the handle */
+
+ node = acpi_ns_map_handle_to_node(handle);
+ if (!node) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Get the parent entry */
+
+ *ret_handle =
+ acpi_ns_convert_entry_to_handle(acpi_ns_get_parent_node(node));
+
+ /* Return exception if parent is null */
+
+ if (!acpi_ns_get_parent_node(node)) {
+ status = AE_NULL_ENTRY;
+ }
+
+ unlock_and_exit:
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_parent)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_next_object
+ *
+ * PARAMETERS: Type - Type of object to be searched for
+ * Parent - Parent object whose children we are getting
+ *              Child      - Previous child that was found.
+ *                            The NEXT child will be returned
+ * ret_handle - Where handle to the next object is placed
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Return the next peer object within the namespace. If Child is
+ *              valid, Parent is ignored. Otherwise, the first object within
+ *              the Parent's scope is returned.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_next_object(acpi_object_type type,
+ acpi_handle parent,
+ acpi_handle child, acpi_handle * ret_handle)
+{
+ acpi_status status;
+ struct acpi_namespace_node *node;
+ struct acpi_namespace_node *parent_node = NULL;
+ struct acpi_namespace_node *child_node = NULL;
+
+ /* Parameter validation */
+
+ if (type > ACPI_TYPE_EXTERNAL_MAX) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* If null handle, use the parent */
+
+ if (!child) {
+
+ /* Start search at the beginning of the specified scope */
+
+ parent_node = acpi_ns_map_handle_to_node(parent);
+ if (!parent_node) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+ } else {
+ /* Non-null handle, ignore the parent */
+ /* Convert and validate the handle */
+
+ child_node = acpi_ns_map_handle_to_node(child);
+ if (!child_node) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+ }
+
+ /* Internal function does the real work */
+
+ node = acpi_ns_get_next_node(type, parent_node, child_node);
+ if (!node) {
+ status = AE_NOT_FOUND;
+ goto unlock_and_exit;
+ }
+
+ if (ret_handle) {
+ *ret_handle = acpi_ns_convert_entry_to_handle(node);
+ }
+
+ unlock_and_exit:
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_next_object)
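+
+/*
+ * Illustrative usage sketch, not part of the original patch: count the
+ * immediate children of a parent handle by feeding each returned handle
+ * back in as the last-found child until AE_NOT_FOUND. The helper name is
+ * hypothetical.
+ */
+#if 0				/* example only */
+static u32 example_count_children(acpi_handle parent)
+{
+	acpi_handle child = NULL;
+	u32 count = 0;
+
+	while (ACPI_SUCCESS(acpi_get_next_object(ACPI_TYPE_ANY, parent,
+						 child, &child))) {
+		count++;
+	}
+
+	return (count);
+}
+#endif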
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
new file mode 100644
index 0000000..25ceae9
--- /dev/null
+++ b/drivers/acpi/numa.c
@@ -0,0 +1,286 @@
+/*
+ * acpi_numa.c - ACPI NUMA support
+ *
+ * Copyright (C) 2002 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acmacros.h>
+
+#define ACPI_NUMA 0x80000000
+#define _COMPONENT ACPI_NUMA
+ACPI_MODULE_NAME("numa");
+
+static nodemask_t nodes_found_map = NODE_MASK_NONE;
+
+/* maps to convert between proximity domain and logical node ID */
+static int pxm_to_node_map[MAX_PXM_DOMAINS]
+ = { [0 ... MAX_PXM_DOMAINS - 1] = NID_INVAL };
+static int node_to_pxm_map[MAX_NUMNODES]
+ = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
+
+int pxm_to_node(int pxm)
+{
+ if (pxm < 0)
+ return NID_INVAL;
+ return pxm_to_node_map[pxm];
+}
+
+int node_to_pxm(int node)
+{
+ if (node < 0)
+ return PXM_INVAL;
+ return node_to_pxm_map[node];
+}
+
+void __acpi_map_pxm_to_node(int pxm, int node)
+{
+ pxm_to_node_map[pxm] = node;
+ node_to_pxm_map[node] = pxm;
+}
+
+int acpi_map_pxm_to_node(int pxm)
+{
+ int node = pxm_to_node_map[pxm];
+
+ if (node < 0){
+ if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
+ return NID_INVAL;
+ node = first_unset_node(nodes_found_map);
+ __acpi_map_pxm_to_node(pxm, node);
+ node_set(node, nodes_found_map);
+ }
+
+ return node;
+}
+
+#if 0
+void __cpuinit acpi_unmap_pxm_to_node(int node)
+{
+ int pxm = node_to_pxm_map[node];
+ pxm_to_node_map[pxm] = NID_INVAL;
+ node_to_pxm_map[node] = PXM_INVAL;
+ node_clear(node, nodes_found_map);
+}
+#endif /* 0 */
+
+static void __init
+acpi_table_print_srat_entry(struct acpi_subtable_header *header)
+{
+
+ ACPI_FUNCTION_NAME("acpi_table_print_srat_entry");
+
+ if (!header)
+ return;
+
+ switch (header->type) {
+
+ case ACPI_SRAT_TYPE_CPU_AFFINITY:
+#ifdef ACPI_DEBUG_OUTPUT
+ {
+ struct acpi_srat_cpu_affinity *p =
+ (struct acpi_srat_cpu_affinity *)header;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n",
+ p->apic_id, p->local_sapic_eid,
+ p->proximity_domain_lo,
+ (p->flags & ACPI_SRAT_CPU_ENABLED)?
+ "enabled" : "disabled"));
+ }
+#endif /* ACPI_DEBUG_OUTPUT */
+ break;
+
+ case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
+#ifdef ACPI_DEBUG_OUTPUT
+ {
+ struct acpi_srat_mem_affinity *p =
+ (struct acpi_srat_mem_affinity *)header;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s\n",
+ (unsigned long)p->base_address,
+ (unsigned long)p->length,
+ p->proximity_domain,
+ (p->flags & ACPI_SRAT_MEM_ENABLED)?
+ "enabled" : "disabled",
+ (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)?
+ " hot-pluggable" : ""));
+ }
+#endif /* ACPI_DEBUG_OUTPUT */
+ break;
+
+ default:
+ printk(KERN_WARNING PREFIX
+ "Found unsupported SRAT entry (type = 0x%x)\n",
+ header->type);
+ break;
+ }
+}
+
+/*
+ * A lot of BIOSes fill in 10 (= no distance) everywhere. This messes
+ * up the NUMA heuristics, which want the local node to have a smaller
+ * distance than the others.
+ * Do some quick checks here and only use the SLIT if it passes.
+ */
+static __init int slit_valid(struct acpi_table_slit *slit)
+{
+ int i, j;
+ int d = slit->locality_count;
+ for (i = 0; i < d; i++) {
+ for (j = 0; j < d; j++) {
+ u8 val = slit->entry[d*i + j];
+ if (i == j) {
+ if (val != LOCAL_DISTANCE)
+ return 0;
+ } else if (val <= LOCAL_DISTANCE)
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static int __init acpi_parse_slit(struct acpi_table_header *table)
+{
+ struct acpi_table_slit *slit;
+
+ if (!table)
+ return -EINVAL;
+
+ slit = (struct acpi_table_slit *)table;
+
+ if (!slit_valid(slit)) {
+ printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
+ return -EINVAL;
+ }
+ acpi_numa_slit_init(slit);
+
+ return 0;
+}
+
+static int __init
+acpi_parse_processor_affinity(struct acpi_subtable_header * header,
+ const unsigned long end)
+{
+ struct acpi_srat_cpu_affinity *processor_affinity;
+
+ processor_affinity = (struct acpi_srat_cpu_affinity *)header;
+ if (!processor_affinity)
+ return -EINVAL;
+
+ acpi_table_print_srat_entry(header);
+
+	/* let the architecture-dependent part do it */
+ acpi_numa_processor_affinity_init(processor_affinity);
+
+ return 0;
+}
+
+static int __init
+acpi_parse_memory_affinity(struct acpi_subtable_header * header,
+ const unsigned long end)
+{
+ struct acpi_srat_mem_affinity *memory_affinity;
+
+ memory_affinity = (struct acpi_srat_mem_affinity *)header;
+ if (!memory_affinity)
+ return -EINVAL;
+
+ acpi_table_print_srat_entry(header);
+
+	/* let the architecture-dependent part do it */
+ acpi_numa_memory_affinity_init(memory_affinity);
+
+ return 0;
+}
+
+static int __init acpi_parse_srat(struct acpi_table_header *table)
+{
+ struct acpi_table_srat *srat;
+
+ if (!table)
+ return -EINVAL;
+
+ srat = (struct acpi_table_srat *)table;
+
+ return 0;
+}
+
+static int __init
+acpi_table_parse_srat(enum acpi_srat_type id,
+ acpi_table_entry_handler handler, unsigned int max_entries)
+{
+ return acpi_table_parse_entries(ACPI_SIG_SRAT,
+ sizeof(struct acpi_table_srat), id,
+ handler, max_entries);
+}
+
+int __init acpi_numa_init(void)
+{
+ /* SRAT: Static Resource Affinity Table */
+ if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
+ acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
+ acpi_parse_processor_affinity, NR_CPUS);
+ acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
+ acpi_parse_memory_affinity,
+ NR_NODE_MEMBLKS);
+ }
+
+ /* SLIT: System Locality Information Table */
+ acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);
+
+ acpi_numa_arch_fixup();
+ return 0;
+}
+
+int acpi_get_pxm(acpi_handle h)
+{
+ unsigned long long pxm;
+ acpi_status status;
+ acpi_handle handle;
+ acpi_handle phandle = h;
+
+ do {
+ handle = phandle;
+ status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm);
+ if (ACPI_SUCCESS(status))
+ return pxm;
+ status = acpi_get_parent(handle, &phandle);
+ } while (ACPI_SUCCESS(status));
+ return -1;
+}
+
+int acpi_get_node(acpi_handle *handle)
+{
+ int pxm, node = -1;
+
+ pxm = acpi_get_pxm(handle);
+ if (pxm >= 0)
+ node = acpi_map_pxm_to_node(pxm);
+
+ return node;
+}
+EXPORT_SYMBOL(acpi_get_node);
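+
+/*
+ * Illustrative usage sketch, not part of the original patch: map a firmware
+ * proximity domain to a logical node id and convert it back, exercising the
+ * pxm_to_node_map/node_to_pxm_map tables maintained above. The pxm value 2
+ * is arbitrary and the function name is hypothetical.
+ */
+#if 0	/* example only */
+static void example_pxm_round_trip(void)
+{
+	int node = acpi_map_pxm_to_node(2);	/* allocates a node on first use */
+
+	if (node != NID_INVAL)
+		printk(KERN_DEBUG "pxm 2 -> node %d -> pxm %d\n",
+		       node, node_to_pxm(node));
+}
+#endif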
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
new file mode 100644
index 0000000..c811142
--- /dev/null
+++ b/drivers/acpi/osl.c
@@ -0,0 +1,1382 @@
+/*
+ * acpi_osl.c - OS-dependent functions ($Revision: 83 $)
+ *
+ * Copyright (C) 2000 Andrew Henroid
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (c) 2008 Intel Corporation
+ * Author: Matthew Wilcox <willy@linux.intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/kmod.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/nmi.h>
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/ioport.h>
+#include <linux/list.h>
+#include <linux/jiffies.h>
+#include <linux/semaphore.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#include <acpi/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/processor.h>
+
+#define _COMPONENT ACPI_OS_SERVICES
+ACPI_MODULE_NAME("osl");
+#define PREFIX "ACPI: "
+struct acpi_os_dpc {
+ acpi_osd_exec_callback function;
+ void *context;
+ struct work_struct work;
+};
+
+#ifdef CONFIG_ACPI_CUSTOM_DSDT
+#include CONFIG_ACPI_CUSTOM_DSDT_FILE
+#endif
+
+#ifdef ENABLE_DEBUGGER
+#include <linux/kdb.h>
+
+/* stuff for debugger support */
+int acpi_in_debugger;
+EXPORT_SYMBOL(acpi_in_debugger);
+
+extern char line_buf[80];
+#endif /*ENABLE_DEBUGGER */
+
+static unsigned int acpi_irq_irq;
+static acpi_osd_handler acpi_irq_handler;
+static void *acpi_irq_context;
+static struct workqueue_struct *kacpid_wq;
+static struct workqueue_struct *kacpi_notify_wq;
+
+struct acpi_res_list {
+ resource_size_t start;
+ resource_size_t end;
+ acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
+	char name[5];   /* can only be 4 chars long; store the name here
+			   instead of using res->name, so no kmalloc is needed */
+ struct list_head resource_list;
+};
+
+static LIST_HEAD(resource_list_head);
+static DEFINE_SPINLOCK(acpi_res_lock);
+
+#define OSI_STRING_LENGTH_MAX 64 /* arbitrary */
+static char osi_additional_string[OSI_STRING_LENGTH_MAX];
+
+/*
+ * The story of _OSI(Linux)
+ *
+ * From pre-history through Linux-2.6.22,
+ * Linux responded TRUE upon a BIOS OSI(Linux) query.
+ *
+ * Unfortunately, reference BIOS writers got wind of this
+ * and put OSI(Linux) in their example code, quickly exposing
+ * this string as ill-conceived and opening the door to
+ * an un-bounded number of BIOS incompatibilities.
+ *
+ * For example, OSI(Linux) was used on resume to re-POST a
+ * video card on one system, because Linux at that time
+ * could not do a speedy restore in its native driver.
+ * But then upon gaining quick native restore capability,
+ * Linux has no way to tell the BIOS to skip the time-consuming
+ * POST -- putting Linux at a permanent performance disadvantage.
+ * On another system, the BIOS writer used OSI(Linux)
+ * to infer native OS support for IPMI! On other systems,
+ * OSI(Linux) simply got in the way of Linux claiming to
+ * be compatible with other operating systems, exposing
+ * BIOS issues such as skipped device initialization.
+ *
+ * So "Linux" turned out to be a really poor chose of
+ * OSI string, and from Linux-2.6.23 onward we respond FALSE.
+ *
+ * BIOS writers should NOT query _OSI(Linux) on future systems.
+ * Linux will complain on the console when it sees it, and return FALSE.
+ * To get Linux to return TRUE for your system will require
+ * a kernel source update to add a DMI entry,
+ * or boot with "acpi_osi=Linux"
+ */
+
+static struct osi_linux {
+ unsigned int enable:1;
+ unsigned int dmi:1;
+ unsigned int cmdline:1;
+ unsigned int known:1;
+} osi_linux = { 0, 0, 0, 0};
+
+static void __init acpi_request_region (struct acpi_generic_address *addr,
+ unsigned int length, char *desc)
+{
+ struct resource *res;
+
+ if (!addr->address || !length)
+ return;
+
+ if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
+ res = request_region(addr->address, length, desc);
+ else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ res = request_mem_region(addr->address, length, desc);
+}
+
+static int __init acpi_reserve_resources(void)
+{
+ acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
+ "ACPI PM1a_EVT_BLK");
+
+ acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
+ "ACPI PM1b_EVT_BLK");
+
+ acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
+ "ACPI PM1a_CNT_BLK");
+
+ acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
+ "ACPI PM1b_CNT_BLK");
+
+ if (acpi_gbl_FADT.pm_timer_length == 4)
+ acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");
+
+ acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
+ "ACPI PM2_CNT_BLK");
+
+	/* Length of GPE blocks must be a multiple of 2 */
+
+ if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
+ acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
+ acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");
+
+ if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
+ acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
+ acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
+
+ return 0;
+}
+device_initcall(acpi_reserve_resources);
+
+acpi_status __init acpi_os_initialize(void)
+{
+ return AE_OK;
+}
+
+acpi_status acpi_os_initialize1(void)
+{
+ kacpid_wq = create_singlethread_workqueue("kacpid");
+ kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
+ BUG_ON(!kacpid_wq);
+ BUG_ON(!kacpi_notify_wq);
+ return AE_OK;
+}
+
+acpi_status acpi_os_terminate(void)
+{
+ if (acpi_irq_handler) {
+ acpi_os_remove_interrupt_handler(acpi_irq_irq,
+ acpi_irq_handler);
+ }
+
+ destroy_workqueue(kacpid_wq);
+ destroy_workqueue(kacpi_notify_wq);
+
+ return AE_OK;
+}
+
+void acpi_os_printf(const char *fmt, ...)
+{
+ va_list args;
+ va_start(args, fmt);
+ acpi_os_vprintf(fmt, args);
+ va_end(args);
+}
+
+void acpi_os_vprintf(const char *fmt, va_list args)
+{
+ static char buffer[512];
+
+ vsprintf(buffer, fmt, args);
+
+#ifdef ENABLE_DEBUGGER
+ if (acpi_in_debugger) {
+ kdb_printf("%s", buffer);
+ } else {
+ printk("%s", buffer);
+ }
+#else
+ printk("%s", buffer);
+#endif
+}
+
+acpi_physical_address __init acpi_os_get_root_pointer(void)
+{
+ if (efi_enabled) {
+ if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
+ return efi.acpi20;
+ else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
+ return efi.acpi;
+ else {
+ printk(KERN_ERR PREFIX
+ "System description tables not found\n");
+ return 0;
+ }
+ } else {
+ acpi_physical_address pa = 0;
+
+ acpi_find_root_pointer(&pa);
+ return pa;
+ }
+}
+
+void __iomem *__init_refok
+acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
+{
+ if (phys > ULONG_MAX) {
+ printk(KERN_ERR PREFIX "Cannot map memory that high\n");
+ return NULL;
+ }
+ if (acpi_gbl_permanent_mmap)
+ /*
+ * ioremap checks to ensure this is in reserved space
+ */
+ return ioremap((unsigned long)phys, size);
+ else
+ return __acpi_map_table((unsigned long)phys, size);
+}
+EXPORT_SYMBOL_GPL(acpi_os_map_memory);
+
+void acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
+{
+ if (acpi_gbl_permanent_mmap) {
+ iounmap(virt);
+ }
+}
+EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
+
+#ifdef ACPI_FUTURE_USAGE
+acpi_status
+acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
+{
+ if (!phys || !virt)
+ return AE_BAD_PARAMETER;
+
+ *phys = virt_to_phys(virt);
+
+ return AE_OK;
+}
+#endif
+
+#define ACPI_MAX_OVERRIDE_LEN 100
+
+static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
+
+acpi_status
+acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
+ acpi_string * new_val)
+{
+ if (!init_val || !new_val)
+ return AE_BAD_PARAMETER;
+
+ *new_val = NULL;
+ if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
+ printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
+ acpi_os_name);
+ *new_val = acpi_os_name;
+ }
+
+ return AE_OK;
+}
+
+acpi_status
+acpi_os_table_override(struct acpi_table_header * existing_table,
+ struct acpi_table_header ** new_table)
+{
+ if (!existing_table || !new_table)
+ return AE_BAD_PARAMETER;
+
+ *new_table = NULL;
+
+#ifdef CONFIG_ACPI_CUSTOM_DSDT
+ if (strncmp(existing_table->signature, "DSDT", 4) == 0)
+ *new_table = (struct acpi_table_header *)AmlCode;
+#endif
+ if (*new_table != NULL) {
+ printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], "
+ "this is unsafe: tainting kernel\n",
+ existing_table->signature,
+ existing_table->oem_table_id);
+ add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
+ }
+ return AE_OK;
+}
+
+static irqreturn_t acpi_irq(int irq, void *dev_id)
+{
+ u32 handled;
+
+ handled = (*acpi_irq_handler) (acpi_irq_context);
+
+ if (handled) {
+ acpi_irq_handled++;
+ return IRQ_HANDLED;
+ } else
+ return IRQ_NONE;
+}
+
+acpi_status
+acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
+ void *context)
+{
+ unsigned int irq;
+
+ acpi_irq_stats_init();
+
+ /*
+ * Ignore the GSI from the core, and use the value in our copy of the
+ * FADT. It may not be the same if an interrupt source override exists
+ * for the SCI.
+ */
+ gsi = acpi_gbl_FADT.sci_interrupt;
+ if (acpi_gsi_to_irq(gsi, &irq) < 0) {
+ printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
+ gsi);
+ return AE_OK;
+ }
+
+ acpi_irq_handler = handler;
+ acpi_irq_context = context;
+ if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
+ printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
+ return AE_NOT_ACQUIRED;
+ }
+ acpi_irq_irq = irq;
+
+ return AE_OK;
+}
+
+acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
+{
+ if (irq) {
+ free_irq(irq, acpi_irq);
+ acpi_irq_handler = NULL;
+ acpi_irq_irq = 0;
+ }
+
+ return AE_OK;
+}
+
+/*
+ * Running in interpreter thread context, safe to sleep
+ */
+
+void acpi_os_sleep(acpi_integer ms)
+{
+ schedule_timeout_interruptible(msecs_to_jiffies(ms));
+}
+
+void acpi_os_stall(u32 us)
+{
+ while (us) {
+ u32 delay = 1000;
+
+ if (delay > us)
+ delay = us;
+ udelay(delay);
+ touch_nmi_watchdog();
+ us -= delay;
+ }
+}
+
+/*
+ * Support ACPI 3.0 AML Timer operand
+ * Returns 64-bit free-running, monotonically increasing timer
+ * with 100ns granularity
+ */
+u64 acpi_os_get_timer(void)
+{
+ static u64 t;
+
+#ifdef CONFIG_HPET
+ /* TBD: use HPET if available */
+#endif
+
+#ifdef CONFIG_X86_PM_TIMER
+ /* TBD: default to PM timer if HPET was not available */
+#endif
+ if (!t)
+ printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");
+
+ return ++t;
+}
+
+acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
+{
+ u32 dummy;
+
+ if (!value)
+ value = &dummy;
+
+ *value = 0;
+ if (width <= 8) {
+ *(u8 *) value = inb(port);
+ } else if (width <= 16) {
+ *(u16 *) value = inw(port);
+ } else if (width <= 32) {
+ *(u32 *) value = inl(port);
+ } else {
+ BUG();
+ }
+
+ return AE_OK;
+}
+
+EXPORT_SYMBOL(acpi_os_read_port);
+
+acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
+{
+ if (width <= 8) {
+ outb(value, port);
+ } else if (width <= 16) {
+ outw(value, port);
+ } else if (width <= 32) {
+ outl(value, port);
+ } else {
+ BUG();
+ }
+
+ return AE_OK;
+}
+
+EXPORT_SYMBOL(acpi_os_write_port);
+
+acpi_status
+acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
+{
+ u32 dummy;
+ void __iomem *virt_addr;
+
+ virt_addr = ioremap(phys_addr, width);
+ if (!value)
+ value = &dummy;
+
+ switch (width) {
+ case 8:
+ *(u8 *) value = readb(virt_addr);
+ break;
+ case 16:
+ *(u16 *) value = readw(virt_addr);
+ break;
+ case 32:
+ *(u32 *) value = readl(virt_addr);
+ break;
+ default:
+ BUG();
+ }
+
+ iounmap(virt_addr);
+
+ return AE_OK;
+}
+
+acpi_status
+acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
+{
+ void __iomem *virt_addr;
+
+ virt_addr = ioremap(phys_addr, width);
+
+ switch (width) {
+ case 8:
+ writeb(value, virt_addr);
+ break;
+ case 16:
+ writew(value, virt_addr);
+ break;
+ case 32:
+ writel(value, virt_addr);
+ break;
+ default:
+ BUG();
+ }
+
+ iounmap(virt_addr);
+
+ return AE_OK;
+}
+
+acpi_status
+acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
+ u32 *value, u32 width)
+{
+ int result, size;
+
+ if (!value)
+ return AE_BAD_PARAMETER;
+
+ switch (width) {
+ case 8:
+ size = 1;
+ break;
+ case 16:
+ size = 2;
+ break;
+ case 32:
+ size = 4;
+ break;
+ default:
+ return AE_ERROR;
+ }
+
+ result = raw_pci_read(pci_id->segment, pci_id->bus,
+ PCI_DEVFN(pci_id->device, pci_id->function),
+ reg, size, value);
+
+ return (result ? AE_ERROR : AE_OK);
+}
+
+acpi_status
+acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
+ acpi_integer value, u32 width)
+{
+ int result, size;
+
+ switch (width) {
+ case 8:
+ size = 1;
+ break;
+ case 16:
+ size = 2;
+ break;
+ case 32:
+ size = 4;
+ break;
+ default:
+ return AE_ERROR;
+ }
+
+ result = raw_pci_write(pci_id->segment, pci_id->bus,
+ PCI_DEVFN(pci_id->device, pci_id->function),
+ reg, size, value);
+
+ return (result ? AE_ERROR : AE_OK);
+}
+
+/* TODO: Change code to take advantage of driver model more */
+static void acpi_os_derive_pci_id_2(acpi_handle rhandle, /* upper bound */
+ acpi_handle chandle, /* current node */
+ struct acpi_pci_id **id,
+ int *is_bridge, u8 * bus_number)
+{
+ acpi_handle handle;
+ struct acpi_pci_id *pci_id = *id;
+ acpi_status status;
+ unsigned long long temp;
+ acpi_object_type type;
+
+ acpi_get_parent(chandle, &handle);
+ if (handle != rhandle) {
+ acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge,
+ bus_number);
+
+ status = acpi_get_type(handle, &type);
+ if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
+ return;
+
+ status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
+ &temp);
+ if (ACPI_SUCCESS(status)) {
+ u32 val;
+ pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
+ pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));
+
+ if (*is_bridge)
+ pci_id->bus = *bus_number;
+
+ /* any nicer way to get bus number of bridge ? */
+ status =
+ acpi_os_read_pci_configuration(pci_id, 0x0e, &val,
+ 8);
+ if (ACPI_SUCCESS(status)
+ && ((val & 0x7f) == 1 || (val & 0x7f) == 2)) {
+ status =
+ acpi_os_read_pci_configuration(pci_id, 0x18,
+ &val, 8);
+ if (!ACPI_SUCCESS(status)) {
+ /* Certainly broken... FIX ME */
+ return;
+ }
+ *is_bridge = 1;
+ pci_id->bus = val;
+ status =
+ acpi_os_read_pci_configuration(pci_id, 0x19,
+ &val, 8);
+ if (ACPI_SUCCESS(status)) {
+ *bus_number = val;
+ }
+ } else
+ *is_bridge = 0;
+ }
+ }
+}
+
+void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound */
+ acpi_handle chandle, /* current node */
+ struct acpi_pci_id **id)
+{
+ int is_bridge = 1;
+ u8 bus_number = (*id)->bus;
+
+ acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
+}
+
+static void acpi_os_execute_deferred(struct work_struct *work)
+{
+ struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
+ if (!dpc) {
+ printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
+ return;
+ }
+
+ dpc->function(dpc->context);
+ kfree(dpc);
+
+ return;
+}
+
+static void acpi_os_execute_hp_deferred(struct work_struct *work)
+{
+ struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
+ if (!dpc) {
+ printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
+ return;
+ }
+
+ acpi_os_wait_events_complete(NULL);
+
+ dpc->function(dpc->context);
+ kfree(dpc);
+
+ return;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_execute
+ *
+ * PARAMETERS: Type - Type of the callback
+ * Function - Function to be executed
+ * Context - Function parameters
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Depending on type, either queues function for deferred execution or
+ * immediately executes function on a separate thread.
+ *
+ ******************************************************************************/
+
+static acpi_status __acpi_os_execute(acpi_execute_type type,
+ acpi_osd_exec_callback function, void *context, int hp)
+{
+ acpi_status status = AE_OK;
+ struct acpi_os_dpc *dpc;
+ struct workqueue_struct *queue;
+ int ret;
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Scheduling function [%p(%p)] for deferred execution.\n",
+ function, context));
+
+ if (!function)
+ return AE_BAD_PARAMETER;
+
+ /*
+ * Allocate/initialize DPC structure. Note that this memory will be
+ * freed by the callee. The kernel handles the work_struct list in a
+ * way that allows us to also free its memory inside the callee.
+ * Because we may want to schedule several tasks with different
+ * parameters we can't use the approach some kernel code uses of
+ * having a static work_struct.
+ */
+
+ dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
+ if (!dpc)
+ return_ACPI_STATUS(AE_NO_MEMORY);
+
+ dpc->function = function;
+ dpc->context = context;
+
+ if (!hp) {
+ INIT_WORK(&dpc->work, acpi_os_execute_deferred);
+ queue = (type == OSL_NOTIFY_HANDLER) ?
+ kacpi_notify_wq : kacpid_wq;
+ ret = queue_work(queue, &dpc->work);
+ } else {
+ INIT_WORK(&dpc->work, acpi_os_execute_hp_deferred);
+ ret = schedule_work(&dpc->work);
+ }
+
+ if (!ret) {
+ printk(KERN_ERR PREFIX
+ "Call to queue_work() failed.\n");
+ status = AE_ERROR;
+ kfree(dpc);
+ }
+ return_ACPI_STATUS(status);
+}
+
+acpi_status acpi_os_execute(acpi_execute_type type,
+ acpi_osd_exec_callback function, void *context)
+{
+ return __acpi_os_execute(type, function, context, 0);
+}
+EXPORT_SYMBOL(acpi_os_execute);
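+
+/*
+ * Illustrative usage sketch, not part of the original patch: defer work to
+ * the kacpi_notify workqueue by passing OSL_NOTIFY_HANDLER (the only type
+ * this implementation routes to a dedicated queue). The deferred function
+ * runs in process context and the dpc bookkeeping is freed for us. The
+ * callback and helper names are hypothetical.
+ */
+#if 0	/* example only */
+static void example_deferred_fn(void *context)
+{
+	printk(KERN_DEBUG PREFIX "deferred ACPI work, context=%p\n", context);
+}
+
+static acpi_status example_queue_work(void *context)
+{
+	return acpi_os_execute(OSL_NOTIFY_HANDLER, example_deferred_fn,
+			       context);
+}
+#endif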
+
+acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
+ void *context)
+{
+ return __acpi_os_execute(0, function, context, 1);
+}
+
+void acpi_os_wait_events_complete(void *context)
+{
+ flush_workqueue(kacpid_wq);
+ flush_workqueue(kacpi_notify_wq);
+}
+
+EXPORT_SYMBOL(acpi_os_wait_events_complete);
+
+/*
+ * Allocate the memory for a spinlock and initialize it.
+ */
+acpi_status acpi_os_create_lock(acpi_spinlock * handle)
+{
+ spin_lock_init(*handle);
+
+ return AE_OK;
+}
+
+/*
+ * Deallocate the memory for a spinlock.
+ */
+void acpi_os_delete_lock(acpi_spinlock handle)
+{
+ return;
+}
+
+acpi_status
+acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
+{
+ struct semaphore *sem = NULL;
+
+ sem = acpi_os_allocate(sizeof(struct semaphore));
+ if (!sem)
+ return AE_NO_MEMORY;
+ memset(sem, 0, sizeof(struct semaphore));
+
+ sema_init(sem, initial_units);
+
+ *handle = (acpi_handle *) sem;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
+ *handle, initial_units));
+
+ return AE_OK;
+}
+
+/*
+ * TODO: A better way to delete semaphores? Linux doesn't have a
+ * 'delete_semaphore()' function -- may result in an invalid
+ * pointer dereference for non-synchronized consumers. Should
+ * we at least check for blocked threads and signal/cancel them?
+ */
+
+acpi_status acpi_os_delete_semaphore(acpi_handle handle)
+{
+ struct semaphore *sem = (struct semaphore *)handle;
+
+ if (!sem)
+ return AE_BAD_PARAMETER;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
+
+ BUG_ON(!list_empty(&sem->wait_list));
+ kfree(sem);
+ sem = NULL;
+
+ return AE_OK;
+}
+
+/*
+ * TODO: Support for units > 1?
+ */
+acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
+{
+ acpi_status status = AE_OK;
+ struct semaphore *sem = (struct semaphore *)handle;
+ long jiffies;
+ int ret = 0;
+
+ if (!sem || (units < 1))
+ return AE_BAD_PARAMETER;
+
+ if (units > 1)
+ return AE_SUPPORT;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
+ handle, units, timeout));
+
+ if (timeout == ACPI_WAIT_FOREVER)
+ jiffies = MAX_SCHEDULE_TIMEOUT;
+ else
+ jiffies = msecs_to_jiffies(timeout);
+
+ ret = down_timeout(sem, jiffies);
+ if (ret)
+ status = AE_TIME;
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "Failed to acquire semaphore[%p|%d|%d], %s",
+ handle, units, timeout,
+ acpi_format_exception(status)));
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "Acquired semaphore[%p|%d|%d]", handle,
+ units, timeout));
+ }
+
+ return status;
+}
+
+/*
+ * TODO: Support for units > 1?
+ */
+acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
+{
+ struct semaphore *sem = (struct semaphore *)handle;
+
+ if (!sem || (units < 1))
+ return AE_BAD_PARAMETER;
+
+ if (units > 1)
+ return AE_SUPPORT;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
+ units));
+
+ up(sem);
+
+ return AE_OK;
+}
+
+#ifdef ACPI_FUTURE_USAGE
+u32 acpi_os_get_line(char *buffer)
+{
+
+#ifdef ENABLE_DEBUGGER
+ if (acpi_in_debugger) {
+ u32 chars;
+
+ kdb_read(buffer, sizeof(line_buf));
+
+ /* remove the CR kdb includes */
+ chars = strlen(buffer) - 1;
+ buffer[chars] = '\0';
+ }
+#endif
+
+ return 0;
+}
+#endif /* ACPI_FUTURE_USAGE */
+
+acpi_status acpi_os_signal(u32 function, void *info)
+{
+ switch (function) {
+ case ACPI_SIGNAL_FATAL:
+ printk(KERN_ERR PREFIX "Fatal opcode executed\n");
+ break;
+ case ACPI_SIGNAL_BREAKPOINT:
+ /*
+ * AML Breakpoint
+ * ACPI spec. says to treat it as a NOP unless
+ * you are debugging. So if/when we integrate
+		 * the AML debugger into the kernel debugger, its
+ * hook will go here. But until then it is
+ * not useful to print anything on breakpoints.
+ */
+ break;
+ default:
+ break;
+ }
+
+ return AE_OK;
+}
+
+static int __init acpi_os_name_setup(char *str)
+{
+ char *p = acpi_os_name;
+ int count = ACPI_MAX_OVERRIDE_LEN - 1;
+
+ if (!str || !*str)
+ return 0;
+
+ for (; count-- && str && *str; str++) {
+ if (isalnum(*str) || *str == ' ' || *str == ':')
+ *p++ = *str;
+ else if (*str == '\'' || *str == '"')
+ continue;
+ else
+ break;
+ }
+ *p = 0;
+
+ return 1;
+
+}
+
+__setup("acpi_os_name=", acpi_os_name_setup);
+
+static void __init set_osi_linux(unsigned int enable)
+{
+ if (osi_linux.enable != enable) {
+ osi_linux.enable = enable;
+ printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n",
+ enable ? "Add": "Delet");
+ }
+ return;
+}
+
+static void __init acpi_cmdline_osi_linux(unsigned int enable)
+{
+ osi_linux.cmdline = 1; /* cmdline set the default */
+ set_osi_linux(enable);
+
+ return;
+}
+
+void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
+{
+ osi_linux.dmi = 1; /* DMI knows that this box asks OSI(Linux) */
+
+ printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
+
+ if (enable == -1)
+ return;
+
+ osi_linux.known = 1; /* DMI knows which OSI(Linux) default needed */
+
+ set_osi_linux(enable);
+
+ return;
+}
+
+/*
+ * Modify the list of "OS Interfaces" reported to BIOS via _OSI
+ *
+ * empty string disables _OSI
+ * string starting with '!' disables that string
+ * otherwise string is added to list, augmenting built-in strings
+ */
+int __init acpi_osi_setup(char *str)
+{
+ if (str == NULL || *str == '\0') {
+ printk(KERN_INFO PREFIX "_OSI method disabled\n");
+ acpi_gbl_create_osi_method = FALSE;
+ } else if (!strcmp("!Linux", str)) {
+ acpi_cmdline_osi_linux(0); /* !enable */
+ } else if (*str == '!') {
+ if (acpi_osi_invalidate(++str) == AE_OK)
+ printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
+ } else if (!strcmp("Linux", str)) {
+ acpi_cmdline_osi_linux(1); /* enable */
+ } else if (*osi_additional_string == '\0') {
+ strncpy(osi_additional_string, str, OSI_STRING_LENGTH_MAX);
+ printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
+ }
+
+ return 1;
+}
+
+__setup("acpi_osi=", acpi_osi_setup);
+
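+/*
+ * Illustrative kernel command line examples (not part of this change;
+ * "FooString" is a placeholder):
+ *
+ *	acpi_osi=		disable the _OSI method entirely
+ *	acpi_osi=Linux		honor BIOS _OSI("Linux") queries
+ *	acpi_osi=!Linux		ignore BIOS _OSI("Linux") queries
+ *	acpi_osi=FooString	add one extra interface string
+ *	acpi_osi=!FooString	delete a built-in interface string
+ */
+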
+/* enable serialization to combat AE_ALREADY_EXISTS errors */
+static int __init acpi_serialize_setup(char *str)
+{
+ printk(KERN_INFO PREFIX "serialize enabled\n");
+
+ acpi_gbl_all_methods_serialized = TRUE;
+
+ return 1;
+}
+
+__setup("acpi_serialize", acpi_serialize_setup);
+
+/*
+ * Wake and Run-Time GPEs are expected to be separate.
+ * We disable wake-GPEs at run-time to prevent spurious
+ * interrupts.
+ *
+ * However, if a system exists that shares Wake and
+ * Run-time events on the same GPE this flag is available
+ * to tell Linux to keep the wake-time GPEs enabled at run-time.
+ */
+static int __init acpi_wake_gpes_always_on_setup(char *str)
+{
+ printk(KERN_INFO PREFIX "wake GPEs not disabled\n");
+
+ acpi_gbl_leave_wake_gpes_disabled = FALSE;
+
+ return 1;
+}
+
+__setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
+
+/* Check for resource interference between native drivers and ACPI
+ * OperationRegions (SystemIO and SystemMemory only).
+ * I/O ports and memory declared in ACPI might be used by the ACPI subsystem
+ * in arbitrary AML code and can interfere with legacy drivers.
+ * acpi_enforce_resources= can be set to:
+ *
+ * - strict (2)
+ *   -> a driver trying to access such a resource will not load
+ * - lax (default) (1)
+ *   -> a driver trying to access such a resource will load, but a
+ *      warning is printed that something might go wrong...
+ *
+ * - no (0)
+ * -> ACPI Operation Region resources will not be registered
+ *
+ */
+#define ENFORCE_RESOURCES_STRICT 2
+#define ENFORCE_RESOURCES_LAX 1
+#define ENFORCE_RESOURCES_NO 0
+
+static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
+
+static int __init acpi_enforce_resources_setup(char *str)
+{
+ if (str == NULL || *str == '\0')
+ return 0;
+
+ if (!strcmp("strict", str))
+ acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
+ else if (!strcmp("lax", str))
+ acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
+ else if (!strcmp("no", str))
+ acpi_enforce_resources = ENFORCE_RESOURCES_NO;
+
+ return 1;
+}
+
+__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
+
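+/*
+ * Illustrative example (not part of this change): booting with
+ * "acpi_enforce_resources=strict" makes acpi_check_resource_conflict()
+ * below return -EBUSY on a clash, so the conflicting native driver
+ * refuses to bind instead of merely warning.
+ */
+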
+/* Check for resource conflicts between ACPI OperationRegions and native
+ * drivers */
+int acpi_check_resource_conflict(struct resource *res)
+{
+ struct acpi_res_list *res_list_elem;
+ int ioport;
+ int clash = 0;
+
+ if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
+ return 0;
+ if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
+ return 0;
+
+ ioport = res->flags & IORESOURCE_IO;
+
+ spin_lock(&acpi_res_lock);
+ list_for_each_entry(res_list_elem, &resource_list_head,
+ resource_list) {
+ if (ioport && (res_list_elem->resource_type
+ != ACPI_ADR_SPACE_SYSTEM_IO))
+ continue;
+ if (!ioport && (res_list_elem->resource_type
+ != ACPI_ADR_SPACE_SYSTEM_MEMORY))
+ continue;
+
+ if (res->end < res_list_elem->start
+ || res_list_elem->end < res->start)
+ continue;
+ clash = 1;
+ break;
+ }
+ spin_unlock(&acpi_res_lock);
+
+ if (clash) {
+ if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
+ printk("%sACPI: %s resource %s [0x%llx-0x%llx]"
+ " conflicts with ACPI region %s"
+ " [0x%llx-0x%llx]\n",
+ acpi_enforce_resources == ENFORCE_RESOURCES_LAX
+ ? KERN_WARNING : KERN_ERR,
+ ioport ? "I/O" : "Memory", res->name,
+ (long long) res->start, (long long) res->end,
+ res_list_elem->name,
+ (long long) res_list_elem->start,
+ (long long) res_list_elem->end);
+ printk(KERN_INFO "ACPI: Device needs an ACPI driver\n");
+ }
+ if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
+ return -EBUSY;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(acpi_check_resource_conflict);
+
+int acpi_check_region(resource_size_t start, resource_size_t n,
+ const char *name)
+{
+ struct resource res = {
+ .start = start,
+ .end = start + n - 1,
+ .name = name,
+ .flags = IORESOURCE_IO,
+ };
+
+ return acpi_check_resource_conflict(&res);
+}
+EXPORT_SYMBOL(acpi_check_region);
+
+int acpi_check_mem_region(resource_size_t start, resource_size_t n,
+ const char *name)
+{
+ struct resource res = {
+ .start = start,
+ .end = start + n - 1,
+ .name = name,
+ .flags = IORESOURCE_MEM,
+ };
+
+ return acpi_check_resource_conflict(&res);
+
+}
+EXPORT_SYMBOL(acpi_check_mem_region);
+
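+/*
+ * Sketch (hypothetical driver code, not part of this change) of the intended
+ * calling convention: a native driver checks its I/O range against ACPI
+ * OperationRegions before claiming it, e.g.
+ *
+ *	if (acpi_check_region(base, len, "mydriver"))
+ *		return -EBUSY;
+ *	if (!request_region(base, len, "mydriver"))
+ *		return -EBUSY;
+ *
+ * where "mydriver", base and len are placeholders.
+ */
+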
+/*
+ * Acquire a spinlock.
+ *
+ * handle is a pointer to the spinlock_t.
+ */
+
+acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
+{
+ acpi_cpu_flags flags;
+ spin_lock_irqsave(lockp, flags);
+ return flags;
+}
+
+/*
+ * Release a spinlock. See above.
+ */
+
+void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
+{
+ spin_unlock_irqrestore(lockp, flags);
+}
+
+#ifndef ACPI_USE_LOCAL_CACHE
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_create_cache
+ *
+ * PARAMETERS: name - Ascii name for the cache
+ * size - Size of each cached object
+ * depth - Maximum depth of the cache (in objects) <ignored>
+ * cache - Where the new cache object is returned
+ *
+ * RETURN: status
+ *
+ * DESCRIPTION: Create a cache object
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
+{
+ *cache = kmem_cache_create(name, size, 0, 0, NULL);
+ if (*cache == NULL)
+ return AE_ERROR;
+ else
+ return AE_OK;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_purge_cache
+ *
+ * PARAMETERS: Cache - Handle to cache object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Free all objects within the requested cache.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
+{
+ kmem_cache_shrink(cache);
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_delete_cache
+ *
+ * PARAMETERS: Cache - Handle to cache object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Free all objects within the requested cache and delete the
+ * cache object.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
+{
+ kmem_cache_destroy(cache);
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_release_object
+ *
+ * PARAMETERS: Cache - Handle to cache object
+ * Object - The object to be released
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Release an object to the specified cache. If cache is full,
+ * the object is deleted.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
+{
+ kmem_cache_free(cache, object);
+ return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_validate_interface
+ *
+ * PARAMETERS: interface - Requested interface to be validated
+ *
+ * RETURN: AE_OK if interface is supported, AE_SUPPORT otherwise
+ *
+ * DESCRIPTION: Match an interface string to the interfaces supported by the
+ * host. Strings originate from an AML call to the _OSI method.
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_validate_interface (char *interface)
+{
+ if (!strncmp(osi_additional_string, interface, OSI_STRING_LENGTH_MAX))
+ return AE_OK;
+ if (!strcmp("Linux", interface)) {
+
+ printk(KERN_NOTICE PREFIX
+ "BIOS _OSI(Linux) query %s%s\n",
+ osi_linux.enable ? "honored" : "ignored",
+ osi_linux.cmdline ? " via cmdline" :
+ osi_linux.dmi ? " via DMI" : "");
+
+ if (osi_linux.enable)
+ return AE_OK;
+ }
+ return AE_SUPPORT;
+}
+
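+/*
+ * Illustrative note (not part of this change): the strings checked above come
+ * from firmware ASL such as
+ *
+ *	If (_OSI ("Linux")) { ... }
+ *
+ * Returning AE_OK reports the string as a supported interface, so such a
+ * query evaluates to TRUE.
+ */
+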
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_validate_address
+ *
+ * PARAMETERS: space_id - ACPI space ID
+ * address - Physical address
+ * length - Address length
+ *
+ * RETURN: AE_OK if address/length is valid for the space_id. Otherwise,
+ * should return AE_AML_ILLEGAL_ADDRESS.
+ *
+ * DESCRIPTION: Validate a system address via the host OS. Used to validate
+ * the addresses accessed by AML operation regions.
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_validate_address (
+ u8 space_id,
+ acpi_physical_address address,
+ acpi_size length,
+ char *name)
+{
+ struct acpi_res_list *res;
+ if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
+ return AE_OK;
+
+ switch (space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+		/* Only interference checks against SystemIO and SystemMemory
+		   are needed */
+ res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
+ if (!res)
+ return AE_OK;
+		/* ACPI names are fixed at 4 bytes; still, use strlcpy to be safe */
+ strlcpy(res->name, name, 5);
+ res->start = address;
+ res->end = address + length - 1;
+ res->resource_type = space_id;
+ spin_lock(&acpi_res_lock);
+ list_add(&res->resource_list, &resource_list_head);
+ spin_unlock(&acpi_res_lock);
+ pr_debug("Added %s resource: start: 0x%llx, end: 0x%llx, "
+ "name: %s\n", (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
+ ? "SystemIO" : "System Memory",
+ (unsigned long long)res->start,
+ (unsigned long long)res->end,
+ res->name);
+ break;
+ case ACPI_ADR_SPACE_PCI_CONFIG:
+ case ACPI_ADR_SPACE_EC:
+ case ACPI_ADR_SPACE_SMBUS:
+ case ACPI_ADR_SPACE_CMOS:
+ case ACPI_ADR_SPACE_PCI_BAR_TARGET:
+ case ACPI_ADR_SPACE_DATA_TABLE:
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+ break;
+ }
+ return AE_OK;
+}
+
+#endif
diff --git a/drivers/acpi/parser/Makefile b/drivers/acpi/parser/Makefile
new file mode 100644
index 0000000..db24ee0
--- /dev/null
+++ b/drivers/acpi/parser/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for all Linux ACPI interpreter subdirectories
+#
+
+obj-y := psargs.o psparse.o psloop.o pstree.o pswalk.o \
+ psopcode.o psscope.o psutils.o psxface.o
+
+EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/parser/psargs.c b/drivers/acpi/parser/psargs.c
new file mode 100644
index 0000000..d830b29
--- /dev/null
+++ b/drivers/acpi/parser/psargs.c
@@ -0,0 +1,751 @@
+/******************************************************************************
+ *
+ * Module Name: psargs - Parse AML opcode arguments
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acparser.h>
+#include <acpi/amlcode.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acdispat.h>
+
+#define _COMPONENT ACPI_PARSER
+ACPI_MODULE_NAME("psargs")
+
+/* Local prototypes */
+static u32
+acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state);
+
+static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
+ *parser_state);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_next_package_length
+ *
+ * PARAMETERS: parser_state - Current parser state object
+ *
+ * RETURN: Decoded package length. On completion, the AML pointer points
+ * past the length byte or bytes.
+ *
+ * DESCRIPTION: Decode and return a package length field.
+ * Note: Largest package length is 28 bits, from ACPI specification
+ *
+ ******************************************************************************/
+
+static u32
+acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state)
+{
+ u8 *aml = parser_state->aml;
+ u32 package_length = 0;
+ u32 byte_count;
+ u8 byte_zero_mask = 0x3F; /* Default [0:5] */
+
+ ACPI_FUNCTION_TRACE(ps_get_next_package_length);
+
+ /*
+ * Byte 0 bits [6:7] contain the number of additional bytes
+ * used to encode the package length, either 0,1,2, or 3
+ */
+ byte_count = (aml[0] >> 6);
+ parser_state->aml += ((acpi_size) byte_count + 1);
+
+ /* Get bytes 3, 2, 1 as needed */
+
+ while (byte_count) {
+ /*
+ * Final bit positions for the package length bytes:
+ * Byte3->[20:27]
+ * Byte2->[12:19]
+ * Byte1->[04:11]
+ * Byte0->[00:03]
+ */
+ package_length |= (aml[byte_count] << ((byte_count << 3) - 4));
+
+ byte_zero_mask = 0x0F; /* Use bits [0:3] of byte 0 */
+ byte_count--;
+ }
+
+ /* Byte 0 is a special case, either bits [0:3] or [0:5] are used */
+
+ package_length |= (aml[0] & byte_zero_mask);
+ return_UINT32(package_length);
+}
+
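+/*
+ * Worked example (illustrative, not from this change): for the two-byte
+ * encoding 0x48 0x23, byte_count = 0x48 >> 6 = 1, so
+ * package_length = (0x23 << 4) | (0x48 & 0x0F) = 0x238, and the AML
+ * pointer advances past both length bytes.
+ */
+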
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_next_package_end
+ *
+ * PARAMETERS: parser_state - Current parser state object
+ *
+ * RETURN: Pointer to end-of-package +1
+ *
+ * DESCRIPTION: Get next package length and return a pointer past the end of
+ * the package. Consumes the package length field
+ *
+ ******************************************************************************/
+
+u8 *acpi_ps_get_next_package_end(struct acpi_parse_state *parser_state)
+{
+ u8 *start = parser_state->aml;
+ u32 package_length;
+
+ ACPI_FUNCTION_TRACE(ps_get_next_package_end);
+
+	/* Function below updates parser_state->aml */
+
+ package_length = acpi_ps_get_next_package_length(parser_state);
+
+ return_PTR(start + package_length); /* end of package */
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_next_namestring
+ *
+ * PARAMETERS: parser_state - Current parser state object
+ *
+ * RETURN: Pointer to the start of the name string (pointer points into
+ *              the AML).
+ *
+ * DESCRIPTION: Get next raw namestring within the AML stream. Handles all name
+ * prefix characters. Set parser state to point past the string.
+ * (Name is consumed from the AML.)
+ *
+ ******************************************************************************/
+
+char *acpi_ps_get_next_namestring(struct acpi_parse_state *parser_state)
+{
+ u8 *start = parser_state->aml;
+ u8 *end = parser_state->aml;
+
+ ACPI_FUNCTION_TRACE(ps_get_next_namestring);
+
+ /* Point past any namestring prefix characters (backslash or carat) */
+
+ while (acpi_ps_is_prefix_char(*end)) {
+ end++;
+ }
+
+ /* Decode the path prefix character */
+
+ switch (*end) {
+ case 0:
+
+ /* null_name */
+
+ if (end == start) {
+ start = NULL;
+ }
+ end++;
+ break;
+
+ case AML_DUAL_NAME_PREFIX:
+
+ /* Two name segments */
+
+ end += 1 + (2 * ACPI_NAME_SIZE);
+ break;
+
+ case AML_MULTI_NAME_PREFIX_OP:
+
+ /* Multiple name segments, 4 chars each, count in next byte */
+
+ end += 2 + (*(end + 1) * ACPI_NAME_SIZE);
+ break;
+
+ default:
+
+ /* Single name segment */
+
+ end += ACPI_NAME_SIZE;
+ break;
+ }
+
+ parser_state->aml = end;
+ return_PTR((char *)start);
+}
+
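+/*
+ * Encoding examples (illustrative, not from this change; names are placeholders):
+ *
+ *	'_SB_'				single 4-char name segment
+ *	0x5C 0x2E '_SB_' 'PCI0'		root prefix '\' + DualNamePrefix
+ *	0x2F 0x03 'AAAA' 'BBBB' 'CCCC'	MultiNamePrefix with a 3-segment count
+ *
+ * The returned pointer is 'start' (or NULL for a null name), while
+ * parser_state->aml ends up just past the last segment.
+ */
+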
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_next_namepath
+ *
+ * PARAMETERS: parser_state - Current parser state object
+ * Arg - Where the namepath will be stored
+ * arg_count - If the namepath points to a control method
+ *                                    the method's argument count is returned here.
+ * possible_method_call - Whether the namepath can possibly be the
+ * start of a method call
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Get next name (if method call, return # of required args).
+ * Names are looked up in the internal namespace to determine
+ * if the name represents a control method. If a method
+ * is found, the number of arguments to the method is returned.
+ * This information is critical for parsing to continue correctly.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
+ struct acpi_parse_state *parser_state,
+ union acpi_parse_object *arg, u8 possible_method_call)
+{
+ acpi_status status;
+ char *path;
+ union acpi_parse_object *name_op;
+ union acpi_operand_object *method_desc;
+ struct acpi_namespace_node *node;
+ u8 *start = parser_state->aml;
+
+ ACPI_FUNCTION_TRACE(ps_get_next_namepath);
+
+ path = acpi_ps_get_next_namestring(parser_state);
+ acpi_ps_init_op(arg, AML_INT_NAMEPATH_OP);
+
+ /* Null path case is allowed, just exit */
+
+ if (!path) {
+ arg->common.value.name = path;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Lookup the name in the internal namespace, starting with the current
+ * scope. We don't want to add anything new to the namespace here,
+ * however, so we use MODE_EXECUTE.
+ * Allow searching of the parent tree, but don't open a new scope -
+ * we just want to lookup the object (must be mode EXECUTE to perform
+ * the upsearch)
+ */
+ status = acpi_ns_lookup(walk_state->scope_info, path,
+ ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
+ ACPI_NS_SEARCH_PARENT | ACPI_NS_DONT_OPEN_SCOPE,
+ NULL, &node);
+
+ /*
+ * If this name is a control method invocation, we must
+ * setup the method call
+ */
+ if (ACPI_SUCCESS(status) &&
+ possible_method_call && (node->type == ACPI_TYPE_METHOD)) {
+ if (walk_state->opcode == AML_UNLOAD_OP) {
+ /*
+ * acpi_ps_get_next_namestring has increased the AML pointer,
+ * so we need to restore the saved AML pointer for method call.
+ */
+ walk_state->parser_state.aml = start;
+ walk_state->arg_count = 1;
+ acpi_ps_init_op(arg, AML_INT_METHODCALL_OP);
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* This name is actually a control method invocation */
+
+ method_desc = acpi_ns_get_attached_object(node);
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Control Method - %p Desc %p Path=%p\n", node,
+ method_desc, path));
+
+ name_op = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
+ if (!name_op) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Change Arg into a METHOD CALL and attach name to it */
+
+ acpi_ps_init_op(arg, AML_INT_METHODCALL_OP);
+ name_op->common.value.name = path;
+
+ /* Point METHODCALL/NAME to the METHOD Node */
+
+ name_op->common.node = node;
+ acpi_ps_append_arg(arg, name_op);
+
+ if (!method_desc) {
+ ACPI_ERROR((AE_INFO,
+ "Control Method %p has no attached object",
+ node));
+ return_ACPI_STATUS(AE_AML_INTERNAL);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Control Method - %p Args %X\n",
+ node, method_desc->method.param_count));
+
+ /* Get the number of arguments to expect */
+
+ walk_state->arg_count = method_desc->method.param_count;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Special handling if the name was not found during the lookup -
+ * some not_found cases are allowed
+ */
+ if (status == AE_NOT_FOUND) {
+
+ /* 1) not_found is ok during load pass 1/2 (allow forward references) */
+
+ if ((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) !=
+ ACPI_PARSE_EXECUTE) {
+ status = AE_OK;
+ }
+
+ /* 2) not_found during a cond_ref_of(x) is ok by definition */
+
+ else if (walk_state->op->common.aml_opcode ==
+ AML_COND_REF_OF_OP) {
+ status = AE_OK;
+ }
+
+ /*
+ * 3) not_found while building a Package is ok at this point, we
+ * may flag as an error later if slack mode is not enabled.
+ * (Some ASL code depends on allowing this behavior)
+ */
+ else if ((arg->common.parent) &&
+ ((arg->common.parent->common.aml_opcode ==
+ AML_PACKAGE_OP)
+ || (arg->common.parent->common.aml_opcode ==
+ AML_VAR_PACKAGE_OP))) {
+ status = AE_OK;
+ }
+ }
+
+ /* Final exception check (may have been changed from code above) */
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR_NAMESPACE(path, status);
+
+ if ((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) ==
+ ACPI_PARSE_EXECUTE) {
+
+ /* Report a control method execution error */
+
+ status = acpi_ds_method_error(status, walk_state);
+ }
+ }
+
+ /* Save the namepath */
+
+ arg->common.value.name = path;
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_next_simple_arg
+ *
+ * PARAMETERS: parser_state - Current parser state object
+ * arg_type - The argument type (AML_*_ARG)
+ * Arg - Where the argument is returned
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Get the next simple argument (constant, string, or namestring)
+ *
+ ******************************************************************************/
+
+void
+acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
+ u32 arg_type, union acpi_parse_object *arg)
+{
+ u32 length;
+ u16 opcode;
+ u8 *aml = parser_state->aml;
+
+ ACPI_FUNCTION_TRACE_U32(ps_get_next_simple_arg, arg_type);
+
+ switch (arg_type) {
+ case ARGP_BYTEDATA:
+
+ /* Get 1 byte from the AML stream */
+
+ opcode = AML_BYTE_OP;
+ arg->common.value.integer = (acpi_integer) * aml;
+ length = 1;
+ break;
+
+ case ARGP_WORDDATA:
+
+ /* Get 2 bytes from the AML stream */
+
+ opcode = AML_WORD_OP;
+ ACPI_MOVE_16_TO_64(&arg->common.value.integer, aml);
+ length = 2;
+ break;
+
+ case ARGP_DWORDDATA:
+
+ /* Get 4 bytes from the AML stream */
+
+ opcode = AML_DWORD_OP;
+ ACPI_MOVE_32_TO_64(&arg->common.value.integer, aml);
+ length = 4;
+ break;
+
+ case ARGP_QWORDDATA:
+
+ /* Get 8 bytes from the AML stream */
+
+ opcode = AML_QWORD_OP;
+ ACPI_MOVE_64_TO_64(&arg->common.value.integer, aml);
+ length = 8;
+ break;
+
+ case ARGP_CHARLIST:
+
+ /* Get a pointer to the string, point past the string */
+
+ opcode = AML_STRING_OP;
+ arg->common.value.string = ACPI_CAST_PTR(char, aml);
+
+ /* Find the null terminator */
+
+ length = 0;
+ while (aml[length]) {
+ length++;
+ }
+ length++;
+ break;
+
+ case ARGP_NAME:
+ case ARGP_NAMESTRING:
+
+ acpi_ps_init_op(arg, AML_INT_NAMEPATH_OP);
+ arg->common.value.name =
+ acpi_ps_get_next_namestring(parser_state);
+ return_VOID;
+
+ default:
+
+ ACPI_ERROR((AE_INFO, "Invalid ArgType %X", arg_type));
+ return_VOID;
+ }
+
+ acpi_ps_init_op(arg, opcode);
+ parser_state->aml += length;
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_next_field
+ *
+ * PARAMETERS: parser_state - Current parser state object
+ *
+ * RETURN: A newly allocated FIELD op
+ *
+ * DESCRIPTION: Get next field (named_field, reserved_field, or access_field)
+ *
+ ******************************************************************************/
+
+static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
+ *parser_state)
+{
+ u32 aml_offset = (u32)
+ ACPI_PTR_DIFF(parser_state->aml,
+ parser_state->aml_start);
+ union acpi_parse_object *field;
+ u16 opcode;
+ u32 name;
+
+ ACPI_FUNCTION_TRACE(ps_get_next_field);
+
+ /* Determine field type */
+
+ switch (ACPI_GET8(parser_state->aml)) {
+ default:
+
+ opcode = AML_INT_NAMEDFIELD_OP;
+ break;
+
+ case 0x00:
+
+ opcode = AML_INT_RESERVEDFIELD_OP;
+ parser_state->aml++;
+ break;
+
+ case 0x01:
+
+ opcode = AML_INT_ACCESSFIELD_OP;
+ parser_state->aml++;
+ break;
+ }
+
+ /* Allocate a new field op */
+
+ field = acpi_ps_alloc_op(opcode);
+ if (!field) {
+ return_PTR(NULL);
+ }
+
+ field->common.aml_offset = aml_offset;
+
+ /* Decode the field type */
+
+ switch (opcode) {
+ case AML_INT_NAMEDFIELD_OP:
+
+ /* Get the 4-character name */
+
+ ACPI_MOVE_32_TO_32(&name, parser_state->aml);
+ acpi_ps_set_name(field, name);
+ parser_state->aml += ACPI_NAME_SIZE;
+
+ /* Get the length which is encoded as a package length */
+
+ field->common.value.size =
+ acpi_ps_get_next_package_length(parser_state);
+ break;
+
+ case AML_INT_RESERVEDFIELD_OP:
+
+ /* Get the length which is encoded as a package length */
+
+ field->common.value.size =
+ acpi_ps_get_next_package_length(parser_state);
+ break;
+
+ case AML_INT_ACCESSFIELD_OP:
+
+ /*
+ * Get access_type and access_attrib and merge into the field Op
+ * access_type is first operand, access_attribute is second
+ */
+ field->common.value.integer =
+ (((u32) ACPI_GET8(parser_state->aml) << 8));
+ parser_state->aml++;
+ field->common.value.integer |= ACPI_GET8(parser_state->aml);
+ parser_state->aml++;
+ break;
+
+ default:
+
+ /* Opcode was set in previous switch */
+ break;
+ }
+
+ return_PTR(field);
+}
+
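+/*
+ * Illustrative note (not from this change): in ASL terms, a named field such
+ * as "DAT0, 16" becomes a named_field op (4-char name + bit length as a
+ * package length), "Offset (...)" becomes a reserved_field op (0x00 + bits to
+ * skip), and "AccessAs (...)" becomes an access_field op (0x01 + type/attrib).
+ */
+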
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_next_arg
+ *
+ * PARAMETERS: walk_state - Current state
+ * parser_state - Current parser state object
+ * arg_type - The argument type (AML_*_ARG)
+ * return_arg - Where the next arg is returned
+ *
+ * RETURN: Status, and an op object containing the next argument.
+ *
+ * DESCRIPTION: Get next argument (including complex list arguments that require
+ * pushing the parser stack)
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
+ struct acpi_parse_state *parser_state,
+ u32 arg_type, union acpi_parse_object **return_arg)
+{
+ union acpi_parse_object *arg = NULL;
+ union acpi_parse_object *prev = NULL;
+ union acpi_parse_object *field;
+ u32 subop;
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE_PTR(ps_get_next_arg, parser_state);
+
+ switch (arg_type) {
+ case ARGP_BYTEDATA:
+ case ARGP_WORDDATA:
+ case ARGP_DWORDDATA:
+ case ARGP_CHARLIST:
+ case ARGP_NAME:
+ case ARGP_NAMESTRING:
+
+ /* Constants, strings, and namestrings are all the same size */
+
+ arg = acpi_ps_alloc_op(AML_BYTE_OP);
+ if (!arg) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+ acpi_ps_get_next_simple_arg(parser_state, arg_type, arg);
+ break;
+
+ case ARGP_PKGLENGTH:
+
+ /* Package length, nothing returned */
+
+ parser_state->pkg_end =
+ acpi_ps_get_next_package_end(parser_state);
+ break;
+
+ case ARGP_FIELDLIST:
+
+ if (parser_state->aml < parser_state->pkg_end) {
+
+ /* Non-empty list */
+
+ while (parser_state->aml < parser_state->pkg_end) {
+ field = acpi_ps_get_next_field(parser_state);
+ if (!field) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ if (prev) {
+ prev->common.next = field;
+ } else {
+ arg = field;
+ }
+ prev = field;
+ }
+
+ /* Skip to End of byte data */
+
+ parser_state->aml = parser_state->pkg_end;
+ }
+ break;
+
+ case ARGP_BYTELIST:
+
+ if (parser_state->aml < parser_state->pkg_end) {
+
+ /* Non-empty list */
+
+ arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP);
+ if (!arg) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Fill in bytelist data */
+
+ arg->common.value.size = (u32)
+ ACPI_PTR_DIFF(parser_state->pkg_end,
+ parser_state->aml);
+ arg->named.data = parser_state->aml;
+
+ /* Skip to End of byte data */
+
+ parser_state->aml = parser_state->pkg_end;
+ }
+ break;
+
+ case ARGP_TARGET:
+ case ARGP_SUPERNAME:
+ case ARGP_SIMPLENAME:
+
+ subop = acpi_ps_peek_opcode(parser_state);
+ if (subop == 0 ||
+ acpi_ps_is_leading_char(subop) ||
+ acpi_ps_is_prefix_char(subop)) {
+
+ /* null_name or name_string */
+
+ arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
+ if (!arg) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* To support super_name arg of Unload */
+
+ if (walk_state->opcode == AML_UNLOAD_OP) {
+ status =
+ acpi_ps_get_next_namepath(walk_state,
+ parser_state, arg,
+ 1);
+
+ /*
+ * If the super_name arg of Unload is a method call,
+ * we have restored the AML pointer, just free this Arg
+ */
+ if (arg->common.aml_opcode ==
+ AML_INT_METHODCALL_OP) {
+ acpi_ps_free_op(arg);
+ arg = NULL;
+ }
+ } else {
+ status =
+ acpi_ps_get_next_namepath(walk_state,
+ parser_state, arg,
+ 0);
+ }
+ } else {
+ /* Single complex argument, nothing returned */
+
+ walk_state->arg_count = 1;
+ }
+ break;
+
+ case ARGP_DATAOBJ:
+ case ARGP_TERMARG:
+
+ /* Single complex argument, nothing returned */
+
+ walk_state->arg_count = 1;
+ break;
+
+ case ARGP_DATAOBJLIST:
+ case ARGP_TERMLIST:
+ case ARGP_OBJLIST:
+
+ if (parser_state->aml < parser_state->pkg_end) {
+
+ /* Non-empty list of variable arguments, nothing returned */
+
+ walk_state->arg_count = ACPI_VAR_ARGS;
+ }
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO, "Invalid ArgType: %X", arg_type));
+ status = AE_AML_OPERAND_TYPE;
+ break;
+ }
+
+ *return_arg = arg;
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/parser/psloop.c b/drivers/acpi/parser/psloop.c
new file mode 100644
index 0000000..4647039
--- /dev/null
+++ b/drivers/acpi/parser/psloop.c
@@ -0,0 +1,1087 @@
+/******************************************************************************
+ *
+ * Module Name: psloop - Main AML parse loop
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+/*
+ * Parse the AML and build an operation tree as most interpreters (such as
+ * Perl) do. Parsing is done by hand rather than with a YACC generated parser
+ * to tightly constrain stack and dynamic memory usage. Parsing is kept
+ * flexible and the code fairly compact by parsing based on a list of AML
+ * opcode templates in aml_op_info[].
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acparser.h>
+#include <acpi/acdispat.h>
+#include <acpi/amlcode.h>
+
+#define _COMPONENT ACPI_PARSER
+ACPI_MODULE_NAME("psloop")
+
+static u32 acpi_gbl_depth = 0;
+
+/* Local prototypes */
+
+static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state);
+
+static acpi_status
+acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
+ u8 * aml_op_start,
+ union acpi_parse_object *unnamed_op,
+ union acpi_parse_object **op);
+
+static acpi_status
+acpi_ps_create_op(struct acpi_walk_state *walk_state,
+ u8 * aml_op_start, union acpi_parse_object **new_op);
+
+static acpi_status
+acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
+ u8 * aml_op_start, union acpi_parse_object *op);
+
+static acpi_status
+acpi_ps_complete_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object **op, acpi_status status);
+
+static acpi_status
+acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op, acpi_status status);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_aml_opcode
+ *
+ * PARAMETERS: walk_state - Current state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Extract the next AML opcode from the input stream.
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
+{
+
+ ACPI_FUNCTION_TRACE_PTR(ps_get_aml_opcode, walk_state);
+
+ walk_state->aml_offset =
+ (u32) ACPI_PTR_DIFF(walk_state->parser_state.aml,
+ walk_state->parser_state.aml_start);
+ walk_state->opcode = acpi_ps_peek_opcode(&(walk_state->parser_state));
+
+ /*
+ * First cut to determine what we have found:
+ * 1) A valid AML opcode
+ * 2) A name string
+ * 3) An unknown/invalid opcode
+ */
+ walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
+
+ switch (walk_state->op_info->class) {
+ case AML_CLASS_ASCII:
+ case AML_CLASS_PREFIX:
+ /*
+		 * Starts with a valid prefix or ASCII char, so this is a name
+ * string. Convert the bare name string to a namepath.
+ */
+ walk_state->opcode = AML_INT_NAMEPATH_OP;
+ walk_state->arg_types = ARGP_NAMESTRING;
+ break;
+
+ case AML_CLASS_UNKNOWN:
+
+ /* The opcode is unrecognized. Just skip unknown opcodes */
+
+ ACPI_ERROR((AE_INFO,
+ "Found unknown opcode %X at AML address %p offset %X, ignoring",
+ walk_state->opcode, walk_state->parser_state.aml,
+ walk_state->aml_offset));
+
+ ACPI_DUMP_BUFFER(walk_state->parser_state.aml, 128);
+
+ /* Assume one-byte bad opcode */
+
+ walk_state->parser_state.aml++;
+ return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
+
+ default:
+
+ /* Found opcode info, this is a normal opcode */
+
+ walk_state->parser_state.aml +=
+ acpi_ps_get_opcode_size(walk_state->opcode);
+ walk_state->arg_types = walk_state->op_info->parse_args;
+ break;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_build_named_op
+ *
+ * PARAMETERS: walk_state - Current state
+ *              aml_op_start        - Beginning of the named Op in the AML
+ * unnamed_op - Early Op (not a named Op)
+ * Op - Returned Op
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Parse a named Op
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
+ u8 * aml_op_start,
+ union acpi_parse_object *unnamed_op,
+ union acpi_parse_object **op)
+{
+ acpi_status status = AE_OK;
+ union acpi_parse_object *arg = NULL;
+
+ ACPI_FUNCTION_TRACE_PTR(ps_build_named_op, walk_state);
+
+ unnamed_op->common.value.arg = NULL;
+ unnamed_op->common.arg_list_length = 0;
+ unnamed_op->common.aml_opcode = walk_state->opcode;
+
+ /*
+ * Get and append arguments until we find the node that contains
+ * the name (the type ARGP_NAME).
+ */
+ while (GET_CURRENT_ARG_TYPE(walk_state->arg_types) &&
+ (GET_CURRENT_ARG_TYPE(walk_state->arg_types) != ARGP_NAME)) {
+ status =
+ acpi_ps_get_next_arg(walk_state,
+ &(walk_state->parser_state),
+ GET_CURRENT_ARG_TYPE(walk_state->
+ arg_types), &arg);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ acpi_ps_append_arg(unnamed_op, arg);
+ INCREMENT_ARG_LIST(walk_state->arg_types);
+ }
+
+ /*
+ * Make sure that we found a NAME and didn't run out of arguments
+ */
+ if (!GET_CURRENT_ARG_TYPE(walk_state->arg_types)) {
+ return_ACPI_STATUS(AE_AML_NO_OPERAND);
+ }
+
+ /* We know that this arg is a name, move to next arg */
+
+ INCREMENT_ARG_LIST(walk_state->arg_types);
+
+ /*
+ * Find the object. This will either insert the object into
+ * the namespace or simply look it up
+ */
+ walk_state->op = NULL;
+
+ status = walk_state->descending_callback(walk_state, op);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "During name lookup/catalog"));
+ return_ACPI_STATUS(status);
+ }
+
+ if (!*op) {
+ return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
+ }
+
+ status = acpi_ps_next_parse_state(walk_state, *op, status);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_CTRL_PENDING) {
+ return_ACPI_STATUS(AE_CTRL_PARSE_PENDING);
+ }
+ return_ACPI_STATUS(status);
+ }
+
+ acpi_ps_append_arg(*op, unnamed_op->common.value.arg);
+ acpi_gbl_depth++;
+
+ if ((*op)->common.aml_opcode == AML_REGION_OP ||
+ (*op)->common.aml_opcode == AML_DATA_REGION_OP) {
+ /*
+ * Defer final parsing of an operation_region body, because we don't
+ * have enough info in the first pass to parse it correctly (i.e.,
+ * there may be method calls within the term_arg elements of the body.)
+ *
+ * However, we must continue parsing because the opregion is not a
+ * standalone package -- we don't know where the end is at this point.
+ *
+ * (Length is unknown until parse of the body complete)
+ */
+ (*op)->named.data = aml_op_start;
+ (*op)->named.length = 0;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_create_op
+ *
+ * PARAMETERS: walk_state - Current state
+ * aml_op_start - Op start in AML
+ * new_op - Returned Op
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Get Op from AML
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ps_create_op(struct acpi_walk_state *walk_state,
+ u8 * aml_op_start, union acpi_parse_object **new_op)
+{
+ acpi_status status = AE_OK;
+ union acpi_parse_object *op;
+ union acpi_parse_object *named_op = NULL;
+ union acpi_parse_object *parent_scope;
+ u8 argument_count;
+ const struct acpi_opcode_info *op_info;
+
+ ACPI_FUNCTION_TRACE_PTR(ps_create_op, walk_state);
+
+ status = acpi_ps_get_aml_opcode(walk_state);
+ if (status == AE_CTRL_PARSE_CONTINUE) {
+ return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
+ }
+
+ /* Create Op structure and append to parent's argument list */
+
+ walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
+ op = acpi_ps_alloc_op(walk_state->opcode);
+ if (!op) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ if (walk_state->op_info->flags & AML_NAMED) {
+ status =
+ acpi_ps_build_named_op(walk_state, aml_op_start, op,
+ &named_op);
+ acpi_ps_free_op(op);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ *new_op = named_op;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Not a named opcode, just allocate Op and append to parent */
+
+ if (walk_state->op_info->flags & AML_CREATE) {
+ /*
+ * Backup to beginning of create_xXXfield declaration
+ * body_length is unknown until we parse the body
+ */
+ op->named.data = aml_op_start;
+ op->named.length = 0;
+ }
+
+ if (walk_state->opcode == AML_BANK_FIELD_OP) {
+ /*
+ * Backup to beginning of bank_field declaration
+ * body_length is unknown until we parse the body
+ */
+ op->named.data = aml_op_start;
+ op->named.length = 0;
+ }
+
+ parent_scope = acpi_ps_get_parent_scope(&(walk_state->parser_state));
+ acpi_ps_append_arg(parent_scope, op);
+
+ if (parent_scope) {
+ op_info =
+ acpi_ps_get_opcode_info(parent_scope->common.aml_opcode);
+ if (op_info->flags & AML_HAS_TARGET) {
+ argument_count =
+ acpi_ps_get_argument_count(op_info->type);
+ if (parent_scope->common.arg_list_length >
+ argument_count) {
+ op->common.flags |= ACPI_PARSEOP_TARGET;
+ }
+ } else if (parent_scope->common.aml_opcode == AML_INCREMENT_OP) {
+ op->common.flags |= ACPI_PARSEOP_TARGET;
+ }
+ }
+
+ if (walk_state->descending_callback != NULL) {
+ /*
+ * Find the object. This will either insert the object into
+ * the namespace or simply look it up
+ */
+ walk_state->op = *new_op = op;
+
+ status = walk_state->descending_callback(walk_state, &op);
+ status = acpi_ps_next_parse_state(walk_state, op, status);
+ if (status == AE_CTRL_PENDING) {
+ status = AE_CTRL_PARSE_PENDING;
+ }
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_arguments
+ *
+ * PARAMETERS: walk_state - Current state
+ * aml_op_start - Op start in AML
+ * Op - Current Op
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Get arguments for passed Op.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
+ u8 * aml_op_start, union acpi_parse_object *op)
+{
+ acpi_status status = AE_OK;
+ union acpi_parse_object *arg = NULL;
+
+ ACPI_FUNCTION_TRACE_PTR(ps_get_arguments, walk_state);
+
+ switch (op->common.aml_opcode) {
+ case AML_BYTE_OP: /* AML_BYTEDATA_ARG */
+ case AML_WORD_OP: /* AML_WORDDATA_ARG */
+ case AML_DWORD_OP: /* AML_DWORDATA_ARG */
+ case AML_QWORD_OP: /* AML_QWORDATA_ARG */
+ case AML_STRING_OP: /* AML_ASCIICHARLIST_ARG */
+
+ /* Fill in constant or string argument directly */
+
+ acpi_ps_get_next_simple_arg(&(walk_state->parser_state),
+ GET_CURRENT_ARG_TYPE(walk_state->
+ arg_types),
+ op);
+ break;
+
+ case AML_INT_NAMEPATH_OP: /* AML_NAMESTRING_ARG */
+
+ status =
+ acpi_ps_get_next_namepath(walk_state,
+ &(walk_state->parser_state), op,
+ 1);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ walk_state->arg_types = 0;
+ break;
+
+ default:
+ /*
+ * Op is not a constant or string, append each argument to the Op
+ */
+ while (GET_CURRENT_ARG_TYPE(walk_state->arg_types)
+ && !walk_state->arg_count) {
+ walk_state->aml_offset =
+ (u32) ACPI_PTR_DIFF(walk_state->parser_state.aml,
+ walk_state->parser_state.
+ aml_start);
+
+ status =
+ acpi_ps_get_next_arg(walk_state,
+ &(walk_state->parser_state),
+ GET_CURRENT_ARG_TYPE
+ (walk_state->arg_types), &arg);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ if (arg) {
+ arg->common.aml_offset = walk_state->aml_offset;
+ acpi_ps_append_arg(op, arg);
+ }
+
+ INCREMENT_ARG_LIST(walk_state->arg_types);
+ }
+
+ /* Special processing for certain opcodes */
+
+ /* TBD (remove): Temporary mechanism to disable this code if needed */
+
+#ifdef ACPI_ENABLE_MODULE_LEVEL_CODE
+
+ if ((walk_state->pass_number <= ACPI_IMODE_LOAD_PASS1) &&
+ ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) == 0)) {
+ /*
+ * We want to skip If/Else/While constructs during Pass1 because we
+ * want to actually conditionally execute the code during Pass2.
+ *
+ * Except for disassembly, where we always want to walk the
+ * If/Else/While packages
+ */
+ switch (op->common.aml_opcode) {
+ case AML_IF_OP:
+ case AML_ELSE_OP:
+ case AML_WHILE_OP:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Pass1: Skipping an If/Else/While body\n"));
+
+ /* Skip body of if/else/while in pass 1 */
+
+ walk_state->parser_state.aml =
+ walk_state->parser_state.pkg_end;
+ walk_state->arg_count = 0;
+ break;
+
+ default:
+ break;
+ }
+ }
+#endif
+
+ switch (op->common.aml_opcode) {
+ case AML_METHOD_OP:
+ /*
+ * Skip parsing of control method because we don't have enough
+ * info in the first pass to parse it correctly.
+ *
+ * Save the length and address of the body
+ */
+ op->named.data = walk_state->parser_state.aml;
+ op->named.length = (u32)
+ (walk_state->parser_state.pkg_end -
+ walk_state->parser_state.aml);
+
+ /* Skip body of method */
+
+ walk_state->parser_state.aml =
+ walk_state->parser_state.pkg_end;
+ walk_state->arg_count = 0;
+ break;
+
+ case AML_BUFFER_OP:
+ case AML_PACKAGE_OP:
+ case AML_VAR_PACKAGE_OP:
+
+ if ((op->common.parent) &&
+ (op->common.parent->common.aml_opcode ==
+ AML_NAME_OP)
+ && (walk_state->pass_number <=
+ ACPI_IMODE_LOAD_PASS2)) {
+ /*
+ * Skip parsing of Buffers and Packages because we don't have
+ * enough info in the first pass to parse them correctly.
+ */
+ op->named.data = aml_op_start;
+ op->named.length = (u32)
+ (walk_state->parser_state.pkg_end -
+ aml_op_start);
+
+ /* Skip body */
+
+ walk_state->parser_state.aml =
+ walk_state->parser_state.pkg_end;
+ walk_state->arg_count = 0;
+ }
+ break;
+
+ case AML_WHILE_OP:
+
+ if (walk_state->control_state) {
+ walk_state->control_state->control.package_end =
+ walk_state->parser_state.pkg_end;
+ }
+ break;
+
+ default:
+
+ /* No action for all other opcodes */
+ break;
+ }
+
+ break;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_complete_op
+ *
+ * PARAMETERS: walk_state - Current state
+ * Op - Returned Op
+ * Status - Parse status before complete Op
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Complete Op
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ps_complete_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object **op, acpi_status status)
+{
+ acpi_status status2;
+
+ ACPI_FUNCTION_TRACE_PTR(ps_complete_op, walk_state);
+
+ /*
+ * Finished one argument of the containing scope
+ */
+ walk_state->parser_state.scope->parse_scope.arg_count--;
+
+ /* Close this Op (will result in parse subtree deletion) */
+
+ status2 = acpi_ps_complete_this_op(walk_state, *op);
+ if (ACPI_FAILURE(status2)) {
+ return_ACPI_STATUS(status2);
+ }
+
+ *op = NULL;
+
+ switch (status) {
+ case AE_OK:
+ break;
+
+ case AE_CTRL_TRANSFER:
+
+ /* We are about to transfer to a called method */
+
+ walk_state->prev_op = NULL;
+ walk_state->prev_arg_types = walk_state->arg_types;
+ return_ACPI_STATUS(status);
+
+ case AE_CTRL_END:
+
+ acpi_ps_pop_scope(&(walk_state->parser_state), op,
+ &walk_state->arg_types,
+ &walk_state->arg_count);
+
+ if (*op) {
+ walk_state->op = *op;
+ walk_state->op_info =
+ acpi_ps_get_opcode_info((*op)->common.aml_opcode);
+ walk_state->opcode = (*op)->common.aml_opcode;
+
+ status = walk_state->ascending_callback(walk_state);
+ status =
+ acpi_ps_next_parse_state(walk_state, *op, status);
+
+ status2 = acpi_ps_complete_this_op(walk_state, *op);
+ if (ACPI_FAILURE(status2)) {
+ return_ACPI_STATUS(status2);
+ }
+ }
+
+ status = AE_OK;
+ break;
+
+ case AE_CTRL_BREAK:
+ case AE_CTRL_CONTINUE:
+
+ /* Pop off scopes until we find the While */
+
+ while (!(*op) || ((*op)->common.aml_opcode != AML_WHILE_OP)) {
+ acpi_ps_pop_scope(&(walk_state->parser_state), op,
+ &walk_state->arg_types,
+ &walk_state->arg_count);
+ }
+
+ /* Close this iteration of the While loop */
+
+ walk_state->op = *op;
+ walk_state->op_info =
+ acpi_ps_get_opcode_info((*op)->common.aml_opcode);
+ walk_state->opcode = (*op)->common.aml_opcode;
+
+ status = walk_state->ascending_callback(walk_state);
+ status = acpi_ps_next_parse_state(walk_state, *op, status);
+
+ status2 = acpi_ps_complete_this_op(walk_state, *op);
+ if (ACPI_FAILURE(status2)) {
+ return_ACPI_STATUS(status2);
+ }
+
+ status = AE_OK;
+ break;
+
+ case AE_CTRL_TERMINATE:
+
+ /* Clean up */
+ do {
+ if (*op) {
+ status2 =
+ acpi_ps_complete_this_op(walk_state, *op);
+ if (ACPI_FAILURE(status2)) {
+ return_ACPI_STATUS(status2);
+ }
+
+ acpi_ut_delete_generic_state
+ (acpi_ut_pop_generic_state
+ (&walk_state->control_state));
+ }
+
+ acpi_ps_pop_scope(&(walk_state->parser_state), op,
+ &walk_state->arg_types,
+ &walk_state->arg_count);
+
+ } while (*op);
+
+ return_ACPI_STATUS(AE_OK);
+
+ default: /* All other non-AE_OK status */
+
+ do {
+ if (*op) {
+ status2 =
+ acpi_ps_complete_this_op(walk_state, *op);
+ if (ACPI_FAILURE(status2)) {
+ return_ACPI_STATUS(status2);
+ }
+ }
+
+ acpi_ps_pop_scope(&(walk_state->parser_state), op,
+ &walk_state->arg_types,
+ &walk_state->arg_count);
+
+ } while (*op);
+
+#if 0
+ /*
+ * TBD: Cleanup parse ops on error
+ */
+ if (*op == NULL) {
+ acpi_ps_pop_scope(parser_state, op,
+ &walk_state->arg_types,
+ &walk_state->arg_count);
+ }
+#endif
+ walk_state->prev_op = NULL;
+ walk_state->prev_arg_types = walk_state->arg_types;
+ return_ACPI_STATUS(status);
+ }
+
+ /* This scope complete? */
+
+ if (acpi_ps_has_completed_scope(&(walk_state->parser_state))) {
+ acpi_ps_pop_scope(&(walk_state->parser_state), op,
+ &walk_state->arg_types,
+ &walk_state->arg_count);
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Popped scope, Op=%p\n", *op));
+ } else {
+ *op = NULL;
+ }
+
+ ACPI_PREEMPTION_POINT();
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_complete_final_op
+ *
+ * PARAMETERS: walk_state - Current state
+ * Op - Current Op
+ * Status - Current parse status before complete last
+ * Op
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Complete last Op.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op, acpi_status status)
+{
+ acpi_status status2;
+
+ ACPI_FUNCTION_TRACE_PTR(ps_complete_final_op, walk_state);
+
+ /*
+ * Complete the last Op (if not completed), and clear the scope stack.
+ * It is easily possible to end an AML "package" with an unbounded number
+ * of open scopes (such as when several ASL blocks are closed with
+ * sequential closing braces). We want to terminate each one cleanly.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "AML package complete at Op %p\n",
+ op));
+ do {
+ if (op) {
+ if (walk_state->ascending_callback != NULL) {
+ walk_state->op = op;
+ walk_state->op_info =
+ acpi_ps_get_opcode_info(op->common.
+ aml_opcode);
+ walk_state->opcode = op->common.aml_opcode;
+
+ status =
+ walk_state->ascending_callback(walk_state);
+ status =
+ acpi_ps_next_parse_state(walk_state, op,
+ status);
+ if (status == AE_CTRL_PENDING) {
+ status =
+ acpi_ps_complete_op(walk_state, &op,
+ AE_OK);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ if (status == AE_CTRL_TERMINATE) {
+ status = AE_OK;
+
+ /* Clean up */
+ do {
+ if (op) {
+ status2 =
+ acpi_ps_complete_this_op
+ (walk_state, op);
+ if (ACPI_FAILURE
+ (status2)) {
+ return_ACPI_STATUS
+ (status2);
+ }
+ }
+
+ acpi_ps_pop_scope(&
+ (walk_state->
+ parser_state),
+ &op,
+ &walk_state->
+ arg_types,
+ &walk_state->
+ arg_count);
+
+ } while (op);
+
+ return_ACPI_STATUS(status);
+ }
+
+ else if (ACPI_FAILURE(status)) {
+
+ /* First error is most important */
+
+ (void)
+ acpi_ps_complete_this_op(walk_state,
+ op);
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ status2 = acpi_ps_complete_this_op(walk_state, op);
+ if (ACPI_FAILURE(status2)) {
+ return_ACPI_STATUS(status2);
+ }
+ }
+
+ acpi_ps_pop_scope(&(walk_state->parser_state), &op,
+ &walk_state->arg_types,
+ &walk_state->arg_count);
+
+ } while (op);
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_parse_loop
+ *
+ * PARAMETERS: walk_state - Current state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Parse AML (pointed to by the current parser state) and return
+ * a tree of ops.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
+{
+ acpi_status status = AE_OK;
+ union acpi_parse_object *op = NULL; /* current op */
+ struct acpi_parse_state *parser_state;
+ u8 *aml_op_start = NULL;
+
+ ACPI_FUNCTION_TRACE_PTR(ps_parse_loop, walk_state);
+
+ if (walk_state->descending_callback == NULL) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ parser_state = &walk_state->parser_state;
+ walk_state->arg_types = 0;
+
+#if (!defined (ACPI_NO_METHOD_EXECUTION) && !defined (ACPI_CONSTANT_EVAL_ONLY))
+
+ if (walk_state->walk_type & ACPI_WALK_METHOD_RESTART) {
+
+ /* We are restarting a preempted control method */
+
+ if (acpi_ps_has_completed_scope(parser_state)) {
+ /*
+ * We must check if a predicate to an IF or WHILE statement
+ * was just completed
+ */
+ if ((parser_state->scope->parse_scope.op) &&
+ ((parser_state->scope->parse_scope.op->common.
+ aml_opcode == AML_IF_OP)
+ || (parser_state->scope->parse_scope.op->common.
+ aml_opcode == AML_WHILE_OP))
+ && (walk_state->control_state)
+ && (walk_state->control_state->common.state ==
+ ACPI_CONTROL_PREDICATE_EXECUTING)) {
+ /*
+ * A predicate was just completed, get the value of the
+ * predicate and branch based on that value
+ */
+ walk_state->op = NULL;
+ status =
+ acpi_ds_get_predicate_value(walk_state,
+ ACPI_TO_POINTER
+ (TRUE));
+ if (ACPI_FAILURE(status)
+ && ((status & AE_CODE_MASK) !=
+ AE_CODE_CONTROL)) {
+ if (status == AE_AML_NO_RETURN_VALUE) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Invoked method did not return a value"));
+
+ }
+
+ ACPI_EXCEPTION((AE_INFO, status,
+ "GetPredicate Failed"));
+ return_ACPI_STATUS(status);
+ }
+
+ status =
+ acpi_ps_next_parse_state(walk_state, op,
+ status);
+ }
+
+ acpi_ps_pop_scope(parser_state, &op,
+ &walk_state->arg_types,
+ &walk_state->arg_count);
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Popped scope, Op=%p\n", op));
+ } else if (walk_state->prev_op) {
+
+ /* We were in the middle of an op */
+
+ op = walk_state->prev_op;
+ walk_state->arg_types = walk_state->prev_arg_types;
+ }
+ }
+#endif
+
+ /* Iterative parsing loop, while there is more AML to process: */
+
+ while ((parser_state->aml < parser_state->aml_end) || (op)) {
+ aml_op_start = parser_state->aml;
+ if (!op) {
+ status =
+ acpi_ps_create_op(walk_state, aml_op_start, &op);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_CTRL_PARSE_CONTINUE) {
+ continue;
+ }
+
+ if (status == AE_CTRL_PARSE_PENDING) {
+ status = AE_OK;
+ }
+
+ status =
+ acpi_ps_complete_op(walk_state, &op,
+ status);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ continue;
+ }
+
+ op->common.aml_offset = walk_state->aml_offset;
+
+ if (walk_state->op_info) {
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Opcode %4.4X [%s] Op %p Aml %p AmlOffset %5.5X\n",
+ (u32) op->common.aml_opcode,
+ walk_state->op_info->name, op,
+ parser_state->aml,
+ op->common.aml_offset));
+ }
+ }
+
+ /*
+ * Start arg_count at zero because we don't know if there are
+ * any args yet
+ */
+ walk_state->arg_count = 0;
+
+ /* Are there any arguments that must be processed? */
+
+ if (walk_state->arg_types) {
+
+ /* Get arguments */
+
+ status =
+ acpi_ps_get_arguments(walk_state, aml_op_start, op);
+ if (ACPI_FAILURE(status)) {
+ status =
+ acpi_ps_complete_op(walk_state, &op,
+ status);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ continue;
+ }
+ }
+
+ /* Check for arguments that need to be processed */
+
+ if (walk_state->arg_count) {
+ /*
+			 * There are arguments (complex ones); push Op and
+			 * prepare for the arguments
+ */
+ status = acpi_ps_push_scope(parser_state, op,
+ walk_state->arg_types,
+ walk_state->arg_count);
+ if (ACPI_FAILURE(status)) {
+ status =
+ acpi_ps_complete_op(walk_state, &op,
+ status);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ continue;
+ }
+
+ op = NULL;
+ continue;
+ }
+
+ /*
+ * All arguments have been processed -- Op is complete,
+ * prepare for next
+ */
+ walk_state->op_info =
+ acpi_ps_get_opcode_info(op->common.aml_opcode);
+ if (walk_state->op_info->flags & AML_NAMED) {
+ if (acpi_gbl_depth) {
+ acpi_gbl_depth--;
+ }
+
+ if (op->common.aml_opcode == AML_REGION_OP ||
+ op->common.aml_opcode == AML_DATA_REGION_OP) {
+ /*
+ * Skip parsing of control method or opregion body,
+ * because we don't have enough info in the first pass
+ * to parse them correctly.
+ *
+				 * Having completed parsing an op_region declaration,
+				 * we now know the length.
+ */
+ op->named.length =
+ (u32) (parser_state->aml - op->named.data);
+ }
+ }
+
+ if (walk_state->op_info->flags & AML_CREATE) {
+ /*
+			 * Back up to the beginning of the create_xXXField
+			 * declaration (1 for the opcode)
+ *
+ * body_length is unknown until we parse the body
+ */
+ op->named.length =
+ (u32) (parser_state->aml - op->named.data);
+ }
+
+ if (op->common.aml_opcode == AML_BANK_FIELD_OP) {
+ /*
+			 * Back up to the beginning of the bank_field declaration
+ *
+ * body_length is unknown until we parse the body
+ */
+ op->named.length =
+ (u32) (parser_state->aml - op->named.data);
+ }
+
+		/* This op is complete; notify the dispatcher */
+
+ if (walk_state->ascending_callback != NULL) {
+ walk_state->op = op;
+ walk_state->opcode = op->common.aml_opcode;
+
+ status = walk_state->ascending_callback(walk_state);
+ status =
+ acpi_ps_next_parse_state(walk_state, op, status);
+ if (status == AE_CTRL_PENDING) {
+ status = AE_OK;
+ }
+ }
+
+ status = acpi_ps_complete_op(walk_state, &op, status);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ } /* while parser_state->Aml */
+
+ status = acpi_ps_complete_final_op(walk_state, op, status);
+ return_ACPI_STATUS(status);
+}
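
The loop above never recurses into an op's arguments: when an op turns out to have complex arguments it is pushed onto the parser's scope stack via acpi_ps_push_scope and decoding simply continues, and acpi_ps_complete_op later pops back to the parent once that scope is finished. A self-contained toy of the same push/pop pattern, using an invented two-opcode prefix encoding (nothing below is ACPICA code or real AML):

/* Toy illustration of iterative parsing with an explicit scope stack:
 * an "open" op is pushed while its arguments are parsed and popped when
 * its argument count reaches zero.  Encoding and names are invented.
 */
#include <stdio.h>

#define OP_ATOM 0x01                    /* no arguments  */
#define OP_PAIR 0x02                    /* two arguments */

struct open_op { unsigned char opcode; int args_left; };

int main(void)
{
        /* PAIR(ATOM, PAIR(ATOM, ATOM)) in the toy encoding */
        const unsigned char aml[] = { OP_PAIR, OP_ATOM, OP_PAIR, OP_ATOM, OP_ATOM };
        struct open_op scope[16];
        int depth = 0;
        size_t i;

        for (i = 0; i < sizeof(aml); i++) {
                int args = (aml[i] == OP_PAIR) ? 2 : 0;

                printf("%*s%s\n", depth * 2, "",
                       aml[i] == OP_PAIR ? "Pair" : "Atom");

                if (args) {
                        /* Complex op: push it and descend into its arguments */
                        scope[depth].opcode = aml[i];
                        scope[depth].args_left = args;
                        depth++;
                        continue;
                }

                /*
                 * Leaf op complete: consume one pending argument, then pop
                 * every enclosing op whose argument list is now finished.
                 */
                while (depth > 0 && --scope[depth - 1].args_left == 0)
                        depth--;
        }

        return 0;
}
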
diff --git a/drivers/acpi/parser/psopcode.c b/drivers/acpi/parser/psopcode.c
new file mode 100644
index 0000000..f425ab3
--- /dev/null
+++ b/drivers/acpi/parser/psopcode.c
@@ -0,0 +1,809 @@
+/******************************************************************************
+ *
+ * Module Name: psopcode - Parser/Interpreter opcode information table
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acparser.h>
+#include <acpi/acopcode.h>
+#include <acpi/amlcode.h>
+
+#define _COMPONENT ACPI_PARSER
+ACPI_MODULE_NAME("psopcode")
+
+static const u8 acpi_gbl_argument_count[] =
+ { 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 6 };
+
+/*******************************************************************************
+ *
+ * NAME: acpi_gbl_aml_op_info
+ *
+ * DESCRIPTION: Opcode table. Each entry contains <opcode, type, name, operands>.
+ *              The name is a simple ASCII string; the operand specifier is an
+ *              ASCII string with one letter per operand. The letter specifies
+ * the operand type.
+ *
+ ******************************************************************************/
+
+/*
+ * Summary of opcode types/flags
+ *
+
+ Opcodes that have associated namespace objects (AML_NSOBJECT flag)
+
+ AML_SCOPE_OP
+ AML_DEVICE_OP
+ AML_THERMAL_ZONE_OP
+ AML_METHOD_OP
+ AML_POWER_RES_OP
+ AML_PROCESSOR_OP
+ AML_FIELD_OP
+ AML_INDEX_FIELD_OP
+ AML_BANK_FIELD_OP
+ AML_NAME_OP
+ AML_ALIAS_OP
+ AML_MUTEX_OP
+ AML_EVENT_OP
+ AML_REGION_OP
+ AML_CREATE_FIELD_OP
+ AML_CREATE_BIT_FIELD_OP
+ AML_CREATE_BYTE_FIELD_OP
+ AML_CREATE_WORD_FIELD_OP
+ AML_CREATE_DWORD_FIELD_OP
+ AML_CREATE_QWORD_FIELD_OP
+ AML_INT_NAMEDFIELD_OP
+ AML_INT_METHODCALL_OP
+ AML_INT_NAMEPATH_OP
+
+ Opcodes that are "namespace" opcodes (AML_NSOPCODE flag)
+
+ AML_SCOPE_OP
+ AML_DEVICE_OP
+ AML_THERMAL_ZONE_OP
+ AML_METHOD_OP
+ AML_POWER_RES_OP
+ AML_PROCESSOR_OP
+ AML_FIELD_OP
+ AML_INDEX_FIELD_OP
+ AML_BANK_FIELD_OP
+ AML_NAME_OP
+ AML_ALIAS_OP
+ AML_MUTEX_OP
+ AML_EVENT_OP
+ AML_REGION_OP
+ AML_INT_NAMEDFIELD_OP
+
+ Opcodes that have an associated namespace node (AML_NSNODE flag)
+
+ AML_SCOPE_OP
+ AML_DEVICE_OP
+ AML_THERMAL_ZONE_OP
+ AML_METHOD_OP
+ AML_POWER_RES_OP
+ AML_PROCESSOR_OP
+ AML_NAME_OP
+ AML_ALIAS_OP
+ AML_MUTEX_OP
+ AML_EVENT_OP
+ AML_REGION_OP
+ AML_CREATE_FIELD_OP
+ AML_CREATE_BIT_FIELD_OP
+ AML_CREATE_BYTE_FIELD_OP
+ AML_CREATE_WORD_FIELD_OP
+ AML_CREATE_DWORD_FIELD_OP
+ AML_CREATE_QWORD_FIELD_OP
+ AML_INT_NAMEDFIELD_OP
+ AML_INT_METHODCALL_OP
+ AML_INT_NAMEPATH_OP
+
+ Opcodes that define named ACPI objects (AML_NAMED flag)
+
+ AML_SCOPE_OP
+ AML_DEVICE_OP
+ AML_THERMAL_ZONE_OP
+ AML_METHOD_OP
+ AML_POWER_RES_OP
+ AML_PROCESSOR_OP
+ AML_NAME_OP
+ AML_ALIAS_OP
+ AML_MUTEX_OP
+ AML_EVENT_OP
+ AML_REGION_OP
+ AML_INT_NAMEDFIELD_OP
+
+ Opcodes that contain executable AML as part of the definition that
+ must be deferred until needed
+
+ AML_METHOD_OP
+ AML_VAR_PACKAGE_OP
+ AML_CREATE_FIELD_OP
+ AML_CREATE_BIT_FIELD_OP
+ AML_CREATE_BYTE_FIELD_OP
+ AML_CREATE_WORD_FIELD_OP
+ AML_CREATE_DWORD_FIELD_OP
+ AML_CREATE_QWORD_FIELD_OP
+ AML_REGION_OP
+ AML_BUFFER_OP
+
+ Field opcodes
+
+ AML_CREATE_FIELD_OP
+ AML_FIELD_OP
+ AML_INDEX_FIELD_OP
+ AML_BANK_FIELD_OP
+
+ Field "Create" opcodes
+
+ AML_CREATE_FIELD_OP
+ AML_CREATE_BIT_FIELD_OP
+ AML_CREATE_BYTE_FIELD_OP
+ AML_CREATE_WORD_FIELD_OP
+ AML_CREATE_DWORD_FIELD_OP
+ AML_CREATE_QWORD_FIELD_OP
+
+ ******************************************************************************/
+
+/*
+ * Master Opcode information table. A summary of everything we know about each
+ * opcode, all in one place.
+ */
+const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
+/*! [Begin] no source code translation */
+/* Index Name Parser Args Interpreter Args ObjectType Class Type Flags */
+
+/* 00 */ ACPI_OP("Zero", ARGP_ZERO_OP, ARGI_ZERO_OP, ACPI_TYPE_INTEGER,
+ AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT),
+/* 01 */ ACPI_OP("One", ARGP_ONE_OP, ARGI_ONE_OP, ACPI_TYPE_INTEGER,
+ AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT),
+/* 02 */ ACPI_OP("Alias", ARGP_ALIAS_OP, ARGI_ALIAS_OP,
+ ACPI_TYPE_LOCAL_ALIAS, AML_CLASS_NAMED_OBJECT,
+ AML_TYPE_NAMED_SIMPLE,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_NSNODE | AML_NAMED),
+/* 03 */ ACPI_OP("Name", ARGP_NAME_OP, ARGI_NAME_OP, ACPI_TYPE_ANY,
+ AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_NSNODE | AML_NAMED),
+/* 04 */ ACPI_OP("ByteConst", ARGP_BYTE_OP, ARGI_BYTE_OP,
+ ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
+ AML_TYPE_LITERAL, AML_CONSTANT),
+/* 05 */ ACPI_OP("WordConst", ARGP_WORD_OP, ARGI_WORD_OP,
+ ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
+ AML_TYPE_LITERAL, AML_CONSTANT),
+/* 06 */ ACPI_OP("DwordConst", ARGP_DWORD_OP, ARGI_DWORD_OP,
+ ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
+ AML_TYPE_LITERAL, AML_CONSTANT),
+/* 07 */ ACPI_OP("String", ARGP_STRING_OP, ARGI_STRING_OP,
+ ACPI_TYPE_STRING, AML_CLASS_ARGUMENT,
+ AML_TYPE_LITERAL, AML_CONSTANT),
+/* 08 */ ACPI_OP("Scope", ARGP_SCOPE_OP, ARGI_SCOPE_OP,
+ ACPI_TYPE_LOCAL_SCOPE, AML_CLASS_NAMED_OBJECT,
+ AML_TYPE_NAMED_NO_OBJ,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_NSNODE | AML_NAMED),
+/* 09 */ ACPI_OP("Buffer", ARGP_BUFFER_OP, ARGI_BUFFER_OP,
+ ACPI_TYPE_BUFFER, AML_CLASS_CREATE,
+ AML_TYPE_CREATE_OBJECT,
+ AML_HAS_ARGS | AML_DEFER | AML_CONSTANT),
+/* 0A */ ACPI_OP("Package", ARGP_PACKAGE_OP, ARGI_PACKAGE_OP,
+ ACPI_TYPE_PACKAGE, AML_CLASS_CREATE,
+ AML_TYPE_CREATE_OBJECT,
+ AML_HAS_ARGS | AML_DEFER | AML_CONSTANT),
+/* 0B */ ACPI_OP("Method", ARGP_METHOD_OP, ARGI_METHOD_OP,
+ ACPI_TYPE_METHOD, AML_CLASS_NAMED_OBJECT,
+ AML_TYPE_NAMED_COMPLEX,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_NSNODE | AML_NAMED | AML_DEFER),
+/* 0C */ ACPI_OP("Local0", ARGP_LOCAL0, ARGI_LOCAL0,
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_LOCAL_VARIABLE, 0),
+/* 0D */ ACPI_OP("Local1", ARGP_LOCAL1, ARGI_LOCAL1,
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_LOCAL_VARIABLE, 0),
+/* 0E */ ACPI_OP("Local2", ARGP_LOCAL2, ARGI_LOCAL2,
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_LOCAL_VARIABLE, 0),
+/* 0F */ ACPI_OP("Local3", ARGP_LOCAL3, ARGI_LOCAL3,
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_LOCAL_VARIABLE, 0),
+/* 10 */ ACPI_OP("Local4", ARGP_LOCAL4, ARGI_LOCAL4,
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_LOCAL_VARIABLE, 0),
+/* 11 */ ACPI_OP("Local5", ARGP_LOCAL5, ARGI_LOCAL5,
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_LOCAL_VARIABLE, 0),
+/* 12 */ ACPI_OP("Local6", ARGP_LOCAL6, ARGI_LOCAL6,
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_LOCAL_VARIABLE, 0),
+/* 13 */ ACPI_OP("Local7", ARGP_LOCAL7, ARGI_LOCAL7,
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_LOCAL_VARIABLE, 0),
+/* 14 */ ACPI_OP("Arg0", ARGP_ARG0, ARGI_ARG0,
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_METHOD_ARGUMENT, 0),
+/* 15 */ ACPI_OP("Arg1", ARGP_ARG1, ARGI_ARG1,
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_METHOD_ARGUMENT, 0),
+/* 16 */ ACPI_OP("Arg2", ARGP_ARG2, ARGI_ARG2,
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_METHOD_ARGUMENT, 0),
+/* 17 */ ACPI_OP("Arg3", ARGP_ARG3, ARGI_ARG3,
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_METHOD_ARGUMENT, 0),
+/* 18 */ ACPI_OP("Arg4", ARGP_ARG4, ARGI_ARG4,
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_METHOD_ARGUMENT, 0),
+/* 19 */ ACPI_OP("Arg5", ARGP_ARG5, ARGI_ARG5,
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_METHOD_ARGUMENT, 0),
+/* 1A */ ACPI_OP("Arg6", ARGP_ARG6, ARGI_ARG6,
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_METHOD_ARGUMENT, 0),
+/* 1B */ ACPI_OP("Store", ARGP_STORE_OP, ARGI_STORE_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
+ AML_FLAGS_EXEC_1A_1T_1R),
+/* 1C */ ACPI_OP("RefOf", ARGP_REF_OF_OP, ARGI_REF_OF_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R,
+ AML_FLAGS_EXEC_1A_0T_1R),
+/* 1D */ ACPI_OP("Add", ARGP_ADD_OP, ARGI_ADD_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+/* 1E */ ACPI_OP("Concatenate", ARGP_CONCAT_OP, ARGI_CONCAT_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
+/* 1F */ ACPI_OP("Subtract", ARGP_SUBTRACT_OP, ARGI_SUBTRACT_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+/* 20 */ ACPI_OP("Increment", ARGP_INCREMENT_OP, ARGI_INCREMENT_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_0T_1R,
+ AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
+/* 21 */ ACPI_OP("Decrement", ARGP_DECREMENT_OP, ARGI_DECREMENT_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_0T_1R,
+ AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
+/* 22 */ ACPI_OP("Multiply", ARGP_MULTIPLY_OP, ARGI_MULTIPLY_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+/* 23 */ ACPI_OP("Divide", ARGP_DIVIDE_OP, ARGI_DIVIDE_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_2A_2T_1R,
+ AML_FLAGS_EXEC_2A_2T_1R | AML_CONSTANT),
+/* 24 */ ACPI_OP("ShiftLeft", ARGP_SHIFT_LEFT_OP, ARGI_SHIFT_LEFT_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+/* 25 */ ACPI_OP("ShiftRight", ARGP_SHIFT_RIGHT_OP, ARGI_SHIFT_RIGHT_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+/* 26 */ ACPI_OP("And", ARGP_BIT_AND_OP, ARGI_BIT_AND_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+/* 27 */ ACPI_OP("NAnd", ARGP_BIT_NAND_OP, ARGI_BIT_NAND_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+/* 28 */ ACPI_OP("Or", ARGP_BIT_OR_OP, ARGI_BIT_OR_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+/* 29 */ ACPI_OP("NOr", ARGP_BIT_NOR_OP, ARGI_BIT_NOR_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+/* 2A */ ACPI_OP("XOr", ARGP_BIT_XOR_OP, ARGI_BIT_XOR_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+/* 2B */ ACPI_OP("Not", ARGP_BIT_NOT_OP, ARGI_BIT_NOT_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
+ AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+/* 2C */ ACPI_OP("FindSetLeftBit", ARGP_FIND_SET_LEFT_BIT_OP,
+ ARGI_FIND_SET_LEFT_BIT_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
+ AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+/* 2D */ ACPI_OP("FindSetRightBit", ARGP_FIND_SET_RIGHT_BIT_OP,
+ ARGI_FIND_SET_RIGHT_BIT_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
+ AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+/* 2E */ ACPI_OP("DerefOf", ARGP_DEREF_OF_OP, ARGI_DEREF_OF_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R),
+/* 2F */ ACPI_OP("Notify", ARGP_NOTIFY_OP, ARGI_NOTIFY_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_2A_0T_0R, AML_FLAGS_EXEC_2A_0T_0R),
+/* 30 */ ACPI_OP("SizeOf", ARGP_SIZE_OF_OP, ARGI_SIZE_OF_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_0T_1R,
+ AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE),
+/* 31 */ ACPI_OP("Index", ARGP_INDEX_OP, ARGI_INDEX_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R),
+/* 32 */ ACPI_OP("Match", ARGP_MATCH_OP, ARGI_MATCH_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_6A_0T_1R,
+ AML_FLAGS_EXEC_6A_0T_1R | AML_CONSTANT),
+/* 33 */ ACPI_OP("CreateDWordField", ARGP_CREATE_DWORD_FIELD_OP,
+ ARGI_CREATE_DWORD_FIELD_OP,
+ ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
+ AML_TYPE_CREATE_FIELD,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
+ AML_DEFER | AML_CREATE),
+/* 34 */ ACPI_OP("CreateWordField", ARGP_CREATE_WORD_FIELD_OP,
+ ARGI_CREATE_WORD_FIELD_OP,
+ ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
+ AML_TYPE_CREATE_FIELD,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
+ AML_DEFER | AML_CREATE),
+/* 35 */ ACPI_OP("CreateByteField", ARGP_CREATE_BYTE_FIELD_OP,
+ ARGI_CREATE_BYTE_FIELD_OP,
+ ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
+ AML_TYPE_CREATE_FIELD,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
+ AML_DEFER | AML_CREATE),
+/* 36 */ ACPI_OP("CreateBitField", ARGP_CREATE_BIT_FIELD_OP,
+ ARGI_CREATE_BIT_FIELD_OP,
+ ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
+ AML_TYPE_CREATE_FIELD,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
+ AML_DEFER | AML_CREATE),
+/* 37 */ ACPI_OP("ObjectType", ARGP_TYPE_OP, ARGI_TYPE_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_0T_1R,
+ AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE),
+/* 38 */ ACPI_OP("LAnd", ARGP_LAND_OP, ARGI_LAND_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
+ AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT),
+/* 39 */ ACPI_OP("LOr", ARGP_LOR_OP, ARGI_LOR_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
+ AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT),
+/* 3A */ ACPI_OP("LNot", ARGP_LNOT_OP, ARGI_LNOT_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R,
+ AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
+/* 3B */ ACPI_OP("LEqual", ARGP_LEQUAL_OP, ARGI_LEQUAL_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_2A_0T_1R,
+ AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT),
+/* 3C */ ACPI_OP("LGreater", ARGP_LGREATER_OP, ARGI_LGREATER_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_2A_0T_1R,
+ AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT),
+/* 3D */ ACPI_OP("LLess", ARGP_LLESS_OP, ARGI_LLESS_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
+ AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT),
+/* 3E */ ACPI_OP("If", ARGP_IF_OP, ARGI_IF_OP, ACPI_TYPE_ANY,
+ AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS),
+/* 3F */ ACPI_OP("Else", ARGP_ELSE_OP, ARGI_ELSE_OP, ACPI_TYPE_ANY,
+ AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS),
+/* 40 */ ACPI_OP("While", ARGP_WHILE_OP, ARGI_WHILE_OP, ACPI_TYPE_ANY,
+ AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS),
+/* 41 */ ACPI_OP("Noop", ARGP_NOOP_OP, ARGI_NOOP_OP, ACPI_TYPE_ANY,
+ AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
+/* 42 */ ACPI_OP("Return", ARGP_RETURN_OP, ARGI_RETURN_OP,
+ ACPI_TYPE_ANY, AML_CLASS_CONTROL,
+ AML_TYPE_CONTROL, AML_HAS_ARGS),
+/* 43 */ ACPI_OP("Break", ARGP_BREAK_OP, ARGI_BREAK_OP, ACPI_TYPE_ANY,
+ AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
+/* 44 */ ACPI_OP("BreakPoint", ARGP_BREAK_POINT_OP, ARGI_BREAK_POINT_OP,
+ ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
+/* 45 */ ACPI_OP("Ones", ARGP_ONES_OP, ARGI_ONES_OP, ACPI_TYPE_INTEGER,
+ AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT),
+
+/* Prefixed opcodes (Two-byte opcodes with a prefix op) */
+
+/* 46 */ ACPI_OP("Mutex", ARGP_MUTEX_OP, ARGI_MUTEX_OP, ACPI_TYPE_MUTEX,
+ AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_NSNODE | AML_NAMED),
+/* 47 */ ACPI_OP("Event", ARGP_EVENT_OP, ARGI_EVENT_OP, ACPI_TYPE_EVENT,
+ AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE,
+ AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED),
+/* 48 */ ACPI_OP("CondRefOf", ARGP_COND_REF_OF_OP, ARGI_COND_REF_OF_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R),
+/* 49 */ ACPI_OP("CreateField", ARGP_CREATE_FIELD_OP,
+ ARGI_CREATE_FIELD_OP, ACPI_TYPE_BUFFER_FIELD,
+ AML_CLASS_CREATE, AML_TYPE_CREATE_FIELD,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
+ AML_DEFER | AML_FIELD | AML_CREATE),
+/* 4A */ ACPI_OP("Load", ARGP_LOAD_OP, ARGI_LOAD_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_0R,
+ AML_FLAGS_EXEC_1A_1T_0R),
+/* 4B */ ACPI_OP("Stall", ARGP_STALL_OP, ARGI_STALL_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R,
+ AML_FLAGS_EXEC_1A_0T_0R),
+/* 4C */ ACPI_OP("Sleep", ARGP_SLEEP_OP, ARGI_SLEEP_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R,
+ AML_FLAGS_EXEC_1A_0T_0R),
+/* 4D */ ACPI_OP("Acquire", ARGP_ACQUIRE_OP, ARGI_ACQUIRE_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R),
+/* 4E */ ACPI_OP("Signal", ARGP_SIGNAL_OP, ARGI_SIGNAL_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R),
+/* 4F */ ACPI_OP("Wait", ARGP_WAIT_OP, ARGI_WAIT_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
+ AML_FLAGS_EXEC_2A_0T_1R),
+/* 50 */ ACPI_OP("Reset", ARGP_RESET_OP, ARGI_RESET_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R,
+ AML_FLAGS_EXEC_1A_0T_0R),
+/* 51 */ ACPI_OP("Release", ARGP_RELEASE_OP, ARGI_RELEASE_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R),
+/* 52 */ ACPI_OP("FromBCD", ARGP_FROM_BCD_OP, ARGI_FROM_BCD_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_1T_1R,
+ AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+/* 53 */ ACPI_OP("ToBCD", ARGP_TO_BCD_OP, ARGI_TO_BCD_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
+ AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+/* 54 */ ACPI_OP("Unload", ARGP_UNLOAD_OP, ARGI_UNLOAD_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R),
+/* 55 */ ACPI_OP("Revision", ARGP_REVISION_OP, ARGI_REVISION_OP,
+ ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
+ AML_TYPE_CONSTANT, 0),
+/* 56 */ ACPI_OP("Debug", ARGP_DEBUG_OP, ARGI_DEBUG_OP,
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_CONSTANT, 0),
+/* 57 */ ACPI_OP("Fatal", ARGP_FATAL_OP, ARGI_FATAL_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_0T_0R,
+ AML_FLAGS_EXEC_3A_0T_0R),
+/* 58 */ ACPI_OP("OperationRegion", ARGP_REGION_OP, ARGI_REGION_OP,
+ ACPI_TYPE_REGION, AML_CLASS_NAMED_OBJECT,
+ AML_TYPE_NAMED_COMPLEX,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_NSNODE | AML_NAMED | AML_DEFER),
+/* 59 */ ACPI_OP("Field", ARGP_FIELD_OP, ARGI_FIELD_OP, ACPI_TYPE_ANY,
+ AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD),
+/* 5A */ ACPI_OP("Device", ARGP_DEVICE_OP, ARGI_DEVICE_OP,
+ ACPI_TYPE_DEVICE, AML_CLASS_NAMED_OBJECT,
+ AML_TYPE_NAMED_NO_OBJ,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_NSNODE | AML_NAMED),
+/* 5B */ ACPI_OP("Processor", ARGP_PROCESSOR_OP, ARGI_PROCESSOR_OP,
+ ACPI_TYPE_PROCESSOR, AML_CLASS_NAMED_OBJECT,
+ AML_TYPE_NAMED_SIMPLE,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_NSNODE | AML_NAMED),
+/* 5C */ ACPI_OP("PowerResource", ARGP_POWER_RES_OP, ARGI_POWER_RES_OP,
+ ACPI_TYPE_POWER, AML_CLASS_NAMED_OBJECT,
+ AML_TYPE_NAMED_SIMPLE,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_NSNODE | AML_NAMED),
+/* 5D */ ACPI_OP("ThermalZone", ARGP_THERMAL_ZONE_OP,
+ ARGI_THERMAL_ZONE_OP, ACPI_TYPE_THERMAL,
+ AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_NO_OBJ,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_NSNODE | AML_NAMED),
+/* 5E */ ACPI_OP("IndexField", ARGP_INDEX_FIELD_OP, ARGI_INDEX_FIELD_OP,
+ ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
+ AML_TYPE_NAMED_FIELD,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD),
+/* 5F */ ACPI_OP("BankField", ARGP_BANK_FIELD_OP, ARGI_BANK_FIELD_OP,
+ ACPI_TYPE_LOCAL_BANK_FIELD, AML_CLASS_NAMED_OBJECT,
+ AML_TYPE_NAMED_FIELD,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD |
+ AML_DEFER),
+
+/* Internal opcodes that map to invalid AML opcodes */
+
+/* 60 */ ACPI_OP("LNotEqual", ARGP_LNOTEQUAL_OP, ARGI_LNOTEQUAL_OP,
+ ACPI_TYPE_ANY, AML_CLASS_INTERNAL,
+ AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT),
+/* 61 */ ACPI_OP("LLessEqual", ARGP_LLESSEQUAL_OP, ARGI_LLESSEQUAL_OP,
+ ACPI_TYPE_ANY, AML_CLASS_INTERNAL,
+ AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT),
+/* 62 */ ACPI_OP("LGreaterEqual", ARGP_LGREATEREQUAL_OP,
+ ARGI_LGREATEREQUAL_OP, ACPI_TYPE_ANY,
+ AML_CLASS_INTERNAL, AML_TYPE_BOGUS,
+ AML_HAS_ARGS | AML_CONSTANT),
+/* 63 */ ACPI_OP("-NamePath-", ARGP_NAMEPATH_OP, ARGI_NAMEPATH_OP,
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_LITERAL, AML_NSOBJECT | AML_NSNODE),
+/* 64 */ ACPI_OP("-MethodCall-", ARGP_METHODCALL_OP, ARGI_METHODCALL_OP,
+ ACPI_TYPE_METHOD, AML_CLASS_METHOD_CALL,
+ AML_TYPE_METHOD_CALL,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE),
+/* 65 */ ACPI_OP("-ByteList-", ARGP_BYTELIST_OP, ARGI_BYTELIST_OP,
+ ACPI_TYPE_ANY, AML_CLASS_ARGUMENT,
+ AML_TYPE_LITERAL, 0),
+/* 66 */ ACPI_OP("-ReservedField-", ARGP_RESERVEDFIELD_OP,
+ ARGI_RESERVEDFIELD_OP, ACPI_TYPE_ANY,
+ AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
+/* 67 */ ACPI_OP("-NamedField-", ARGP_NAMEDFIELD_OP, ARGI_NAMEDFIELD_OP,
+ ACPI_TYPE_ANY, AML_CLASS_INTERNAL,
+ AML_TYPE_BOGUS,
+ AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED),
+/* 68 */ ACPI_OP("-AccessField-", ARGP_ACCESSFIELD_OP,
+ ARGI_ACCESSFIELD_OP, ACPI_TYPE_ANY,
+ AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
+/* 69 */ ACPI_OP("-StaticString", ARGP_STATICSTRING_OP,
+ ARGI_STATICSTRING_OP, ACPI_TYPE_ANY,
+ AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
+/* 6A */ ACPI_OP("-Return Value-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY,
+ AML_CLASS_RETURN_VALUE, AML_TYPE_RETURN,
+ AML_HAS_ARGS | AML_HAS_RETVAL),
+/* 6B */ ACPI_OP("-UNKNOWN_OP-", ARG_NONE, ARG_NONE, ACPI_TYPE_INVALID,
+ AML_CLASS_UNKNOWN, AML_TYPE_BOGUS, AML_HAS_ARGS),
+/* 6C */ ACPI_OP("-ASCII_ONLY-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY,
+ AML_CLASS_ASCII, AML_TYPE_BOGUS, AML_HAS_ARGS),
+/* 6D */ ACPI_OP("-PREFIX_ONLY-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY,
+ AML_CLASS_PREFIX, AML_TYPE_BOGUS, AML_HAS_ARGS),
+
+/* ACPI 2.0 opcodes */
+
+/* 6E */ ACPI_OP("QwordConst", ARGP_QWORD_OP, ARGI_QWORD_OP,
+ ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
+ AML_TYPE_LITERAL, AML_CONSTANT),
+ /* 6F */ ACPI_OP("Package", /* Var */ ARGP_VAR_PACKAGE_OP,
+ ARGI_VAR_PACKAGE_OP, ACPI_TYPE_PACKAGE,
+ AML_CLASS_CREATE, AML_TYPE_CREATE_OBJECT,
+ AML_HAS_ARGS | AML_DEFER),
+/* 70 */ ACPI_OP("ConcatenateResTemplate", ARGP_CONCAT_RES_OP,
+ ARGI_CONCAT_RES_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
+/* 71 */ ACPI_OP("Mod", ARGP_MOD_OP, ARGI_MOD_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
+/* 72 */ ACPI_OP("CreateQWordField", ARGP_CREATE_QWORD_FIELD_OP,
+ ARGI_CREATE_QWORD_FIELD_OP,
+ ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
+ AML_TYPE_CREATE_FIELD,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
+ AML_DEFER | AML_CREATE),
+/* 73 */ ACPI_OP("ToBuffer", ARGP_TO_BUFFER_OP, ARGI_TO_BUFFER_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_1T_1R,
+ AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+/* 74 */ ACPI_OP("ToDecimalString", ARGP_TO_DEC_STR_OP,
+ ARGI_TO_DEC_STR_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
+ AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+/* 75 */ ACPI_OP("ToHexString", ARGP_TO_HEX_STR_OP, ARGI_TO_HEX_STR_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_1T_1R,
+ AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+/* 76 */ ACPI_OP("ToInteger", ARGP_TO_INTEGER_OP, ARGI_TO_INTEGER_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_1T_1R,
+ AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+/* 77 */ ACPI_OP("ToString", ARGP_TO_STRING_OP, ARGI_TO_STRING_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
+/* 78 */ ACPI_OP("CopyObject", ARGP_COPY_OP, ARGI_COPY_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R),
+/* 79 */ ACPI_OP("Mid", ARGP_MID_OP, ARGI_MID_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_1T_1R,
+ AML_FLAGS_EXEC_3A_1T_1R | AML_CONSTANT),
+/* 7A */ ACPI_OP("Continue", ARGP_CONTINUE_OP, ARGI_CONTINUE_OP,
+ ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
+/* 7B */ ACPI_OP("LoadTable", ARGP_LOAD_TABLE_OP, ARGI_LOAD_TABLE_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_6A_0T_1R, AML_FLAGS_EXEC_6A_0T_1R),
+/* 7C */ ACPI_OP("DataTableRegion", ARGP_DATA_REGION_OP,
+ ARGI_DATA_REGION_OP, ACPI_TYPE_REGION,
+ AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_NSNODE | AML_NAMED | AML_DEFER),
+/* 7D */ ACPI_OP("[EvalSubTree]", ARGP_SCOPE_OP, ARGI_SCOPE_OP,
+ ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
+ AML_TYPE_NAMED_NO_OBJ,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE),
+
+/* ACPI 3.0 opcodes */
+
+/* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R,
+ AML_FLAGS_EXEC_0A_0T_1R)
+
+/*! [End] no source code translation !*/
+};
+
+/*
+ * This table is directly indexed by the opcodes, and returns an
+ * index into the table above
+ */
+static const u8 acpi_gbl_short_op_index[256] = {
+/* 0 1 2 3 4 5 6 7 */
+/* 8 9 A B C D E F */
+/* 0x00 */ 0x00, 0x01, _UNK, _UNK, _UNK, _UNK, 0x02, _UNK,
+/* 0x08 */ 0x03, _UNK, 0x04, 0x05, 0x06, 0x07, 0x6E, _UNK,
+/* 0x10 */ 0x08, 0x09, 0x0a, 0x6F, 0x0b, _UNK, _UNK, _UNK,
+/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX,
+/* 0x30 */ 0x67, 0x66, 0x68, 0x65, 0x69, 0x64, 0x6A, 0x7D,
+/* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x40 */ _UNK, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
+/* 0x48 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
+/* 0x50 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
+/* 0x58 */ _ASC, _ASC, _ASC, _UNK, _PFX, _UNK, _PFX, _ASC,
+/* 0x60 */ 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13,
+/* 0x68 */ 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, _UNK,
+/* 0x70 */ 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22,
+/* 0x78 */ 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a,
+/* 0x80 */ 0x2b, 0x2c, 0x2d, 0x2e, 0x70, 0x71, 0x2f, 0x30,
+/* 0x88 */ 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x72,
+/* 0x90 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x73, 0x74,
+/* 0x98 */ 0x75, 0x76, _UNK, _UNK, 0x77, 0x78, 0x79, 0x7A,
+/* 0xA0 */ 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x60, 0x61,
+/* 0xA8 */ 0x62, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xB0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xB8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xC0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xC8 */ _UNK, _UNK, _UNK, _UNK, 0x44, _UNK, _UNK, _UNK,
+/* 0xD0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xD8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xE0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xE8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xF0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xF8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 0x45,
+};
+
+/*
+ * This table is indexed by the second opcode of the extended opcode
+ * pair. It returns an index into the opcode table (acpi_gbl_aml_op_info)
+ */
+static const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = {
+/* 0 1 2 3 4 5 6 7 */
+/* 8 9 A B C D E F */
+/* 0x00 */ _UNK, 0x46, 0x47, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x08 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x10 */ _UNK, _UNK, 0x48, 0x49, _UNK, _UNK, _UNK, _UNK,
+/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 0x7B,
+/* 0x20 */ 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51,
+/* 0x28 */ 0x52, 0x53, 0x54, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x30 */ 0x55, 0x56, 0x57, 0x7e, _UNK, _UNK, _UNK, _UNK,
+/* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x40 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x48 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x50 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x58 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x60 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x68 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x70 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x78 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x80 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+/* 0x88 */ 0x7C,
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_opcode_info
+ *
+ * PARAMETERS: Opcode - The AML opcode
+ *
+ * RETURN: A pointer to the info about the opcode.
+ *
+ * DESCRIPTION: Find AML opcode description based on the opcode.
+ * NOTE: This procedure must ALWAYS return a valid pointer!
+ *
+ ******************************************************************************/
+
+const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
+{
+ ACPI_FUNCTION_NAME(ps_get_opcode_info);
+
+ /*
+ * Detect normal 8-bit opcode or extended 16-bit opcode
+ */
+ if (!(opcode & 0xFF00)) {
+
+ /* Simple (8-bit) opcode: 0-255, can't index beyond table */
+
+ return (&acpi_gbl_aml_op_info
+ [acpi_gbl_short_op_index[(u8) opcode]]);
+ }
+
+ if (((opcode & 0xFF00) == AML_EXTENDED_OPCODE) &&
+ (((u8) opcode) <= MAX_EXTENDED_OPCODE)) {
+
+ /* Valid extended (16-bit) opcode */
+
+ return (&acpi_gbl_aml_op_info
+ [acpi_gbl_long_op_index[(u8) opcode]]);
+ }
+
+ /* Unknown AML opcode */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Unknown AML opcode [%4.4X]\n", opcode));
+
+ return (&acpi_gbl_aml_op_info[_UNK]);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_opcode_name
+ *
+ * PARAMETERS: Opcode - The AML opcode
+ *
+ * RETURN: A pointer to the name of the opcode (ASCII String)
+ * Note: Never returns NULL.
+ *
+ * DESCRIPTION: Translate an opcode into a human-readable string
+ *
+ ******************************************************************************/
+
+char *acpi_ps_get_opcode_name(u16 opcode)
+{
+#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUG_OUTPUT)
+
+ const struct acpi_opcode_info *op;
+
+ op = acpi_ps_get_opcode_info(opcode);
+
+ /* Always guaranteed to return a valid pointer */
+
+ return (op->name);
+
+#else
+ return ("OpcodeName unavailable");
+
+#endif
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_argument_count
+ *
+ * PARAMETERS: op_type - Type associated with the AML opcode
+ *
+ * RETURN: Argument count
+ *
+ * DESCRIPTION: Obtain the number of expected arguments for an AML opcode
+ *
+ ******************************************************************************/
+
+u8 acpi_ps_get_argument_count(u32 op_type)
+{
+
+ if (op_type <= AML_TYPE_EXEC_6A_0T_1R) {
+ return (acpi_gbl_argument_count[op_type]);
+ }
+
+ return (0);
+}
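
acpi_gbl_short_op_index, acpi_gbl_long_op_index and acpi_ps_get_opcode_info together form a two-level dispatch: a 1-byte opcode indexes the short table directly, while a 0x5B-prefixed extended opcode indexes the long table with its low byte; both yield an index into acpi_gbl_aml_op_info. A self-contained miniature of the same pattern, with invented tables and the unknown-opcode fallback reduced to a string:

#include <stdint.h>
#include <stdio.h>

/* Tiny stand-ins for the two index tables and the master opcode table.
 * Unlisted simple opcodes fall to index 0 here; the real tables route
 * them to a dedicated _UNK entry instead.
 */
static const char *const op_names[] = { "Zero", "One", "Scope", "Mutex", "Event" };

static const uint8_t short_index[256] = { [0x00] = 0, [0x01] = 1, [0x10] = 2 };
static const uint8_t long_index[0x03] = { [0x01] = 3, [0x02] = 4 };

static const char *opcode_name(uint16_t opcode)
{
        if (!(opcode & 0xFF00))                         /* simple 8-bit opcode */
                return op_names[short_index[(uint8_t)opcode]];

        if ((opcode & 0xFF00) == 0x5B00 &&              /* extended prefix 0x5B */
            (uint8_t)opcode < sizeof(long_index))
                return op_names[long_index[(uint8_t)opcode]];

        return "-UNKNOWN-";
}

int main(void)
{
        printf("%s\n", opcode_name(0x10));              /* Scope */
        printf("%s\n", opcode_name(0x5B01));            /* Mutex */
        printf("%s\n", opcode_name(0x5BF0));            /* beyond the toy table -> -UNKNOWN- */
        return 0;
}

acpi_ps_get_argument_count above follows the same idea with a third, smaller table (acpi_gbl_argument_count), indexed by the AML_TYPE_* value rather than by the opcode itself.
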
diff --git a/drivers/acpi/parser/psparse.c b/drivers/acpi/parser/psparse.c
new file mode 100644
index 0000000..68e932f
--- /dev/null
+++ b/drivers/acpi/parser/psparse.c
@@ -0,0 +1,688 @@
+/******************************************************************************
+ *
+ * Module Name: psparse - Parser top level AML parse routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+/*
+ * Parse the AML and build an operation tree as most interpreters,
+ * like Perl, do. Parsing is done by hand rather than with a YACC
+ * generated parser to tightly constrain stack and dynamic memory
+ * usage. At the same time, parsing is kept flexible and the code
+ * fairly compact by parsing based on a list of AML opcode
+ * templates in aml_op_info[].
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acparser.h>
+#include <acpi/acdispat.h>
+#include <acpi/amlcode.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acinterp.h>
+
+#define _COMPONENT ACPI_PARSER
+ACPI_MODULE_NAME("psparse")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_opcode_size
+ *
+ * PARAMETERS: Opcode - An AML opcode
+ *
+ * RETURN: Size of the opcode, in bytes (1 or 2)
+ *
+ * DESCRIPTION: Get the size of the current opcode.
+ *
+ ******************************************************************************/
+u32 acpi_ps_get_opcode_size(u32 opcode)
+{
+
+ /* Extended (2-byte) opcode if > 255 */
+
+ if (opcode > 0x00FF) {
+ return (2);
+ }
+
+ /* Otherwise, just a single byte opcode */
+
+ return (1);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_peek_opcode
+ *
+ * PARAMETERS: parser_state - A parser state object
+ *
+ * RETURN: Next AML opcode
+ *
+ * DESCRIPTION: Get next AML opcode (without incrementing AML pointer)
+ *
+ ******************************************************************************/
+
+u16 acpi_ps_peek_opcode(struct acpi_parse_state * parser_state)
+{
+ u8 *aml;
+ u16 opcode;
+
+ aml = parser_state->aml;
+ opcode = (u16) ACPI_GET8(aml);
+
+ if (opcode == AML_EXTENDED_OP_PREFIX) {
+
+ /* Extended opcode, get the second opcode byte */
+
+ aml++;
+ opcode = (u16) ((opcode << 8) | ACPI_GET8(aml));
+ }
+
+ return (opcode);
+}
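
The prefix handling in acpi_ps_peek_opcode is easy to check by hand: 0x5B is the AML extended-opcode prefix byte, and the byte that follows is shifted in to form the 16-bit opcode. A minimal stand-alone check (the raw bytes are chosen only for illustration):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        const uint8_t aml[] = { 0x5B, 0x01 };   /* extended prefix, then MutexOp */
        uint16_t opcode = aml[0];

        if (opcode == 0x5B)                     /* AML_EXTENDED_OP_PREFIX */
                opcode = (uint16_t)((opcode << 8) | aml[1]);

        assert(opcode == 0x5B01);               /* the 16-bit value looked up in the tables */
        return 0;
}
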
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_complete_this_op
+ *
+ * PARAMETERS: walk_state - Current State
+ * Op - Op to complete
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Perform any cleanup at the completion of an Op.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
+ union acpi_parse_object * op)
+{
+ union acpi_parse_object *prev;
+ union acpi_parse_object *next;
+ const struct acpi_opcode_info *parent_info;
+ union acpi_parse_object *replacement_op = NULL;
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE_PTR(ps_complete_this_op, op);
+
+	/* Check for a null Op; this can happen if the AML code is corrupt */
+
+ if (!op) {
+ return_ACPI_STATUS(AE_OK); /* OK for now */
+ }
+
+ /* Delete this op and the subtree below it if asked to */
+
+ if (((walk_state->parse_flags & ACPI_PARSE_TREE_MASK) !=
+ ACPI_PARSE_DELETE_TREE)
+ || (walk_state->op_info->class == AML_CLASS_ARGUMENT)) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Make sure that we only delete this subtree */
+
+ if (op->common.parent) {
+ prev = op->common.parent->common.value.arg;
+ if (!prev) {
+
+ /* Nothing more to do */
+
+ goto cleanup;
+ }
+
+ /*
+ * Check if we need to replace the operator and its subtree
+ * with a return value op (placeholder op)
+ */
+ parent_info =
+ acpi_ps_get_opcode_info(op->common.parent->common.
+ aml_opcode);
+
+ switch (parent_info->class) {
+ case AML_CLASS_CONTROL:
+ break;
+
+ case AML_CLASS_CREATE:
+
+ /*
+ * These opcodes contain term_arg operands. The current
+ * op must be replaced by a placeholder return op
+ */
+ replacement_op =
+ acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
+ if (!replacement_op) {
+ status = AE_NO_MEMORY;
+ }
+ break;
+
+ case AML_CLASS_NAMED_OBJECT:
+
+ /*
+ * These opcodes contain term_arg operands. The current
+ * op must be replaced by a placeholder return op
+ */
+ if ((op->common.parent->common.aml_opcode ==
+ AML_REGION_OP)
+ || (op->common.parent->common.aml_opcode ==
+ AML_DATA_REGION_OP)
+ || (op->common.parent->common.aml_opcode ==
+ AML_BUFFER_OP)
+ || (op->common.parent->common.aml_opcode ==
+ AML_PACKAGE_OP)
+ || (op->common.parent->common.aml_opcode ==
+ AML_BANK_FIELD_OP)
+ || (op->common.parent->common.aml_opcode ==
+ AML_VAR_PACKAGE_OP)) {
+ replacement_op =
+ acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
+ if (!replacement_op) {
+ status = AE_NO_MEMORY;
+ }
+ } else
+ if ((op->common.parent->common.aml_opcode ==
+ AML_NAME_OP)
+ && (walk_state->pass_number <=
+ ACPI_IMODE_LOAD_PASS2)) {
+ if ((op->common.aml_opcode == AML_BUFFER_OP)
+ || (op->common.aml_opcode == AML_PACKAGE_OP)
+ || (op->common.aml_opcode ==
+ AML_VAR_PACKAGE_OP)) {
+ replacement_op =
+ acpi_ps_alloc_op(op->common.
+ aml_opcode);
+ if (!replacement_op) {
+ status = AE_NO_MEMORY;
+ } else {
+ replacement_op->named.data =
+ op->named.data;
+ replacement_op->named.length =
+ op->named.length;
+ }
+ }
+ }
+ break;
+
+ default:
+
+ replacement_op =
+ acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
+ if (!replacement_op) {
+ status = AE_NO_MEMORY;
+ }
+ }
+
+ /* We must unlink this op from the parent tree */
+
+ if (prev == op) {
+
+ /* This op is the first in the list */
+
+ if (replacement_op) {
+ replacement_op->common.parent =
+ op->common.parent;
+ replacement_op->common.value.arg = NULL;
+ replacement_op->common.node = op->common.node;
+ op->common.parent->common.value.arg =
+ replacement_op;
+ replacement_op->common.next = op->common.next;
+ } else {
+ op->common.parent->common.value.arg =
+ op->common.next;
+ }
+ }
+
+ /* Search the parent list */
+
+ else
+ while (prev) {
+
+ /* Traverse all siblings in the parent's argument list */
+
+ next = prev->common.next;
+ if (next == op) {
+ if (replacement_op) {
+ replacement_op->common.parent =
+ op->common.parent;
+ replacement_op->common.value.
+ arg = NULL;
+ replacement_op->common.node =
+ op->common.node;
+ prev->common.next =
+ replacement_op;
+ replacement_op->common.next =
+ op->common.next;
+ next = NULL;
+ } else {
+ prev->common.next =
+ op->common.next;
+ next = NULL;
+ }
+ }
+ prev = next;
+ }
+ }
+
+ cleanup:
+
+ /* Now we can actually delete the subtree rooted at Op */
+
+ acpi_ps_delete_parse_tree(op);
+ return_ACPI_STATUS(status);
+}
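
Much of acpi_ps_complete_this_op above is the unlink-or-replace walk over the parent's singly linked argument list (the prev/next loop). A stand-alone miniature of just that list surgery, using a hypothetical node type rather than the ACPICA parse objects:

#include <stdio.h>

struct node { const char *name; struct node *next; };

/* Remove "op" from the list at *head, splicing in "replacement" if one
 * is supplied, mirroring what the code above does for the parent's
 * first argument and for later siblings.
 */
static void unlink_or_replace(struct node **head, struct node *op,
                              struct node *replacement)
{
        struct node *prev;

        if (*head == op) {                      /* op is first in the list */
                if (replacement) {
                        replacement->next = op->next;
                        *head = replacement;
                } else {
                        *head = op->next;
                }
                return;
        }

        for (prev = *head; prev; prev = prev->next) {
                if (prev->next == op) {         /* found op's predecessor */
                        if (replacement) {
                                replacement->next = op->next;
                                prev->next = replacement;
                        } else {
                                prev->next = op->next;
                        }
                        return;
                }
        }
}

int main(void)
{
        struct node c = { "C", NULL }, b = { "B", &c }, a = { "A", &b };
        struct node *head = &a, *n;

        unlink_or_replace(&head, &b, NULL);     /* drop B: the list becomes A -> C */

        for (n = head; n; n = n->next)
                printf("%s ", n->name);
        printf("\n");
        return 0;
}
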
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_next_parse_state
+ *
+ * PARAMETERS: walk_state - Current state
+ * Op - Current parse op
+ * callback_status - Status from previous operation
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Update the parser state based upon the return exception from
+ * the parser callback.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ps_next_parse_state(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op,
+ acpi_status callback_status)
+{
+ struct acpi_parse_state *parser_state = &walk_state->parser_state;
+ acpi_status status = AE_CTRL_PENDING;
+
+ ACPI_FUNCTION_TRACE_PTR(ps_next_parse_state, op);
+
+ switch (callback_status) {
+ case AE_CTRL_TERMINATE:
+ /*
+ * A control method was terminated via a RETURN statement.
+ * The walk of this method is complete.
+ */
+ parser_state->aml = parser_state->aml_end;
+ status = AE_CTRL_TERMINATE;
+ break;
+
+ case AE_CTRL_BREAK:
+
+ parser_state->aml = walk_state->aml_last_while;
+ walk_state->control_state->common.value = FALSE;
+ status = AE_CTRL_BREAK;
+ break;
+
+ case AE_CTRL_CONTINUE:
+
+ parser_state->aml = walk_state->aml_last_while;
+ status = AE_CTRL_CONTINUE;
+ break;
+
+ case AE_CTRL_PENDING:
+
+ parser_state->aml = walk_state->aml_last_while;
+ break;
+
+#if 0
+ case AE_CTRL_SKIP:
+
+ parser_state->aml = parser_state->scope->parse_scope.pkg_end;
+ status = AE_OK;
+ break;
+#endif
+
+ case AE_CTRL_TRUE:
+ /*
+ * Predicate of an IF was true, and we are at the matching ELSE.
+ * Just close out this package
+ */
+ parser_state->aml = acpi_ps_get_next_package_end(parser_state);
+ status = AE_CTRL_PENDING;
+ break;
+
+ case AE_CTRL_FALSE:
+ /*
+ * Either an IF/WHILE Predicate was false or we encountered a BREAK
+ * opcode. In both cases, we do not execute the rest of the
+	 * package; we simply close out the parent (finishing the walk of
+ * this branch of the tree) and continue execution at the parent
+ * level.
+ */
+ parser_state->aml = parser_state->scope->parse_scope.pkg_end;
+
+ /* In the case of a BREAK, just force a predicate (if any) to FALSE */
+
+ walk_state->control_state->common.value = FALSE;
+ status = AE_CTRL_END;
+ break;
+
+ case AE_CTRL_TRANSFER:
+
+ /* A method call (invocation) -- transfer control */
+
+ status = AE_CTRL_TRANSFER;
+ walk_state->prev_op = op;
+ walk_state->method_call_op = op;
+ walk_state->method_call_node =
+ (op->common.value.arg)->common.node;
+
+ /* Will return value (if any) be used by the caller? */
+
+ walk_state->return_used =
+ acpi_ds_is_result_used(op, walk_state);
+ break;
+
+ default:
+
+ status = callback_status;
+ if ((callback_status & AE_CODE_MASK) == AE_CODE_CONTROL) {
+ status = AE_OK;
+ }
+ break;
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_parse_aml
+ *
+ * PARAMETERS: walk_state - Current state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Parse raw AML and return a tree of ops
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
+{
+ acpi_status status;
+ struct acpi_thread_state *thread;
+ struct acpi_thread_state *prev_walk_list = acpi_gbl_current_walk_list;
+ struct acpi_walk_state *previous_walk_state;
+
+ ACPI_FUNCTION_TRACE(ps_parse_aml);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Entered with WalkState=%p Aml=%p size=%X\n",
+ walk_state, walk_state->parser_state.aml,
+ walk_state->parser_state.aml_size));
+
+ /* Create and initialize a new thread state */
+
+ thread = acpi_ut_create_thread_state();
+ if (!thread) {
+ acpi_ds_delete_walk_state(walk_state);
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ walk_state->thread = thread;
+
+ /*
+ * If executing a method, the starting sync_level is this method's
+ * sync_level
+ */
+ if (walk_state->method_desc) {
+ walk_state->thread->current_sync_level =
+ walk_state->method_desc->method.sync_level;
+ }
+
+ acpi_ds_push_walk_state(walk_state, thread);
+
+ /*
+ * This global allows the AML debugger to get a handle to the currently
+ * executing control method.
+ */
+ acpi_gbl_current_walk_list = thread;
+
+ /*
+ * Execute the walk loop as long as there is a valid Walk State. This
+ * handles nested control method invocations without recursion.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "State=%p\n", walk_state));
+
+ status = AE_OK;
+ while (walk_state) {
+ if (ACPI_SUCCESS(status)) {
+ /*
+ * The parse_loop executes AML until the method terminates
+ * or calls another method.
+ */
+ status = acpi_ps_parse_loop(walk_state);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Completed one call to walk loop, %s State=%p\n",
+ acpi_format_exception(status), walk_state));
+
+ if (status == AE_CTRL_TRANSFER) {
+ /*
+ * A method call was detected.
+ * Transfer control to the called control method
+ */
+ status =
+ acpi_ds_call_control_method(thread, walk_state,
+ NULL);
+ if (ACPI_FAILURE(status)) {
+ status =
+ acpi_ds_method_error(status, walk_state);
+ }
+
+ /*
+			 * If the transfer to the new method call worked, a new walk
+ * state was created -- get it
+ */
+ walk_state = acpi_ds_get_current_walk_state(thread);
+ continue;
+ } else if (status == AE_CTRL_TERMINATE) {
+ status = AE_OK;
+ } else if ((status != AE_OK) && (walk_state->method_desc)) {
+
+ /* Either the method parse or actual execution failed */
+
+ ACPI_ERROR_METHOD("Method parse/execution failed",
+ walk_state->method_node, NULL,
+ status);
+
+ /* Check for possible multi-thread reentrancy problem */
+
+ if ((status == AE_ALREADY_EXISTS) &&
+ (!walk_state->method_desc->method.mutex)) {
+ ACPI_INFO((AE_INFO,
+ "Marking method %4.4s as Serialized because of AE_ALREADY_EXISTS error",
+ walk_state->method_node->name.
+ ascii));
+
+ /*
+ * Method tried to create an object twice. The probable cause is
+ * that the method cannot handle reentrancy.
+ *
+ * The method is marked not_serialized, but it tried to create
+ * a named object, causing the second thread entrance to fail.
+ * Workaround this problem by marking the method permanently
+ * as Serialized.
+ */
+ walk_state->method_desc->method.method_flags |=
+ AML_METHOD_SERIALIZED;
+ walk_state->method_desc->method.sync_level = 0;
+ }
+ }
+
+ /* We are done with this walk, move on to the parent if any */
+
+ walk_state = acpi_ds_pop_walk_state(thread);
+
+ /* Reset the current scope to the beginning of scope stack */
+
+ acpi_ds_scope_stack_clear(walk_state);
+
+ /*
+ * If we just returned from the execution of a control method or if we
+ * encountered an error during the method parse phase, there's lots of
+ * cleanup to do
+ */
+ if (((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) ==
+ ACPI_PARSE_EXECUTE) || (ACPI_FAILURE(status))) {
+ acpi_ds_terminate_control_method(walk_state->
+ method_desc,
+ walk_state);
+ }
+
+ /* Delete this walk state and all linked control states */
+
+ acpi_ps_cleanup_scope(&walk_state->parser_state);
+ previous_walk_state = walk_state;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "ReturnValue=%p, ImplicitValue=%p State=%p\n",
+ walk_state->return_desc,
+ walk_state->implicit_return_obj, walk_state));
+
+ /* Check if we have restarted a preempted walk */
+
+ walk_state = acpi_ds_get_current_walk_state(thread);
+ if (walk_state) {
+ if (ACPI_SUCCESS(status)) {
+ /*
+ * There is another walk state, restart it.
+ * If the method return value is not used by the parent,
+				 * the object is deleted.
+ */
+ if (!previous_walk_state->return_desc) {
+ /*
+ * In slack mode execution, if there is no return value
+ * we should implicitly return zero (0) as a default value.
+ */
+ if (acpi_gbl_enable_interpreter_slack &&
+ !previous_walk_state->
+ implicit_return_obj) {
+ previous_walk_state->
+ implicit_return_obj =
+ acpi_ut_create_internal_object
+ (ACPI_TYPE_INTEGER);
+ if (!previous_walk_state->
+ implicit_return_obj) {
+ return_ACPI_STATUS
+ (AE_NO_MEMORY);
+ }
+
+ previous_walk_state->
+ implicit_return_obj->
+ integer.value = 0;
+ }
+
+ /* Restart the calling control method */
+
+ status =
+ acpi_ds_restart_control_method
+ (walk_state,
+ previous_walk_state->
+ implicit_return_obj);
+ } else {
+ /*
+ * We have a valid return value, delete any implicit
+ * return value.
+ */
+ acpi_ds_clear_implicit_return
+ (previous_walk_state);
+
+ status =
+ acpi_ds_restart_control_method
+ (walk_state,
+ previous_walk_state->return_desc);
+ }
+ if (ACPI_SUCCESS(status)) {
+ walk_state->walk_type |=
+ ACPI_WALK_METHOD_RESTART;
+ }
+ } else {
+ /* On error, delete any return object or implicit return */
+
+ acpi_ut_remove_reference(previous_walk_state->
+ return_desc);
+ acpi_ds_clear_implicit_return
+ (previous_walk_state);
+ }
+ }
+
+ /*
+ * Just completed a 1st-level method, save the final internal return
+ * value (if any)
+ */
+ else if (previous_walk_state->caller_return_desc) {
+ if (previous_walk_state->implicit_return_obj) {
+ *(previous_walk_state->caller_return_desc) =
+ previous_walk_state->implicit_return_obj;
+ } else {
+ /* NULL if no return value */
+
+ *(previous_walk_state->caller_return_desc) =
+ previous_walk_state->return_desc;
+ }
+ } else {
+ if (previous_walk_state->return_desc) {
+
+ /* Caller doesn't want it, must delete it */
+
+ acpi_ut_remove_reference(previous_walk_state->
+ return_desc);
+ }
+ if (previous_walk_state->implicit_return_obj) {
+
+ /* Caller doesn't want it, must delete it */
+
+ acpi_ut_remove_reference(previous_walk_state->
+ implicit_return_obj);
+ }
+ }
+
+ acpi_ds_delete_walk_state(previous_walk_state);
+ }
+
+ /* Normal exit */
+
+ acpi_ex_release_all_mutexes(thread);
+ acpi_ut_delete_generic_state(ACPI_CAST_PTR
+ (union acpi_generic_state, thread));
+ acpi_gbl_current_walk_list = prev_walk_list;
+ return_ACPI_STATUS(status);
+}
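
The slack-mode branch above substitutes an implicit Integer(0) when a method finishes without an explicit return. A minimal sketch of that default-value construction, factored into a hypothetical helper (the helper name and its standalone use are assumptions, not part of this patch):

/* Hypothetical helper: build the Integer(0) used as the implicit return */
static union acpi_operand_object *ps_make_default_return(void)
{
	union acpi_operand_object *obj;

	obj = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
	if (!obj)
		return NULL;	/* caller reports AE_NO_MEMORY */

	obj->integer.value = 0;
	return obj;
}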
diff --git a/drivers/acpi/parser/psscope.c b/drivers/acpi/parser/psscope.c
new file mode 100644
index 0000000..ee50e67
--- /dev/null
+++ b/drivers/acpi/parser/psscope.c
@@ -0,0 +1,264 @@
+/******************************************************************************
+ *
+ * Module Name: psscope - Parser scope stack management routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acparser.h>
+
+#define _COMPONENT ACPI_PARSER
+ACPI_MODULE_NAME("psscope")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_parent_scope
+ *
+ * PARAMETERS: parser_state - Current parser state object
+ *
+ * RETURN: Pointer to an Op object
+ *
+ * DESCRIPTION: Get parent of current op being parsed
+ *
+ ******************************************************************************/
+union acpi_parse_object *acpi_ps_get_parent_scope(struct acpi_parse_state
+ *parser_state)
+{
+
+ return (parser_state->scope->parse_scope.op);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_has_completed_scope
+ *
+ * PARAMETERS: parser_state - Current parser state object
+ *
+ * RETURN: Boolean, TRUE = scope completed.
+ *
+ * DESCRIPTION: Is parsing of the current argument complete? True when either
+ * 1) the AML pointer is at or beyond the end of the scope, or
+ * 2) the scope argument count has reached zero.
+ *
+ ******************************************************************************/
+
+u8 acpi_ps_has_completed_scope(struct acpi_parse_state * parser_state)
+{
+
+ return ((u8)
+ ((parser_state->aml >= parser_state->scope->parse_scope.arg_end
+ || !parser_state->scope->parse_scope.arg_count)));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_init_scope
+ *
+ * PARAMETERS: parser_state - Current parser state object
+ * root_op - The root Op of this new scope
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Allocate and init a new scope object
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ps_init_scope(struct acpi_parse_state * parser_state,
+ union acpi_parse_object * root_op)
+{
+ union acpi_generic_state *scope;
+
+ ACPI_FUNCTION_TRACE_PTR(ps_init_scope, root_op);
+
+ scope = acpi_ut_create_generic_state();
+ if (!scope) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ scope->common.descriptor_type = ACPI_DESC_TYPE_STATE_RPSCOPE;
+ scope->parse_scope.op = root_op;
+ scope->parse_scope.arg_count = ACPI_VAR_ARGS;
+ scope->parse_scope.arg_end = parser_state->aml_end;
+ scope->parse_scope.pkg_end = parser_state->aml_end;
+
+ parser_state->scope = scope;
+ parser_state->start_op = root_op;
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_push_scope
+ *
+ * PARAMETERS: parser_state - Current parser state object
+ * Op - Current op to be pushed
+ * remaining_args - List of args remaining
+ * arg_count - Fixed or variable number of args
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Push current op to begin parsing its argument
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ps_push_scope(struct acpi_parse_state *parser_state,
+ union acpi_parse_object *op,
+ u32 remaining_args, u32 arg_count)
+{
+ union acpi_generic_state *scope;
+
+ ACPI_FUNCTION_TRACE_PTR(ps_push_scope, op);
+
+ scope = acpi_ut_create_generic_state();
+ if (!scope) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ scope->common.descriptor_type = ACPI_DESC_TYPE_STATE_PSCOPE;
+ scope->parse_scope.op = op;
+ scope->parse_scope.arg_list = remaining_args;
+ scope->parse_scope.arg_count = arg_count;
+ scope->parse_scope.pkg_end = parser_state->pkg_end;
+
+ /* Push onto scope stack */
+
+ acpi_ut_push_generic_state(&parser_state->scope, scope);
+
+ if (arg_count == ACPI_VAR_ARGS) {
+
+ /* Multiple arguments */
+
+ scope->parse_scope.arg_end = parser_state->pkg_end;
+ } else {
+ /* Single argument */
+
+ scope->parse_scope.arg_end = ACPI_TO_POINTER(ACPI_MAX_PTR);
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_pop_scope
+ *
+ * PARAMETERS: parser_state - Current parser state object
+ * Op - Where the popped op is returned
+ * arg_list - Where the popped "next argument" is
+ * returned
+ * arg_count - Count of objects in arg_list
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Return to parsing a previous op
+ *
+ ******************************************************************************/
+
+void
+acpi_ps_pop_scope(struct acpi_parse_state *parser_state,
+ union acpi_parse_object **op, u32 * arg_list, u32 * arg_count)
+{
+ union acpi_generic_state *scope = parser_state->scope;
+
+ ACPI_FUNCTION_TRACE(ps_pop_scope);
+
+ /* Only pop the scope if there is in fact a next scope */
+
+ if (scope->common.next) {
+ scope = acpi_ut_pop_generic_state(&parser_state->scope);
+
+ /* Return to parsing previous op */
+
+ *op = scope->parse_scope.op;
+ *arg_list = scope->parse_scope.arg_list;
+ *arg_count = scope->parse_scope.arg_count;
+ parser_state->pkg_end = scope->parse_scope.pkg_end;
+
+ /* All done with this scope state structure */
+
+ acpi_ut_delete_generic_state(scope);
+ } else {
+ /* Empty parse stack, prepare to fetch next opcode */
+
+ *op = NULL;
+ *arg_list = 0;
+ *arg_count = 0;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Popped Op %p Args %X\n", *op, *arg_count));
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_cleanup_scope
+ *
+ * PARAMETERS: parser_state - Current parser state object
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Destroy available list, remaining stack levels, and return
+ * root scope
+ *
+ ******************************************************************************/
+
+void acpi_ps_cleanup_scope(struct acpi_parse_state *parser_state)
+{
+ union acpi_generic_state *scope;
+
+ ACPI_FUNCTION_TRACE_PTR(ps_cleanup_scope, parser_state);
+
+ if (!parser_state) {
+ return_VOID;
+ }
+
+ /* Delete anything on the scope stack */
+
+ while (parser_state->scope) {
+ scope = acpi_ut_pop_generic_state(&parser_state->scope);
+ acpi_ut_delete_generic_state(scope);
+ }
+
+ return_VOID;
+}
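
A minimal sketch of how these routines are expected to pair up during a parse, assuming parser_state, root_op and op were prepared by the caller (error handling trimmed; the helper name is illustrative only):

static void ps_scope_stack_example(struct acpi_parse_state *parser_state,
				   union acpi_parse_object *root_op,
				   union acpi_parse_object *op)
{
	union acpi_parse_object *popped_op;
	u32 arg_list;
	u32 arg_count;

	/* Root scope covers the entire AML stream */
	(void)acpi_ps_init_scope(parser_state, root_op);

	/* Descend into Op's arguments, then return to the parent scope */
	(void)acpi_ps_push_scope(parser_state, op, 0, ACPI_VAR_ARGS);
	acpi_ps_pop_scope(parser_state, &popped_op, &arg_list, &arg_count);

	/* Tear down anything left on the stack */
	acpi_ps_cleanup_scope(parser_state);
}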
diff --git a/drivers/acpi/parser/pstree.c b/drivers/acpi/parser/pstree.c
new file mode 100644
index 0000000..1dd355d
--- /dev/null
+++ b/drivers/acpi/parser/pstree.c
@@ -0,0 +1,311 @@
+/******************************************************************************
+ *
+ * Module Name: pstree - Parser op tree manipulation/traversal/search
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acparser.h>
+#include <acpi/amlcode.h>
+
+#define _COMPONENT ACPI_PARSER
+ACPI_MODULE_NAME("pstree")
+
+/* Local prototypes */
+#ifdef ACPI_OBSOLETE_FUNCTIONS
+union acpi_parse_object *acpi_ps_get_child(union acpi_parse_object *op);
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_arg
+ *
+ * PARAMETERS: Op - Get an argument for this op
+ * Argn - Nth argument to get
+ *
+ * RETURN: The argument (as an Op object). NULL if argument does not exist
+ *
+ * DESCRIPTION: Get the specified op's argument.
+ *
+ ******************************************************************************/
+
+union acpi_parse_object *acpi_ps_get_arg(union acpi_parse_object *op, u32 argn)
+{
+ union acpi_parse_object *arg = NULL;
+ const struct acpi_opcode_info *op_info;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* Get the info structure for this opcode */
+
+ op_info = acpi_ps_get_opcode_info(op->common.aml_opcode);
+ if (op_info->class == AML_CLASS_UNKNOWN) {
+
+ /* Invalid opcode or ASCII character */
+
+ return (NULL);
+ }
+
+ /* Check if this opcode requires argument sub-objects */
+
+ if (!(op_info->flags & AML_HAS_ARGS)) {
+
+ /* Has no linked argument objects */
+
+ return (NULL);
+ }
+
+ /* Get the requested argument object */
+
+ arg = op->common.value.arg;
+ while (arg && argn) {
+ argn--;
+ arg = arg->common.next;
+ }
+
+ return (arg);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_append_arg
+ *
+ * PARAMETERS: Op - Append an argument to this Op.
+ * Arg - Argument Op to append
+ *
+ * RETURN: None.
+ *
+ * DESCRIPTION: Append an argument to an op's argument list (a NULL arg is OK)
+ *
+ ******************************************************************************/
+
+void
+acpi_ps_append_arg(union acpi_parse_object *op, union acpi_parse_object *arg)
+{
+ union acpi_parse_object *prev_arg;
+ const struct acpi_opcode_info *op_info;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (!op) {
+ return;
+ }
+
+ /* Get the info structure for this opcode */
+
+ op_info = acpi_ps_get_opcode_info(op->common.aml_opcode);
+ if (op_info->class == AML_CLASS_UNKNOWN) {
+
+ /* Invalid opcode */
+
+ ACPI_ERROR((AE_INFO, "Invalid AML Opcode: 0x%2.2X",
+ op->common.aml_opcode));
+ return;
+ }
+
+ /* Check if this opcode requires argument sub-objects */
+
+ if (!(op_info->flags & AML_HAS_ARGS)) {
+
+ /* Has no linked argument objects */
+
+ return;
+ }
+
+ /* Append the argument to the linked argument list */
+
+ if (op->common.value.arg) {
+
+ /* Append to existing argument list */
+
+ prev_arg = op->common.value.arg;
+ while (prev_arg->common.next) {
+ prev_arg = prev_arg->common.next;
+ }
+ prev_arg->common.next = arg;
+ } else {
+ /* No argument list, this will be the first argument */
+
+ op->common.value.arg = arg;
+ }
+
+ /* Set the parent in this arg and any args linked after it */
+
+ while (arg) {
+ arg->common.parent = op;
+ arg = arg->common.next;
+
+ op->common.arg_list_length++;
+ }
+}
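
A small usage sketch of the argument list managed by acpi_ps_get_arg/acpi_ps_append_arg; the opcode choices are arbitrary examples and allocation failures of the child ops are ignored for brevity (acpi_ps_delete_parse_tree is defined in pswalk.c later in this series):

static void ps_arg_list_example(void)
{
	union acpi_parse_object *method_op;
	union acpi_parse_object *arg0;
	union acpi_parse_object *arg1;

	method_op = acpi_ps_alloc_op(AML_METHOD_OP);
	if (!method_op)
		return;

	arg0 = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
	arg1 = acpi_ps_alloc_op(AML_BYTE_OP);

	acpi_ps_append_arg(method_op, arg0);
	acpi_ps_append_arg(method_op, arg1);

	/* acpi_ps_get_arg(method_op, 1) now returns arg1 */

	acpi_ps_delete_parse_tree(method_op);
}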
+
+#ifdef ACPI_FUTURE_USAGE
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_depth_next
+ *
+ * PARAMETERS: Origin - Root of subtree to search
+ * Op - Last (previous) Op that was found
+ *
+ * RETURN: Next Op found in the search.
+ *
+ * DESCRIPTION: Get next op in tree (walking the tree in depth-first order)
+ * Return NULL when reaching "origin" or when walking up from root
+ *
+ ******************************************************************************/
+
+union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin,
+ union acpi_parse_object *op)
+{
+ union acpi_parse_object *next = NULL;
+ union acpi_parse_object *parent;
+ union acpi_parse_object *arg;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (!op) {
+ return (NULL);
+ }
+
+ /* Look for an argument or child */
+
+ next = acpi_ps_get_arg(op, 0);
+ if (next) {
+ return (next);
+ }
+
+ /* Look for a sibling */
+
+ next = op->common.next;
+ if (next) {
+ return (next);
+ }
+
+ /* Look for a sibling of parent */
+
+ parent = op->common.parent;
+
+ while (parent) {
+ arg = acpi_ps_get_arg(parent, 0);
+ while (arg && (arg != origin) && (arg != op)) {
+ arg = arg->common.next;
+ }
+
+ if (arg == origin) {
+
+ /* Reached parent of origin, end search */
+
+ return (NULL);
+ }
+
+ if (parent->common.next) {
+
+ /* Found sibling of parent */
+
+ return (parent->common.next);
+ }
+
+ op = parent;
+ parent = parent->common.parent;
+ }
+
+ return (next);
+}
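
Assuming ACPI_FUTURE_USAGE is defined so this routine is compiled in, a depth-first visit over a subtree might look like the sketch below (the visit() callback is hypothetical):

static void ps_walk_subtree(union acpi_parse_object *root,
			    void (*visit)(union acpi_parse_object *op))
{
	union acpi_parse_object *op = root;

	while (op) {
		visit(op);
		op = acpi_ps_get_depth_next(root, op);	/* NULL ends the walk */
	}
}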
+
+#ifdef ACPI_OBSOLETE_FUNCTIONS
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_child
+ *
+ * PARAMETERS: Op - Get the child of this Op
+ *
+ * RETURN: Child Op, NULL if none is found.
+ *
+ * DESCRIPTION: Get the op's child, or NULL if it has none
+ *
+ ******************************************************************************/
+
+union acpi_parse_object *acpi_ps_get_child(union acpi_parse_object *op)
+{
+ union acpi_parse_object *child = NULL;
+
+ ACPI_FUNCTION_ENTRY();
+
+ switch (op->common.aml_opcode) {
+ case AML_SCOPE_OP:
+ case AML_ELSE_OP:
+ case AML_DEVICE_OP:
+ case AML_THERMAL_ZONE_OP:
+ case AML_INT_METHODCALL_OP:
+
+ child = acpi_ps_get_arg(op, 0);
+ break;
+
+ case AML_BUFFER_OP:
+ case AML_PACKAGE_OP:
+ case AML_METHOD_OP:
+ case AML_IF_OP:
+ case AML_WHILE_OP:
+ case AML_FIELD_OP:
+
+ child = acpi_ps_get_arg(op, 1);
+ break;
+
+ case AML_POWER_RES_OP:
+ case AML_INDEX_FIELD_OP:
+
+ child = acpi_ps_get_arg(op, 2);
+ break;
+
+ case AML_PROCESSOR_OP:
+ case AML_BANK_FIELD_OP:
+
+ child = acpi_ps_get_arg(op, 3);
+ break;
+
+ default:
+ /* All others have no children */
+ break;
+ }
+
+ return (child);
+}
+#endif
+#endif /* ACPI_FUTURE_USAGE */
diff --git a/drivers/acpi/parser/psutils.c b/drivers/acpi/parser/psutils.c
new file mode 100644
index 0000000..7cf1f65
--- /dev/null
+++ b/drivers/acpi/parser/psutils.c
@@ -0,0 +1,243 @@
+/******************************************************************************
+ *
+ * Module Name: psutils - Parser miscellaneous utilities (Parser only)
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acparser.h>
+#include <acpi/amlcode.h>
+
+#define _COMPONENT ACPI_PARSER
+ACPI_MODULE_NAME("psutils")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_create_scope_op
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: A new Scope object, null on failure
+ *
+ * DESCRIPTION: Create a Scope and associated namepath op with the root name
+ *
+ ******************************************************************************/
+union acpi_parse_object *acpi_ps_create_scope_op(void)
+{
+ union acpi_parse_object *scope_op;
+
+ scope_op = acpi_ps_alloc_op(AML_SCOPE_OP);
+ if (!scope_op) {
+ return (NULL);
+ }
+
+ scope_op->named.name = ACPI_ROOT_NAME;
+ return (scope_op);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_init_op
+ *
+ * PARAMETERS: Op - A newly allocated Op object
+ * Opcode - Opcode to store in the Op
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Initialize a parse (Op) object
+ *
+ ******************************************************************************/
+
+void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
+{
+ ACPI_FUNCTION_ENTRY();
+
+ op->common.descriptor_type = ACPI_DESC_TYPE_PARSER;
+ op->common.aml_opcode = opcode;
+
+ ACPI_DISASM_ONLY_MEMBERS(ACPI_STRNCPY(op->common.aml_op_name,
+ (acpi_ps_get_opcode_info
+ (opcode))->name,
+ sizeof(op->common.aml_op_name)));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_alloc_op
+ *
+ * PARAMETERS: Opcode - Opcode that will be stored in the new Op
+ *
+ * RETURN: Pointer to the new Op, null on failure
+ *
+ * DESCRIPTION: Allocate an acpi_op, choose op type (and thus size) based on
+ * opcode. A cache of op objects is available for the pure
+ * GENERIC_OP, since it is by far the most commonly used.
+ *
+ ******************************************************************************/
+
+union acpi_parse_object *acpi_ps_alloc_op(u16 opcode)
+{
+ union acpi_parse_object *op;
+ const struct acpi_opcode_info *op_info;
+ u8 flags = ACPI_PARSEOP_GENERIC;
+
+ ACPI_FUNCTION_ENTRY();
+
+ op_info = acpi_ps_get_opcode_info(opcode);
+
+ /* Determine type of parse_op required */
+
+ if (op_info->flags & AML_DEFER) {
+ flags = ACPI_PARSEOP_DEFERRED;
+ } else if (op_info->flags & AML_NAMED) {
+ flags = ACPI_PARSEOP_NAMED;
+ } else if (opcode == AML_INT_BYTELIST_OP) {
+ flags = ACPI_PARSEOP_BYTELIST;
+ }
+
+ /* Allocate the minimum required size object */
+
+ if (flags == ACPI_PARSEOP_GENERIC) {
+
+ /* The generic op (default) is by far the most common (16 to 1) */
+
+ op = acpi_os_acquire_object(acpi_gbl_ps_node_cache);
+ } else {
+ /* Extended parseop */
+
+ op = acpi_os_acquire_object(acpi_gbl_ps_node_ext_cache);
+ }
+
+ /* Initialize the Op */
+
+ if (op) {
+ acpi_ps_init_op(op, opcode);
+ op->common.flags = flags;
+ }
+
+ return (op);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_free_op
+ *
+ * PARAMETERS: Op - Op to be freed
+ *
+ * RETURN: None.
+ *
+ * DESCRIPTION: Free an Op object. Returns it to the generic or the
+ * extended parse-op cache, depending on its flags.
+ *
+ ******************************************************************************/
+
+void acpi_ps_free_op(union acpi_parse_object *op)
+{
+ ACPI_FUNCTION_NAME(ps_free_op);
+
+ if (op->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Free retval op: %p\n",
+ op));
+ }
+
+ if (op->common.flags & ACPI_PARSEOP_GENERIC) {
+ (void)acpi_os_release_object(acpi_gbl_ps_node_cache, op);
+ } else {
+ (void)acpi_os_release_object(acpi_gbl_ps_node_ext_cache, op);
+ }
+}
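
A brief sketch of the two allocation paths described above; the opcodes are arbitrary examples chosen so that one op is generic and the other extended:

static void ps_alloc_example(void)
{
	union acpi_parse_object *generic_op;
	union acpi_parse_object *named_op;

	generic_op = acpi_ps_alloc_op(AML_ADD_OP);	/* generic op cache */
	named_op = acpi_ps_alloc_op(AML_DEVICE_OP);	/* named -> extended cache */

	if (generic_op)
		acpi_ps_free_op(generic_op);
	if (named_op)
		acpi_ps_free_op(named_op);
}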
+
+/*******************************************************************************
+ *
+ * FUNCTION: Utility functions
+ *
+ * DESCRIPTION: Low level character and object functions
+ *
+ ******************************************************************************/
+
+/*
+ * Is "c" a namestring lead character?
+ */
+u8 acpi_ps_is_leading_char(u32 c)
+{
+ return ((u8) (c == '_' || (c >= 'A' && c <= 'Z')));
+}
+
+/*
+ * Is "c" a namestring prefix character?
+ */
+u8 acpi_ps_is_prefix_char(u32 c)
+{
+ return ((u8) (c == '\\' || c == '^'));
+}
+
+/*
+ * Get op's name (4-byte name segment) or 0 if unnamed
+ */
+#ifdef ACPI_FUTURE_USAGE
+u32 acpi_ps_get_name(union acpi_parse_object * op)
+{
+
+ /* The "generic" object has no name associated with it */
+
+ if (op->common.flags & ACPI_PARSEOP_GENERIC) {
+ return (0);
+ }
+
+ /* Only the "Extended" parse objects have a name */
+
+ return (op->named.name);
+}
+#endif /* ACPI_FUTURE_USAGE */
+
+/*
+ * Set op's name
+ */
+void acpi_ps_set_name(union acpi_parse_object *op, u32 name)
+{
+
+ /* The "generic" object has no name associated with it */
+
+ if (op->common.flags & ACPI_PARSEOP_GENERIC) {
+ return;
+ }
+
+ op->named.name = name;
+}
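
A small sketch of the character checks and the packed 4-byte name consumed by acpi_ps_set_name; the "_TMP" segment and the helper itself are illustrative assumptions, and the op must be an extended (non-generic) parse object for the name to stick:

static void ps_name_example(union acpi_parse_object *named_op)
{
	char *seg = "_TMP";

	/* A nameseg lead character must be '_' or 'A'-'Z' */
	if (acpi_ps_is_leading_char((u32)seg[0]))
		acpi_ps_set_name(named_op, *ACPI_CAST_PTR(u32, seg));
}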
diff --git a/drivers/acpi/parser/pswalk.c b/drivers/acpi/parser/pswalk.c
new file mode 100644
index 0000000..8b86ad5
--- /dev/null
+++ b/drivers/acpi/parser/pswalk.c
@@ -0,0 +1,109 @@
+/******************************************************************************
+ *
+ * Module Name: pswalk - Parser routines to walk parsed op tree(s)
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acparser.h>
+
+#define _COMPONENT ACPI_PARSER
+ACPI_MODULE_NAME("pswalk")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_delete_parse_tree
+ *
+ * PARAMETERS: subtree_root - Root of tree (or subtree) to delete
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Delete a portion of or an entire parse tree.
+ *
+ ******************************************************************************/
+void acpi_ps_delete_parse_tree(union acpi_parse_object *subtree_root)
+{
+ union acpi_parse_object *op = subtree_root;
+ union acpi_parse_object *next = NULL;
+ union acpi_parse_object *parent = NULL;
+
+ ACPI_FUNCTION_TRACE_PTR(ps_delete_parse_tree, subtree_root);
+
+ /* Visit all nodes in the subtree */
+
+ while (op) {
+
+ /* Check if we are not ascending */
+
+ if (op != parent) {
+
+ /* Look for an argument or child of the current op */
+
+ next = acpi_ps_get_arg(op, 0);
+ if (next) {
+
+ /* Still going downward in tree (Op is not completed yet) */
+
+ op = next;
+ continue;
+ }
+ }
+
+ /* No more children, this Op is complete. */
+
+ next = op->common.next;
+ parent = op->common.parent;
+
+ acpi_ps_free_op(op);
+
+ /* If we are back to the starting point, the walk is complete. */
+
+ if (op == subtree_root) {
+ return_VOID;
+ }
+ if (next) {
+ op = next;
+ } else {
+ op = parent;
+ }
+ }
+
+ return_VOID;
+}
diff --git a/drivers/acpi/parser/psxface.c b/drivers/acpi/parser/psxface.c
new file mode 100644
index 0000000..270469a
--- /dev/null
+++ b/drivers/acpi/parser/psxface.c
@@ -0,0 +1,351 @@
+/******************************************************************************
+ *
+ * Module Name: psxface - Parser external interfaces
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acparser.h>
+#include <acpi/acdispat.h>
+#include <acpi/acinterp.h>
+
+#define _COMPONENT ACPI_PARSER
+ACPI_MODULE_NAME("psxface")
+
+/* Local Prototypes */
+static void acpi_ps_start_trace(struct acpi_evaluate_info *info);
+
+static void acpi_ps_stop_trace(struct acpi_evaluate_info *info);
+
+static void
+acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_debug_trace
+ *
+ * PARAMETERS: method_name - Valid ACPI name string
+ * debug_level - Optional level mask. 0 to use default
+ * debug_layer - Optional layer mask. 0 to use default
+ * Flags - bit 0: one-shot (1) or persistent (0)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: External interface to enable debug tracing during control
+ * method execution
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_debug_trace(char *name, u32 debug_level, u32 debug_layer, u32 flags)
+{
+ acpi_status status;
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* TBDs: Validate name, allow full path or just nameseg */
+
+ acpi_gbl_trace_method_name = *ACPI_CAST_PTR(u32, name);
+ acpi_gbl_trace_flags = flags;
+
+ if (debug_level) {
+ acpi_gbl_trace_dbg_level = debug_level;
+ }
+ if (debug_layer) {
+ acpi_gbl_trace_dbg_layer = debug_layer;
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return (AE_OK);
+}
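
An assumed call site for acpi_debug_trace, tracing a single execution of a method named "_TMP" with the default trace level and layer:

static void debug_trace_example(void)
{
	char method[] = "_TMP";		/* arbitrary 4-char method name */
	acpi_status status;

	status = acpi_debug_trace(method, 0, 0, 1);	/* flag bit 0 = one-shot */
	if (ACPI_FAILURE(status))
		ACPI_ERROR((AE_INFO, "Could not enable method trace"));
}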
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_start_trace
+ *
+ * PARAMETERS: Info - Method info struct
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Start control method execution trace
+ *
+ ******************************************************************************/
+
+static void acpi_ps_start_trace(struct acpi_evaluate_info *info)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_ENTRY();
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return;
+ }
+
+ if ((!acpi_gbl_trace_method_name) ||
+ (acpi_gbl_trace_method_name != info->resolved_node->name.integer)) {
+ goto exit;
+ }
+
+ acpi_gbl_original_dbg_level = acpi_dbg_level;
+ acpi_gbl_original_dbg_layer = acpi_dbg_layer;
+
+ acpi_dbg_level = 0x00FFFFFF;
+ acpi_dbg_layer = ACPI_UINT32_MAX;
+
+ if (acpi_gbl_trace_dbg_level) {
+ acpi_dbg_level = acpi_gbl_trace_dbg_level;
+ }
+ if (acpi_gbl_trace_dbg_layer) {
+ acpi_dbg_layer = acpi_gbl_trace_dbg_layer;
+ }
+
+ exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_stop_trace
+ *
+ * PARAMETERS: Info - Method info struct
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Stop control method execution trace
+ *
+ ******************************************************************************/
+
+static void acpi_ps_stop_trace(struct acpi_evaluate_info *info)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_ENTRY();
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return;
+ }
+
+ if ((!acpi_gbl_trace_method_name) ||
+ (acpi_gbl_trace_method_name != info->resolved_node->name.integer)) {
+ goto exit;
+ }
+
+ /* Disable further tracing if type is one-shot */
+
+ if (acpi_gbl_trace_flags & 1) {
+ acpi_gbl_trace_method_name = 0;
+ acpi_gbl_trace_dbg_level = 0;
+ acpi_gbl_trace_dbg_layer = 0;
+ }
+
+ acpi_dbg_level = acpi_gbl_original_dbg_level;
+ acpi_dbg_layer = acpi_gbl_original_dbg_layer;
+
+ exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_execute_method
+ *
+ * PARAMETERS: Info - Method info block, contains:
+ * Node - Method Node to execute
+ * obj_desc - Method object
+ * Parameters - List of parameters to pass to the method,
+ * terminated by NULL. Params itself may be
+ * NULL if no parameters are being passed.
+ * return_object - Where to put method's return value (if
+ * any). If NULL, no value is returned.
+ * parameter_type - Type of Parameter list
+ * pass_number - Parse or execute pass
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Execute a control method
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
+{
+ acpi_status status;
+ union acpi_parse_object *op;
+ struct acpi_walk_state *walk_state;
+
+ ACPI_FUNCTION_TRACE(ps_execute_method);
+
+ /* Validate the Info and method Node */
+
+ if (!info || !info->resolved_node) {
+ return_ACPI_STATUS(AE_NULL_ENTRY);
+ }
+
+ /* Init for new method, wait on concurrency semaphore */
+
+ status =
+ acpi_ds_begin_method_execution(info->resolved_node, info->obj_desc,
+ NULL);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * The caller "owns" the parameters, so give each one an extra reference
+ */
+ acpi_ps_update_parameter_list(info, REF_INCREMENT);
+
+ /* Begin tracing if requested */
+
+ acpi_ps_start_trace(info);
+
+ /*
+ * Execute the method. Performs parse simultaneously
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "**** Begin Method Parse/Execute [%4.4s] **** Node=%p Obj=%p\n",
+ info->resolved_node->name.ascii, info->resolved_node,
+ info->obj_desc));
+
+ /* Create and init a Root Node */
+
+ op = acpi_ps_create_scope_op();
+ if (!op) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ /* Create and initialize a new walk state */
+
+ info->pass_number = ACPI_IMODE_EXECUTE;
+ walk_state =
+ acpi_ds_create_walk_state(info->obj_desc->method.owner_id, NULL,
+ NULL, NULL);
+ if (!walk_state) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ status = acpi_ds_init_aml_walk(walk_state, op, info->resolved_node,
+ info->obj_desc->method.aml_start,
+ info->obj_desc->method.aml_length, info,
+ info->pass_number);
+ if (ACPI_FAILURE(status)) {
+ acpi_ds_delete_walk_state(walk_state);
+ goto cleanup;
+ }
+
+ /* Parse the AML */
+
+ status = acpi_ps_parse_aml(walk_state);
+
+ /* walk_state was deleted by parse_aml */
+
+ cleanup:
+ acpi_ps_delete_parse_tree(op);
+
+ /* End optional tracing */
+
+ acpi_ps_stop_trace(info);
+
+ /* Take away the extra reference that we gave the parameters above */
+
+ acpi_ps_update_parameter_list(info, REF_DECREMENT);
+
+ /* Exit now if error above */
+
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * If the method has returned an object, signal this to the caller with
+ * a control exception code
+ */
+ if (info->return_object) {
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Method returned ObjDesc=%p\n",
+ info->return_object));
+ ACPI_DUMP_STACK_ENTRY(info->return_object);
+
+ status = AE_CTRL_RETURN_VALUE;
+ }
+
+ return_ACPI_STATUS(status);
+}
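
A caller-side sketch of acpi_ps_execute_method, assuming the namespace node and method object were already resolved; in the real driver the info block is heap-allocated and filled in by the namespace evaluation path, so this helper is illustrative only:

static acpi_status ps_execute_example(struct acpi_namespace_node *node,
				      union acpi_operand_object *obj_desc)
{
	struct acpi_evaluate_info info;
	acpi_status status;

	ACPI_MEMSET(&info, 0, sizeof(info));
	info.resolved_node = node;
	info.obj_desc = obj_desc;
	info.parameters = NULL;		/* no arguments passed */

	status = acpi_ps_execute_method(&info);
	if (status == AE_CTRL_RETURN_VALUE) {
		/* info.return_object holds the method's return value */
		acpi_ut_remove_reference(info.return_object);
		status = AE_OK;
	}
	return status;
}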
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_update_parameter_list
+ *
+ * PARAMETERS: Info - See struct acpi_evaluate_info
+ * (Used: parameter_type and Parameters)
+ * Action - Add or Remove reference
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Update reference count on all method parameter objects
+ *
+ ******************************************************************************/
+
+static void
+acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action)
+{
+ u32 i;
+
+ if (info->parameters) {
+
+ /* Update reference count for each parameter */
+
+ for (i = 0; info->parameters[i]; i++) {
+
+ /* Ignore errors, just do them all */
+
+ (void)acpi_ut_update_object_reference(info->
+ parameters[i],
+ action);
+ }
+ }
+}
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c
new file mode 100644
index 0000000..4b252ea
--- /dev/null
+++ b/drivers/acpi/pci_bind.c
@@ -0,0 +1,373 @@
+/*
+ * pci_bind.c - ACPI PCI Device Binding ($Revision: 2 $)
+ *
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <linux/pm.h>
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#define _COMPONENT ACPI_PCI_COMPONENT
+ACPI_MODULE_NAME("pci_bind");
+
+struct acpi_pci_data {
+ struct acpi_pci_id id;
+ struct pci_bus *bus;
+ struct pci_dev *dev;
+};
+
+static int acpi_pci_unbind(struct acpi_device *device);
+
+static void acpi_pci_data_handler(acpi_handle handle, u32 function,
+ void *context)
+{
+
+ /* TBD: Anything we need to do here? */
+
+ return;
+}
+
+/**
+ * acpi_get_pci_id
+ * ------------------
+ * This function is used by the ACPI Interpreter (a.k.a. Core Subsystem)
+ * to resolve PCI information for ACPI-PCI devices defined in the namespace.
+ * This typically occurs when resolving PCI operation region information.
+ */
+acpi_status acpi_get_pci_id(acpi_handle handle, struct acpi_pci_id *id)
+{
+ int result = 0;
+ acpi_status status = AE_OK;
+ struct acpi_device *device = NULL;
+ struct acpi_pci_data *data = NULL;
+
+
+ if (!id)
+ return AE_BAD_PARAMETER;
+
+ result = acpi_bus_get_device(handle, &device);
+ if (result) {
+ printk(KERN_ERR PREFIX
+ "Invalid ACPI Bus context for device %s\n",
+ acpi_device_bid(device));
+ return AE_NOT_EXIST;
+ }
+
+ status = acpi_get_data(handle, acpi_pci_data_handler, (void **)&data);
+ if (ACPI_FAILURE(status) || !data) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Invalid ACPI-PCI context for device %s",
+ acpi_device_bid(device)));
+ return status;
+ }
+
+ *id = data->id;
+
+ /*
+ id->segment = data->id.segment;
+ id->bus = data->id.bus;
+ id->device = data->id.device;
+ id->function = data->id.function;
+ */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Device %s has PCI address %02x:%02x:%02x.%02x\n",
+ acpi_device_bid(device), id->segment, id->bus,
+ id->device, id->function));
+
+ return AE_OK;
+}
+
+EXPORT_SYMBOL(acpi_get_pci_id);
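
An assumed consumer of acpi_get_pci_id, printing the resolved PCI address for a handle:

static acpi_status show_pci_address(acpi_handle handle)
{
	struct acpi_pci_id id;
	acpi_status status;

	status = acpi_get_pci_id(handle, &id);
	if (ACPI_FAILURE(status))
		return status;

	printk(KERN_DEBUG PREFIX "PCI address %04x:%02x:%02x.%d\n",
	       id.segment, id.bus, id.device, id.function);
	return AE_OK;
}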
+
+int acpi_pci_bind(struct acpi_device *device)
+{
+ int result = 0;
+ acpi_status status = AE_OK;
+ struct acpi_pci_data *data = NULL;
+ struct acpi_pci_data *pdata = NULL;
+ char *pathname = NULL;
+ struct acpi_buffer buffer = { 0, NULL };
+ acpi_handle handle = NULL;
+ struct pci_dev *dev;
+ struct pci_bus *bus;
+
+
+ if (!device || !device->parent)
+ return -EINVAL;
+
+ pathname = kzalloc(ACPI_PATHNAME_MAX, GFP_KERNEL);
+ if (!pathname)
+ return -ENOMEM;
+ buffer.length = ACPI_PATHNAME_MAX;
+ buffer.pointer = pathname;
+
+ data = kzalloc(sizeof(struct acpi_pci_data), GFP_KERNEL);
+ if (!data) {
+ kfree(pathname);
+ return -ENOMEM;
+ }
+
+ acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Binding PCI device [%s]...\n",
+ pathname));
+
+ /*
+ * Segment & Bus
+ * -------------
+ * These are obtained via the parent device's ACPI-PCI context.
+ */
+ status = acpi_get_data(device->parent->handle, acpi_pci_data_handler,
+ (void **)&pdata);
+ if (ACPI_FAILURE(status) || !pdata || !pdata->bus) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Invalid ACPI-PCI context for parent device %s",
+ acpi_device_bid(device->parent)));
+ result = -ENODEV;
+ goto end;
+ }
+ data->id.segment = pdata->id.segment;
+ data->id.bus = pdata->bus->number;
+
+ /*
+ * Device & Function
+ * -----------------
+ * These are simply obtained from the device's _ADR method. Note
+ * that a value of zero is valid.
+ */
+ data->id.device = device->pnp.bus_address >> 16;
+ data->id.function = device->pnp.bus_address & 0xFFFF;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "...to %02x:%02x:%02x.%02x\n",
+ data->id.segment, data->id.bus, data->id.device,
+ data->id.function));
+
+ /*
+ * TBD: Support slot devices (e.g. function=0xFFFF).
+ */
+
+ /*
+ * Locate PCI Device
+ * -----------------
+ * Locate matching device in PCI namespace. If it doesn't exist
+ * this typically means that the device isn't currently inserted
+ * (e.g. docking station, port replicator, etc.).
+ * We cannot simply search the global pci device list, since
+ * PCI devices are added to the global pci list when the root
+ * bridge start ops are run, which may not have happened yet.
+ */
+ bus = pci_find_bus(data->id.segment, data->id.bus);
+ if (bus) {
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (dev->devfn == PCI_DEVFN(data->id.device,
+ data->id.function)) {
+ data->dev = dev;
+ break;
+ }
+ }
+ }
+ if (!data->dev) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Device %02x:%02x:%02x.%02x not present in PCI namespace\n",
+ data->id.segment, data->id.bus,
+ data->id.device, data->id.function));
+ result = -ENODEV;
+ goto end;
+ }
+ if (!data->dev->bus) {
+ printk(KERN_ERR PREFIX
+ "Device %02x:%02x:%02x.%02x has invalid 'bus' field\n",
+ data->id.segment, data->id.bus,
+ data->id.device, data->id.function);
+ result = -ENODEV;
+ goto end;
+ }
+
+ /*
+ * PCI Bridge?
+ * -----------
+ * If so, set the 'bus' field and install the 'bind' function to
+ * facilitate callbacks for all of its children.
+ */
+ if (data->dev->subordinate) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Device %02x:%02x:%02x.%02x is a PCI bridge\n",
+ data->id.segment, data->id.bus,
+ data->id.device, data->id.function));
+ data->bus = data->dev->subordinate;
+ device->ops.bind = acpi_pci_bind;
+ device->ops.unbind = acpi_pci_unbind;
+ }
+
+ /*
+ * Attach ACPI-PCI Context
+ * -----------------------
+ * Thus binding the ACPI and PCI devices.
+ */
+ status = acpi_attach_data(device->handle, acpi_pci_data_handler, data);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Unable to attach ACPI-PCI context to device %s",
+ acpi_device_bid(device)));
+ result = -ENODEV;
+ goto end;
+ }
+
+ /*
+ * PCI Routing Table
+ * -----------------
+ * Evaluate and parse _PRT, if exists. This code is independent of
+ * PCI bridges (above) to allow parsing of _PRT objects within the
+ * scope of non-bridge devices. Note that _PRTs within the scope of
+ * a PCI bridge assume the bridge's subordinate bus number.
+ *
+ * TBD: Can _PRTs exist within the scope of non-bridge PCI devices?
+ */
+ status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle);
+ if (ACPI_SUCCESS(status)) {
+ if (data->bus) /* PCI-PCI bridge */
+ acpi_pci_irq_add_prt(device->handle, data->id.segment,
+ data->bus->number);
+ else /* non-bridge PCI device */
+ acpi_pci_irq_add_prt(device->handle, data->id.segment,
+ data->id.bus);
+ }
+
+ end:
+ kfree(pathname);
+ if (result)
+ kfree(data);
+
+ return result;
+}
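
A worked example of the _ADR decoding used above; the value 0x001F0003 is hypothetical and simply demonstrates the device/function split:

static void decode_adr_example(void)
{
	u64 adr = 0x001F0003;			/* sample _ADR value */
	u16 device = (u16)(adr >> 16);		/* 0x1f */
	u16 function = (u16)(adr & 0xFFFF);	/* 0x03 */

	printk(KERN_DEBUG PREFIX "ADR %08llx -> device %02x, function %x\n",
	       (unsigned long long)adr, device, function);
}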
+
+static int acpi_pci_unbind(struct acpi_device *device)
+{
+ int result = 0;
+ acpi_status status = AE_OK;
+ struct acpi_pci_data *data = NULL;
+ char *pathname = NULL;
+ struct acpi_buffer buffer = { 0, NULL };
+
+
+ if (!device || !device->parent)
+ return -EINVAL;
+
+ pathname = kzalloc(ACPI_PATHNAME_MAX, GFP_KERNEL);
+ if (!pathname)
+ return -ENOMEM;
+
+ buffer.length = ACPI_PATHNAME_MAX;
+ buffer.pointer = pathname;
+ acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Unbinding PCI device [%s]...\n",
+ pathname));
+ kfree(pathname);
+
+ status =
+ acpi_get_data(device->handle, acpi_pci_data_handler,
+ (void **)&data);
+ if (ACPI_FAILURE(status)) {
+ result = -ENODEV;
+ goto end;
+ }
+
+ status = acpi_detach_data(device->handle, acpi_pci_data_handler);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Unable to detach data from device %s",
+ acpi_device_bid(device)));
+ result = -ENODEV;
+ goto end;
+ }
+ if (data->dev->subordinate) {
+ acpi_pci_irq_del_prt(data->id.segment, data->bus->number);
+ }
+ kfree(data);
+
+ end:
+ return result;
+}
+
+int
+acpi_pci_bind_root(struct acpi_device *device,
+ struct acpi_pci_id *id, struct pci_bus *bus)
+{
+ int result = 0;
+ acpi_status status = AE_OK;
+ struct acpi_pci_data *data = NULL;
+ char *pathname = NULL;
+ struct acpi_buffer buffer = { 0, NULL };
+
+ pathname = kzalloc(ACPI_PATHNAME_MAX, GFP_KERNEL);
+ if (!pathname)
+ return -ENOMEM;
+
+ buffer.length = ACPI_PATHNAME_MAX;
+ buffer.pointer = pathname;
+
+ if (!device || !id || !bus) {
+ kfree(pathname);
+ return -EINVAL;
+ }
+
+ data = kzalloc(sizeof(struct acpi_pci_data), GFP_KERNEL);
+ if (!data) {
+ kfree(pathname);
+ return -ENOMEM;
+ }
+
+ data->id = *id;
+ data->bus = bus;
+ device->ops.bind = acpi_pci_bind;
+ device->ops.unbind = acpi_pci_unbind;
+
+ acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Binding PCI root bridge [%s] to "
+ "%02x:%02x\n", pathname, id->segment, id->bus));
+
+ status = acpi_attach_data(device->handle, acpi_pci_data_handler, data);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Unable to attach ACPI-PCI context to device %s",
+ pathname));
+ result = -ENODEV;
+ goto end;
+ }
+
+ end:
+ kfree(pathname);
+ if (result != 0)
+ kfree(data);
+
+ return result;
+}
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
new file mode 100644
index 0000000..11acaee
--- /dev/null
+++ b/drivers/acpi/pci_irq.c
@@ -0,0 +1,621 @@
+/*
+ * pci_irq.c - ACPI PCI Interrupt Routing ($Revision: 11 $)
+ *
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2002 Dominik Brodowski <devel@brodo.de>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+
+#include <linux/dmi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <linux/pm.h>
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#define _COMPONENT ACPI_PCI_COMPONENT
+ACPI_MODULE_NAME("pci_irq");
+
+static struct acpi_prt_list acpi_prt;
+static DEFINE_SPINLOCK(acpi_prt_lock);
+
+/* --------------------------------------------------------------------------
+ PCI IRQ Routing Table (PRT) Support
+ -------------------------------------------------------------------------- */
+
+static struct acpi_prt_entry *acpi_pci_irq_find_prt_entry(int segment,
+ int bus,
+ int device, int pin)
+{
+ struct acpi_prt_entry *entry = NULL;
+
+ if (!acpi_prt.count)
+ return NULL;
+
+ /*
+ * Parse through all PRT entries looking for a match on the specified
+ * PCI device's segment, bus, device, and pin (don't care about func).
+ *
+ */
+ spin_lock(&acpi_prt_lock);
+ list_for_each_entry(entry, &acpi_prt.entries, node) {
+ if ((segment == entry->id.segment)
+ && (bus == entry->id.bus)
+ && (device == entry->id.device)
+ && (pin == entry->pin)) {
+ spin_unlock(&acpi_prt_lock);
+ return entry;
+ }
+ }
+
+ spin_unlock(&acpi_prt_lock);
+ return NULL;
+}
+
+/* http://bugzilla.kernel.org/show_bug.cgi?id=4773 */
+static struct dmi_system_id medion_md9580[] = {
+ {
+ .ident = "Medion MD9580-F laptop",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDIONNB"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "A555"),
+ },
+ },
+ { }
+};
+
+/* http://bugzilla.kernel.org/show_bug.cgi?id=5044 */
+static struct dmi_system_id dell_optiplex[] = {
+ {
+ .ident = "Dell Optiplex GX1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex GX1 600S+"),
+ },
+ },
+ { }
+};
+
+/* http://bugzilla.kernel.org/show_bug.cgi?id=10138 */
+static struct dmi_system_id hp_t5710[] = {
+ {
+ .ident = "HP t5710",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "hp t5000 series"),
+ DMI_MATCH(DMI_BOARD_NAME, "098Ch"),
+ },
+ },
+ { }
+};
+
+struct prt_quirk {
+ struct dmi_system_id *system;
+ unsigned int segment;
+ unsigned int bus;
+ unsigned int device;
+ unsigned char pin;
+ char *source; /* according to BIOS */
+ char *actual_source;
+};
+
+/*
+ * These systems have incorrect _PRT entries. The BIOS claims the PCI
+ * interrupt at the listed segment/bus/device/pin is connected to the first
+ * link device, but it is actually connected to the second.
+ */
+static struct prt_quirk prt_quirks[] = {
+ { medion_md9580, 0, 0, 9, 'A',
+ "\\_SB_.PCI0.ISA_.LNKA",
+ "\\_SB_.PCI0.ISA_.LNKB"},
+ { dell_optiplex, 0, 0, 0xd, 'A',
+ "\\_SB_.LNKB",
+ "\\_SB_.LNKA"},
+ { hp_t5710, 0, 0, 1, 'A',
+ "\\_SB_.PCI0.LNK1",
+ "\\_SB_.PCI0.LNK3"},
+};
+
+static void
+do_prt_fixups(struct acpi_prt_entry *entry, struct acpi_pci_routing_table *prt)
+{
+ int i;
+ struct prt_quirk *quirk;
+
+ for (i = 0; i < ARRAY_SIZE(prt_quirks); i++) {
+ quirk = &prt_quirks[i];
+
+ /* All current quirks involve link devices, not GSIs */
+ if (!prt->source)
+ continue;
+
+ if (dmi_check_system(quirk->system) &&
+ entry->id.segment == quirk->segment &&
+ entry->id.bus == quirk->bus &&
+ entry->id.device == quirk->device &&
+ entry->pin + 'A' == quirk->pin &&
+ !strcmp(prt->source, quirk->source) &&
+ strlen(prt->source) >= strlen(quirk->actual_source)) {
+ printk(KERN_WARNING PREFIX "firmware reports "
+ "%04x:%02x:%02x PCI INT %c connected to %s; "
+ "changing to %s\n",
+ entry->id.segment, entry->id.bus,
+ entry->id.device, 'A' + entry->pin,
+ prt->source, quirk->actual_source);
+ strcpy(prt->source, quirk->actual_source);
+ }
+ }
+}
+
+static int
+acpi_pci_irq_add_entry(acpi_handle handle,
+ int segment, int bus, struct acpi_pci_routing_table *prt)
+{
+ struct acpi_prt_entry *entry = NULL;
+
+
+ if (!prt)
+ return -EINVAL;
+
+ entry = kzalloc(sizeof(struct acpi_prt_entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->id.segment = segment;
+ entry->id.bus = bus;
+ entry->id.device = (prt->address >> 16) & 0xFFFF;
+ entry->id.function = prt->address & 0xFFFF;
+ entry->pin = prt->pin;
+
+ do_prt_fixups(entry, prt);
+
+ /*
+ * Type 1: Dynamic
+ * ---------------
+ * The 'source' field specifies the PCI interrupt link device used to
+ * configure the IRQ assigned to this slot|dev|pin. The 'source_index'
+ * indicates which resource descriptor in the resource template (of
+ * the link device) this interrupt is allocated from.
+ *
+ * NOTE: Don't query the Link Device for IRQ information at this time
+ * because Link Device enumeration may not have occurred yet
+ * (e.g. exists somewhere 'below' this _PRT entry in the ACPI
+ * namespace).
+ */
+ if (prt->source[0]) {
+ acpi_get_handle(handle, prt->source, &entry->link.handle);
+ entry->link.index = prt->source_index;
+ }
+ /*
+ * Type 2: Static
+ * --------------
+ * The 'source' field is NULL, and the 'source_index' field specifies
+ * the IRQ value, which is hardwired to specific interrupt inputs on
+ * the interrupt controller.
+ */
+ else
+ entry->link.index = prt->source_index;
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INFO,
+ " %02X:%02X:%02X[%c] -> %s[%d]\n",
+ entry->id.segment, entry->id.bus,
+ entry->id.device, ('A' + entry->pin), prt->source,
+ entry->link.index));
+
+ spin_lock(&acpi_prt_lock);
+ list_add_tail(&entry->node, &acpi_prt.entries);
+ acpi_prt.count++;
+ spin_unlock(&acpi_prt_lock);
+
+ return 0;
+}
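
An illustration (values hypothetical) of how the two _PRT flavors described above look once stored in an acpi_prt_entry:

static void prt_entry_flavors_example(struct acpi_prt_entry *dyn,
				      struct acpi_prt_entry *fixed)
{
	/* Type 1 (dynamic): routed through a link device such as \_SB.LNKA */
	if (dyn->link.handle)
		printk(KERN_DEBUG PREFIX "uses link device, resource index %u\n",
		       dyn->link.index);

	/* Type 2 (static): no link handle, index is the hardwired IRQ/GSI */
	if (!fixed->link.handle)
		printk(KERN_DEBUG PREFIX "hardwired to IRQ %u\n",
		       fixed->link.index);
}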
+
+static void
+acpi_pci_irq_del_entry(int segment, int bus, struct acpi_prt_entry *entry)
+{
+ if (segment == entry->id.segment && bus == entry->id.bus) {
+ acpi_prt.count--;
+ list_del(&entry->node);
+ kfree(entry);
+ }
+}
+
+int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus)
+{
+ acpi_status status = AE_OK;
+ char *pathname = NULL;
+ struct acpi_buffer buffer = { 0, NULL };
+ struct acpi_pci_routing_table *prt = NULL;
+ struct acpi_pci_routing_table *entry = NULL;
+ static int first_time = 1;
+
+
+ pathname = kzalloc(ACPI_PATHNAME_MAX, GFP_KERNEL);
+ if (!pathname)
+ return -ENOMEM;
+
+ if (first_time) {
+ acpi_prt.count = 0;
+ INIT_LIST_HEAD(&acpi_prt.entries);
+ first_time = 0;
+ }
+
+ /*
+ * NOTE: We're given a 'handle' to the _PRT object's parent device
+ * (either a PCI root bridge or PCI-PCI bridge).
+ */
+
+ buffer.length = ACPI_PATHNAME_MAX;
+ buffer.pointer = pathname;
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+
+ printk(KERN_DEBUG "ACPI: PCI Interrupt Routing Table [%s._PRT]\n",
+ pathname);
+
+ /*
+ * Evaluate this _PRT and add its entries to our global list (acpi_prt).
+ */
+
+ buffer.length = 0;
+ buffer.pointer = NULL;
+ kfree(pathname);
+ status = acpi_get_irq_routing_table(handle, &buffer);
+ if (status != AE_BUFFER_OVERFLOW) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRT [%s]",
+ acpi_format_exception(status)));
+ return -ENODEV;
+ }
+
+ prt = kzalloc(buffer.length, GFP_KERNEL);
+ if (!prt) {
+ return -ENOMEM;
+ }
+ buffer.pointer = prt;
+
+ status = acpi_get_irq_routing_table(handle, &buffer);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRT [%s]",
+ acpi_format_exception(status)));
+ kfree(buffer.pointer);
+ return -ENODEV;
+ }
+
+ entry = prt;
+
+ while (entry && (entry->length > 0)) {
+ acpi_pci_irq_add_entry(handle, segment, bus, entry);
+ entry = (struct acpi_pci_routing_table *)
+ ((unsigned long)entry + entry->length);
+ }
+
+ kfree(prt);
+
+ return 0;
+}
+
+void acpi_pci_irq_del_prt(int segment, int bus)
+{
+ struct list_head *node = NULL, *n = NULL;
+ struct acpi_prt_entry *entry = NULL;
+
+ if (!acpi_prt.count) {
+ return;
+ }
+
+ printk(KERN_DEBUG
+ "ACPI: Delete PCI Interrupt Routing Table for %x:%x\n", segment,
+ bus);
+ spin_lock(&acpi_prt_lock);
+ list_for_each_safe(node, n, &acpi_prt.entries) {
+ entry = list_entry(node, struct acpi_prt_entry, node);
+
+ acpi_pci_irq_del_entry(segment, bus, entry);
+ }
+ spin_unlock(&acpi_prt_lock);
+}
+
+/* --------------------------------------------------------------------------
+ PCI Interrupt Routing Support
+ -------------------------------------------------------------------------- */
+typedef int (*irq_lookup_func) (struct acpi_prt_entry *, int *, int *, char **);
+
+static int
+acpi_pci_allocate_irq(struct acpi_prt_entry *entry,
+ int *triggering, int *polarity, char **link)
+{
+ int irq;
+
+
+ if (entry->link.handle) {
+ irq = acpi_pci_link_allocate_irq(entry->link.handle,
+ entry->link.index, triggering,
+ polarity, link);
+ if (irq < 0) {
+ printk(KERN_WARNING PREFIX
+ "Invalid IRQ link routing entry\n");
+ return -1;
+ }
+ } else {
+ irq = entry->link.index;
+ *triggering = ACPI_LEVEL_SENSITIVE;
+ *polarity = ACPI_ACTIVE_LOW;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found IRQ %d\n", irq));
+ return irq;
+}
+
+static int
+acpi_pci_free_irq(struct acpi_prt_entry *entry,
+ int *triggering, int *polarity, char **link)
+{
+ int irq;
+
+ if (entry->link.handle) {
+ irq = acpi_pci_link_free_irq(entry->link.handle);
+ } else {
+ irq = entry->link.index;
+ }
+ return irq;
+}
+
+/*
+ * acpi_pci_irq_lookup
+ * success: return IRQ >= 0
+ * failure: return -1
+ */
+static int
+acpi_pci_irq_lookup(struct pci_bus *bus,
+ int device,
+ int pin,
+ int *triggering,
+ int *polarity, char **link, irq_lookup_func func)
+{
+ struct acpi_prt_entry *entry = NULL;
+ int segment = pci_domain_nr(bus);
+ int bus_nr = bus->number;
+ int ret;
+
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Searching for PRT entry for %02x:%02x:%02x[%c]\n",
+ segment, bus_nr, device, ('A' + pin)));
+
+ entry = acpi_pci_irq_find_prt_entry(segment, bus_nr, device, pin);
+ if (!entry) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PRT entry not found\n"));
+ return -1;
+ }
+
+ ret = func(entry, triggering, polarity, link);
+ return ret;
+}
+
+/*
+ * acpi_pci_irq_derive
+ * success: return IRQ >= 0
+ * failure: return < 0
+ */
+static int
+acpi_pci_irq_derive(struct pci_dev *dev,
+ int pin,
+ int *triggering,
+ int *polarity, char **link, irq_lookup_func func)
+{
+ struct pci_dev *bridge = dev;
+ int irq = -1;
+ u8 bridge_pin = 0, orig_pin = pin;
+
+
+ if (!dev)
+ return -EINVAL;
+
+ /*
+ * Attempt to derive an IRQ for this device from a parent bridge's
+	 * PCI interrupt routing entry (e.g. yenta bridge and add-in card bridge).
+ */
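+	/*
+	 * Worked example of the standard INTx swizzle applied below: a device
+	 * in slot 2 using INTB (pin 1) behind a bridge appears to that bridge
+	 * as pin (1 + 2) % 4 = 3, i.e. INTD, so the bridge's _PRT entry for
+	 * INTD is the one consulted.
+	 */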
+ while (irq < 0 && bridge->bus->self) {
+ pin = (pin + PCI_SLOT(bridge->devfn)) % 4;
+ bridge = bridge->bus->self;
+
+ if ((bridge->class >> 8) == PCI_CLASS_BRIDGE_CARDBUS) {
+ /* PC card has the same IRQ as its cardbridge */
+ bridge_pin = bridge->pin;
+ if (!bridge_pin) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "No interrupt pin configured for device %s\n",
+ pci_name(bridge)));
+ return -1;
+ }
+ /* Pin is from 0 to 3 */
+ bridge_pin--;
+ pin = bridge_pin;
+ }
+
+ irq = acpi_pci_irq_lookup(bridge->bus, PCI_SLOT(bridge->devfn),
+ pin, triggering, polarity,
+ link, func);
+ }
+
+ if (irq < 0) {
+ dev_warn(&dev->dev, "can't derive routing for PCI INT %c\n",
+ 'A' + orig_pin);
+ return -1;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Derive IRQ %d for device %s from %s\n",
+ irq, pci_name(dev), pci_name(bridge)));
+
+ return irq;
+}
+
+/*
+ * acpi_pci_irq_enable
+ * success: return 0
+ * failure: return < 0
+ */
+
+int acpi_pci_irq_enable(struct pci_dev *dev)
+{
+ int irq = 0;
+ u8 pin = 0;
+ int triggering = ACPI_LEVEL_SENSITIVE;
+ int polarity = ACPI_ACTIVE_LOW;
+ char *link = NULL;
+ char link_desc[16];
+ int rc;
+
+
+ if (!dev)
+ return -EINVAL;
+
+ pin = dev->pin;
+ if (!pin) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "No interrupt pin configured for device %s\n",
+ pci_name(dev)));
+ return 0;
+ }
+ pin--;
+
+ if (!dev->bus) {
+ dev_err(&dev->dev, "invalid (NULL) 'bus' field\n");
+ return -ENODEV;
+ }
+
+ /*
+ * First we check the PCI IRQ routing table (PRT) for an IRQ. PRT
+ * values override any BIOS-assigned IRQs set during boot.
+ */
+ irq = acpi_pci_irq_lookup(dev->bus, PCI_SLOT(dev->devfn), pin,
+ &triggering, &polarity, &link,
+ acpi_pci_allocate_irq);
+
+ /*
+ * If no PRT entry was found, we'll try to derive an IRQ from the
+ * device's parent bridge.
+ */
+ if (irq < 0)
+ irq = acpi_pci_irq_derive(dev, pin, &triggering,
+ &polarity, &link,
+ acpi_pci_allocate_irq);
+
+ if (irq < 0) {
+ /*
+ * IDE legacy mode controller IRQs are magic. Why do compat
+ * extensions always make such a nasty mess.
+ */
+ if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE &&
+ (dev->class & 0x05) == 0)
+ return 0;
+ }
+ /*
+	 * No IRQ known to the ACPI subsystem - maybe the BIOS /
+	 * driver reported one; if so, use it. Exit in any case.
+ */
+ if (irq < 0) {
+ dev_warn(&dev->dev, "PCI INT %c: no GSI", 'A' + pin);
+ /* Interrupt Line values above 0xF are forbidden */
+ if (dev->irq > 0 && (dev->irq <= 0xF)) {
+ printk(" - using IRQ %d\n", dev->irq);
+ acpi_register_gsi(dev->irq, ACPI_LEVEL_SENSITIVE,
+ ACPI_ACTIVE_LOW);
+ return 0;
+ } else {
+ printk("\n");
+ return 0;
+ }
+ }
+
+ rc = acpi_register_gsi(irq, triggering, polarity);
+ if (rc < 0) {
+ dev_warn(&dev->dev, "PCI INT %c: failed to register GSI\n",
+ 'A' + pin);
+ return rc;
+ }
+ dev->irq = rc;
+
+ if (link)
+ snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link);
+ else
+ link_desc[0] = '\0';
+
+ dev_info(&dev->dev, "PCI INT %c%s -> GSI %u (%s, %s) -> IRQ %d\n",
+ 'A' + pin, link_desc, irq,
+ (triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge",
+ (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq);
+
+ return 0;
+}
+
+/* FIXME: implement x86/x86_64 version */
+void __attribute__ ((weak)) acpi_unregister_gsi(u32 i)
+{
+}
+
+void acpi_pci_irq_disable(struct pci_dev *dev)
+{
+ int gsi = 0;
+ u8 pin = 0;
+ int triggering = ACPI_LEVEL_SENSITIVE;
+ int polarity = ACPI_ACTIVE_LOW;
+
+
+ if (!dev || !dev->bus)
+ return;
+
+ pin = dev->pin;
+ if (!pin)
+ return;
+ pin--;
+
+ /*
+ * First we check the PCI IRQ routing table (PRT) for an IRQ.
+ */
+ gsi = acpi_pci_irq_lookup(dev->bus, PCI_SLOT(dev->devfn), pin,
+ &triggering, &polarity, NULL,
+ acpi_pci_free_irq);
+ /*
+ * If no PRT entry was found, we'll try to derive an IRQ from the
+ * device's parent bridge.
+ */
+ if (gsi < 0)
+ gsi = acpi_pci_irq_derive(dev, pin,
+ &triggering, &polarity, NULL,
+ acpi_pci_free_irq);
+ if (gsi < 0)
+ return;
+
+ /*
+	 * TBD: It might be worth clearing dev->irq with a magic constant
+ * (e.g. PCI_UNDEFINED_IRQ).
+ */
+
+ dev_info(&dev->dev, "PCI INT %c disabled\n", 'A' + pin);
+ acpi_unregister_gsi(gsi);
+}
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
new file mode 100644
index 0000000..e33c0bc
--- /dev/null
+++ b/drivers/acpi/pci_link.c
@@ -0,0 +1,969 @@
+/*
+ * pci_link.c - ACPI PCI Interrupt Link Device Driver ($Revision: 34 $)
+ *
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2002 Dominik Brodowski <devel@brodo.de>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * TBD:
+ * 1. Support more than one IRQ resource entry per link device (index).
+ * 2. Implement start/stop mechanism and use ACPI Bus Driver facilities
+ * for IRQ management (e.g. start()->_SRS).
+ */
+
+#include <linux/sysdev.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <linux/pm.h>
+#include <linux/pci.h>
+#include <linux/mutex.h>
+
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#define _COMPONENT ACPI_PCI_COMPONENT
+ACPI_MODULE_NAME("pci_link");
+#define ACPI_PCI_LINK_CLASS "pci_irq_routing"
+#define ACPI_PCI_LINK_DEVICE_NAME "PCI Interrupt Link"
+#define ACPI_PCI_LINK_FILE_INFO "info"
+#define ACPI_PCI_LINK_FILE_STATUS "state"
+#define ACPI_PCI_LINK_MAX_POSSIBLE 16
+static int acpi_pci_link_add(struct acpi_device *device);
+static int acpi_pci_link_remove(struct acpi_device *device, int type);
+
+static struct acpi_device_id link_device_ids[] = {
+ {"PNP0C0F", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, link_device_ids);
+
+static struct acpi_driver acpi_pci_link_driver = {
+ .name = "pci_link",
+ .class = ACPI_PCI_LINK_CLASS,
+ .ids = link_device_ids,
+ .ops = {
+ .add = acpi_pci_link_add,
+ .remove = acpi_pci_link_remove,
+ },
+};
+
+/*
+ * Once a link has been initialized, we never clear its 'active' and
+ * 'initialized' fields later, even if the link is disabled. Instead, we
+ * just repick the active IRQ.
+ */
+struct acpi_pci_link_irq {
+ u8 active; /* Current IRQ */
+ u8 triggering; /* All IRQs */
+ u8 polarity; /* All IRQs */
+ u8 resource_type;
+ u8 possible_count;
+ u8 possible[ACPI_PCI_LINK_MAX_POSSIBLE];
+ u8 initialized:1;
+ u8 reserved:7;
+};
+
+struct acpi_pci_link {
+ struct list_head node;
+ struct acpi_device *device;
+ struct acpi_pci_link_irq irq;
+ int refcnt;
+};
+
+static struct {
+ int count;
+ struct list_head entries;
+} acpi_link;
+static DEFINE_MUTEX(acpi_link_lock);
+
+/* --------------------------------------------------------------------------
+ PCI Link Device Management
+ -------------------------------------------------------------------------- */
+
+/*
+ * set context (link) possible list from resource list
+ */
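+/*
+ * Illustrative ASL only: a link device's _PRS typically contains a single
+ * IRQ (or extended IRQ) descriptor, e.g.
+ *     Name (_PRS, ResourceTemplate () {
+ *         IRQ (Level, ActiveLow, Shared) {3, 4, 5, 7, 10, 11, 12}
+ *     })
+ * and each listed IRQ becomes one entry in link->irq.possible[].
+ */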
+static acpi_status
+acpi_pci_link_check_possible(struct acpi_resource *resource, void *context)
+{
+ struct acpi_pci_link *link = context;
+ u32 i = 0;
+
+
+ switch (resource->type) {
+ case ACPI_RESOURCE_TYPE_START_DEPENDENT:
+ case ACPI_RESOURCE_TYPE_END_TAG:
+ return AE_OK;
+ case ACPI_RESOURCE_TYPE_IRQ:
+ {
+ struct acpi_resource_irq *p = &resource->data.irq;
+ if (!p || !p->interrupt_count) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Blank _PRS IRQ resource\n"));
+ return AE_OK;
+ }
+ for (i = 0;
+ (i < p->interrupt_count
+ && i < ACPI_PCI_LINK_MAX_POSSIBLE); i++) {
+ if (!p->interrupts[i]) {
+ printk(KERN_WARNING PREFIX
+ "Invalid _PRS IRQ %d\n",
+ p->interrupts[i]);
+ continue;
+ }
+ link->irq.possible[i] = p->interrupts[i];
+ link->irq.possible_count++;
+ }
+ link->irq.triggering = p->triggering;
+ link->irq.polarity = p->polarity;
+ link->irq.resource_type = ACPI_RESOURCE_TYPE_IRQ;
+ break;
+ }
+ case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+ {
+ struct acpi_resource_extended_irq *p =
+ &resource->data.extended_irq;
+ if (!p || !p->interrupt_count) {
+ printk(KERN_WARNING PREFIX
+ "Blank _PRS EXT IRQ resource\n");
+ return AE_OK;
+ }
+ for (i = 0;
+ (i < p->interrupt_count
+ && i < ACPI_PCI_LINK_MAX_POSSIBLE); i++) {
+ if (!p->interrupts[i]) {
+ printk(KERN_WARNING PREFIX
+ "Invalid _PRS IRQ %d\n",
+ p->interrupts[i]);
+ continue;
+ }
+ link->irq.possible[i] = p->interrupts[i];
+ link->irq.possible_count++;
+ }
+ link->irq.triggering = p->triggering;
+ link->irq.polarity = p->polarity;
+ link->irq.resource_type = ACPI_RESOURCE_TYPE_EXTENDED_IRQ;
+ break;
+ }
+ default:
+ printk(KERN_ERR PREFIX "_PRS resource type 0x%x isn't an IRQ\n",
+ resource->type);
+ return AE_OK;
+ }
+
+ return AE_CTRL_TERMINATE;
+}
+
+static int acpi_pci_link_get_possible(struct acpi_pci_link *link)
+{
+ acpi_status status;
+
+
+ if (!link)
+ return -EINVAL;
+
+ status = acpi_walk_resources(link->device->handle, METHOD_NAME__PRS,
+ acpi_pci_link_check_possible, link);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRS"));
+ return -ENODEV;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Found %d possible IRQs\n",
+ link->irq.possible_count));
+
+ return 0;
+}
+
+static acpi_status
+acpi_pci_link_check_current(struct acpi_resource *resource, void *context)
+{
+ int *irq = (int *)context;
+
+
+ switch (resource->type) {
+ case ACPI_RESOURCE_TYPE_START_DEPENDENT:
+ case ACPI_RESOURCE_TYPE_END_TAG:
+ return AE_OK;
+ case ACPI_RESOURCE_TYPE_IRQ:
+ {
+ struct acpi_resource_irq *p = &resource->data.irq;
+ if (!p || !p->interrupt_count) {
+ /*
+ * IRQ descriptors may have no IRQ# bits set,
+				 * particularly those w/ _STA disabled
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Blank _CRS IRQ resource\n"));
+ return AE_OK;
+ }
+ *irq = p->interrupts[0];
+ break;
+ }
+ case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+ {
+ struct acpi_resource_extended_irq *p =
+ &resource->data.extended_irq;
+ if (!p || !p->interrupt_count) {
+ /*
+ * extended IRQ descriptors must
+ * return at least 1 IRQ
+ */
+ printk(KERN_WARNING PREFIX
+ "Blank _CRS EXT IRQ resource\n");
+ return AE_OK;
+ }
+ *irq = p->interrupts[0];
+ break;
+ }
+ default:
+ printk(KERN_ERR PREFIX "_CRS resource type 0x%x isn't an IRQ\n",
+ resource->type);
+ return AE_OK;
+ }
+
+ return AE_CTRL_TERMINATE;
+}
+
+/*
+ * Run _CRS and set link->irq.active
+ *
+ * return value:
+ * 0 - success
+ * !0 - failure
+ */
+static int acpi_pci_link_get_current(struct acpi_pci_link *link)
+{
+ int result = 0;
+ acpi_status status = AE_OK;
+ int irq = 0;
+
+ if (!link)
+ return -EINVAL;
+
+ link->irq.active = 0;
+
+ /* in practice, status disabled is meaningless, ignore it */
+ if (acpi_strict) {
+ /* Query _STA, set link->device->status */
+ result = acpi_bus_get_status(link->device);
+ if (result) {
+ printk(KERN_ERR PREFIX "Unable to read status\n");
+ goto end;
+ }
+
+ if (!link->device->status.enabled) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Link disabled\n"));
+ return 0;
+ }
+ }
+
+ /*
+ * Query and parse _CRS to get the current IRQ assignment.
+ */
+
+ status = acpi_walk_resources(link->device->handle, METHOD_NAME__CRS,
+ acpi_pci_link_check_current, &irq);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _CRS"));
+ result = -ENODEV;
+ goto end;
+ }
+
+ if (acpi_strict && !irq) {
+ printk(KERN_ERR PREFIX "_CRS returned 0\n");
+ result = -ENODEV;
+ }
+
+ link->irq.active = irq;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Link at IRQ %d \n", link->irq.active));
+
+ end:
+ return result;
+}
+
+static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
+{
+ int result = 0;
+ acpi_status status = AE_OK;
+ struct {
+ struct acpi_resource res;
+ struct acpi_resource end;
+ } *resource;
+ struct acpi_buffer buffer = { 0, NULL };
+
+
+ if (!link || !irq)
+ return -EINVAL;
+
+ resource = kzalloc(sizeof(*resource) + 1, irqs_disabled() ? GFP_ATOMIC: GFP_KERNEL);
+ if (!resource)
+ return -ENOMEM;
+
+ buffer.length = sizeof(*resource) + 1;
+ buffer.pointer = resource;
+
+ switch (link->irq.resource_type) {
+ case ACPI_RESOURCE_TYPE_IRQ:
+ resource->res.type = ACPI_RESOURCE_TYPE_IRQ;
+ resource->res.length = sizeof(struct acpi_resource);
+ resource->res.data.irq.triggering = link->irq.triggering;
+ resource->res.data.irq.polarity =
+ link->irq.polarity;
+ if (link->irq.triggering == ACPI_EDGE_SENSITIVE)
+ resource->res.data.irq.sharable =
+ ACPI_EXCLUSIVE;
+ else
+ resource->res.data.irq.sharable = ACPI_SHARED;
+ resource->res.data.irq.interrupt_count = 1;
+ resource->res.data.irq.interrupts[0] = irq;
+ break;
+
+ case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+ resource->res.type = ACPI_RESOURCE_TYPE_EXTENDED_IRQ;
+ resource->res.length = sizeof(struct acpi_resource);
+ resource->res.data.extended_irq.producer_consumer =
+ ACPI_CONSUMER;
+ resource->res.data.extended_irq.triggering =
+ link->irq.triggering;
+ resource->res.data.extended_irq.polarity =
+ link->irq.polarity;
+ if (link->irq.triggering == ACPI_EDGE_SENSITIVE)
+			resource->res.data.extended_irq.sharable =
+			    ACPI_EXCLUSIVE;
+		else
+			resource->res.data.extended_irq.sharable = ACPI_SHARED;
+ resource->res.data.extended_irq.interrupt_count = 1;
+ resource->res.data.extended_irq.interrupts[0] = irq;
+ /* ignore resource_source, it's optional */
+ break;
+ default:
+ printk(KERN_ERR PREFIX "Invalid Resource_type %d\n", link->irq.resource_type);
+ result = -EINVAL;
+ goto end;
+
+ }
+ resource->end.type = ACPI_RESOURCE_TYPE_END_TAG;
+
+ /* Attempt to set the resource */
+ status = acpi_set_current_resources(link->device->handle, &buffer);
+
+ /* check for total failure */
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _SRS"));
+ result = -ENODEV;
+ goto end;
+ }
+
+ /* Query _STA, set device->status */
+ result = acpi_bus_get_status(link->device);
+ if (result) {
+ printk(KERN_ERR PREFIX "Unable to read status\n");
+ goto end;
+ }
+ if (!link->device->status.enabled) {
+ printk(KERN_WARNING PREFIX
+ "%s [%s] disabled and referenced, BIOS bug\n",
+ acpi_device_name(link->device),
+ acpi_device_bid(link->device));
+ }
+
+ /* Query _CRS, set link->irq.active */
+ result = acpi_pci_link_get_current(link);
+ if (result) {
+ goto end;
+ }
+
+ /*
+	 * Is the current setting not what we just set? If so, trust what
+	 * we programmed and update link->irq.active accordingly.
+ */
+ if (link->irq.active != irq) {
+ /*
+		 * policy: when _CRS doesn't return what we just set via _SRS,
+		 * assume _SRS worked and override the _CRS value.
+ */
+ printk(KERN_WARNING PREFIX
+ "%s [%s] BIOS reported IRQ %d, using IRQ %d\n",
+ acpi_device_name(link->device),
+ acpi_device_bid(link->device), link->irq.active, irq);
+ link->irq.active = irq;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Set IRQ %d\n", link->irq.active));
+
+ end:
+ kfree(resource);
+ return result;
+}
+
+/* --------------------------------------------------------------------------
+ PCI Link IRQ Management
+ -------------------------------------------------------------------------- */
+
+/*
+ * "acpi_irq_balance" (default in APIC mode) enables ACPI to use PIC Interrupt
+ * Link Devices to move the PIRQs around to minimize sharing.
+ *
+ * "acpi_irq_nobalance" (default in PIC mode) tells ACPI not to move any PIC IRQs
+ * that the BIOS has already set to active. This is necessary because
+ * ACPI has no automatic means of knowing what ISA IRQs are used. Note that
+ * if the BIOS doesn't set a Link Device active, ACPI needs to program it
+ * even if acpi_irq_nobalance is set.
+ *
+ * A table of penalties avoids directing PCI interrupts to well known
+ * ISA IRQs. Boot params are available to over-ride the default table:
+ *
+ * List interrupts that are free for PCI use.
+ * acpi_irq_pci=n[,m]
+ *
+ * List interrupts that should not be used for PCI:
+ * acpi_irq_isa=n[,m]
+ *
+ * Note that PCI IRQ routers have a list of possible IRQs,
+ * which may not include the IRQs this table says are available.
+ *
+ * Because this heuristic can't tell the difference between a link
+ * that no device will attach to and a link that may be shared
+ * by multiple active devices, it is not optimal.
+ *
+ * If interrupt performance is that important, get an IO-APIC system
+ * with a pin dedicated to each device. Or for that matter, an MSI
+ * enabled system.
+ */
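+/*
+ * Example boot parameters (illustrative values only):
+ *     acpi_irq_isa=5        reserve IRQ 5 for a legacy ISA card
+ *     acpi_irq_pci=10,11    mark IRQs 10 and 11 as free for PCI use
+ *     acpi_irq_nobalance    keep the BIOS-configured link IRQs as-is
+ */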
+
+#define ACPI_MAX_IRQS 256
+#define ACPI_MAX_ISA_IRQ 16
+
+#define PIRQ_PENALTY_PCI_AVAILABLE (0)
+#define PIRQ_PENALTY_PCI_POSSIBLE (16*16)
+#define PIRQ_PENALTY_PCI_USING (16*16*16)
+#define PIRQ_PENALTY_ISA_TYPICAL (16*16*16*16)
+#define PIRQ_PENALTY_ISA_USED (16*16*16*16*16)
+#define PIRQ_PENALTY_ISA_ALWAYS (16*16*16*16*16*16)
+
+static int acpi_irq_penalty[ACPI_MAX_IRQS] = {
+ PIRQ_PENALTY_ISA_ALWAYS, /* IRQ0 timer */
+ PIRQ_PENALTY_ISA_ALWAYS, /* IRQ1 keyboard */
+ PIRQ_PENALTY_ISA_ALWAYS, /* IRQ2 cascade */
+ PIRQ_PENALTY_ISA_TYPICAL, /* IRQ3 serial */
+ PIRQ_PENALTY_ISA_TYPICAL, /* IRQ4 serial */
+ PIRQ_PENALTY_ISA_TYPICAL, /* IRQ5 sometimes SoundBlaster */
+ PIRQ_PENALTY_ISA_TYPICAL, /* IRQ6 */
+ PIRQ_PENALTY_ISA_TYPICAL, /* IRQ7 parallel, spurious */
+ PIRQ_PENALTY_ISA_TYPICAL, /* IRQ8 rtc, sometimes */
+ PIRQ_PENALTY_PCI_AVAILABLE, /* IRQ9 PCI, often acpi */
+ PIRQ_PENALTY_PCI_AVAILABLE, /* IRQ10 PCI */
+ PIRQ_PENALTY_PCI_AVAILABLE, /* IRQ11 PCI */
+ PIRQ_PENALTY_ISA_USED, /* IRQ12 mouse */
+ PIRQ_PENALTY_ISA_USED, /* IRQ13 fpe, sometimes */
+ PIRQ_PENALTY_ISA_USED, /* IRQ14 ide0 */
+ PIRQ_PENALTY_ISA_USED, /* IRQ15 ide1 */
+ /* >IRQ15 */
+};
+
+int __init acpi_irq_penalty_init(void)
+{
+ struct list_head *node = NULL;
+ struct acpi_pci_link *link = NULL;
+ int i = 0;
+
+
+ /*
+ * Update penalties to facilitate IRQ balancing.
+ */
+ list_for_each(node, &acpi_link.entries) {
+
+ link = list_entry(node, struct acpi_pci_link, node);
+ if (!link) {
+ printk(KERN_ERR PREFIX "Invalid link context\n");
+ continue;
+ }
+
+ /*
+ * reflect the possible and active irqs in the penalty table --
+ * useful for breaking ties.
+ */
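+		/*
+		 * Worked example: a link whose _PRS lists four possible IRQs
+		 * spreads PIRQ_PENALTY_PCI_POSSIBLE (16*16 = 256) across them,
+		 * adding 256 / 4 = 64 to each ISA-range IRQ it might use.
+		 */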
+ if (link->irq.possible_count) {
+ int penalty =
+ PIRQ_PENALTY_PCI_POSSIBLE /
+ link->irq.possible_count;
+
+ for (i = 0; i < link->irq.possible_count; i++) {
+ if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ)
+ acpi_irq_penalty[link->irq.
+ possible[i]] +=
+ penalty;
+ }
+
+ } else if (link->irq.active) {
+ acpi_irq_penalty[link->irq.active] +=
+ PIRQ_PENALTY_PCI_POSSIBLE;
+ }
+ }
+ /* Add a penalty for the SCI */
+ acpi_irq_penalty[acpi_gbl_FADT.sci_interrupt] += PIRQ_PENALTY_PCI_USING;
+
+ return 0;
+}
+
+static int acpi_irq_balance = -1; /* 0: static, 1: balance */
+
+static int acpi_pci_link_allocate(struct acpi_pci_link *link)
+{
+ int irq;
+ int i;
+
+
+ if (link->irq.initialized) {
+ if (link->refcnt == 0)
+ /* This means the link is disabled but initialized */
+ acpi_pci_link_set(link, link->irq.active);
+ return 0;
+ }
+
+ /*
+ * search for active IRQ in list of possible IRQs.
+ */
+ for (i = 0; i < link->irq.possible_count; ++i) {
+ if (link->irq.active == link->irq.possible[i])
+ break;
+ }
+ /*
+ * forget active IRQ that is not in possible list
+ */
+ if (i == link->irq.possible_count) {
+ if (acpi_strict)
+ printk(KERN_WARNING PREFIX "_CRS %d not found"
+ " in _PRS\n", link->irq.active);
+ link->irq.active = 0;
+ }
+
+ /*
+ * if active found, use it; else pick entry from end of possible list.
+ */
+ if (link->irq.active) {
+ irq = link->irq.active;
+ } else {
+ irq = link->irq.possible[link->irq.possible_count - 1];
+ }
+
+ if (acpi_irq_balance || !link->irq.active) {
+ /*
+ * Select the best IRQ. This is done in reverse to promote
+ * the use of IRQs 9, 10, 11, and >15.
+ */
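+		/*
+		 * Example with the default penalty table: if _PRS offers
+		 * {3, 4, 10, 11}, IRQs 10 and 11 start near
+		 * PIRQ_PENALTY_PCI_AVAILABLE while 3 and 4 start at
+		 * PIRQ_PENALTY_ISA_TYPICAL, so the reverse scan below will
+		 * settle on 11 or 10.
+		 */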
+ for (i = (link->irq.possible_count - 1); i >= 0; i--) {
+ if (acpi_irq_penalty[irq] >
+ acpi_irq_penalty[link->irq.possible[i]])
+ irq = link->irq.possible[i];
+ }
+ }
+
+ /* Attempt to enable the link device at this IRQ. */
+ if (acpi_pci_link_set(link, irq)) {
+ printk(KERN_ERR PREFIX "Unable to set IRQ for %s [%s]. "
+ "Try pci=noacpi or acpi=off\n",
+ acpi_device_name(link->device),
+ acpi_device_bid(link->device));
+ return -ENODEV;
+ } else {
+ acpi_irq_penalty[link->irq.active] += PIRQ_PENALTY_PCI_USING;
+ printk(PREFIX "%s [%s] enabled at IRQ %d\n",
+ acpi_device_name(link->device),
+ acpi_device_bid(link->device), link->irq.active);
+ }
+
+ link->irq.initialized = 1;
+
+ return 0;
+}
+
+/*
+ * acpi_pci_link_allocate_irq
+ * success: return IRQ >= 0
+ * failure: return -1
+ */
+
+int
+acpi_pci_link_allocate_irq(acpi_handle handle,
+ int index,
+ int *triggering, int *polarity, char **name)
+{
+ int result = 0;
+ struct acpi_device *device = NULL;
+ struct acpi_pci_link *link = NULL;
+
+
+ result = acpi_bus_get_device(handle, &device);
+ if (result) {
+ printk(KERN_ERR PREFIX "Invalid link device\n");
+ return -1;
+ }
+
+ link = acpi_driver_data(device);
+ if (!link) {
+ printk(KERN_ERR PREFIX "Invalid link context\n");
+ return -1;
+ }
+
+ /* TBD: Support multiple index (IRQ) entries per Link Device */
+ if (index) {
+ printk(KERN_ERR PREFIX "Invalid index %d\n", index);
+ return -1;
+ }
+
+ mutex_lock(&acpi_link_lock);
+ if (acpi_pci_link_allocate(link)) {
+ mutex_unlock(&acpi_link_lock);
+ return -1;
+ }
+
+ if (!link->irq.active) {
+ mutex_unlock(&acpi_link_lock);
+ printk(KERN_ERR PREFIX "Link active IRQ is 0!\n");
+ return -1;
+ }
+ link->refcnt++;
+ mutex_unlock(&acpi_link_lock);
+
+ if (triggering)
+ *triggering = link->irq.triggering;
+ if (polarity)
+ *polarity = link->irq.polarity;
+ if (name)
+ *name = acpi_device_bid(link->device);
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Link %s is referenced\n",
+ acpi_device_bid(link->device)));
+ return (link->irq.active);
+}
+
+/*
+ * We don't change the link's IRQ information here. After the link is
+ * re-enabled, we continue to use the same information.
+ */
+int acpi_pci_link_free_irq(acpi_handle handle)
+{
+ struct acpi_device *device = NULL;
+ struct acpi_pci_link *link = NULL;
+ acpi_status result;
+
+
+ result = acpi_bus_get_device(handle, &device);
+ if (result) {
+ printk(KERN_ERR PREFIX "Invalid link device\n");
+ return -1;
+ }
+
+ link = acpi_driver_data(device);
+ if (!link) {
+ printk(KERN_ERR PREFIX "Invalid link context\n");
+ return -1;
+ }
+
+ mutex_lock(&acpi_link_lock);
+ if (!link->irq.initialized) {
+ mutex_unlock(&acpi_link_lock);
+ printk(KERN_ERR PREFIX "Link isn't initialized\n");
+ return -1;
+ }
+#ifdef FUTURE_USE
+ /*
+	 * The Link reference count allows us to _DISable an unused link
+	 * at suspend time, and set it again on resume.
+	 * However, 2.6.12 still has irq_router.resume,
+	 * which blindly restores the link state.
+	 * So we disable the reference count method
+	 * to prevent duplicate acpi_pci_link_set() calls,
+	 * which would harm some systems.
+ */
+ link->refcnt--;
+#endif
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Link %s is dereferenced\n",
+ acpi_device_bid(link->device)));
+
+ if (link->refcnt == 0) {
+ acpi_evaluate_object(link->device->handle, "_DIS", NULL, NULL);
+ }
+ mutex_unlock(&acpi_link_lock);
+ return (link->irq.active);
+}
+
+/* --------------------------------------------------------------------------
+ Driver Interface
+ -------------------------------------------------------------------------- */
+
+static int acpi_pci_link_add(struct acpi_device *device)
+{
+ int result = 0;
+ struct acpi_pci_link *link = NULL;
+ int i = 0;
+ int found = 0;
+
+
+ if (!device)
+ return -EINVAL;
+
+ link = kzalloc(sizeof(struct acpi_pci_link), GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+
+ link->device = device;
+ strcpy(acpi_device_name(device), ACPI_PCI_LINK_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_PCI_LINK_CLASS);
+ device->driver_data = link;
+
+ mutex_lock(&acpi_link_lock);
+ result = acpi_pci_link_get_possible(link);
+ if (result)
+ goto end;
+
+ /* query and set link->irq.active */
+ acpi_pci_link_get_current(link);
+
+ printk(KERN_INFO PREFIX "%s [%s] (IRQs", acpi_device_name(device),
+ acpi_device_bid(device));
+ for (i = 0; i < link->irq.possible_count; i++) {
+ if (link->irq.active == link->irq.possible[i]) {
+ printk(" *%d", link->irq.possible[i]);
+ found = 1;
+ } else
+ printk(" %d", link->irq.possible[i]);
+ }
+
+ printk(")");
+
+ if (!found)
+ printk(" *%d", link->irq.active);
+
+ if (!link->device->status.enabled)
+ printk(", disabled.");
+
+ printk("\n");
+
+ /* TBD: Acquire/release lock */
+ list_add_tail(&link->node, &acpi_link.entries);
+ acpi_link.count++;
+
+ end:
+ /* disable all links -- to be activated on use */
+ acpi_evaluate_object(device->handle, "_DIS", NULL, NULL);
+ mutex_unlock(&acpi_link_lock);
+
+ if (result)
+ kfree(link);
+
+ return result;
+}
+
+static int acpi_pci_link_resume(struct acpi_pci_link *link)
+{
+
+ if (link->refcnt && link->irq.active && link->irq.initialized)
+ return (acpi_pci_link_set(link, link->irq.active));
+ else
+ return 0;
+}
+
+static int irqrouter_resume(struct sys_device *dev)
+{
+ struct list_head *node = NULL;
+ struct acpi_pci_link *link = NULL;
+
+ list_for_each(node, &acpi_link.entries) {
+ link = list_entry(node, struct acpi_pci_link, node);
+ if (!link) {
+ printk(KERN_ERR PREFIX "Invalid link context\n");
+ continue;
+ }
+ acpi_pci_link_resume(link);
+ }
+ return 0;
+}
+
+static int acpi_pci_link_remove(struct acpi_device *device, int type)
+{
+ struct acpi_pci_link *link = NULL;
+
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ link = acpi_driver_data(device);
+
+ mutex_lock(&acpi_link_lock);
+ list_del(&link->node);
+ mutex_unlock(&acpi_link_lock);
+
+ kfree(link);
+
+ return 0;
+}
+
+/*
+ * modify acpi_irq_penalty[] from cmdline
+ */
+static int __init acpi_irq_penalty_update(char *str, int used)
+{
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ int retval;
+ int irq;
+
+ retval = get_option(&str, &irq);
+
+ if (!retval)
+ break; /* no number found */
+
+ if (irq < 0)
+ continue;
+
+ if (irq >= ARRAY_SIZE(acpi_irq_penalty))
+ continue;
+
+ if (used)
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
+ else
+ acpi_irq_penalty[irq] = PIRQ_PENALTY_PCI_AVAILABLE;
+
+ if (retval != 2) /* no next number */
+ break;
+ }
+ return 1;
+}
+
+/*
+ * We'd like PNP to call this routine for the
+ * single ISA_USED value for each legacy device.
+ * But instead it calls us with each POSSIBLE setting.
+ * There is no ISA_POSSIBLE weight, so we simply use
+ * the (small) PCI_USING penalty.
+ */
+void acpi_penalize_isa_irq(int irq, int active)
+{
+ if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
+ if (active)
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
+ else
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
+ }
+}
+
+/*
+ * Over-ride default table to reserve additional IRQs for use by ISA
+ * e.g. acpi_irq_isa=5
+ * Useful for telling ACPI how not to interfere with your ISA sound card.
+ */
+static int __init acpi_irq_isa(char *str)
+{
+ return acpi_irq_penalty_update(str, 1);
+}
+
+__setup("acpi_irq_isa=", acpi_irq_isa);
+
+/*
+ * Over-ride default table to free additional IRQs for use by PCI
+ * e.g. acpi_irq_pci=7,15
+ * Used for acpi_irq_balance to free up IRQs to reduce PCI IRQ sharing.
+ */
+static int __init acpi_irq_pci(char *str)
+{
+ return acpi_irq_penalty_update(str, 0);
+}
+
+__setup("acpi_irq_pci=", acpi_irq_pci);
+
+static int __init acpi_irq_nobalance_set(char *str)
+{
+ acpi_irq_balance = 0;
+ return 1;
+}
+
+__setup("acpi_irq_nobalance", acpi_irq_nobalance_set);
+
+int __init acpi_irq_balance_set(char *str)
+{
+ acpi_irq_balance = 1;
+ return 1;
+}
+
+__setup("acpi_irq_balance", acpi_irq_balance_set);
+
+/* FIXME: we will remove this interface after all drivers call pci_disable_device */
+static struct sysdev_class irqrouter_sysdev_class = {
+ .name = "irqrouter",
+ .resume = irqrouter_resume,
+};
+
+static struct sys_device device_irqrouter = {
+ .id = 0,
+ .cls = &irqrouter_sysdev_class,
+};
+
+static int __init irqrouter_init_sysfs(void)
+{
+ int error;
+
+
+ if (acpi_disabled || acpi_noirq)
+ return 0;
+
+ error = sysdev_class_register(&irqrouter_sysdev_class);
+ if (!error)
+ error = sysdev_register(&device_irqrouter);
+
+ return error;
+}
+
+device_initcall(irqrouter_init_sysfs);
+
+static int __init acpi_pci_link_init(void)
+{
+ if (acpi_noirq)
+ return 0;
+
+ if (acpi_irq_balance == -1) {
+ /* no command line switch: enable balancing in IOAPIC mode */
+ if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC)
+ acpi_irq_balance = 1;
+ else
+ acpi_irq_balance = 0;
+ }
+
+ acpi_link.count = 0;
+ INIT_LIST_HEAD(&acpi_link.entries);
+
+ if (acpi_bus_register_driver(&acpi_pci_link_driver) < 0)
+ return -ENODEV;
+
+ return 0;
+}
+
+subsys_initcall(acpi_pci_link_init);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
new file mode 100644
index 0000000..642554b
--- /dev/null
+++ b/drivers/acpi/pci_root.c
@@ -0,0 +1,388 @@
+/*
+ * pci_root.c - ACPI PCI Root Bridge Driver ($Revision: 40 $)
+ *
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <linux/pm.h>
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#define _COMPONENT ACPI_PCI_COMPONENT
+ACPI_MODULE_NAME("pci_root");
+#define ACPI_PCI_ROOT_CLASS "pci_bridge"
+#define ACPI_PCI_ROOT_DEVICE_NAME "PCI Root Bridge"
+static int acpi_pci_root_add(struct acpi_device *device);
+static int acpi_pci_root_remove(struct acpi_device *device, int type);
+static int acpi_pci_root_start(struct acpi_device *device);
+
+static struct acpi_device_id root_device_ids[] = {
+ {"PNP0A03", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, root_device_ids);
+
+static struct acpi_driver acpi_pci_root_driver = {
+ .name = "pci_root",
+ .class = ACPI_PCI_ROOT_CLASS,
+ .ids = root_device_ids,
+ .ops = {
+ .add = acpi_pci_root_add,
+ .remove = acpi_pci_root_remove,
+ .start = acpi_pci_root_start,
+ },
+};
+
+struct acpi_pci_root {
+ struct list_head node;
+ struct acpi_device * device;
+ struct acpi_pci_id id;
+ struct pci_bus *bus;
+};
+
+static LIST_HEAD(acpi_pci_roots);
+
+static struct acpi_pci_driver *sub_driver;
+
+int acpi_pci_register_driver(struct acpi_pci_driver *driver)
+{
+ int n = 0;
+ struct list_head *entry;
+
+ struct acpi_pci_driver **pptr = &sub_driver;
+ while (*pptr)
+ pptr = &(*pptr)->next;
+ *pptr = driver;
+
+ if (!driver->add)
+ return 0;
+
+ list_for_each(entry, &acpi_pci_roots) {
+ struct acpi_pci_root *root;
+ root = list_entry(entry, struct acpi_pci_root, node);
+ driver->add(root->device->handle);
+ n++;
+ }
+
+ return n;
+}
+
+EXPORT_SYMBOL(acpi_pci_register_driver);
+
+void acpi_pci_unregister_driver(struct acpi_pci_driver *driver)
+{
+ struct list_head *entry;
+
+ struct acpi_pci_driver **pptr = &sub_driver;
+ while (*pptr) {
+ if (*pptr == driver)
+ break;
+ pptr = &(*pptr)->next;
+ }
+ BUG_ON(!*pptr);
+ *pptr = (*pptr)->next;
+
+ if (!driver->remove)
+ return;
+
+ list_for_each(entry, &acpi_pci_roots) {
+ struct acpi_pci_root *root;
+ root = list_entry(entry, struct acpi_pci_root, node);
+ driver->remove(root->device->handle);
+ }
+}
+
+EXPORT_SYMBOL(acpi_pci_unregister_driver);
+
+acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus)
+{
+ struct acpi_pci_root *tmp;
+
+ list_for_each_entry(tmp, &acpi_pci_roots, node) {
+ if ((tmp->id.segment == (u16) seg) && (tmp->id.bus == (u16) bus))
+ return tmp->device->handle;
+ }
+ return NULL;
+}
+
+EXPORT_SYMBOL_GPL(acpi_get_pci_rootbridge_handle);
+
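+/*
+ * Illustrative ASL only: a root bridge _CRS usually carries a bus-number
+ * range descriptor such as
+ *     WordBusNumber (ResourceProducer, MinFixed, MaxFixed, PosDecode,
+ *                    0x0000, 0x0000, 0x00FF, 0x0000, 0x0100)
+ * whose minimum (0x0000 here) is the bus number extracted by the callback
+ * below.
+ */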
+static acpi_status
+get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
+{
+ int *busnr = data;
+ struct acpi_resource_address64 address;
+
+ if (resource->type != ACPI_RESOURCE_TYPE_ADDRESS16 &&
+ resource->type != ACPI_RESOURCE_TYPE_ADDRESS32 &&
+ resource->type != ACPI_RESOURCE_TYPE_ADDRESS64)
+ return AE_OK;
+
+ acpi_resource_to_address64(resource, &address);
+ if ((address.address_length > 0) &&
+ (address.resource_type == ACPI_BUS_NUMBER_RANGE))
+ *busnr = address.minimum;
+
+ return AE_OK;
+}
+
+static acpi_status try_get_root_bridge_busnr(acpi_handle handle, int *busnum)
+{
+ acpi_status status;
+
+ *busnum = -1;
+ status =
+ acpi_walk_resources(handle, METHOD_NAME__CRS,
+ get_root_bridge_busnr_callback, busnum);
+ if (ACPI_FAILURE(status))
+ return status;
+ /* Check if we really get a bus number from _CRS */
+ if (*busnum == -1)
+ return AE_ERROR;
+ return AE_OK;
+}
+
+static void acpi_pci_bridge_scan(struct acpi_device *device)
+{
+ int status;
+ struct acpi_device *child = NULL;
+
+ if (device->flags.bus_address)
+ if (device->parent && device->parent->ops.bind) {
+ status = device->parent->ops.bind(device);
+ if (!status) {
+ list_for_each_entry(child, &device->children, node)
+ acpi_pci_bridge_scan(child);
+ }
+ }
+}
+
+static int __devinit acpi_pci_root_add(struct acpi_device *device)
+{
+ int result = 0;
+ struct acpi_pci_root *root = NULL;
+ struct acpi_pci_root *tmp;
+ acpi_status status = AE_OK;
+ unsigned long long value = 0;
+ acpi_handle handle = NULL;
+ struct acpi_device *child;
+
+
+ if (!device)
+ return -EINVAL;
+
+ root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
+ if (!root)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&root->node);
+
+ root->device = device;
+ strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
+ device->driver_data = root;
+
+ device->ops.bind = acpi_pci_bind;
+
+ /*
+ * Segment
+ * -------
+ * Obtained via _SEG, if exists, otherwise assumed to be zero (0).
+ */
+ status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL,
+ &value);
+ switch (status) {
+ case AE_OK:
+ root->id.segment = (u16) value;
+ break;
+ case AE_NOT_FOUND:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Assuming segment 0 (no _SEG)\n"));
+ root->id.segment = 0;
+ break;
+ default:
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _SEG"));
+ result = -ENODEV;
+ goto end;
+ }
+
+ /*
+ * Bus
+ * ---
+ * Obtained via _BBN, if exists, otherwise assumed to be zero (0).
+ */
+ status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL,
+ &value);
+ switch (status) {
+ case AE_OK:
+ root->id.bus = (u16) value;
+ break;
+ case AE_NOT_FOUND:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Assuming bus 0 (no _BBN)\n"));
+ root->id.bus = 0;
+ break;
+ default:
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _BBN"));
+ result = -ENODEV;
+ goto end;
+ }
+
+ /* Some systems have wrong _BBN */
+ list_for_each_entry(tmp, &acpi_pci_roots, node) {
+ if ((tmp->id.segment == root->id.segment)
+ && (tmp->id.bus == root->id.bus)) {
+ int bus = 0;
+ acpi_status status;
+
+ printk(KERN_ERR PREFIX
+ "Wrong _BBN value, reboot"
+ " and use option 'pci=noacpi'\n");
+
+ status = try_get_root_bridge_busnr(device->handle, &bus);
+ if (ACPI_FAILURE(status))
+ break;
+ if (bus != root->id.bus) {
+ printk(KERN_INFO PREFIX
+ "PCI _CRS %d overrides _BBN 0\n", bus);
+ root->id.bus = bus;
+ }
+ break;
+ }
+ }
+ /*
+ * Device & Function
+ * -----------------
+ * Obtained from _ADR (which has already been evaluated for us).
+ */
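+	/*
+	 * For example (illustrative value), an _ADR of 0x001C0002 decodes to
+	 * device 0x1C, function 2.
+	 */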
+ root->id.device = device->pnp.bus_address >> 16;
+ root->id.function = device->pnp.bus_address & 0xFFFF;
+
+ /*
+ * TBD: Need PCI interface for enumeration/configuration of roots.
+ */
+
+ /* TBD: Locking */
+ list_add_tail(&root->node, &acpi_pci_roots);
+
+ printk(KERN_INFO PREFIX "%s [%s] (%04x:%02x)\n",
+ acpi_device_name(device), acpi_device_bid(device),
+ root->id.segment, root->id.bus);
+
+ /*
+ * Scan the Root Bridge
+ * --------------------
+ * Must do this prior to any attempt to bind the root device, as the
+ * PCI namespace does not get created until this call is made (and
+ * thus the root bridge's pci_dev does not exist).
+ */
+ root->bus = pci_acpi_scan_root(device, root->id.segment, root->id.bus);
+ if (!root->bus) {
+ printk(KERN_ERR PREFIX
+ "Bus %04x:%02x not present in PCI namespace\n",
+ root->id.segment, root->id.bus);
+ result = -ENODEV;
+ goto end;
+ }
+
+ /*
+ * Attach ACPI-PCI Context
+ * -----------------------
+ * Thus binding the ACPI and PCI devices.
+ */
+ result = acpi_pci_bind_root(device, &root->id, root->bus);
+ if (result)
+ goto end;
+
+ /*
+ * PCI Routing Table
+ * -----------------
+ * Evaluate and parse _PRT, if exists.
+ */
+ status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle);
+ if (ACPI_SUCCESS(status))
+ result = acpi_pci_irq_add_prt(device->handle, root->id.segment,
+ root->id.bus);
+
+ /*
+ * Scan and bind all _ADR-Based Devices
+ */
+ list_for_each_entry(child, &device->children, node)
+ acpi_pci_bridge_scan(child);
+
+ end:
+ if (result) {
+ if (!list_empty(&root->node))
+ list_del(&root->node);
+ kfree(root);
+ }
+
+ return result;
+}
+
+static int acpi_pci_root_start(struct acpi_device *device)
+{
+ struct acpi_pci_root *root;
+
+
+ list_for_each_entry(root, &acpi_pci_roots, node) {
+ if (root->device == device) {
+ pci_bus_add_devices(root->bus);
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+
+static int acpi_pci_root_remove(struct acpi_device *device, int type)
+{
+ struct acpi_pci_root *root = NULL;
+
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ root = acpi_driver_data(device);
+
+ kfree(root);
+
+ return 0;
+}
+
+static int __init acpi_pci_root_init(void)
+{
+ if (acpi_pci_disabled)
+ return 0;
+
+ if (acpi_bus_register_driver(&acpi_pci_root_driver) < 0)
+ return -ENODEV;
+
+ return 0;
+}
+
+subsys_initcall(acpi_pci_root_init);
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
new file mode 100644
index 0000000..cd1f446
--- /dev/null
+++ b/drivers/acpi/pci_slot.c
@@ -0,0 +1,365 @@
+/*
+ * pci_slot.c - ACPI PCI Slot Driver
+ *
+ * The code here is heavily leveraged from the acpiphp module.
+ * Thanks to Matthew Wilcox <matthew@wil.cx> for much guidance.
+ * Thanks to Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com> for code
+ * review and fixes.
+ *
+ * Copyright (C) 2007-2008 Hewlett-Packard Development Company, L.P.
+ * Alex Chiang <achiang@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+static int debug;
+static int check_sta_before_sun;
+
+#define DRIVER_VERSION "0.1"
+#define DRIVER_AUTHOR "Alex Chiang <achiang@hp.com>"
+#define DRIVER_DESC "ACPI PCI Slot Detection Driver"
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
+module_param(debug, bool, 0644);
+
+#define _COMPONENT ACPI_PCI_COMPONENT
+ACPI_MODULE_NAME("pci_slot");
+
+#define MY_NAME "pci_slot"
+#define err(format, arg...) printk(KERN_ERR "%s: " format , MY_NAME , ## arg)
+#define info(format, arg...) printk(KERN_INFO "%s: " format , MY_NAME , ## arg)
+#define dbg(format, arg...) \
+ do { \
+ if (debug) \
+ printk(KERN_DEBUG "%s: " format, \
+ MY_NAME , ## arg); \
+ } while (0)
+
+#define SLOT_NAME_SIZE 20 /* Inspired by #define in acpiphp.h */
+
+struct acpi_pci_slot {
+ acpi_handle root_handle; /* handle of the root bridge */
+ struct pci_slot *pci_slot; /* corresponding pci_slot */
+ struct list_head list; /* node in the list of slots */
+};
+
+static int acpi_pci_slot_add(acpi_handle handle);
+static void acpi_pci_slot_remove(acpi_handle handle);
+
+static LIST_HEAD(slot_list);
+static DEFINE_MUTEX(slot_list_lock);
+static struct acpi_pci_driver acpi_pci_slot_driver = {
+ .add = acpi_pci_slot_add,
+ .remove = acpi_pci_slot_remove,
+};
+
+static int
+check_slot(acpi_handle handle, unsigned long long *sun)
+{
+ int device = -1;
+ unsigned long long adr, sta;
+ acpi_status status;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+ dbg("Checking slot on path: %s\n", (char *)buffer.pointer);
+
+ if (check_sta_before_sun) {
+ /* If SxFy doesn't have _STA, we just assume it's there */
+ status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
+ if (ACPI_SUCCESS(status) && !(sta & ACPI_STA_DEVICE_PRESENT))
+ goto out;
+ }
+
+ status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
+ if (ACPI_FAILURE(status)) {
+ dbg("_ADR returned %d on %s\n", status, (char *)buffer.pointer);
+ goto out;
+ }
+
+ /* No _SUN == not a slot == bail */
+ status = acpi_evaluate_integer(handle, "_SUN", NULL, sun);
+ if (ACPI_FAILURE(status)) {
+ dbg("_SUN returned %d on %s\n", status, (char *)buffer.pointer);
+ goto out;
+ }
+
+ device = (adr >> 16) & 0xffff;
+out:
+ kfree(buffer.pointer);
+ return device;
+}
+
+struct callback_args {
+ acpi_walk_callback user_function; /* only for walk_p2p_bridge */
+ struct pci_bus *pci_bus;
+ acpi_handle root_handle;
+};
+
+/*
+ * register_slot
+ *
+ * Called once for each SxFy object in the namespace. Don't worry about
+ * calling pci_create_slot multiple times for the same pci_bus:device,
+ * since each subsequent call simply bumps the refcount on the pci_slot.
+ *
+ * The number of calls to pci_destroy_slot from unregister_slot is
+ * symmetrical.
+ */
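+/*
+ * Illustrative ASL only: a physical slot is typically described as
+ *     Device (S0F0) {
+ *         Name (_ADR, 0x00020000)   // device 2, function 0
+ *         Name (_SUN, 2)            // slot user number
+ *     }
+ * check_slot() above pulls the device out of _ADR and the label out of _SUN.
+ */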
+static acpi_status
+register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+ int device;
+ unsigned long long sun;
+ char name[SLOT_NAME_SIZE];
+ struct acpi_pci_slot *slot;
+ struct pci_slot *pci_slot;
+ struct callback_args *parent_context = context;
+ struct pci_bus *pci_bus = parent_context->pci_bus;
+
+ device = check_slot(handle, &sun);
+ if (device < 0)
+ return AE_OK;
+
+ slot = kmalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot) {
+ err("%s: cannot allocate memory\n", __func__);
+ return AE_OK;
+ }
+
+ snprintf(name, sizeof(name), "%u", (u32)sun);
+ pci_slot = pci_create_slot(pci_bus, device, name, NULL);
+ if (IS_ERR(pci_slot)) {
+ err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot));
+ kfree(slot);
+ return AE_OK;
+ }
+
+ slot->root_handle = parent_context->root_handle;
+ slot->pci_slot = pci_slot;
+ INIT_LIST_HEAD(&slot->list);
+ mutex_lock(&slot_list_lock);
+ list_add(&slot->list, &slot_list);
+ mutex_unlock(&slot_list_lock);
+
+ dbg("pci_slot: %p, pci_bus: %x, device: %d, name: %s\n",
+ pci_slot, pci_bus->number, device, name);
+
+ return AE_OK;
+}
+
+/*
+ * walk_p2p_bridge - discover and walk p2p bridges
+ * @handle: points to an acpi_pci_root
+ * @context: p2p_bridge_context pointer
+ *
+ * Note that when we call ourselves recursively, we pass a different
+ * value of pci_bus in the child_context.
+ */
+static acpi_status
+walk_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+ int device, function;
+ unsigned long long adr;
+ acpi_status status;
+ acpi_handle dummy_handle;
+ acpi_walk_callback user_function;
+
+ struct pci_dev *dev;
+ struct pci_bus *pci_bus;
+ struct callback_args child_context;
+ struct callback_args *parent_context = context;
+
+ pci_bus = parent_context->pci_bus;
+ user_function = parent_context->user_function;
+
+ status = acpi_get_handle(handle, "_ADR", &dummy_handle);
+ if (ACPI_FAILURE(status))
+ return AE_OK;
+
+ status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
+ if (ACPI_FAILURE(status))
+ return AE_OK;
+
+ device = (adr >> 16) & 0xffff;
+ function = adr & 0xffff;
+
+ dev = pci_get_slot(pci_bus, PCI_DEVFN(device, function));
+ if (!dev || !dev->subordinate)
+ goto out;
+
+ child_context.pci_bus = dev->subordinate;
+ child_context.user_function = user_function;
+ child_context.root_handle = parent_context->root_handle;
+
+ dbg("p2p bridge walk, pci_bus = %x\n", dev->subordinate->number);
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
+ user_function, &child_context, NULL);
+ if (ACPI_FAILURE(status))
+ goto out;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
+ walk_p2p_bridge, &child_context, NULL);
+out:
+ pci_dev_put(dev);
+ return AE_OK;
+}
+
+/*
+ * walk_root_bridge - generic root bridge walker
+ * @handle: points to an acpi_pci_root
+ * @user_function: user callback for slot objects
+ *
+ * Call user_function for all objects underneath this root bridge.
+ * Walk p2p bridges underneath us and call user_function on those too.
+ */
+static int
+walk_root_bridge(acpi_handle handle, acpi_walk_callback user_function)
+{
+ int seg, bus;
+ unsigned long long tmp;
+ acpi_status status;
+ acpi_handle dummy_handle;
+ struct pci_bus *pci_bus;
+ struct callback_args context;
+
+ /* If the bridge doesn't have _STA, we assume it is always there */
+ status = acpi_get_handle(handle, "_STA", &dummy_handle);
+ if (ACPI_SUCCESS(status)) {
+ status = acpi_evaluate_integer(handle, "_STA", NULL, &tmp);
+ if (ACPI_FAILURE(status)) {
+ info("%s: _STA evaluation failure\n", __func__);
+ return 0;
+ }
+ if ((tmp & ACPI_STA_DEVICE_FUNCTIONING) == 0)
+ /* don't register this object */
+ return 0;
+ }
+
+ status = acpi_evaluate_integer(handle, "_SEG", NULL, &tmp);
+ seg = ACPI_SUCCESS(status) ? tmp : 0;
+
+ status = acpi_evaluate_integer(handle, "_BBN", NULL, &tmp);
+ bus = ACPI_SUCCESS(status) ? tmp : 0;
+
+ pci_bus = pci_find_bus(seg, bus);
+ if (!pci_bus)
+ return 0;
+
+ context.pci_bus = pci_bus;
+ context.user_function = user_function;
+ context.root_handle = handle;
+
+ dbg("root bridge walk, pci_bus = %x\n", pci_bus->number);
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
+ user_function, &context, NULL);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
+ walk_p2p_bridge, &context, NULL);
+ if (ACPI_FAILURE(status))
+ err("%s: walk_p2p_bridge failure - %d\n", __func__, status);
+
+ return status;
+}
+
+/*
+ * acpi_pci_slot_add
+ * @handle: points to an acpi_pci_root
+ */
+static int
+acpi_pci_slot_add(acpi_handle handle)
+{
+ acpi_status status;
+
+ status = walk_root_bridge(handle, register_slot);
+ if (ACPI_FAILURE(status))
+ err("%s: register_slot failure - %d\n", __func__, status);
+
+ return status;
+}
+
+/*
+ * acpi_pci_slot_remove
+ * @handle: points to an acpi_pci_root
+ */
+static void
+acpi_pci_slot_remove(acpi_handle handle)
+{
+ struct acpi_pci_slot *slot, *tmp;
+
+ mutex_lock(&slot_list_lock);
+ list_for_each_entry_safe(slot, tmp, &slot_list, list) {
+ if (slot->root_handle == handle) {
+ list_del(&slot->list);
+ pci_destroy_slot(slot->pci_slot);
+ kfree(slot);
+ }
+ }
+ mutex_unlock(&slot_list_lock);
+}
+
+static int do_sta_before_sun(const struct dmi_system_id *d)
+{
+ info("%s detected: will evaluate _STA before calling _SUN\n", d->ident);
+ check_sta_before_sun = 1;
+ return 0;
+}
+
+static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = {
+ /*
+ * Fujitsu Primequest machines will return 1023 to indicate an
+ * error if the _SUN method is evaluated on SxFy objects that
+ * are not present (as indicated by _STA), so for those machines,
+ * we want to check _STA before evaluating _SUN.
+ */
+ {
+ .callback = do_sta_before_sun,
+ .ident = "Fujitsu PRIMEQUEST",
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "FUJITSU LIMITED"),
+ DMI_MATCH(DMI_BIOS_VERSION, "PRIMEQUEST"),
+ },
+ },
+ {}
+};
+
+static int __init
+acpi_pci_slot_init(void)
+{
+ dmi_check_system(acpi_pci_slot_dmi_table);
+ acpi_pci_register_driver(&acpi_pci_slot_driver);
+ return 0;
+}
+
+static void __exit
+acpi_pci_slot_exit(void)
+{
+ acpi_pci_unregister_driver(&acpi_pci_slot_driver);
+}
+
+module_init(acpi_pci_slot_init);
+module_exit(acpi_pci_slot_exit);
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
new file mode 100644
index 0000000..ad55db1
--- /dev/null
+++ b/drivers/acpi/power.c
@@ -0,0 +1,795 @@
+/*
+ * acpi_power.c - ACPI Bus Power Management ($Revision: 39 $)
+ *
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+/*
+ * ACPI power-managed devices may be controlled in two ways:
+ * 1. via "Device Specific (D-State) Control"
+ * 2. via "Power Resource Control".
+ * This module is used to manage devices relying on Power Resource Control.
+ *
+ * An ACPI "power resource object" describes a software controllable power
+ * plane, clock plane, or other resource used by a power managed device.
+ * A device may rely on multiple power resources, and a power resource
+ * may be shared by multiple devices.
+ */
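+/*
+ * Illustrative ASL only: firmware might declare
+ *     PowerResource (PFN0, 0x00, 0x0000) {
+ *         Method (_STA) { ... }
+ *         Method (_ON)  { ... }
+ *         Method (_OFF) { ... }
+ *     }
+ * and a device's _PR0 package would then reference PFN0.
+ */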
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#define _COMPONENT ACPI_POWER_COMPONENT
+ACPI_MODULE_NAME("power");
+#define ACPI_POWER_CLASS "power_resource"
+#define ACPI_POWER_DEVICE_NAME "Power Resource"
+#define ACPI_POWER_FILE_INFO "info"
+#define ACPI_POWER_FILE_STATUS "state"
+#define ACPI_POWER_RESOURCE_STATE_OFF 0x00
+#define ACPI_POWER_RESOURCE_STATE_ON 0x01
+#define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF
+
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "acpi."
+int acpi_power_nocheck;
+module_param_named(power_nocheck, acpi_power_nocheck, bool, 000);
+
+static int acpi_power_add(struct acpi_device *device);
+static int acpi_power_remove(struct acpi_device *device, int type);
+static int acpi_power_resume(struct acpi_device *device);
+static int acpi_power_open_fs(struct inode *inode, struct file *file);
+
+static struct acpi_device_id power_device_ids[] = {
+ {ACPI_POWER_HID, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, power_device_ids);
+
+static struct acpi_driver acpi_power_driver = {
+ .name = "power",
+ .class = ACPI_POWER_CLASS,
+ .ids = power_device_ids,
+ .ops = {
+ .add = acpi_power_add,
+ .remove = acpi_power_remove,
+ .resume = acpi_power_resume,
+ },
+};
+
+struct acpi_power_reference {
+ struct list_head node;
+ struct acpi_device *device;
+};
+
+struct acpi_power_resource {
+ struct acpi_device * device;
+ acpi_bus_id name;
+ u32 system_level;
+ u32 order;
+ struct mutex resource_lock;
+ struct list_head reference;
+};
+
+static struct list_head acpi_power_resource_list;
+
+static const struct file_operations acpi_power_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_power_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* --------------------------------------------------------------------------
+ Power Resource Management
+ -------------------------------------------------------------------------- */
+
+static int
+acpi_power_get_context(acpi_handle handle,
+ struct acpi_power_resource **resource)
+{
+ int result = 0;
+ struct acpi_device *device = NULL;
+
+
+ if (!resource)
+ return -ENODEV;
+
+ result = acpi_bus_get_device(handle, &device);
+ if (result) {
+ printk(KERN_WARNING PREFIX "Getting context [%p]\n", handle);
+ return result;
+ }
+
+ *resource = acpi_driver_data(device);
+ if (!*resource)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int acpi_power_get_state(acpi_handle handle, int *state)
+{
+ acpi_status status = AE_OK;
+ unsigned long long sta = 0;
+
+
+ if (!handle || !state)
+ return -EINVAL;
+
+ status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ *state = (sta & 0x01)?ACPI_POWER_RESOURCE_STATE_ON:
+ ACPI_POWER_RESOURCE_STATE_OFF;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%4.4s] is %s\n",
+ acpi_ut_get_node_name(handle),
+ *state ? "on" : "off"));
+
+ return 0;
+}
+
+static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
+{
+ int result = 0, state1;
+ u32 i = 0;
+
+
+ if (!list || !state)
+ return -EINVAL;
+
+ /* The state of the list is 'on' IFF all resources are 'on'. */
+
+ for (i = 0; i < list->count; i++) {
+ /*
+ * The state of the power resource can be obtained directly from its
+ * ACPI handle, so there is no need to look up the power resource
+ * object first and then query its state again.
+ */
+ result = acpi_power_get_state(list->handles[i], &state1);
+ if (result)
+ return result;
+
+ *state = state1;
+
+ if (*state != ACPI_POWER_RESOURCE_STATE_ON)
+ break;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource list is %s\n",
+ *state ? "on" : "off"));
+
+ return result;
+}
+
+static int acpi_power_on(acpi_handle handle, struct acpi_device *dev)
+{
+ int result = 0, state;
+ int found = 0;
+ acpi_status status = AE_OK;
+ struct acpi_power_resource *resource = NULL;
+ struct list_head *node, *next;
+ struct acpi_power_reference *ref;
+
+
+ result = acpi_power_get_context(handle, &resource);
+ if (result)
+ return result;
+
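+ /*
+ * Record this device as a user of the resource unless it is already
+ * on the reference list; the resource is only turned off again once
+ * the list becomes empty (see acpi_power_off_device()).
+ */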
+ mutex_lock(&resource->resource_lock);
+ list_for_each_safe(node, next, &resource->reference) {
+ ref = container_of(node, struct acpi_power_reference, node);
+ if (dev->handle == ref->device->handle) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] already referenced by resource [%s]\n",
+ dev->pnp.bus_id, resource->name));
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ ref = kmalloc(sizeof (struct acpi_power_reference),
+ irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
+ if (!ref) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "kmalloc() failed\n"));
+ mutex_unlock(&resource->resource_lock);
+ return -ENOMEM;
+ }
+ list_add_tail(&ref->node, &resource->reference);
+ ref->device = dev;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] added to resource [%s] references\n",
+ dev->pnp.bus_id, resource->name));
+ }
+ mutex_unlock(&resource->resource_lock);
+
+ status = acpi_evaluate_object(resource->device->handle, "_ON", NULL, NULL);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ if (!acpi_power_nocheck) {
+ /*
+ * Unless acpi_power_nocheck is set, verify that the resource
+ * actually reached the 'on' state after the transition.
+ */
+ result = acpi_power_get_state(resource->device->handle,
+ &state);
+ if (result)
+ return result;
+ if (state != ACPI_POWER_RESOURCE_STATE_ON)
+ return -ENOEXEC;
+ }
+ /* Update the power resource's _device_ power state */
+ resource->device->power.state = ACPI_STATE_D0;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] turned on\n",
+ resource->name));
+ return 0;
+}
+
+static int acpi_power_off_device(acpi_handle handle, struct acpi_device *dev)
+{
+ int result = 0, state;
+ acpi_status status = AE_OK;
+ struct acpi_power_resource *resource = NULL;
+ struct list_head *node, *next;
+ struct acpi_power_reference *ref;
+
+
+ result = acpi_power_get_context(handle, &resource);
+ if (result)
+ return result;
+
+ mutex_lock(&resource->resource_lock);
+ list_for_each_safe(node, next, &resource->reference) {
+ ref = container_of(node, struct acpi_power_reference, node);
+ if (dev->handle == ref->device->handle) {
+ list_del(&ref->node);
+ kfree(ref);
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] removed from resource [%s] references\n",
+ dev->pnp.bus_id, resource->name));
+ break;
+ }
+ }
+
+ if (!list_empty(&resource->reference)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Cannot turn resource [%s] off - resource is in use\n",
+ resource->name));
+ mutex_unlock(&resource->resource_lock);
+ return 0;
+ }
+ mutex_unlock(&resource->resource_lock);
+
+ status = acpi_evaluate_object(resource->device->handle, "_OFF", NULL, NULL);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ if (!acpi_power_nocheck) {
+ /*
+ * Unless acpi_power_nocheck is set, verify that the resource
+ * actually reached the 'off' state after the transition.
+ */
+ result = acpi_power_get_state(handle, &state);
+ if (result)
+ return result;
+ if (state != ACPI_POWER_RESOURCE_STATE_OFF)
+ return -ENOEXEC;
+ }
+
+ /* Update the power resource's _device_ power state */
+ resource->device->power.state = ACPI_STATE_D3;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] turned off\n",
+ resource->name));
+
+ return 0;
+}
+
+/**
+ * acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in
+ * ACPI 3.0) _PSW (Power State Wake)
+ * @dev: Device to handle.
+ * @enable: 0 - disable, 1 - enable the wake capabilities of the device.
+ * @sleep_state: Target sleep state of the system.
+ * @dev_state: Target power state of the device.
+ *
+ * Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power
+ * State Wake) for the device, if present. On failure reset the device's
+ * wakeup.flags.valid flag.
+ *
+ * RETURN VALUE:
+ * 0 if either _DSW or _PSW has been successfully executed
+ * 0 if neither _DSW nor _PSW has been found
+ * -ENODEV if the execution of either _DSW or _PSW has failed
+ */
+int acpi_device_sleep_wake(struct acpi_device *dev,
+ int enable, int sleep_state, int dev_state)
+{
+ union acpi_object in_arg[3];
+ struct acpi_object_list arg_list = { 3, in_arg };
+ acpi_status status = AE_OK;
+
+ /*
+ * Try to execute _DSW first.
+ *
+ * Three arguments are needed for the _DSW object:
+ * Argument 0: enable/disable the wake capabilities
+ * Argument 1: target system state
+ * Argument 2: target device state
+ * When _DSW is called to disable the wake capabilities, only the
+ * first argument is meaningful; the values of the other two are
+ * ignored.
+ */
+ in_arg[0].type = ACPI_TYPE_INTEGER;
+ in_arg[0].integer.value = enable;
+ in_arg[1].type = ACPI_TYPE_INTEGER;
+ in_arg[1].integer.value = sleep_state;
+ in_arg[2].type = ACPI_TYPE_INTEGER;
+ in_arg[2].integer.value = dev_state;
+ status = acpi_evaluate_object(dev->handle, "_DSW", &arg_list, NULL);
+ if (ACPI_SUCCESS(status)) {
+ return 0;
+ } else if (status != AE_NOT_FOUND) {
+ printk(KERN_ERR PREFIX "_DSW execution failed\n");
+ dev->wakeup.flags.valid = 0;
+ return -ENODEV;
+ }
+
+ /* Execute _PSW */
+ arg_list.count = 1;
+ in_arg[0].integer.value = enable;
+ status = acpi_evaluate_object(dev->handle, "_PSW", &arg_list, NULL);
+ if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) {
+ printk(KERN_ERR PREFIX "_PSW execution failed\n");
+ dev->wakeup.flags.valid = 0;
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/*
+ * Prepare a wakeup device, two steps (Ref ACPI 2.0:P229):
+ * 1. Power on the power resources required for the wakeup device
+ * 2. Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power
+ * State Wake) for the device, if present
+ */
+int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
+{
+ int i, err;
+
+ if (!dev || !dev->wakeup.flags.valid)
+ return -EINVAL;
+
+ /*
+ * Do not execute the code below twice in a row without calling
+ * acpi_disable_wakeup_device_power() in between for the same device
+ */
+ if (dev->wakeup.flags.prepared)
+ return 0;
+
+ /* Open power resource */
+ for (i = 0; i < dev->wakeup.resources.count; i++) {
+ int ret = acpi_power_on(dev->wakeup.resources.handles[i], dev);
+ if (ret) {
+ printk(KERN_ERR PREFIX "Failed to transition power state\n");
+ dev->wakeup.flags.valid = 0;
+ return -ENODEV;
+ }
+ }
+
+ /*
+ * Passing 3 as the third argument below means the device may be placed
+ * in arbitrary power state afterwards.
+ */
+ err = acpi_device_sleep_wake(dev, 1, sleep_state, 3);
+ if (!err)
+ dev->wakeup.flags.prepared = 1;
+
+ return err;
+}
+
+/*
+ * Shut down a wakeup device, the counterpart of the function above:
+ * 1. Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power
+ * State Wake) for the device, if present
+ * 2. Shut down the power resources
+ */
+int acpi_disable_wakeup_device_power(struct acpi_device *dev)
+{
+ int i, ret;
+
+ if (!dev || !dev->wakeup.flags.valid)
+ return -EINVAL;
+
+ /*
+ * Do not execute the code below twice in a row without calling
+ * acpi_enable_wakeup_device_power() in between for the same device
+ */
+ if (!dev->wakeup.flags.prepared)
+ return 0;
+
+ dev->wakeup.flags.prepared = 0;
+
+ ret = acpi_device_sleep_wake(dev, 0, 0, 0);
+ if (ret)
+ return ret;
+
+ /* Close power resource */
+ for (i = 0; i < dev->wakeup.resources.count; i++) {
+ ret = acpi_power_off_device(dev->wakeup.resources.handles[i], dev);
+ if (ret) {
+ printk(KERN_ERR PREFIX "Failed to transition power state\n");
+ dev->wakeup.flags.valid = 0;
+ return -ENODEV;
+ }
+ }
+
+ return ret;
+}
+
+/* --------------------------------------------------------------------------
+ Device Power Management
+ -------------------------------------------------------------------------- */
+
+int acpi_power_get_inferred_state(struct acpi_device *device)
+{
+ int result = 0;
+ struct acpi_handle_list *list = NULL;
+ int list_state = 0;
+ int i = 0;
+
+
+ if (!device)
+ return -EINVAL;
+
+ device->power.state = ACPI_STATE_UNKNOWN;
+
+ /*
+ * We know a device's inferred power state when all the resources
+ * required for a given D-state are 'on'.
+ */
+ for (i = ACPI_STATE_D0; i < ACPI_STATE_D3; i++) {
+ list = &device->power.states[i].resources;
+ if (list->count < 1)
+ continue;
+
+ result = acpi_power_get_list_state(list, &list_state);
+ if (result)
+ return result;
+
+ if (list_state == ACPI_POWER_RESOURCE_STATE_ON) {
+ device->power.state = i;
+ return 0;
+ }
+ }
+
+ device->power.state = ACPI_STATE_D3;
+
+ return 0;
+}
+
+int acpi_power_transition(struct acpi_device *device, int state)
+{
+ int result = 0;
+ struct acpi_handle_list *cl = NULL; /* Current Resources */
+ struct acpi_handle_list *tl = NULL; /* Target Resources */
+ int i = 0;
+
+
+ if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
+ return -EINVAL;
+
+ if ((device->power.state < ACPI_STATE_D0)
+ || (device->power.state > ACPI_STATE_D3))
+ return -ENODEV;
+
+ cl = &device->power.states[device->power.state].resources;
+ tl = &device->power.states[state].resources;
+
+ /* TBD: Resources must be ordered. */
+
+ /*
+ * First we reference all power resources required in the target list
+ * (e.g. so the device doesn't lose power while transitioning).
+ */
+ for (i = 0; i < tl->count; i++) {
+ result = acpi_power_on(tl->handles[i], device);
+ if (result)
+ goto end;
+ }
+
+ if (device->power.state == state) {
+ goto end;
+ }
+
+ /*
+ * Then we dereference all power resources used in the current list.
+ */
+ for (i = 0; i < cl->count; i++) {
+ result = acpi_power_off_device(cl->handles[i], device);
+ if (result)
+ goto end;
+ }
+
+ end:
+ if (result)
+ device->power.state = ACPI_STATE_UNKNOWN;
+ else {
+ /* We shouldn't change the state till all above operations succeed */
+ device->power.state = state;
+ }
+
+ return result;
+}
+
+/* --------------------------------------------------------------------------
+ FS Interface (/proc)
+ -------------------------------------------------------------------------- */
+
+static struct proc_dir_entry *acpi_power_dir;
+
+static int acpi_power_seq_show(struct seq_file *seq, void *offset)
+{
+ int count = 0;
+ int result = 0, state;
+ struct acpi_power_resource *resource = NULL;
+ struct list_head *node, *next;
+ struct acpi_power_reference *ref;
+
+
+ resource = seq->private;
+
+ if (!resource)
+ goto end;
+
+ result = acpi_power_get_state(resource->device->handle, &state);
+ if (result)
+ goto end;
+
+ seq_puts(seq, "state: ");
+ switch (state) {
+ case ACPI_POWER_RESOURCE_STATE_ON:
+ seq_puts(seq, "on\n");
+ break;
+ case ACPI_POWER_RESOURCE_STATE_OFF:
+ seq_puts(seq, "off\n");
+ break;
+ default:
+ seq_puts(seq, "unknown\n");
+ break;
+ }
+
+ mutex_lock(&resource->resource_lock);
+ list_for_each_safe(node, next, &resource->reference) {
+ ref = container_of(node, struct acpi_power_reference, node);
+ count++;
+ }
+ mutex_unlock(&resource->resource_lock);
+
+ seq_printf(seq, "system level: S%d\n"
+ "order: %d\n"
+ "reference count: %d\n",
+ resource->system_level,
+ resource->order, count);
+
+ end:
+ return 0;
+}
+
+static int acpi_power_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_power_seq_show, PDE(inode)->data);
+}
+
+static int acpi_power_add_fs(struct acpi_device *device)
+{
+ struct proc_dir_entry *entry = NULL;
+
+
+ if (!device)
+ return -EINVAL;
+
+ if (!acpi_device_dir(device)) {
+ acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
+ acpi_power_dir);
+ if (!acpi_device_dir(device))
+ return -ENODEV;
+ }
+
+ /* 'status' [R] */
+ entry = proc_create_data(ACPI_POWER_FILE_STATUS,
+ S_IRUGO, acpi_device_dir(device),
+ &acpi_power_fops, acpi_driver_data(device));
+ if (!entry)
+ return -EIO;
+ return 0;
+}
+
+static int acpi_power_remove_fs(struct acpi_device *device)
+{
+
+ if (acpi_device_dir(device)) {
+ remove_proc_entry(ACPI_POWER_FILE_STATUS,
+ acpi_device_dir(device));
+ remove_proc_entry(acpi_device_bid(device), acpi_power_dir);
+ acpi_device_dir(device) = NULL;
+ }
+
+ return 0;
+}
+
+/* --------------------------------------------------------------------------
+ Driver Interface
+ -------------------------------------------------------------------------- */
+
+static int acpi_power_add(struct acpi_device *device)
+{
+ int result = 0, state;
+ acpi_status status = AE_OK;
+ struct acpi_power_resource *resource = NULL;
+ union acpi_object acpi_object;
+ struct acpi_buffer buffer = { sizeof(acpi_object), &acpi_object };
+
+
+ if (!device)
+ return -EINVAL;
+
+ resource = kzalloc(sizeof(struct acpi_power_resource), GFP_KERNEL);
+ if (!resource)
+ return -ENOMEM;
+
+ resource->device = device;
+ mutex_init(&resource->resource_lock);
+ INIT_LIST_HEAD(&resource->reference);
+ strcpy(resource->name, device->pnp.bus_id);
+ strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
+ device->driver_data = resource;
+
+ /* Evaluate the object to get the system level and resource order. */
+ status = acpi_evaluate_object(device->handle, NULL, NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ result = -ENODEV;
+ goto end;
+ }
+ resource->system_level = acpi_object.power_resource.system_level;
+ resource->order = acpi_object.power_resource.resource_order;
+
+ result = acpi_power_get_state(device->handle, &state);
+ if (result)
+ goto end;
+
+ switch (state) {
+ case ACPI_POWER_RESOURCE_STATE_ON:
+ device->power.state = ACPI_STATE_D0;
+ break;
+ case ACPI_POWER_RESOURCE_STATE_OFF:
+ device->power.state = ACPI_STATE_D3;
+ break;
+ default:
+ device->power.state = ACPI_STATE_UNKNOWN;
+ break;
+ }
+
+ result = acpi_power_add_fs(device);
+ if (result)
+ goto end;
+
+ printk(KERN_INFO PREFIX "%s [%s] (%s)\n", acpi_device_name(device),
+ acpi_device_bid(device), state ? "on" : "off");
+
+ end:
+ if (result)
+ kfree(resource);
+
+ return result;
+}
+
+static int acpi_power_remove(struct acpi_device *device, int type)
+{
+ struct acpi_power_resource *resource = NULL;
+ struct list_head *node, *next;
+
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ resource = acpi_driver_data(device);
+
+ acpi_power_remove_fs(device);
+
+ mutex_lock(&resource->resource_lock);
+ list_for_each_safe(node, next, &resource->reference) {
+ struct acpi_power_reference *ref = container_of(node, struct acpi_power_reference, node);
+ list_del(&ref->node);
+ kfree(ref);
+ }
+ mutex_unlock(&resource->resource_lock);
+
+ kfree(resource);
+
+ return 0;
+}
+
+static int acpi_power_resume(struct acpi_device *device)
+{
+ int result = 0, state;
+ struct acpi_power_resource *resource = NULL;
+ struct acpi_power_reference *ref;
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ resource = acpi_driver_data(device);
+
+ result = acpi_power_get_state(device->handle, &state);
+ if (result)
+ return result;
+
+ mutex_lock(&resource->resource_lock);
+ if (state == ACPI_POWER_RESOURCE_STATE_OFF &&
+ !list_empty(&resource->reference)) {
+ ref = container_of(resource->reference.next, struct acpi_power_reference, node);
+ mutex_unlock(&resource->resource_lock);
+ result = acpi_power_on(device->handle, ref->device);
+ return result;
+ }
+
+ mutex_unlock(&resource->resource_lock);
+ return 0;
+}
+
+static int __init acpi_power_init(void)
+{
+ int result = 0;
+
+
+ if (acpi_disabled)
+ return 0;
+
+ INIT_LIST_HEAD(&acpi_power_resource_list);
+
+ acpi_power_dir = proc_mkdir(ACPI_POWER_CLASS, acpi_root_dir);
+ if (!acpi_power_dir)
+ return -ENODEV;
+
+ result = acpi_bus_register_driver(&acpi_power_driver);
+ if (result < 0) {
+ remove_proc_entry(ACPI_POWER_CLASS, acpi_root_dir);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+subsys_initcall(acpi_power_init);
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
new file mode 100644
index 0000000..3494836
--- /dev/null
+++ b/drivers/acpi/processor_core.c
@@ -0,0 +1,1190 @@
+/*
+ * processor_core.c - ACPI Processor Driver ($Revision: 71 $)
+ *
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
+ * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ * - Added processor hotplug support
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * TBD:
+ * 1. Make # power states dynamic.
+ * 2. Support duty_cycle values that span bit 4.
+ * 3. Optimize by having scheduler determine busyness instead of
+ * having us try to calculate it here.
+ * 4. Need C1 timing -- must modify kernel (IRQ handler) to get this.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
+#include <linux/cpufreq.h>
+#include <linux/cpu.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/dmi.h>
+#include <linux/moduleparam.h>
+#include <linux/cpuidle.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/cpu.h>
+#include <asm/delay.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+#include <asm/smp.h>
+#include <asm/acpi.h>
+
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/processor.h>
+
+#define ACPI_PROCESSOR_CLASS "processor"
+#define ACPI_PROCESSOR_DEVICE_NAME "Processor"
+#define ACPI_PROCESSOR_FILE_INFO "info"
+#define ACPI_PROCESSOR_FILE_THROTTLING "throttling"
+#define ACPI_PROCESSOR_FILE_LIMIT "limit"
+#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
+#define ACPI_PROCESSOR_NOTIFY_POWER 0x81
+#define ACPI_PROCESSOR_NOTIFY_THROTTLING 0x82
+
+#define ACPI_PROCESSOR_LIMIT_USER 0
+#define ACPI_PROCESSOR_LIMIT_THERMAL 1
+
+#define _COMPONENT ACPI_PROCESSOR_COMPONENT
+ACPI_MODULE_NAME("processor_core");
+
+MODULE_AUTHOR("Paul Diefenbaugh");
+MODULE_DESCRIPTION("ACPI Processor Driver");
+MODULE_LICENSE("GPL");
+
+static int acpi_processor_add(struct acpi_device *device);
+static int acpi_processor_start(struct acpi_device *device);
+static int acpi_processor_remove(struct acpi_device *device, int type);
+static int acpi_processor_info_open_fs(struct inode *inode, struct file *file);
+static void acpi_processor_notify(acpi_handle handle, u32 event, void *data);
+static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
+static int acpi_processor_handle_eject(struct acpi_processor *pr);
+
+
+static const struct acpi_device_id processor_device_ids[] = {
+ {ACPI_PROCESSOR_OBJECT_HID, 0},
+ {ACPI_PROCESSOR_HID, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, processor_device_ids);
+
+static struct acpi_driver acpi_processor_driver = {
+ .name = "processor",
+ .class = ACPI_PROCESSOR_CLASS,
+ .ids = processor_device_ids,
+ .ops = {
+ .add = acpi_processor_add,
+ .remove = acpi_processor_remove,
+ .start = acpi_processor_start,
+ .suspend = acpi_processor_suspend,
+ .resume = acpi_processor_resume,
+ },
+};
+
+#define INSTALL_NOTIFY_HANDLER 1
+#define UNINSTALL_NOTIFY_HANDLER 2
+
+static const struct file_operations acpi_processor_info_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_processor_info_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+DEFINE_PER_CPU(struct acpi_processor *, processors);
+struct acpi_processor_errata errata __read_mostly;
+static int set_no_mwait(const struct dmi_system_id *id)
+{
+ printk(KERN_NOTICE PREFIX "%s detected - "
+ "disabling mwait for CPU C-states\n", id->ident);
+ idle_nomwait = 1;
+ return 0;
+}
+
+static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
+ {
+ set_no_mwait, "IFL91 board", {
+ DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
+ DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
+ {
+ set_no_mwait, "Extensa 5220", {
+ DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
+ DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
+ {},
+};
+
+/* --------------------------------------------------------------------------
+ Errata Handling
+ -------------------------------------------------------------------------- */
+
+static int acpi_processor_errata_piix4(struct pci_dev *dev)
+{
+ u8 value1 = 0;
+ u8 value2 = 0;
+
+
+ if (!dev)
+ return -EINVAL;
+
+ /*
+ * Note that 'dev' references the PIIX4 ACPI Controller.
+ */
+
+ switch (dev->revision) {
+ case 0:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
+ break;
+ case 1:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
+ break;
+ case 2:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
+ break;
+ case 3:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
+ break;
+ default:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
+ break;
+ }
+
+ switch (dev->revision) {
+
+ case 0: /* PIIX4 A-step */
+ case 1: /* PIIX4 B-step */
+ /*
+ * See specification changes #13 ("Manual Throttle Duty Cycle")
+ * and #14 ("Enabling and Disabling Manual Throttle"), plus
+ * erratum #5 ("STPCLK# Deassertion Time") from the January
+ * 2002 PIIX4 specification update. Applies to only older
+ * PIIX4 models.
+ */
+ errata.piix4.throttle = 1;
+
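+ /*
+ * Note: no break here - the BM-IDE and Type-F DMA errata below
+ * apply to all PIIX4 models, including the A- and B-steps.
+ */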
+ case 2: /* PIIX4E */
+ case 3: /* PIIX4M */
+ /*
+ * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
+ * Livelock") from the January 2002 PIIX4 specification update.
+ * Applies to all PIIX4 models.
+ */
+
+ /*
+ * BM-IDE
+ * ------
+ * Find the PIIX4 IDE Controller and get the Bus Master IDE
+ * Status register address. We'll use this later to read
+ * each IDE controller's DMA status to make sure we catch all
+ * DMA activity.
+ */
+ dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82371AB,
+ PCI_ANY_ID, PCI_ANY_ID, NULL);
+ if (dev) {
+ errata.piix4.bmisx = pci_resource_start(dev, 4);
+ pci_dev_put(dev);
+ }
+
+ /*
+ * Type-F DMA
+ * ----------
+ * Find the PIIX4 ISA Controller and read the Motherboard
+ * DMA controller's status to see if Type-F (Fast) DMA mode
+ * is enabled (bit 7) on either channel. Note that we'll
+ * disable C3 support if this is enabled, as some legacy
+ * devices won't operate well if fast DMA is disabled.
+ */
+ dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82371AB_0,
+ PCI_ANY_ID, PCI_ANY_ID, NULL);
+ if (dev) {
+ pci_read_config_byte(dev, 0x76, &value1);
+ pci_read_config_byte(dev, 0x77, &value2);
+ if ((value1 & 0x80) || (value2 & 0x80))
+ errata.piix4.fdma = 1;
+ pci_dev_put(dev);
+ }
+
+ break;
+ }
+
+ if (errata.piix4.bmisx)
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Bus master activity detection (BM-IDE) erratum enabled\n"));
+ if (errata.piix4.fdma)
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Type-F DMA livelock erratum (C3 disabled)\n"));
+
+ return 0;
+}
+
+static int acpi_processor_errata(struct acpi_processor *pr)
+{
+ int result = 0;
+ struct pci_dev *dev = NULL;
+
+
+ if (!pr)
+ return -EINVAL;
+
+ /*
+ * PIIX4
+ */
+ dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
+ PCI_ANY_ID, NULL);
+ if (dev) {
+ result = acpi_processor_errata_piix4(dev);
+ pci_dev_put(dev);
+ }
+
+ return result;
+}
+
+/* --------------------------------------------------------------------------
+ Common ACPI processor functions
+ -------------------------------------------------------------------------- */
+
+/*
+ * _PDC is required for a BIOS-OS handshake for most of the newer
+ * ACPI processor features.
+ */
+static int acpi_processor_set_pdc(struct acpi_processor *pr)
+{
+ struct acpi_object_list *pdc_in = pr->pdc;
+ acpi_status status = AE_OK;
+
+
+ if (!pdc_in)
+ return status;
+ if (idle_nomwait) {
+ /*
+ * If mwait is disabled for CPU C-states, clear the C2C3_FFH and
+ * C1_FFH access-mode bits in the capability buffer passed to _PDC.
+ */
+ union acpi_object *obj;
+ u32 *buffer = NULL;
+
+ obj = pdc_in->pointer;
+ buffer = (u32 *)(obj->buffer.pointer);
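+ /*
+ * The _PDC input, as built by the arch code in
+ * arch_acpi_processor_init_pdc(), is a single buffer of u32s:
+ * word 0 is the _PDC revision, word 1 the number of capability
+ * words, and word 2 the first capability word, which is why the
+ * FFH bits are cleared from buffer[2] below.
+ */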
+ buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);
+
+ }
+ status = acpi_evaluate_object(pr->handle, "_PDC", pdc_in, NULL);
+
+ if (ACPI_FAILURE(status))
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Could not evaluate _PDC, using legacy perf. control...\n"));
+
+ return status;
+}
+
+/* --------------------------------------------------------------------------
+ FS Interface (/proc)
+ -------------------------------------------------------------------------- */
+
+static struct proc_dir_entry *acpi_processor_dir = NULL;
+
+static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset)
+{
+ struct acpi_processor *pr = seq->private;
+
+
+ if (!pr)
+ goto end;
+
+ seq_printf(seq, "processor id: %d\n"
+ "acpi id: %d\n"
+ "bus mastering control: %s\n"
+ "power management: %s\n"
+ "throttling control: %s\n"
+ "limit interface: %s\n",
+ pr->id,
+ pr->acpi_id,
+ pr->flags.bm_control ? "yes" : "no",
+ pr->flags.power ? "yes" : "no",
+ pr->flags.throttling ? "yes" : "no",
+ pr->flags.limit ? "yes" : "no");
+
+ end:
+ return 0;
+}
+
+static int acpi_processor_info_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_processor_info_seq_show,
+ PDE(inode)->data);
+}
+
+static int acpi_processor_add_fs(struct acpi_device *device)
+{
+ struct proc_dir_entry *entry = NULL;
+
+
+ if (!acpi_device_dir(device)) {
+ acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
+ acpi_processor_dir);
+ if (!acpi_device_dir(device))
+ return -ENODEV;
+ }
+ acpi_device_dir(device)->owner = THIS_MODULE;
+
+ /* 'info' [R] */
+ entry = proc_create_data(ACPI_PROCESSOR_FILE_INFO,
+ S_IRUGO, acpi_device_dir(device),
+ &acpi_processor_info_fops,
+ acpi_driver_data(device));
+ if (!entry)
+ return -EIO;
+
+ /* 'throttling' [R/W] */
+ entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING,
+ S_IFREG | S_IRUGO | S_IWUSR,
+ acpi_device_dir(device),
+ &acpi_processor_throttling_fops,
+ acpi_driver_data(device));
+ if (!entry)
+ return -EIO;
+
+ /* 'limit' [R/W] */
+ entry = proc_create_data(ACPI_PROCESSOR_FILE_LIMIT,
+ S_IFREG | S_IRUGO | S_IWUSR,
+ acpi_device_dir(device),
+ &acpi_processor_limit_fops,
+ acpi_driver_data(device));
+ if (!entry)
+ return -EIO;
+ return 0;
+}
+
+static int acpi_processor_remove_fs(struct acpi_device *device)
+{
+
+ if (acpi_device_dir(device)) {
+ remove_proc_entry(ACPI_PROCESSOR_FILE_INFO,
+ acpi_device_dir(device));
+ remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
+ acpi_device_dir(device));
+ remove_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
+ acpi_device_dir(device));
+ remove_proc_entry(acpi_device_bid(device), acpi_processor_dir);
+ acpi_device_dir(device) = NULL;
+ }
+
+ return 0;
+}
+
+/* Use the acpi id in the MADT to map CPUs on SMP systems */
+
+#ifndef CONFIG_SMP
+static int get_cpu_id(acpi_handle handle, int type, u32 acpi_id) { return -1; }
+#else
+
+static struct acpi_table_madt *madt;
+
+static int map_lapic_id(struct acpi_subtable_header *entry,
+ u32 acpi_id, int *apic_id)
+{
+ struct acpi_madt_local_apic *lapic =
+ (struct acpi_madt_local_apic *)entry;
+ if ((lapic->lapic_flags & ACPI_MADT_ENABLED) &&
+ lapic->processor_id == acpi_id) {
+ *apic_id = lapic->id;
+ return 1;
+ }
+ return 0;
+}
+
+static int map_lsapic_id(struct acpi_subtable_header *entry,
+ int device_declaration, u32 acpi_id, int *apic_id)
+{
+ struct acpi_madt_local_sapic *lsapic =
+ (struct acpi_madt_local_sapic *)entry;
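+ /*
+ * The physical id of a local SAPIC is its 8-bit ID in the high
+ * byte combined with its 8-bit EID in the low byte.
+ */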
+ u32 tmp = (lsapic->id << 8) | lsapic->eid;
+
+ /* Only check enabled APICs */
+ if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
+ return 0;
+
+ /* Device statement declaration type */
+ if (device_declaration) {
+ if (entry->length < 16)
+ printk(KERN_ERR PREFIX
+ "Invalid LSAPIC with Device type processor (SAPIC ID %#x)\n",
+ tmp);
+ else if (lsapic->uid == acpi_id)
+ goto found;
+ /* Processor statement declaration type */
+ } else if (lsapic->processor_id == acpi_id)
+ goto found;
+
+ return 0;
+found:
+ *apic_id = tmp;
+ return 1;
+}
+
+static int map_madt_entry(int type, u32 acpi_id)
+{
+ unsigned long madt_end, entry;
+ int apic_id = -1;
+
+ if (!madt)
+ return apic_id;
+
+ entry = (unsigned long)madt;
+ madt_end = entry + madt->header.length;
+
+ /* Parse all entries looking for a match. */
+
+ entry += sizeof(struct acpi_table_madt);
+ while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
+ struct acpi_subtable_header *header =
+ (struct acpi_subtable_header *)entry;
+ if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
+ if (map_lapic_id(header, acpi_id, &apic_id))
+ break;
+ } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
+ if (map_lsapic_id(header, type, acpi_id, &apic_id))
+ break;
+ }
+ entry += header->length;
+ }
+ return apic_id;
+}
+
+static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ struct acpi_subtable_header *header;
+ int apic_id = -1;
+
+ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
+ goto exit;
+
+ if (!buffer.length || !buffer.pointer)
+ goto exit;
+
+ obj = buffer.pointer;
+ if (obj->type != ACPI_TYPE_BUFFER ||
+ obj->buffer.length < sizeof(struct acpi_subtable_header)) {
+ goto exit;
+ }
+
+ header = (struct acpi_subtable_header *)obj->buffer.pointer;
+ if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
+ map_lapic_id(header, acpi_id, &apic_id);
+ } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
+ map_lsapic_id(header, type, acpi_id, &apic_id);
+ }
+
+exit:
+ if (buffer.pointer)
+ kfree(buffer.pointer);
+ return apic_id;
+}
+
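+/*
+ * Map an ACPI processor id to a logical CPU: first try the processor's
+ * _MAT method, then fall back to scanning the static MADT, and finally
+ * match the resulting APIC id against cpu_physical_id() of each possible
+ * CPU.
+ */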
+static int get_cpu_id(acpi_handle handle, int type, u32 acpi_id)
+{
+ int i;
+ int apic_id = -1;
+
+ apic_id = map_mat_entry(handle, type, acpi_id);
+ if (apic_id == -1)
+ apic_id = map_madt_entry(type, acpi_id);
+ if (apic_id == -1)
+ return apic_id;
+
+ for_each_possible_cpu(i) {
+ if (cpu_physical_id(i) == apic_id)
+ return i;
+ }
+ return -1;
+}
+#endif
+
+/* --------------------------------------------------------------------------
+ Driver Interface
+ -------------------------------------------------------------------------- */
+
+static int acpi_processor_get_info(struct acpi_device *device)
+{
+ acpi_status status = 0;
+ union acpi_object object = { 0 };
+ struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
+ struct acpi_processor *pr;
+ int cpu_index, device_declaration = 0;
+ static int cpu0_initialized;
+
+ pr = acpi_driver_data(device);
+ if (!pr)
+ return -EINVAL;
+
+ if (num_online_cpus() > 1)
+ errata.smp = TRUE;
+
+ acpi_processor_errata(pr);
+
+ /*
+ * Check to see if we have bus mastering arbitration control. This
+ * is required for proper C3 usage (to maintain cache coherency).
+ */
+ if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
+ pr->flags.bm_control = 1;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Bus mastering arbitration control present\n"));
+ } else
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "No bus mastering arbitration control\n"));
+
+ if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_HID)) {
+ /*
+ * Declared with "Device" statement; match _UID.
+ * Note that we don't handle string _UIDs yet.
+ */
+ unsigned long long value;
+ status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
+ NULL, &value);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX
+ "Evaluating processor _UID [%#x]\n", status);
+ return -ENODEV;
+ }
+ device_declaration = 1;
+ pr->acpi_id = value;
+ } else {
+ /* Declared with "Processor" statement; match ProcessorID */
+ status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX "Evaluating processor object\n");
+ return -ENODEV;
+ }
+
+ /*
+ * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
+ * >>> 'acpi_get_processor_id(acpi_id, &id)' in
+ * arch/xxx/acpi.c
+ */
+ pr->acpi_id = object.processor.proc_id;
+ }
+ cpu_index = get_cpu_id(pr->handle, device_declaration, pr->acpi_id);
+
+ /* Handle UP system running SMP kernel, with no LAPIC in MADT */
+ if (!cpu0_initialized && (cpu_index == -1) &&
+ (num_online_cpus() == 1)) {
+ cpu_index = 0;
+ }
+
+ cpu0_initialized = 1;
+
+ pr->id = cpu_index;
+
+ /*
+ * Extra Processor objects may be enumerated on MP systems with
+ * less than the max # of CPUs. They should be ignored _iff
+ * they are physically not present.
+ */
+ if (pr->id == -1) {
+ if (ACPI_FAILURE
+ (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
+ return -ENODEV;
+ }
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
+ pr->acpi_id));
+
+ if (!object.processor.pblk_address)
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
+ else if (object.processor.pblk_length != 6)
+ printk(KERN_ERR PREFIX "Invalid PBLK length [%d]\n",
+ object.processor.pblk_length);
+ else {
+ pr->throttling.address = object.processor.pblk_address;
+ pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
+ pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
+
+ pr->pblk = object.processor.pblk_address;
+
+ /*
+ * We don't care about error returns - we just try to mark
+ * these reserved so that nobody else is confused into thinking
+ * that this region might be unused.
+ *
+ * (In particular, allocating the IO range for Cardbus)
+ */
+ request_region(pr->throttling.address, 6, "ACPI CPU throttle");
+ }
+
+ /*
+ * If ACPI describes a slot number for this CPU, we can use it
+ * to ensure we get the right value in the "physical id" field
+ * of /proc/cpuinfo
+ */
+ status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer);
+ if (ACPI_SUCCESS(status))
+ arch_fix_phys_package_id(pr->id, object.integer.value);
+
+ return 0;
+}
+
+static DEFINE_PER_CPU(void *, processor_device_array);
+
+static int __cpuinit acpi_processor_start(struct acpi_device *device)
+{
+ int result = 0;
+ acpi_status status = AE_OK;
+ struct acpi_processor *pr;
+ struct sys_device *sysdev;
+
+ pr = acpi_driver_data(device);
+
+ result = acpi_processor_get_info(device);
+ if (result) {
+ /* Processor is physically not present */
+ return 0;
+ }
+
+ BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
+
+ /*
+ * Buggy BIOS check:
+ * the ACPI id of a processor can be reported incorrectly by the BIOS,
+ * so don't trust it blindly.
+ */
+ if (per_cpu(processor_device_array, pr->id) != NULL &&
+ per_cpu(processor_device_array, pr->id) != device) {
+ printk(KERN_WARNING "BIOS reported wrong ACPI id "
+ "for the processor\n");
+ return -ENODEV;
+ }
+ per_cpu(processor_device_array, pr->id) = device;
+
+ per_cpu(processors, pr->id) = pr;
+
+ result = acpi_processor_add_fs(device);
+ if (result)
+ goto end;
+
+ sysdev = get_cpu_sysdev(pr->id);
+ if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev"))
+ return -EFAULT;
+
+ status = acpi_install_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
+ acpi_processor_notify, pr);
+
+ /* _PDC call should be done before doing anything else (if reqd.). */
+ arch_acpi_processor_init_pdc(pr);
+ acpi_processor_set_pdc(pr);
+#ifdef CONFIG_CPU_FREQ
+ acpi_processor_ppc_has_changed(pr);
+#endif
+ acpi_processor_get_throttling_info(pr);
+ acpi_processor_get_limit_info(pr);
+
+
+ acpi_processor_power_init(pr, device);
+
+ pr->cdev = thermal_cooling_device_register("Processor", device,
+ &processor_cooling_ops);
+ if (IS_ERR(pr->cdev)) {
+ result = PTR_ERR(pr->cdev);
+ goto end;
+ }
+
+ dev_info(&device->dev, "registered as cooling_device%d\n",
+ pr->cdev->id);
+
+ result = sysfs_create_link(&device->dev.kobj,
+ &pr->cdev->device.kobj,
+ "thermal_cooling");
+ if (result)
+ printk(KERN_ERR PREFIX "Create sysfs link\n");
+ result = sysfs_create_link(&pr->cdev->device.kobj,
+ &device->dev.kobj,
+ "device");
+ if (result)
+ printk(KERN_ERR PREFIX "Create sysfs link\n");
+
+ if (pr->flags.throttling) {
+ printk(KERN_INFO PREFIX "%s [%s] (supports",
+ acpi_device_name(device), acpi_device_bid(device));
+ printk(" %d throttling states", pr->throttling.state_count);
+ printk(")\n");
+ }
+
+ end:
+
+ return result;
+}
+
+static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct acpi_processor *pr = data;
+ struct acpi_device *device = NULL;
+ int saved;
+
+ if (!pr)
+ return;
+
+ if (acpi_bus_get_device(pr->handle, &device))
+ return;
+
+ switch (event) {
+ case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
+ saved = pr->performance_platform_limit;
+ acpi_processor_ppc_has_changed(pr);
+ if (saved == pr->performance_platform_limit)
+ break;
+ acpi_bus_generate_proc_event(device, event,
+ pr->performance_platform_limit);
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+ dev_name(&device->dev), event,
+ pr->performance_platform_limit);
+ break;
+ case ACPI_PROCESSOR_NOTIFY_POWER:
+ acpi_processor_cst_has_changed(pr);
+ acpi_bus_generate_proc_event(device, event, 0);
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+ dev_name(&device->dev), event, 0);
+ break;
+ case ACPI_PROCESSOR_NOTIFY_THROTTLING:
+ acpi_processor_tstate_has_changed(pr);
+ acpi_bus_generate_proc_event(device, event, 0);
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+ dev_name(&device->dev), event, 0);
+ break;
+ default:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Unsupported event [0x%x]\n", event));
+ break;
+ }
+
+ return;
+}
+
+static int acpi_cpu_soft_notify(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+ struct acpi_processor *pr = per_cpu(processors, cpu);
+
+ if (action == CPU_ONLINE && pr) {
+ acpi_processor_ppc_has_changed(pr);
+ acpi_processor_cst_has_changed(pr);
+ acpi_processor_tstate_has_changed(pr);
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block acpi_cpu_notifier =
+{
+ .notifier_call = acpi_cpu_soft_notify,
+};
+
+static int acpi_processor_add(struct acpi_device *device)
+{
+ struct acpi_processor *pr = NULL;
+
+
+ if (!device)
+ return -EINVAL;
+
+ pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
+ if (!pr)
+ return -ENOMEM;
+
+ pr->handle = device->handle;
+ strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
+ device->driver_data = pr;
+
+ return 0;
+}
+
+static int acpi_processor_remove(struct acpi_device *device, int type)
+{
+ acpi_status status = AE_OK;
+ struct acpi_processor *pr = NULL;
+
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ pr = acpi_driver_data(device);
+
+ if (pr->id >= nr_cpu_ids) {
+ kfree(pr);
+ return 0;
+ }
+
+ if (type == ACPI_BUS_REMOVAL_EJECT) {
+ if (acpi_processor_handle_eject(pr))
+ return -EINVAL;
+ }
+
+ acpi_processor_power_exit(pr, device);
+
+ status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
+ acpi_processor_notify);
+
+ sysfs_remove_link(&device->dev.kobj, "sysdev");
+
+ acpi_processor_remove_fs(device);
+
+ if (pr->cdev) {
+ sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
+ sysfs_remove_link(&pr->cdev->device.kobj, "device");
+ thermal_cooling_device_unregister(pr->cdev);
+ pr->cdev = NULL;
+ }
+
+ per_cpu(processors, pr->id) = NULL;
+ per_cpu(processor_device_array, pr->id) = NULL;
+ kfree(pr);
+
+ return 0;
+}
+
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+/****************************************************************************
+ * ACPI processor hotplug support *
+ ****************************************************************************/
+
+static int is_processor_present(acpi_handle handle)
+{
+ acpi_status status;
+ unsigned long long sta = 0;
+
+
+ status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
+
+ if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT))
+ return 1;
+
+ /*
+ * _STA is mandatory for a processor that supports hot plug
+ */
+ if (status == AE_NOT_FOUND)
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Processor does not support hot plug\n"));
+ else
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Processor Device is not present"));
+ return 0;
+}
+
+static
+int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
+{
+ acpi_handle phandle;
+ struct acpi_device *pdev;
+ struct acpi_processor *pr;
+
+
+ if (acpi_get_parent(handle, &phandle)) {
+ return -ENODEV;
+ }
+
+ if (acpi_bus_get_device(phandle, &pdev)) {
+ return -ENODEV;
+ }
+
+ if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_PROCESSOR)) {
+ return -ENODEV;
+ }
+
+ acpi_bus_start(*device);
+
+ pr = acpi_driver_data(*device);
+ if (!pr)
+ return -ENODEV;
+
+ if ((pr->id >= 0) && (pr->id < nr_cpu_ids)) {
+ kobject_uevent(&(*device)->dev.kobj, KOBJ_ONLINE);
+ }
+ return 0;
+}
+
+static void __ref acpi_processor_hotplug_notify(acpi_handle handle,
+ u32 event, void *data)
+{
+ struct acpi_processor *pr;
+ struct acpi_device *device = NULL;
+ int result;
+
+
+ switch (event) {
+ case ACPI_NOTIFY_BUS_CHECK:
+ case ACPI_NOTIFY_DEVICE_CHECK:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Processor driver received %s event\n",
+ (event == ACPI_NOTIFY_BUS_CHECK) ?
+ "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"));
+
+ if (!is_processor_present(handle))
+ break;
+
+ if (acpi_bus_get_device(handle, &device)) {
+ result = acpi_processor_device_add(handle, &device);
+ if (result)
+ printk(KERN_ERR PREFIX
+ "Unable to add the device\n");
+ break;
+ }
+
+ pr = acpi_driver_data(device);
+ if (!pr) {
+ printk(KERN_ERR PREFIX "Driver data is NULL\n");
+ break;
+ }
+
+ if (pr->id >= 0 && (pr->id < nr_cpu_ids)) {
+ kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
+ break;
+ }
+
+ result = acpi_processor_start(device);
+ if ((!result) && ((pr->id >= 0) && (pr->id < nr_cpu_ids))) {
+ kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
+ } else {
+ printk(KERN_ERR PREFIX "Device [%s] failed to start\n",
+ acpi_device_bid(device));
+ }
+ break;
+ case ACPI_NOTIFY_EJECT_REQUEST:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "received ACPI_NOTIFY_EJECT_REQUEST\n"));
+
+ if (acpi_bus_get_device(handle, &device)) {
+ printk(KERN_ERR PREFIX
+ "Device doesn't exist, dropping EJECT\n");
+ break;
+ }
+ pr = acpi_driver_data(device);
+ if (!pr) {
+ printk(KERN_ERR PREFIX
+ "Driver data is NULL, dropping EJECT\n");
+ return;
+ }
+
+ if ((pr->id < nr_cpu_ids) && (cpu_present(pr->id)))
+ kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
+ break;
+ default:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Unsupported event [0x%x]\n", event));
+ break;
+ }
+
+ return;
+}
+
+static acpi_status
+processor_walk_namespace_cb(acpi_handle handle,
+ u32 lvl, void *context, void **rv)
+{
+ acpi_status status;
+ int *action = context;
+ acpi_object_type type = 0;
+
+ status = acpi_get_type(handle, &type);
+ if (ACPI_FAILURE(status))
+ return (AE_OK);
+
+ if (type != ACPI_TYPE_PROCESSOR)
+ return (AE_OK);
+
+ switch (*action) {
+ case INSTALL_NOTIFY_HANDLER:
+ acpi_install_notify_handler(handle,
+ ACPI_SYSTEM_NOTIFY,
+ acpi_processor_hotplug_notify,
+ NULL);
+ break;
+ case UNINSTALL_NOTIFY_HANDLER:
+ acpi_remove_notify_handler(handle,
+ ACPI_SYSTEM_NOTIFY,
+ acpi_processor_hotplug_notify);
+ break;
+ default:
+ break;
+ }
+
+ return (AE_OK);
+}
+
+static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
+{
+
+ if (!is_processor_present(handle)) {
+ return AE_ERROR;
+ }
+
+ if (acpi_map_lsapic(handle, p_cpu))
+ return AE_ERROR;
+
+ if (arch_register_cpu(*p_cpu)) {
+ acpi_unmap_lsapic(*p_cpu);
+ return AE_ERROR;
+ }
+
+ return AE_OK;
+}
+
+static int acpi_processor_handle_eject(struct acpi_processor *pr)
+{
+ if (cpu_online(pr->id))
+ cpu_down(pr->id);
+
+ arch_unregister_cpu(pr->id);
+ acpi_unmap_lsapic(pr->id);
+ return (0);
+}
+#else
+static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
+{
+ return AE_ERROR;
+}
+static int acpi_processor_handle_eject(struct acpi_processor *pr)
+{
+ return (-EINVAL);
+}
+#endif
+
+static
+void acpi_processor_install_hotplug_notify(void)
+{
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+ int action = INSTALL_NOTIFY_HANDLER;
+ acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
+ ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ processor_walk_namespace_cb, &action, NULL);
+#endif
+ register_hotcpu_notifier(&acpi_cpu_notifier);
+}
+
+static
+void acpi_processor_uninstall_hotplug_notify(void)
+{
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+ int action = UNINSTALL_NOTIFY_HANDLER;
+ acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
+ ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ processor_walk_namespace_cb, &action, NULL);
+#endif
+ unregister_hotcpu_notifier(&acpi_cpu_notifier);
+}
+
+/*
+ * We keep the driver loaded even when ACPI is not running.
+ * This is needed for the powernow-k8 driver, which works even without
+ * ACPI but needs symbols from this driver.
+ */
+
+static int __init acpi_processor_init(void)
+{
+ int result = 0;
+
+ memset(&errata, 0, sizeof(errata));
+
+#ifdef CONFIG_SMP
+ if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
+ (struct acpi_table_header **)&madt)))
+ madt = NULL;
+#endif
+
+ acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
+ if (!acpi_processor_dir)
+ return -ENOMEM;
+ acpi_processor_dir->owner = THIS_MODULE;
+
+ /*
+ * Check whether the system matches an entry in the DMI table above.
+ * If so, OSPM should not use mwait for CPU C-states.
+ */
+ dmi_check_system(processor_idle_dmi_table);
+ result = cpuidle_register_driver(&acpi_idle_driver);
+ if (result < 0)
+ goto out_proc;
+
+ result = acpi_bus_register_driver(&acpi_processor_driver);
+ if (result < 0)
+ goto out_cpuidle;
+
+ acpi_processor_install_hotplug_notify();
+
+ acpi_thermal_cpufreq_init();
+
+ acpi_processor_ppc_init();
+
+ acpi_processor_throttling_init();
+
+ return 0;
+
+out_cpuidle:
+ cpuidle_unregister_driver(&acpi_idle_driver);
+
+out_proc:
+ remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
+
+ return result;
+}
+
+static void __exit acpi_processor_exit(void)
+{
+ acpi_processor_ppc_exit();
+
+ acpi_thermal_cpufreq_exit();
+
+ acpi_processor_uninstall_hotplug_notify();
+
+ acpi_bus_unregister_driver(&acpi_processor_driver);
+
+ cpuidle_unregister_driver(&acpi_idle_driver);
+
+ remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
+
+ return;
+}
+
+module_init(acpi_processor_init);
+module_exit(acpi_processor_exit);
+
+EXPORT_SYMBOL(acpi_processor_set_thermal_limit);
+
+MODULE_ALIAS("processor");
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
new file mode 100644
index 0000000..5f8d746
--- /dev/null
+++ b/drivers/acpi/processor_idle.c
@@ -0,0 +1,1921 @@
+/*
+ * processor_idle - idle state submodule to the ACPI processor driver
+ *
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
+ * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ * - Added processor hotplug support
+ * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ * - Added support for C3 on SMP
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h> /* need_resched() */
+#include <linux/pm_qos_params.h>
+#include <linux/clockchips.h>
+#include <linux/cpuidle.h>
+
+/*
+ * Include the apic definitions for x86 to have the APIC timer related defines
+ * available also for UP (on SMP it gets magically included via linux/smp.h).
+ * asm/acpi.h is not an option, as it would require more include magic. Also
+ * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
+ */
+#ifdef CONFIG_X86
+#include <asm/apic.h>
+#endif
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#include <acpi/acpi_bus.h>
+#include <acpi/processor.h>
+#include <asm/processor.h>
+
+#define ACPI_PROCESSOR_CLASS "processor"
+#define _COMPONENT ACPI_PROCESSOR_COMPONENT
+ACPI_MODULE_NAME("processor_idle");
+#define ACPI_PROCESSOR_FILE_POWER "power"
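+/*
+ * The ACPI PM timer runs at PM_TIMER_FREQUENCY (3.579545 MHz), so the
+ * macros below convert between PM timer ticks and microseconds.
+ */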
+#define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
+#define PM_TIMER_TICK_NS (1000000000ULL/PM_TIMER_FREQUENCY)
+#ifndef CONFIG_CPU_IDLE
+#define C2_OVERHEAD 4 /* 1us (3.579 ticks per us) */
+#define C3_OVERHEAD 4 /* 1us (3.579 ticks per us) */
+static void (*pm_idle_save) (void) __read_mostly;
+#else
+#define C2_OVERHEAD 1 /* 1us */
+#define C3_OVERHEAD 1 /* 1us */
+#endif
+#define PM_TIMER_TICKS_TO_US(p) (((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
+
+static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
+#ifdef CONFIG_CPU_IDLE
+module_param(max_cstate, uint, 0000);
+#else
+module_param(max_cstate, uint, 0644);
+#endif
+static unsigned int nocst __read_mostly;
+module_param(nocst, uint, 0000);
+
+#ifndef CONFIG_CPU_IDLE
+/*
+ * bm_history -- bit-mask with a bit per jiffy of bus-master activity
+ * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
+ * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
+ * 100 HZ: 0x0000000F: 4 jiffies = 40ms
+ * reduce history for more aggressive entry into C3
+ */
+static unsigned int bm_history __read_mostly =
+ (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
+module_param(bm_history, uint, 0644);
+
+static int acpi_processor_set_power_policy(struct acpi_processor *pr);
+
+#else /* CONFIG_CPU_IDLE */
+static unsigned int latency_factor __read_mostly = 2;
+module_param(latency_factor, uint, 0644);
+#endif
+
+/*
+ * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
+ * For now disable this. Probably a bug somewhere else.
+ *
+ * To skip this limit, boot/load with a large max_cstate limit.
+ */
+static int set_max_cstate(const struct dmi_system_id *id)
+{
+ if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
+ return 0;
+
+ printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
+ " Override with \"processor.max_cstate=%d\"\n", id->ident,
+ (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);
+
+ max_cstate = (long)id->driver_data;
+
+ return 0;
+}
+
+/* Actually this shouldn't be __cpuinitdata, would be better to fix the
+ callers to only run once -AK */
+static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
+ { set_max_cstate, "IBM ThinkPad R40e", {
+ DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+ DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
+ { set_max_cstate, "IBM ThinkPad R40e", {
+ DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+ DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
+ { set_max_cstate, "IBM ThinkPad R40e", {
+ DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+ DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW") }, (void*)1},
+ { set_max_cstate, "IBM ThinkPad R40e", {
+ DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+ DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW") }, (void*)1},
+ { set_max_cstate, "IBM ThinkPad R40e", {
+ DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+ DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW") }, (void*)1},
+ { set_max_cstate, "IBM ThinkPad R40e", {
+ DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+ DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW") }, (void*)1},
+ { set_max_cstate, "IBM ThinkPad R40e", {
+ DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+ DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW") }, (void*)1},
+ { set_max_cstate, "IBM ThinkPad R40e", {
+ DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+ DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW") }, (void*)1},
+ { set_max_cstate, "IBM ThinkPad R40e", {
+ DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+ DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW") }, (void*)1},
+ { set_max_cstate, "IBM ThinkPad R40e", {
+ DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+ DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW") }, (void*)1},
+ { set_max_cstate, "IBM ThinkPad R40e", {
+ DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+ DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void*)1},
+ { set_max_cstate, "IBM ThinkPad R40e", {
+ DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+ DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW") }, (void*)1},
+ { set_max_cstate, "IBM ThinkPad R40e", {
+ DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+ DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW") }, (void*)1},
+ { set_max_cstate, "IBM ThinkPad R40e", {
+ DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+ DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW") }, (void*)1},
+ { set_max_cstate, "IBM ThinkPad R40e", {
+ DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+ DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW") }, (void*)1},
+ { set_max_cstate, "IBM ThinkPad R40e", {
+ DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+ DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW") }, (void*)1},
+ { set_max_cstate, "Medion 41700", {
+ DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
+ DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
+ { set_max_cstate, "Clevo 5600D", {
+ DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
+ DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
+ (void *)2},
+ {},
+};
+
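+/*
+ * The PM timer is either 24 or 32 bits wide (ACPI_FADT_32BIT_TIMER), so
+ * the elapsed-tick helpers below must account for wraparound at the
+ * correct width.
+ */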
+static inline u32 ticks_elapsed(u32 t1, u32 t2)
+{
+ if (t2 >= t1)
+ return (t2 - t1);
+ else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
+ return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
+ else
+ return ((0xFFFFFFFF - t1) + t2);
+}
+
+static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
+{
+ if (t2 >= t1)
+ return PM_TIMER_TICKS_TO_US(t2 - t1);
+ else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
+ return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
+ else
+ return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
+}
+
+/*
+ * Callers should disable interrupts before the call and enable
+ * interrupts after return.
+ */
+static void acpi_safe_halt(void)
+{
+ current_thread_info()->status &= ~TS_POLLING;
+ /*
+ * TS_POLLING-cleared state must be visible before we
+ * test NEED_RESCHED:
+ */
+ smp_mb();
+ if (!need_resched()) {
+ safe_halt();
+ local_irq_disable();
+ }
+ current_thread_info()->status |= TS_POLLING;
+}
+
+#ifndef CONFIG_CPU_IDLE
+
+static void
+acpi_processor_power_activate(struct acpi_processor *pr,
+ struct acpi_processor_cx *new)
+{
+ struct acpi_processor_cx *old;
+
+ if (!pr || !new)
+ return;
+
+ old = pr->power.state;
+
+ if (old)
+ old->promotion.count = 0;
+ new->demotion.count = 0;
+
+ /* Cleanup from old state. */
+ if (old) {
+ switch (old->type) {
+ case ACPI_STATE_C3:
+ /* Disable bus master reload */
+ if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
+ acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
+ break;
+ }
+ }
+
+ /* Prepare to use new state. */
+ switch (new->type) {
+ case ACPI_STATE_C3:
+ /* Enable bus master reload */
+ if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
+ acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
+ break;
+ }
+
+ pr->power.state = new;
+
+ return;
+}
+
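+/*
+ * Number of CPUs currently in C3; bus master arbitration is disabled only
+ * once all online CPUs have entered C3.
+ */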
+static atomic_t c3_cpu_count;
+
+/* Common C-state entry for C2, C3, .. */
+static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
+{
+ /* Don't trace irqs off for idle */
+ stop_critical_timings();
+ if (cstate->entry_method == ACPI_CSTATE_FFH) {
+ /* Call into architectural FFH based C-state */
+ acpi_processor_ffh_cstate_enter(cstate);
+ } else {
+ int unused;
+ /* IO port based C-state */
+ inb(cstate->address);
+ /* Dummy wait op - must do something useless after P_LVL2 read
+ because chipsets cannot guarantee that STPCLK# signal
+ gets asserted in time to freeze execution properly. */
+ unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
+ }
+ start_critical_timings();
+}
+#endif /* !CONFIG_CPU_IDLE */
+
+#ifdef ARCH_APICTIMER_STOPS_ON_C3
+
+/*
+ * Some BIOS implementations switch to C3 in the published C2 state.
+ * This seems to be a common problem on AMD boxen, but other vendors
+ * are affected too. We pick the most conservative approach: we assume
+ * that the local APIC stops in both C2 and C3.
+ */
+static void acpi_timer_check_state(int state, struct acpi_processor *pr,
+ struct acpi_processor_cx *cx)
+{
+ struct acpi_processor_power *pwr = &pr->power;
+ u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;
+
+ /*
+ * Check whether one of the previous states has already marked the
+ * local APIC unstable.
+ */
+ if (pwr->timer_broadcast_on_state < state)
+ return;
+
+ if (cx->type >= type)
+ pr->power.timer_broadcast_on_state = state;
+}
+
+static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
+{
+ unsigned long reason;
+
+ reason = pr->power.timer_broadcast_on_state < INT_MAX ?
+ CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
+
+ clockevents_notify(reason, &pr->id);
+}
+
+/* Power(C) State timer broadcast control */
+static void acpi_state_timer_broadcast(struct acpi_processor *pr,
+ struct acpi_processor_cx *cx,
+ int broadcast)
+{
+ int state = cx - pr->power.states;
+
+ if (state >= pr->power.timer_broadcast_on_state) {
+ unsigned long reason;
+
+ reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
+ CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
+ clockevents_notify(reason, &pr->id);
+ }
+}
+
+#else
+
+static void acpi_timer_check_state(int state, struct acpi_processor *pr,
+ struct acpi_processor_cx *cstate) { }
+static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
+static void acpi_state_timer_broadcast(struct acpi_processor *pr,
+ struct acpi_processor_cx *cx,
+ int broadcast)
+{
+}
+
+#endif
+
+/*
+ * Suspend / resume control
+ */
+static int acpi_idle_suspend;
+
+int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
+{
+ acpi_idle_suspend = 1;
+ return 0;
+}
+
+int acpi_processor_resume(struct acpi_device * device)
+{
+ acpi_idle_suspend = 0;
+ return 0;
+}
+
+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
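+/*
+ * Report whether the TSC halts in the given C-state, so callers can mark
+ * the TSC unstable after deep idle; AMD CPUs with a constant TSC keep
+ * ticking in all C-states.
+ */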
+static int tsc_halts_in_c(int state)
+{
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_AMD:
+ /*
+ * AMD Fam10h TSC will tick in all
+ * C/P/S0/S1 states when this bit is set.
+ */
+ if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+ return 0;
+ /*FALL THROUGH*/
+ case X86_VENDOR_INTEL:
+ /* Several cases known where TSC halts in C2 too */
+ default:
+ return state > ACPI_STATE_C1;
+ }
+}
+#endif
+
+#ifndef CONFIG_CPU_IDLE
+static void acpi_processor_idle(void)
+{
+ struct acpi_processor *pr = NULL;
+ struct acpi_processor_cx *cx = NULL;
+ struct acpi_processor_cx *next_state = NULL;
+ int sleep_ticks = 0;
+ u32 t1, t2 = 0;
+
+ /*
+ * Interrupts must be disabled during bus mastering calculations and
+ * for C2/C3 transitions.
+ */
+ local_irq_disable();
+
+ pr = __get_cpu_var(processors);
+ if (!pr) {
+ local_irq_enable();
+ return;
+ }
+
+ /*
+ * Check whether we truly need to go idle, or should
+ * reschedule:
+ */
+ if (unlikely(need_resched())) {
+ local_irq_enable();
+ return;
+ }
+
+ cx = pr->power.state;
+ if (!cx || acpi_idle_suspend) {
+ if (pm_idle_save) {
+ pm_idle_save(); /* enables IRQs */
+ } else {
+ acpi_safe_halt();
+ local_irq_enable();
+ }
+
+ return;
+ }
+
+ /*
+ * Check BM Activity
+ * -----------------
+ * Check for bus mastering activity (if required), record, and check
+ * for demotion.
+ */
+ if (pr->flags.bm_check) {
+ u32 bm_status = 0;
+ unsigned long diff = jiffies - pr->power.bm_check_timestamp;
+
+ if (diff > 31)
+ diff = 31;
+
+ pr->power.bm_activity <<= diff;
+
+ acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
+ if (bm_status) {
+ pr->power.bm_activity |= 0x1;
+ acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
+ }
+ /*
+ * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
+ * the true state of bus mastering activity, forcing us to
+ * manually check the BMIDEA bit of each IDE channel.
+ */
+ else if (errata.piix4.bmisx) {
+ if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
+ || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
+ pr->power.bm_activity |= 0x1;
+ }
+
+ pr->power.bm_check_timestamp = jiffies;
+
+ /*
+ * If bus mastering is or was active this jiffy, demote
+ * to avoid a faulty transition. Note that the processor
+ * won't enter a low-power state during this call (to this
+ * function) but should upon the next.
+ *
+ * TBD: A better policy might be to fall back to the demotion
+ * state (use it for this quantum only) instead of
+ * demoting -- and rely on duration as our sole demotion
+ * qualification. This may, however, introduce DMA
+ * issues (e.g. floppy DMA transfer overrun/underrun).
+ */
+ if ((pr->power.bm_activity & 0x1) &&
+ cx->demotion.threshold.bm) {
+ local_irq_enable();
+ next_state = cx->demotion.state;
+ goto end;
+ }
+ }
+
+#ifdef CONFIG_HOTPLUG_CPU
+ /*
+ * Check for P_LVL2_UP flag before entering C2 and above on
+ * an SMP system. We do it here instead of doing it at _CST/P_LVL
+ * detection phase, to work cleanly with logical CPU hotplug.
+ */
+ if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
+ !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
+ cx = &pr->power.states[ACPI_STATE_C1];
+#endif
+
+ /*
+ * Sleep:
+ * ------
+ * Invoke the current Cx state to put the processor to sleep.
+ */
+ if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
+ current_thread_info()->status &= ~TS_POLLING;
+ /*
+ * TS_POLLING-cleared state must be visible before we
+ * test NEED_RESCHED:
+ */
+ smp_mb();
+ if (need_resched()) {
+ current_thread_info()->status |= TS_POLLING;
+ local_irq_enable();
+ return;
+ }
+ }
+
+ switch (cx->type) {
+
+ case ACPI_STATE_C1:
+ /*
+ * Invoke C1.
+ * Use the appropriate idle routine, the one that would
+ * be used without acpi C-states.
+ */
+ if (pm_idle_save) {
+ pm_idle_save(); /* enables IRQs */
+ } else {
+ acpi_safe_halt();
+ local_irq_enable();
+ }
+
+ /*
+ * TBD: Can't get time duration while in C1, as resumes
+ * go to an ISR rather than here. Need to instrument
+ * base interrupt handler.
+ *
+ * Note: the TSC better not stop in C1, sched_clock() will
+ * skew otherwise.
+ */
+ sleep_ticks = 0xFFFFFFFF;
+
+ break;
+
+ case ACPI_STATE_C2:
+ /* Get start time (ticks) */
+ t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+ /* Tell the scheduler that we are going deep-idle: */
+ sched_clock_idle_sleep_event();
+ /* Invoke C2 */
+ acpi_state_timer_broadcast(pr, cx, 1);
+ acpi_cstate_enter(cx);
+ /* Get end time (ticks) */
+ t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+
+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
+ /* TSC halts in C2, so notify users */
+ if (tsc_halts_in_c(ACPI_STATE_C2))
+ mark_tsc_unstable("possible TSC halt in C2");
+#endif
+ /* Compute time (ticks) that we were actually asleep */
+ sleep_ticks = ticks_elapsed(t1, t2);
+
+ /* Tell the scheduler how much we idled: */
+ sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
+
+ /* Re-enable interrupts */
+ local_irq_enable();
+ /* Do not account our idle-switching overhead: */
+ sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;
+
+ current_thread_info()->status |= TS_POLLING;
+ acpi_state_timer_broadcast(pr, cx, 0);
+ break;
+
+ case ACPI_STATE_C3:
+ acpi_unlazy_tlb(smp_processor_id());
+ /*
+ * Must be done before busmaster disable as we might
+ * need to access HPET !
+ */
+ acpi_state_timer_broadcast(pr, cx, 1);
+ /*
+ * disable bus master
+ * bm_check implies we need ARB_DIS
+ * !bm_check implies we need cache flush
+ * bm_control implies whether we can do ARB_DIS
+ *
+ * That leaves a case where bm_check is set and bm_control is
+ * not set. In that case we cannot do much, we enter C3
+ * without doing anything.
+ */
+ if (pr->flags.bm_check && pr->flags.bm_control) {
+ if (atomic_inc_return(&c3_cpu_count) ==
+ num_online_cpus()) {
+ /*
+ * All CPUs are trying to go to C3
+ * Disable bus master arbitration
+ */
+ acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
+ }
+ } else if (!pr->flags.bm_check) {
+ /* SMP with no shared cache... Invalidate cache */
+ ACPI_FLUSH_CPU_CACHE();
+ }
+
+ /* Get start time (ticks) */
+ t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+ /* Invoke C3 */
+ /* Tell the scheduler that we are going deep-idle: */
+ sched_clock_idle_sleep_event();
+ acpi_cstate_enter(cx);
+ /* Get end time (ticks) */
+ t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+ if (pr->flags.bm_check && pr->flags.bm_control) {
+ /* Enable bus master arbitration */
+ atomic_dec(&c3_cpu_count);
+ acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
+ }
+
+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
+ /* TSC halts in C3, so notify users */
+ if (tsc_halts_in_c(ACPI_STATE_C3))
+ mark_tsc_unstable("TSC halts in C3");
+#endif
+ /* Compute time (ticks) that we were actually asleep */
+ sleep_ticks = ticks_elapsed(t1, t2);
+ /* Tell the scheduler how much we idled: */
+ sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
+
+ /* Re-enable interrupts */
+ local_irq_enable();
+ /* Do not account our idle-switching overhead: */
+ sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;
+
+ current_thread_info()->status |= TS_POLLING;
+ acpi_state_timer_broadcast(pr, cx, 0);
+ break;
+
+ default:
+ local_irq_enable();
+ return;
+ }
+ cx->usage++;
+ if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
+ cx->time += sleep_ticks;
+
+ next_state = pr->power.state;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ /* Don't do promotion/demotion */
+ if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
+ !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
+ next_state = cx;
+ goto end;
+ }
+#endif
+
+ /*
+ * Promotion?
+ * ----------
+ * Track the number of longs (time asleep is greater than threshold)
+ * and promote when the count threshold is reached. Note that bus
+ * mastering activity may prevent promotions.
+ * Do not promote above max_cstate.
+ */
+ if (cx->promotion.state &&
+ ((cx->promotion.state - pr->power.states) <= max_cstate)) {
+ if (sleep_ticks > cx->promotion.threshold.ticks &&
+ cx->promotion.state->latency <=
+ pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
+ cx->promotion.count++;
+ cx->demotion.count = 0;
+ if (cx->promotion.count >=
+ cx->promotion.threshold.count) {
+ if (pr->flags.bm_check) {
+ if (!
+ (pr->power.bm_activity & cx->
+ promotion.threshold.bm)) {
+ next_state =
+ cx->promotion.state;
+ goto end;
+ }
+ } else {
+ next_state = cx->promotion.state;
+ goto end;
+ }
+ }
+ }
+ }
+
+ /*
+ * Demotion?
+ * ---------
+ * Track the number of shorts (time asleep is less than time threshold)
+ * and demote when the usage threshold is reached.
+ */
+ if (cx->demotion.state) {
+ if (sleep_ticks < cx->demotion.threshold.ticks) {
+ cx->demotion.count++;
+ cx->promotion.count = 0;
+ if (cx->demotion.count >= cx->demotion.threshold.count) {
+ next_state = cx->demotion.state;
+ goto end;
+ }
+ }
+ }
+
+ end:
+ /*
+ * Demote if current state exceeds max_cstate
+ * or if the latency of the current state is unacceptable
+ */
+ if ((pr->power.state - pr->power.states) > max_cstate ||
+ pr->power.state->latency >
+ pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
+ if (cx->demotion.state)
+ next_state = cx->demotion.state;
+ }
+
+ /*
+ * New Cx State?
+ * -------------
+ * If we're going to start using a new Cx state we must clean up
+ * from the previous and prepare to use the new.
+ */
+ if (next_state != pr->power.state)
+ acpi_processor_power_activate(pr, next_state);
+}
+
+static int acpi_processor_set_power_policy(struct acpi_processor *pr)
+{
+ unsigned int i;
+ unsigned int state_is_set = 0;
+ struct acpi_processor_cx *lower = NULL;
+ struct acpi_processor_cx *higher = NULL;
+ struct acpi_processor_cx *cx;
+
+
+ if (!pr)
+ return -EINVAL;
+
+ /*
+ * This function sets the default Cx state policy (OS idle handler).
+ * Our scheme is to promote quickly to C2 but more conservatively
+ * to C3. We're favoring C2 for its characteristics of low latency
+ * (quick response), good power savings, and ability to allow bus
+ * mastering activity. Note that the Cx state policy is completely
+ * customizable and can be altered dynamically.
+ */
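+
+ /*
+ * With the defaults below: a state of type C2 or deeper promotes after
+ * 4 consecutive "long" sleeps (C1 after 10), while any state demotes
+ * after a single "short" sleep.
+ */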
+
+ /* startup state */
+ for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
+ cx = &pr->power.states[i];
+ if (!cx->valid)
+ continue;
+
+ if (!state_is_set)
+ pr->power.state = cx;
+ state_is_set++;
+ break;
+ }
+
+ if (!state_is_set)
+ return -ENODEV;
+
+ /* demotion */
+ for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
+ cx = &pr->power.states[i];
+ if (!cx->valid)
+ continue;
+
+ if (lower) {
+ cx->demotion.state = lower;
+ cx->demotion.threshold.ticks = cx->latency_ticks;
+ cx->demotion.threshold.count = 1;
+ if (cx->type == ACPI_STATE_C3)
+ cx->demotion.threshold.bm = bm_history;
+ }
+
+ lower = cx;
+ }
+
+ /* promotion */
+ for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
+ cx = &pr->power.states[i];
+ if (!cx->valid)
+ continue;
+
+ if (higher) {
+ cx->promotion.state = higher;
+ cx->promotion.threshold.ticks = cx->latency_ticks;
+ if (cx->type >= ACPI_STATE_C2)
+ cx->promotion.threshold.count = 4;
+ else
+ cx->promotion.threshold.count = 10;
+ if (higher->type == ACPI_STATE_C3)
+ cx->promotion.threshold.bm = bm_history;
+ }
+
+ higher = cx;
+ }
+
+ return 0;
+}
+#endif /* !CONFIG_CPU_IDLE */
+
+static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
+{
+
+ if (!pr)
+ return -EINVAL;
+
+ if (!pr->pblk)
+ return -ENODEV;
+
+ /* if info is obtained from pblk/fadt, type equals state */
+ pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
+ pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;
+
+#ifndef CONFIG_HOTPLUG_CPU
+ /*
+ * Check for P_LVL2_UP flag before entering C2 and above on
+ * an SMP system.
+ */
+ if ((num_online_cpus() > 1) &&
+ !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
+ return -ENODEV;
+#endif
+
+ /* determine C2 and C3 address from pblk */
+ pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
+ pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
+
+ /* determine latencies from FADT */
+ pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
+ pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "lvl2[0x%08x] lvl3[0x%08x]\n",
+ pr->power.states[ACPI_STATE_C2].address,
+ pr->power.states[ACPI_STATE_C3].address));
+
+ return 0;
+}
+
+static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
+{
+ if (!pr->power.states[ACPI_STATE_C1].valid) {
+ /* set the first C-State to C1 */
+ /* all processors need to support C1 */
+ pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
+ pr->power.states[ACPI_STATE_C1].valid = 1;
+ pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
+ }
+ /* the C0 state only exists as a filler in our array */
+ pr->power.states[ACPI_STATE_C0].valid = 1;
+ return 0;
+}
+
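+/*
+ * Parse _CST: element 0 gives the number of C-states, followed by one
+ * 4-element package per state (register buffer, type, latency, power).
+ * Entries using unsupported address spaces are skipped.
+ */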
+static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
+{
+ acpi_status status = 0;
+ acpi_integer count;
+ int current_count;
+ int i;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *cst;
+
+
+ if (nocst)
+ return -ENODEV;
+
+ current_count = 0;
+
+ status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
+ return -ENODEV;
+ }
+
+ cst = buffer.pointer;
+
+ /* There must be at least 2 elements */
+ if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
+ printk(KERN_ERR PREFIX "not enough elements in _CST\n");
+ status = -EFAULT;
+ goto end;
+ }
+
+ count = cst->package.elements[0].integer.value;
+
+ /* Validate number of power states. */
+ if (count < 1 || count != cst->package.count - 1) {
+ printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
+ status = -EFAULT;
+ goto end;
+ }
+
+ /* Tell driver that at least _CST is supported. */
+ pr->flags.has_cst = 1;
+
+ for (i = 1; i <= count; i++) {
+ union acpi_object *element;
+ union acpi_object *obj;
+ struct acpi_power_register *reg;
+ struct acpi_processor_cx cx;
+
+ memset(&cx, 0, sizeof(cx));
+
+ element = &(cst->package.elements[i]);
+ if (element->type != ACPI_TYPE_PACKAGE)
+ continue;
+
+ if (element->package.count != 4)
+ continue;
+
+ obj = &(element->package.elements[0]);
+
+ if (obj->type != ACPI_TYPE_BUFFER)
+ continue;
+
+ reg = (struct acpi_power_register *)obj->buffer.pointer;
+
+ if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
+ (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
+ continue;
+
+ /* There should be an easy way to extract an integer... */
+ obj = &(element->package.elements[1]);
+ if (obj->type != ACPI_TYPE_INTEGER)
+ continue;
+
+ cx.type = obj->integer.value;
+ /*
+ * Some buggy BIOSes won't list C1 in _CST;
+ * let acpi_processor_get_power_info_default() handle them later.
+ */
+ if (i == 1 && cx.type != ACPI_STATE_C1)
+ current_count++;
+
+ cx.address = reg->address;
+ cx.index = current_count + 1;
+
+ cx.entry_method = ACPI_CSTATE_SYSTEMIO;
+ if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
+ if (acpi_processor_ffh_cstate_probe
+ (pr->id, &cx, reg) == 0) {
+ cx.entry_method = ACPI_CSTATE_FFH;
+ } else if (cx.type == ACPI_STATE_C1) {
+ /*
+ * C1 is a special case where FIXED_HARDWARE
+ * can be handled in non-MWAIT way as well.
+ * In that case, save this _CST entry info.
+ * Otherwise, ignore this info and continue.
+ */
+ cx.entry_method = ACPI_CSTATE_HALT;
+ snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
+ } else {
+ continue;
+ }
+ if (cx.type == ACPI_STATE_C1 &&
+ (idle_halt || idle_nomwait)) {
+ /*
+ * In most cases the C1 space_id obtained from
+ * _CST object is FIXED_HARDWARE access mode.
+ * But when the option of idle=halt is added,
+ * the entry_method type should be changed from
+ * CSTATE_FFH to CSTATE_HALT.
+ * When the option of idle=nomwait is added,
+ * the C1 entry_method type should be
+ * CSTATE_HALT.
+ */
+ cx.entry_method = ACPI_CSTATE_HALT;
+ snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
+ }
+ } else {
+ snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
+ cx.address);
+ }
+
+ if (cx.type == ACPI_STATE_C1) {
+ cx.valid = 1;
+ }
+
+ obj = &(element->package.elements[2]);
+ if (obj->type != ACPI_TYPE_INTEGER)
+ continue;
+
+ cx.latency = obj->integer.value;
+
+ obj = &(element->package.elements[3]);
+ if (obj->type != ACPI_TYPE_INTEGER)
+ continue;
+
+ cx.power = obj->integer.value;
+
+ current_count++;
+ memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
+
+ /*
+ * We support a total of ACPI_PROCESSOR_MAX_POWER - 1 C-states
+ * (indices 1 through ACPI_PROCESSOR_MAX_POWER - 1).
+ */
+ if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
+ printk(KERN_WARNING
+ "Limiting number of power states to max (%d)\n",
+ ACPI_PROCESSOR_MAX_POWER);
+ printk(KERN_WARNING
+ "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
+ break;
+ }
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
+ current_count));
+
+ /* Validate number of power states discovered */
+ if (current_count < 2)
+ status = -EFAULT;
+
+ end:
+ kfree(buffer.pointer);
+
+ return status;
+}
+
+static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
+{
+
+ if (!cx->address)
+ return;
+
+ /*
+ * C2 latency must be less than or equal to 100
+ * microseconds.
+ */
+ else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "latency too large [%d]\n", cx->latency));
+ return;
+ }
+
+ /*
+ * Otherwise we've met all of our C2 requirements.
+ * Normalize the C2 latency to expedite policy.
+ */
+ cx->valid = 1;
+
+#ifndef CONFIG_CPU_IDLE
+ cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
+#else
+ cx->latency_ticks = cx->latency;
+#endif
+
+ return;
+}
+
+static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
+ struct acpi_processor_cx *cx)
+{
+ static int bm_check_flag;
+
+
+ if (!cx->address)
+ return;
+
+ /*
+ * C3 latency must be less than or equal to 1000
+ * microseconds.
+ */
+ else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "latency too large [%d]\n", cx->latency));
+ return;
+ }
+
+ /*
+ * PIIX4 Erratum #18: To avoid livelock, we don't support C3 when
+ * Type-F (fast) DMA transfers are in use by any ISA device.
+ * Note that we could disable Type-F DMA (as recommended by
+ * the erratum), but this is known to disrupt certain ISA
+ * devices thus we take the conservative approach.
+ */
+ else if (errata.piix4.fdma) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "C3 not supported on PIIX4 with Type-F DMA\n"));
+ return;
+ }
+
+ /* All the logic here assumes flags.bm_check is same across all CPUs */
+ if (!bm_check_flag) {
+ /* Determine whether bm_check is needed based on CPU */
+ acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
+ bm_check_flag = pr->flags.bm_check;
+ } else {
+ pr->flags.bm_check = bm_check_flag;
+ }
+
+ if (pr->flags.bm_check) {
+ if (!pr->flags.bm_control) {
+ if (pr->flags.has_cst != 1) {
+ /* bus mastering control is necessary */
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "C3 support requires BM control\n"));
+ return;
+ } else {
+ /* Here we enter C3 without bus mastering */
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "C3 support without BM control\n"));
+ }
+ }
+ } else {
+ /*
+ * WBINVD should be set in the FADT for the C3 state to be
+ * supported when bm_check is not required.
+ */
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Cache invalidation should work properly"
+ " for C3 to be enabled on SMP systems\n"));
+ return;
+ }
+ acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
+ }
+
+ /*
+ * Otherwise we've met all of our C3 requirements.
+ * Normalize the C3 latency to expedite policy. Enable
+ * checking of bus mastering status (bm_check) so we can
+ * use this in our C3 policy.
+ */
+ cx->valid = 1;
+
+#ifndef CONFIG_CPU_IDLE
+ cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
+#else
+ cx->latency_ticks = cx->latency;
+#endif
+
+ return;
+}
+
+static int acpi_processor_power_verify(struct acpi_processor *pr)
+{
+ unsigned int i;
+ unsigned int working = 0;
+
+ pr->power.timer_broadcast_on_state = INT_MAX;
+
+ for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
+ struct acpi_processor_cx *cx = &pr->power.states[i];
+
+ switch (cx->type) {
+ case ACPI_STATE_C1:
+ cx->valid = 1;
+ break;
+
+ case ACPI_STATE_C2:
+ acpi_processor_power_verify_c2(cx);
+ if (cx->valid)
+ acpi_timer_check_state(i, pr, cx);
+ break;
+
+ case ACPI_STATE_C3:
+ acpi_processor_power_verify_c3(pr, cx);
+ if (cx->valid)
+ acpi_timer_check_state(i, pr, cx);
+ break;
+ }
+
+ if (cx->valid)
+ working++;
+ }
+
+ acpi_propagate_timer_broadcast(pr);
+
+ return (working);
+}
+
+static int acpi_processor_get_power_info(struct acpi_processor *pr)
+{
+ unsigned int i;
+ int result;
+
+
+ /* NOTE: the idle thread may not be running while calling
+ * this function */
+
+ /* Zero initialize all the C-states info. */
+ memset(pr->power.states, 0, sizeof(pr->power.states));
+
+ result = acpi_processor_get_power_info_cst(pr);
+ if (result == -ENODEV)
+ result = acpi_processor_get_power_info_fadt(pr);
+
+ if (result)
+ return result;
+
+ acpi_processor_get_power_info_default(pr);
+
+ pr->power.count = acpi_processor_power_verify(pr);
+
+#ifndef CONFIG_CPU_IDLE
+ /*
+ * Set Default Policy
+ * ------------------
+ * Now that we know which states are supported, set the default
+ * policy. Note that this policy can be changed dynamically
+ * (e.g. encourage deeper sleeps to conserve battery life when
+ * not on AC).
+ */
+ result = acpi_processor_set_power_policy(pr);
+ if (result)
+ return result;
+#endif
+
+ /*
+ * If at least one state of type C2 or C3 is available, mark this
+ * CPU as being "idle manageable".
+ */
+ for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
+ if (pr->power.states[i].valid) {
+ pr->power.count = i;
+ if (pr->power.states[i].type >= ACPI_STATE_C2)
+ pr->flags.power = 1;
+ }
+ }
+
+ return 0;
+}
+
+static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
+{
+ struct acpi_processor *pr = seq->private;
+ unsigned int i;
+
+
+ if (!pr)
+ goto end;
+
+ seq_printf(seq, "active state: C%zd\n"
+ "max_cstate: C%d\n"
+ "bus master activity: %08x\n"
+ "maximum allowed latency: %d usec\n",
+ pr->power.state ? pr->power.state - pr->power.states : 0,
+ max_cstate, (unsigned)pr->power.bm_activity,
+ pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));
+
+ seq_puts(seq, "states:\n");
+
+ for (i = 1; i <= pr->power.count; i++) {
+ seq_printf(seq, " %cC%d: ",
+ (&pr->power.states[i] ==
+ pr->power.state ? '*' : ' '), i);
+
+ if (!pr->power.states[i].valid) {
+ seq_puts(seq, "<not supported>\n");
+ continue;
+ }
+
+ switch (pr->power.states[i].type) {
+ case ACPI_STATE_C1:
+ seq_printf(seq, "type[C1] ");
+ break;
+ case ACPI_STATE_C2:
+ seq_printf(seq, "type[C2] ");
+ break;
+ case ACPI_STATE_C3:
+ seq_printf(seq, "type[C3] ");
+ break;
+ default:
+ seq_printf(seq, "type[--] ");
+ break;
+ }
+
+ if (pr->power.states[i].promotion.state)
+ seq_printf(seq, "promotion[C%zd] ",
+ (pr->power.states[i].promotion.state -
+ pr->power.states));
+ else
+ seq_puts(seq, "promotion[--] ");
+
+ if (pr->power.states[i].demotion.state)
+ seq_printf(seq, "demotion[C%zd] ",
+ (pr->power.states[i].demotion.state -
+ pr->power.states));
+ else
+ seq_puts(seq, "demotion[--] ");
+
+ seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
+ pr->power.states[i].latency,
+ pr->power.states[i].usage,
+ (unsigned long long)pr->power.states[i].time);
+ }
+
+ end:
+ return 0;
+}
+
+static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_processor_power_seq_show,
+ PDE(inode)->data);
+}
+
+static const struct file_operations acpi_processor_power_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_processor_power_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#ifndef CONFIG_CPU_IDLE
+
+int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+{
+ int result = 0;
+
+ if (boot_option_idle_override)
+ return 0;
+
+ if (!pr)
+ return -EINVAL;
+
+ if (nocst) {
+ return -ENODEV;
+ }
+
+ if (!pr->flags.power_setup_done)
+ return -ENODEV;
+
+ /*
+ * Fall back to the default idle loop, when pm_idle_save had
+ * been initialized.
+ */
+ if (pm_idle_save) {
+ pm_idle = pm_idle_save;
+ /* Relies on interrupts forcing exit from idle. */
+ synchronize_sched();
+ }
+
+ pr->flags.power = 0;
+ result = acpi_processor_get_power_info(pr);
+ if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
+ pm_idle = acpi_processor_idle;
+
+ return result;
+}
+
+#ifdef CONFIG_SMP
+static void smp_callback(void *v)
+{
+ /* we already woke the CPU up, nothing more to do */
+}
+
+/*
+ * This function gets called when a part of the kernel has a new latency
+ * requirement. This means we need to get all processors out of their C-state,
+ * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
+ * wakes them all right up.
+ */
+static int acpi_processor_latency_notify(struct notifier_block *b,
+ unsigned long l, void *v)
+{
+ smp_call_function(smp_callback, NULL, 1);
+ return NOTIFY_OK;
+}
+
+static struct notifier_block acpi_processor_latency_notifier = {
+ .notifier_call = acpi_processor_latency_notify,
+};
+
+#endif
+
+#else /* CONFIG_CPU_IDLE */
+
+/**
+ * acpi_idle_bm_check - checks if bus master activity was detected
+ */
+static int acpi_idle_bm_check(void)
+{
+ u32 bm_status = 0;
+
+ acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
+ if (bm_status)
+ acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
+ /*
+ * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
+ * the true state of bus mastering activity, forcing us to
+ * manually check the BMIDEA bit of each IDE channel.
+ */
+ else if (errata.piix4.bmisx) {
+ if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
+ || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
+ bm_status = 1;
+ }
+ return bm_status;
+}
+
+/**
+ * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
+ * @pr: the processor
+ * @target: the new target state
+ */
+static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
+ struct acpi_processor_cx *target)
+{
+ if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
+ acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
+ pr->flags.bm_rld_set = 0;
+ }
+
+ if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
+ acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
+ pr->flags.bm_rld_set = 1;
+ }
+}
+
+/**
+ * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
+ * @cx: cstate data
+ *
+ * Caller disables interrupt before call and enables interrupt after return.
+ */
+static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
+{
+ /* Don't trace irqs off for idle */
+ stop_critical_timings();
+ if (cx->entry_method == ACPI_CSTATE_FFH) {
+ /* Call into architectural FFH based C-state */
+ acpi_processor_ffh_cstate_enter(cx);
+ } else if (cx->entry_method == ACPI_CSTATE_HALT) {
+ acpi_safe_halt();
+ } else {
+ int unused;
+ /* IO port based C-state */
+ inb(cx->address);
+ /* Dummy wait op - must do something useless after P_LVL2 read
+ because chipsets cannot guarantee that STPCLK# signal
+ gets asserted in time to freeze execution properly. */
+ unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
+ }
+ start_critical_timings();
+}
+
+/**
+ * acpi_idle_enter_c1 - enters an ACPI C1 state-type
+ * @dev: the target CPU
+ * @state: the state data
+ *
+ * This is equivalent to the HALT instruction.
+ */
+static int acpi_idle_enter_c1(struct cpuidle_device *dev,
+ struct cpuidle_state *state)
+{
+ u32 t1, t2;
+ struct acpi_processor *pr;
+ struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+
+ pr = __get_cpu_var(processors);
+
+ if (unlikely(!pr))
+ return 0;
+
+ local_irq_disable();
+
+ /* Do not access any ACPI IO ports in suspend path */
+ if (acpi_idle_suspend) {
+ acpi_safe_halt();
+ local_irq_enable();
+ return 0;
+ }
+
+ if (pr->flags.bm_check)
+ acpi_idle_update_bm_rld(pr, cx);
+
+ t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+ acpi_idle_do_entry(cx);
+ t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+
+ local_irq_enable();
+ cx->usage++;
+
+ return ticks_elapsed_in_us(t1, t2);
+}
+
+/**
+ * acpi_idle_enter_simple - enters an ACPI state without BM handling
+ * @dev: the target CPU
+ * @state: the state data
+ */
+static int acpi_idle_enter_simple(struct cpuidle_device *dev,
+ struct cpuidle_state *state)
+{
+ struct acpi_processor *pr;
+ struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+ u32 t1, t2;
+ int sleep_ticks = 0;
+
+ pr = __get_cpu_var(processors);
+
+ if (unlikely(!pr))
+ return 0;
+
+ if (acpi_idle_suspend)
+ return(acpi_idle_enter_c1(dev, state));
+
+ local_irq_disable();
+ current_thread_info()->status &= ~TS_POLLING;
+ /*
+ * TS_POLLING-cleared state must be visible before we test
+ * NEED_RESCHED:
+ */
+ smp_mb();
+
+ if (unlikely(need_resched())) {
+ current_thread_info()->status |= TS_POLLING;
+ local_irq_enable();
+ return 0;
+ }
+
+ /*
+ * Must be done before busmaster disable as we might need to
+ * access HPET !
+ */
+ acpi_state_timer_broadcast(pr, cx, 1);
+
+ if (pr->flags.bm_check)
+ acpi_idle_update_bm_rld(pr, cx);
+
+ if (cx->type == ACPI_STATE_C3)
+ ACPI_FLUSH_CPU_CACHE();
+
+ t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+ /* Tell the scheduler that we are going deep-idle: */
+ sched_clock_idle_sleep_event();
+ acpi_idle_do_entry(cx);
+ t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+
+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
+ /* TSC could halt in idle, so notify users */
+ if (tsc_halts_in_c(cx->type))
+ mark_tsc_unstable("TSC halts in idle");
+#endif
+ sleep_ticks = ticks_elapsed(t1, t2);
+
+ /* Tell the scheduler how much we idled: */
+ sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
+
+ local_irq_enable();
+ current_thread_info()->status |= TS_POLLING;
+
+ cx->usage++;
+
+ acpi_state_timer_broadcast(pr, cx, 0);
+ cx->time += sleep_ticks;
+ return ticks_elapsed_in_us(t1, t2);
+}
+
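+/*
+ * c3_cpu_count is protected by c3_lock; bus master arbitration is disabled
+ * only while every online CPU is in C3 and re-enabled as soon as one exits.
+ */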
+static int c3_cpu_count;
+static DEFINE_SPINLOCK(c3_lock);
+
+/**
+ * acpi_idle_enter_bm - enters C3 with proper BM handling
+ * @dev: the target CPU
+ * @state: the state data
+ *
+ * If BM is detected, the deepest non-C3 idle state is entered instead.
+ */
+static int acpi_idle_enter_bm(struct cpuidle_device *dev,
+ struct cpuidle_state *state)
+{
+ struct acpi_processor *pr;
+ struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+ u32 t1, t2;
+ int sleep_ticks = 0;
+
+ pr = __get_cpu_var(processors);
+
+ if (unlikely(!pr))
+ return 0;
+
+ if (acpi_idle_suspend)
+ return(acpi_idle_enter_c1(dev, state));
+
+ if (acpi_idle_bm_check()) {
+ if (dev->safe_state) {
+ dev->last_state = dev->safe_state;
+ return dev->safe_state->enter(dev, dev->safe_state);
+ } else {
+ local_irq_disable();
+ acpi_safe_halt();
+ local_irq_enable();
+ return 0;
+ }
+ }
+
+ local_irq_disable();
+ current_thread_info()->status &= ~TS_POLLING;
+ /*
+ * TS_POLLING-cleared state must be visible before we test
+ * NEED_RESCHED:
+ */
+ smp_mb();
+
+ if (unlikely(need_resched())) {
+ current_thread_info()->status |= TS_POLLING;
+ local_irq_enable();
+ return 0;
+ }
+
+ acpi_unlazy_tlb(smp_processor_id());
+
+ /* Tell the scheduler that we are going deep-idle: */
+ sched_clock_idle_sleep_event();
+ /*
+ * Must be done before busmaster disable as we might need to
+ * access HPET !
+ */
+ acpi_state_timer_broadcast(pr, cx, 1);
+
+ acpi_idle_update_bm_rld(pr, cx);
+
+ /*
+ * disable bus master
+ * bm_check implies we need ARB_DIS
+ * !bm_check implies we need cache flush
+ * bm_control implies whether we can do ARB_DIS
+ *
+ * That leaves a case where bm_check is set and bm_control is
+ * not set. In that case we cannot do much, we enter C3
+ * without doing anything.
+ */
+ if (pr->flags.bm_check && pr->flags.bm_control) {
+ spin_lock(&c3_lock);
+ c3_cpu_count++;
+ /* Disable bus master arbitration when all CPUs are in C3 */
+ if (c3_cpu_count == num_online_cpus())
+ acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
+ spin_unlock(&c3_lock);
+ } else if (!pr->flags.bm_check) {
+ ACPI_FLUSH_CPU_CACHE();
+ }
+
+ t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+ acpi_idle_do_entry(cx);
+ t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+
+ /* Re-enable bus master arbitration */
+ if (pr->flags.bm_check && pr->flags.bm_control) {
+ spin_lock(&c3_lock);
+ acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
+ c3_cpu_count--;
+ spin_unlock(&c3_lock);
+ }
+
+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
+ /* TSC could halt in idle, so notify users */
+ if (tsc_halts_in_c(ACPI_STATE_C3))
+ mark_tsc_unstable("TSC halts in idle");
+#endif
+ sleep_ticks = ticks_elapsed(t1, t2);
+ /* Tell the scheduler how much we idled: */
+ sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
+
+ local_irq_enable();
+ current_thread_info()->status |= TS_POLLING;
+
+ cx->usage++;
+
+ acpi_state_timer_broadcast(pr, cx, 0);
+ cx->time += sleep_ticks;
+ return ticks_elapsed_in_us(t1, t2);
+}
+
+struct cpuidle_driver acpi_idle_driver = {
+ .name = "acpi_idle",
+ .owner = THIS_MODULE,
+};
+
+/**
+ * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
+ * @pr: the ACPI processor
+ */
+static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
+{
+ int i, count = CPUIDLE_DRIVER_STATE_START;
+ struct acpi_processor_cx *cx;
+ struct cpuidle_state *state;
+ struct cpuidle_device *dev = &pr->power.dev;
+
+ if (!pr->flags.power_setup_done)
+ return -EINVAL;
+
+ if (pr->flags.power == 0) {
+ return -EINVAL;
+ }
+
+ dev->cpu = pr->id;
+ for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
+ dev->states[i].name[0] = '\0';
+ dev->states[i].desc[0] = '\0';
+ }
+
+ for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
+ cx = &pr->power.states[i];
+ state = &dev->states[count];
+
+ if (!cx->valid)
+ continue;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
+ !pr->flags.has_cst &&
+ !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
+ continue;
+#endif
+ cpuidle_set_statedata(state, cx);
+
+ snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
+ strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
+ state->exit_latency = cx->latency;
+ state->target_residency = cx->latency * latency_factor;
+ state->power_usage = cx->power;
+
+ state->flags = 0;
+ switch (cx->type) {
+ case ACPI_STATE_C1:
+ state->flags |= CPUIDLE_FLAG_SHALLOW;
+ if (cx->entry_method == ACPI_CSTATE_FFH)
+ state->flags |= CPUIDLE_FLAG_TIME_VALID;
+
+ state->enter = acpi_idle_enter_c1;
+ dev->safe_state = state;
+ break;
+
+ case ACPI_STATE_C2:
+ state->flags |= CPUIDLE_FLAG_BALANCED;
+ state->flags |= CPUIDLE_FLAG_TIME_VALID;
+ state->enter = acpi_idle_enter_simple;
+ dev->safe_state = state;
+ break;
+
+ case ACPI_STATE_C3:
+ state->flags |= CPUIDLE_FLAG_DEEP;
+ state->flags |= CPUIDLE_FLAG_TIME_VALID;
+ state->flags |= CPUIDLE_FLAG_CHECK_BM;
+ state->enter = pr->flags.bm_check ?
+ acpi_idle_enter_bm :
+ acpi_idle_enter_simple;
+ break;
+ }
+
+ count++;
+ if (count == CPUIDLE_STATE_MAX)
+ break;
+ }
+
+ dev->state_count = count;
+
+ if (!count)
+ return -EINVAL;
+
+ return 0;
+}
+
+int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+{
+ int ret = 0;
+
+ if (boot_option_idle_override)
+ return 0;
+
+ if (!pr)
+ return -EINVAL;
+
+ if (nocst) {
+ return -ENODEV;
+ }
+
+ if (!pr->flags.power_setup_done)
+ return -ENODEV;
+
+ cpuidle_pause_and_lock();
+ cpuidle_disable_device(&pr->power.dev);
+ acpi_processor_get_power_info(pr);
+ if (pr->flags.power) {
+ acpi_processor_setup_cpuidle(pr);
+ ret = cpuidle_enable_device(&pr->power.dev);
+ }
+ cpuidle_resume_and_unlock();
+
+ return ret;
+}
+
+#endif /* CONFIG_CPU_IDLE */
+
+int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
+ struct acpi_device *device)
+{
+ acpi_status status = 0;
+ static int first_run;
+ struct proc_dir_entry *entry = NULL;
+ unsigned int i;
+
+ if (boot_option_idle_override)
+ return 0;
+
+ if (!first_run) {
+ if (idle_halt) {
+ /*
+ * When the "idle=halt" boot option is given, halt
+ * is used for CPU idle.
+ * In such a case C2/C3 is meaningless, so max_cstate
+ * is set to one.
+ */
+ max_cstate = 1;
+ }
+ dmi_check_system(processor_power_dmi_table);
+ max_cstate = acpi_processor_cstate_check(max_cstate);
+ if (max_cstate < ACPI_C_STATES_MAX)
+ printk(KERN_NOTICE
+ "ACPI: processor limited to max C-state %d\n",
+ max_cstate);
+ first_run++;
+#if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
+ pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
+ &acpi_processor_latency_notifier);
+#endif
+ }
+
+ if (!pr)
+ return -EINVAL;
+
+ if (acpi_gbl_FADT.cst_control && !nocst) {
+ status =
+ acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Notifying BIOS of _CST ability failed"));
+ }
+ }
+
+ acpi_processor_get_power_info(pr);
+ pr->flags.power_setup_done = 1;
+
+ /*
+ * Install the idle handler if processor power management is supported.
+ * Note that the previously set idle handler will be used on
+ * platforms that only support C1.
+ */
+ if (pr->flags.power) {
+#ifdef CONFIG_CPU_IDLE
+ acpi_processor_setup_cpuidle(pr);
+ if (cpuidle_register_device(&pr->power.dev))
+ return -EIO;
+#endif
+
+ printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
+ for (i = 1; i <= pr->power.count; i++)
+ if (pr->power.states[i].valid)
+ printk(" C%d[C%d]", i,
+ pr->power.states[i].type);
+ printk(")\n");
+
+#ifndef CONFIG_CPU_IDLE
+ if (pr->id == 0) {
+ pm_idle_save = pm_idle;
+ pm_idle = acpi_processor_idle;
+ }
+#endif
+ }
+
+ /* 'power' [R] */
+ entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER,
+ S_IRUGO, acpi_device_dir(device),
+ &acpi_processor_power_fops,
+ acpi_driver_data(device));
+ if (!entry)
+ return -EIO;
+ return 0;
+}
+
+int acpi_processor_power_exit(struct acpi_processor *pr,
+ struct acpi_device *device)
+{
+ if (boot_option_idle_override)
+ return 0;
+
+#ifdef CONFIG_CPU_IDLE
+ cpuidle_unregister_device(&pr->power.dev);
+#endif
+ pr->flags.power_setup_done = 0;
+
+ if (acpi_device_dir(device))
+ remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
+ acpi_device_dir(device));
+
+#ifndef CONFIG_CPU_IDLE
+
+ /* Unregister the idle handler when processor #0 is removed. */
+ if (pr->id == 0) {
+ if (pm_idle_save)
+ pm_idle = pm_idle_save;
+
+ /*
+ * We are about to unload the current idle thread pm callback
+ * (pm_idle); wait for all processors to update cached/local
+ * copies of pm_idle before proceeding.
+ */
+ cpu_idle_wait();
+#ifdef CONFIG_SMP
+ pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
+ &acpi_processor_latency_notifier);
+#endif
+ }
+#endif
+
+ return 0;
+}
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
new file mode 100644
index 0000000..0d7b772
--- /dev/null
+++ b/drivers/acpi/processor_perflib.c
@@ -0,0 +1,814 @@
+/*
+ * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $)
+ *
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
+ * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ * - Added processor hotplug support
+ *
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+
+#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/mutex.h>
+
+#include <asm/uaccess.h>
+#endif
+
+#ifdef CONFIG_X86
+#include <asm/cpufeature.h>
+#endif
+
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/processor.h>
+
+#define ACPI_PROCESSOR_CLASS "processor"
+#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"
+#define _COMPONENT ACPI_PROCESSOR_COMPONENT
+ACPI_MODULE_NAME("processor_perflib");
+
+static DEFINE_MUTEX(performance_mutex);
+
+/* Use cpufreq debug layer for _PPC changes. */
+#define cpufreq_printk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
+ "cpufreq-core", msg)
+
+/*
+ * _PPC support is implemented as a CPUfreq policy notifier:
+ * This means that each time a CPUfreq driver that is also registered
+ * with the ACPI core is asked to change the speed policy, the maximum
+ * value is adjusted so that it is within the platform limit.
+ *
+ * Also, when a new platform limit value is detected, the CPUfreq
+ * policy is adjusted accordingly.
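+ *
+ * For example, if _PPC evaluates to 1, the policy's maximum frequency is
+ * clamped to that of _PSS state 1 (states are listed fastest first), so
+ * state 0 can no longer be used.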
+ */
+
+/* ignore_ppc:
+ * -1 -> cpufreq low level drivers not initialized -> _PSS, etc. not called yet
+ * ignore _PPC
+ * 0 -> cpufreq low level drivers initialized -> consider _PPC values
+ * 1 -> ignore _PPC totally -> forced by user through boot param
+ */
+static int ignore_ppc = -1;
+module_param(ignore_ppc, int, 0644);
+MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \
+ " limited by BIOS, this should help");
+
+#define PPC_REGISTERED 1
+#define PPC_IN_USE 2
+
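+/*
+ * Bitmask: PPC_REGISTERED is set once the cpufreq policy notifier is
+ * registered, PPC_IN_USE once _PPC has been found on some processor.
+ */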
+static int acpi_processor_ppc_status;
+
+static int acpi_processor_ppc_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct cpufreq_policy *policy = data;
+ struct acpi_processor *pr;
+ unsigned int ppc = 0;
+
+ if (event == CPUFREQ_START && ignore_ppc <= 0) {
+ ignore_ppc = 0;
+ return 0;
+ }
+
+ if (ignore_ppc)
+ return 0;
+
+ if (event != CPUFREQ_INCOMPATIBLE)
+ return 0;
+
+ mutex_lock(&performance_mutex);
+
+ pr = per_cpu(processors, policy->cpu);
+ if (!pr || !pr->performance)
+ goto out;
+
+ ppc = (unsigned int)pr->performance_platform_limit;
+
+ if (ppc >= pr->performance->state_count)
+ goto out;
+
+ cpufreq_verify_within_limits(policy, 0,
+ pr->performance->states[ppc].
+ core_frequency * 1000);
+
+ out:
+ mutex_unlock(&performance_mutex);
+
+ return 0;
+}
+
+static struct notifier_block acpi_ppc_notifier_block = {
+ .notifier_call = acpi_processor_ppc_notifier,
+};
+
+static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
+{
+ acpi_status status = 0;
+ unsigned long long ppc = 0;
+
+
+ if (!pr)
+ return -EINVAL;
+
+ /*
+ * _PPC indicates the maximum state currently supported by the platform
+ * (e.g. 0 = states 0..n; 1 = states 1..n; etc.)
+ */
+ status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);
+
+ if (status != AE_NOT_FOUND)
+ acpi_processor_ppc_status |= PPC_IN_USE;
+
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PPC"));
+ return -ENODEV;
+ }
+
+ cpufreq_printk("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
+ (int)ppc, ppc ? "" : "not");
+
+ pr->performance_platform_limit = (int)ppc;
+
+ return 0;
+}
+
+int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
+{
+ int ret;
+
+ if (ignore_ppc)
+ return 0;
+
+ ret = acpi_processor_get_platform_limit(pr);
+
+ if (ret < 0)
+ return (ret);
+ else
+ return cpufreq_update_policy(pr->id);
+}
+
+void acpi_processor_ppc_init(void)
+{
+ if (!cpufreq_register_notifier
+ (&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER))
+ acpi_processor_ppc_status |= PPC_REGISTERED;
+ else
+ printk(KERN_DEBUG
+ "Warning: Processor Platform Limit not supported.\n");
+}
+
+void acpi_processor_ppc_exit(void)
+{
+ if (acpi_processor_ppc_status & PPC_REGISTERED)
+ cpufreq_unregister_notifier(&acpi_ppc_notifier_block,
+ CPUFREQ_POLICY_NOTIFIER);
+
+ acpi_processor_ppc_status &= ~PPC_REGISTERED;
+}
+
+static int acpi_processor_get_performance_control(struct acpi_processor *pr)
+{
+ int result = 0;
+ acpi_status status = 0;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *pct = NULL;
+ union acpi_object obj = { 0 };
+
+
+ status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PCT"));
+ return -ENODEV;
+ }
+
+ pct = (union acpi_object *)buffer.pointer;
+ if (!pct || (pct->type != ACPI_TYPE_PACKAGE)
+ || (pct->package.count != 2)) {
+ printk(KERN_ERR PREFIX "Invalid _PCT data\n");
+ result = -EFAULT;
+ goto end;
+ }
+
+ /*
+ * control_register
+ */
+
+ obj = pct->package.elements[0];
+
+ if ((obj.type != ACPI_TYPE_BUFFER)
+ || (obj.buffer.length < sizeof(struct acpi_pct_register))
+ || (obj.buffer.pointer == NULL)) {
+ printk(KERN_ERR PREFIX "Invalid _PCT data (control_register)\n");
+ result = -EFAULT;
+ goto end;
+ }
+ memcpy(&pr->performance->control_register, obj.buffer.pointer,
+ sizeof(struct acpi_pct_register));
+
+ /*
+ * status_register
+ */
+
+ obj = pct->package.elements[1];
+
+ if ((obj.type != ACPI_TYPE_BUFFER)
+ || (obj.buffer.length < sizeof(struct acpi_pct_register))
+ || (obj.buffer.pointer == NULL)) {
+ printk(KERN_ERR PREFIX "Invalid _PCT data (status_register)\n");
+ result = -EFAULT;
+ goto end;
+ }
+
+ memcpy(&pr->performance->status_register, obj.buffer.pointer,
+ sizeof(struct acpi_pct_register));
+
+ end:
+ kfree(buffer.pointer);
+
+ return result;
+}
+
+static int acpi_processor_get_performance_states(struct acpi_processor *pr)
+{
+ int result = 0;
+ acpi_status status = AE_OK;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
+ struct acpi_buffer state = { 0, NULL };
+ union acpi_object *pss = NULL;
+ int i;
+
+
+ status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PSS"));
+ return -ENODEV;
+ }
+
+ pss = buffer.pointer;
+ if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) {
+ printk(KERN_ERR PREFIX "Invalid _PSS data\n");
+ result = -EFAULT;
+ goto end;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n",
+ pss->package.count));
+
+ pr->performance->state_count = pss->package.count;
+ pr->performance->states =
+ kmalloc(sizeof(struct acpi_processor_px) * pss->package.count,
+ GFP_KERNEL);
+ if (!pr->performance->states) {
+ result = -ENOMEM;
+ goto end;
+ }
+
+ for (i = 0; i < pr->performance->state_count; i++) {
+
+ struct acpi_processor_px *px = &(pr->performance->states[i]);
+
+ state.length = sizeof(struct acpi_processor_px);
+ state.pointer = px;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));
+
+ status = acpi_extract_package(&(pss->package.elements[i]),
+ &format, &state);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Invalid _PSS data"));
+ result = -EFAULT;
+ kfree(pr->performance->states);
+ goto end;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
+ i,
+ (u32) px->core_frequency,
+ (u32) px->power,
+ (u32) px->transition_latency,
+ (u32) px->bus_master_latency,
+ (u32) px->control, (u32) px->status));
+
+ if (!px->core_frequency) {
+ printk(KERN_ERR PREFIX
+ "Invalid _PSS data: freq is zero\n");
+ result = -EFAULT;
+ kfree(pr->performance->states);
+ goto end;
+ }
+ }
+
+ end:
+ kfree(buffer.pointer);
+
+ return result;
+}
+
+static int acpi_processor_get_performance_info(struct acpi_processor *pr)
+{
+ int result = 0;
+ acpi_status status = AE_OK;
+ acpi_handle handle = NULL;
+
+ if (!pr || !pr->performance || !pr->handle)
+ return -EINVAL;
+
+ status = acpi_get_handle(pr->handle, "_PCT", &handle);
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "ACPI-based processor performance control unavailable\n"));
+ return -ENODEV;
+ }
+
+ result = acpi_processor_get_performance_control(pr);
+ if (result)
+ goto update_bios;
+
+ result = acpi_processor_get_performance_states(pr);
+ if (result)
+ goto update_bios;
+
+ return 0;
+
+ /*
+ * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that
+ * the BIOS is older than the CPU and does not know its frequencies
+ */
+ update_bios:
+#ifdef CONFIG_X86
+ if (ACPI_SUCCESS(acpi_get_handle(pr->handle, "_PPC", &handle))){
+ if(boot_cpu_has(X86_FEATURE_EST))
+ printk(KERN_WARNING FW_BUG "BIOS needs update for CPU "
+ "frequency support\n");
+ }
+#endif
+ return result;
+}
+
+int acpi_processor_notify_smm(struct module *calling_module)
+{
+ acpi_status status;
+ static int is_done = 0;
+
+
+ if (!(acpi_processor_ppc_status & PPC_REGISTERED))
+ return -EBUSY;
+
+ if (!try_module_get(calling_module))
+ return -EINVAL;
+
+ /* is_done is set to negative if an error occurred,
+ * and to positive if _no_ error occurred, but SMM
+ * was already notified. This avoids double notification
+ * which might lead to unexpected results...
+ */
+ if (is_done > 0) {
+ module_put(calling_module);
+ return 0;
+ } else if (is_done < 0) {
+ module_put(calling_module);
+ return is_done;
+ }
+
+ is_done = -EIO;
+
+ /* Can't write pstate_control to smi_command if either value is zero */
+ if ((!acpi_gbl_FADT.smi_command) || (!acpi_gbl_FADT.pstate_control)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_control\n"));
+ module_put(calling_module);
+ return 0;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Writing pstate_control [0x%x] to smi_command [0x%x]\n",
+ acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));
+
+ status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
+ (u32) acpi_gbl_FADT.pstate_control, 8);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Failed to write pstate_control [0x%x] to "
+ "smi_command [0x%x]", acpi_gbl_FADT.pstate_control,
+ acpi_gbl_FADT.smi_command));
+ module_put(calling_module);
+ return status;
+ }
+
+ /* Success. If there's no _PPC, we need to fear nothing, so
+ * we can allow the cpufreq driver to be rmmod'ed. */
+ is_done = 1;
+
+ if (!(acpi_processor_ppc_status & PPC_IN_USE))
+ module_put(calling_module);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(acpi_processor_notify_smm);
+
+#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
+/* /proc/acpi/processor/../performance interface (DEPRECATED) */
+
+static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file);
+static struct file_operations acpi_processor_perf_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_processor_perf_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int acpi_processor_perf_seq_show(struct seq_file *seq, void *offset)
+{
+ struct acpi_processor *pr = seq->private;
+ int i;
+
+
+ if (!pr)
+ goto end;
+
+ if (!pr->performance) {
+ seq_puts(seq, "<not supported>\n");
+ goto end;
+ }
+
+ seq_printf(seq, "state count: %d\n"
+ "active state: P%d\n",
+ pr->performance->state_count, pr->performance->state);
+
+ seq_puts(seq, "states:\n");
+ for (i = 0; i < pr->performance->state_count; i++)
+ seq_printf(seq,
+ " %cP%d: %d MHz, %d mW, %d uS\n",
+ (i == pr->performance->state ? '*' : ' '), i,
+ (u32) pr->performance->states[i].core_frequency,
+ (u32) pr->performance->states[i].power,
+ (u32) pr->performance->states[i].transition_latency);
+
+ end:
+ return 0;
+}
+
+static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_processor_perf_seq_show,
+ PDE(inode)->data);
+}
+
+static void acpi_cpufreq_add_file(struct acpi_processor *pr)
+{
+ struct acpi_device *device = NULL;
+
+
+ if (acpi_bus_get_device(pr->handle, &device))
+ return;
+
+ /* add file 'performance' [R/W] */
+ proc_create_data(ACPI_PROCESSOR_FILE_PERFORMANCE, S_IFREG | S_IRUGO,
+ acpi_device_dir(device),
+ &acpi_processor_perf_fops, acpi_driver_data(device));
+ return;
+}
+
+static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
+{
+ struct acpi_device *device = NULL;
+
+
+ if (acpi_bus_get_device(pr->handle, &device))
+ return;
+
+ /* remove file 'performance' */
+ remove_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
+ acpi_device_dir(device));
+
+ return;
+}
+
+#else
+static void acpi_cpufreq_add_file(struct acpi_processor *pr)
+{
+ return;
+}
+static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
+{
+ return;
+}
+#endif /* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */
+
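+/*
+ * Evaluate _PSD and extract its single acpi_psd_package into
+ * pr->performance->domain_info, rejecting unknown revisions or entry counts.
+ */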
+static int acpi_processor_get_psd(struct acpi_processor *pr)
+{
+ int result = 0;
+ acpi_status status = AE_OK;
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
+ struct acpi_buffer state = {0, NULL};
+ union acpi_object *psd = NULL;
+ struct acpi_psd_package *pdomain;
+
+ status = acpi_evaluate_object(pr->handle, "_PSD", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ return -ENODEV;
+ }
+
+ psd = buffer.pointer;
+ if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
+ printk(KERN_ERR PREFIX "Invalid _PSD data\n");
+ result = -EFAULT;
+ goto end;
+ }
+
+ if (psd->package.count != 1) {
+ printk(KERN_ERR PREFIX "Invalid _PSD data\n");
+ result = -EFAULT;
+ goto end;
+ }
+
+ pdomain = &(pr->performance->domain_info);
+
+ state.length = sizeof(struct acpi_psd_package);
+ state.pointer = pdomain;
+
+ status = acpi_extract_package(&(psd->package.elements[0]),
+ &format, &state);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX "Invalid _PSD data\n");
+ result = -EFAULT;
+ goto end;
+ }
+
+ if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
+ printk(KERN_ERR PREFIX "Unknown _PSD:num_entries\n");
+ result = -EFAULT;
+ goto end;
+ }
+
+ if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
+ printk(KERN_ERR PREFIX "Unknown _PSD:revision\n");
+ result = -EFAULT;
+ goto end;
+ }
+
+end:
+ kfree(buffer.pointer);
+ return result;
+}
+
+int acpi_processor_preregister_performance(
+ struct acpi_processor_performance *performance)
+{
+ int count, count_target;
+ int retval = 0;
+ unsigned int i, j;
+ cpumask_t covered_cpus;
+ struct acpi_processor *pr;
+ struct acpi_psd_package *pdomain;
+ struct acpi_processor *match_pr;
+ struct acpi_psd_package *match_pdomain;
+
+ mutex_lock(&performance_mutex);
+
+ retval = 0;
+
+ /* Call _PSD for all CPUs */
+ for_each_possible_cpu(i) {
+ pr = per_cpu(processors, i);
+ if (!pr) {
+ /* Look only at processors in ACPI namespace */
+ continue;
+ }
+
+ if (pr->performance) {
+ retval = -EBUSY;
+ continue;
+ }
+
+ if (!performance || !percpu_ptr(performance, i)) {
+ retval = -EINVAL;
+ continue;
+ }
+
+ pr->performance = percpu_ptr(performance, i);
+ cpu_set(i, pr->performance->shared_cpu_map);
+ if (acpi_processor_get_psd(pr)) {
+ retval = -EINVAL;
+ continue;
+ }
+ }
+ if (retval)
+ goto err_ret;
+
+ /*
+ * Now that we have _PSD data from all CPUs, let's set up the P-state
+ * domain info.
+ */
+ for_each_possible_cpu(i) {
+ pr = per_cpu(processors, i);
+ if (!pr)
+ continue;
+
+ /* Basic validity check for domain info */
+ pdomain = &(pr->performance->domain_info);
+ if ((pdomain->revision != ACPI_PSD_REV0_REVISION) ||
+ (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES)) {
+ retval = -EINVAL;
+ goto err_ret;
+ }
+ if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
+ pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
+ pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
+ retval = -EINVAL;
+ goto err_ret;
+ }
+ }
+
+ cpus_clear(covered_cpus);
+ for_each_possible_cpu(i) {
+ pr = per_cpu(processors, i);
+ if (!pr)
+ continue;
+
+ if (cpu_isset(i, covered_cpus))
+ continue;
+
+ pdomain = &(pr->performance->domain_info);
+ cpu_set(i, pr->performance->shared_cpu_map);
+ cpu_set(i, covered_cpus);
+ if (pdomain->num_processors <= 1)
+ continue;
+
+ /* Validate the Domain info */
+ count_target = pdomain->num_processors;
+ count = 1;
+ if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
+ pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+ else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
+ pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
+ else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
+ pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;
+
+ for_each_possible_cpu(j) {
+ if (i == j)
+ continue;
+
+ match_pr = per_cpu(processors, j);
+ if (!match_pr)
+ continue;
+
+ match_pdomain = &(match_pr->performance->domain_info);
+ if (match_pdomain->domain != pdomain->domain)
+ continue;
+
+ /* Here i and j are in the same domain */
+
+ if (match_pdomain->num_processors != count_target) {
+ retval = -EINVAL;
+ goto err_ret;
+ }
+
+ if (pdomain->coord_type != match_pdomain->coord_type) {
+ retval = -EINVAL;
+ goto err_ret;
+ }
+
+ cpu_set(j, covered_cpus);
+ cpu_set(j, pr->performance->shared_cpu_map);
+ count++;
+ }
+
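+ /*
+ * Second pass: propagate the shared_type and shared_cpu_map
+ * computed above to every other CPU in the same _PSD domain.
+ */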
+ for_each_possible_cpu(j) {
+ if (i == j)
+ continue;
+
+ match_pr = per_cpu(processors, j);
+ if (!match_pr)
+ continue;
+
+ match_pdomain = &(match_pr->performance->domain_info);
+ if (match_pdomain->domain != pdomain->domain)
+ continue;
+
+ match_pr->performance->shared_type =
+ pr->performance->shared_type;
+ match_pr->performance->shared_cpu_map =
+ pr->performance->shared_cpu_map;
+ }
+ }
+
+err_ret:
+ for_each_possible_cpu(i) {
+ pr = per_cpu(processors, i);
+ if (!pr || !pr->performance)
+ continue;
+
+ /* Assume no coordination on any error parsing domain info */
+ if (retval) {
+ cpus_clear(pr->performance->shared_cpu_map);
+ cpu_set(i, pr->performance->shared_cpu_map);
+ pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+ }
+ pr->performance = NULL; /* Will be set for real in register */
+ }
+
+ mutex_unlock(&performance_mutex);
+ return retval;
+}
+EXPORT_SYMBOL(acpi_processor_preregister_performance);
+
+
+int
+acpi_processor_register_performance(struct acpi_processor_performance
+ *performance, unsigned int cpu)
+{
+ struct acpi_processor *pr;
+
+
+ if (!(acpi_processor_ppc_status & PPC_REGISTERED))
+ return -EINVAL;
+
+ mutex_lock(&performance_mutex);
+
+ pr = per_cpu(processors, cpu);
+ if (!pr) {
+ mutex_unlock(&performance_mutex);
+ return -ENODEV;
+ }
+
+ if (pr->performance) {
+ mutex_unlock(&performance_mutex);
+ return -EBUSY;
+ }
+
+ WARN_ON(!performance);
+
+ pr->performance = performance;
+
+ if (acpi_processor_get_performance_info(pr)) {
+ pr->performance = NULL;
+ mutex_unlock(&performance_mutex);
+ return -EIO;
+ }
+
+ acpi_cpufreq_add_file(pr);
+
+ mutex_unlock(&performance_mutex);
+ return 0;
+}
+
+EXPORT_SYMBOL(acpi_processor_register_performance);
+
+void
+acpi_processor_unregister_performance(struct acpi_processor_performance
+ *performance, unsigned int cpu)
+{
+ struct acpi_processor *pr;
+
+
+ mutex_lock(&performance_mutex);
+
+ pr = per_cpu(processors, cpu);
+ if (!pr) {
+ mutex_unlock(&performance_mutex);
+ return;
+ }
+
+ if (pr->performance)
+ kfree(pr->performance->states);
+ pr->performance = NULL;
+
+ acpi_cpufreq_remove_file(pr);
+
+ mutex_unlock(&performance_mutex);
+
+ return;
+}
+
+EXPORT_SYMBOL(acpi_processor_unregister_performance);
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
new file mode 100644
index 0000000..b1eb376
--- /dev/null
+++ b/drivers/acpi/processor_thermal.c
@@ -0,0 +1,517 @@
+/*
+ * processor_thermal.c - Passive cooling submodule of the ACPI processor driver
+ *
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
+ * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ * - Added processor hotplug support
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/sysdev.h>
+
+#include <asm/uaccess.h>
+
+#include <acpi/acpi_bus.h>
+#include <acpi/processor.h>
+#include <acpi/acpi_drivers.h>
+
+#define ACPI_PROCESSOR_CLASS "processor"
+#define _COMPONENT ACPI_PROCESSOR_COMPONENT
+ACPI_MODULE_NAME("processor_thermal");
+
+/* --------------------------------------------------------------------------
+ Limit Interface
+ -------------------------------------------------------------------------- */
+static int acpi_processor_apply_limit(struct acpi_processor *pr)
+{
+ int result = 0;
+ u16 px = 0;
+ u16 tx = 0;
+
+
+ if (!pr)
+ return -EINVAL;
+
+ if (!pr->flags.limit)
+ return -ENODEV;
+
+ if (pr->flags.throttling) {
+ if (pr->limit.user.tx > tx)
+ tx = pr->limit.user.tx;
+ if (pr->limit.thermal.tx > tx)
+ tx = pr->limit.thermal.tx;
+
+ result = acpi_processor_set_throttling(pr, tx);
+ if (result)
+ goto end;
+ }
+
+ pr->limit.state.px = px;
+ pr->limit.state.tx = tx;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Processor [%d] limit set to (P%d:T%d)\n", pr->id,
+ pr->limit.state.px, pr->limit.state.tx));
+
+ end:
+ if (result)
+ printk(KERN_ERR PREFIX "Unable to set limit\n");
+
+ return result;
+}
+
+#ifdef CONFIG_CPU_FREQ
+
+/* If a passive cooling situation is detected, primarily CPUfreq is used, as it
+ * offers (in most cases) voltage scaling in addition to frequency scaling, and
+ * thus a cubic (instead of linear) reduction of energy. Also, we allow for
+ * _any_ cpufreq driver and not only the acpi-cpufreq driver.
+ */
+
+#define CPUFREQ_THERMAL_MIN_STEP 0
+#define CPUFREQ_THERMAL_MAX_STEP 3
+
+static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
+static unsigned int acpi_thermal_cpufreq_is_init = 0;
+
+static int cpu_has_cpufreq(unsigned int cpu)
+{
+ struct cpufreq_policy policy;
+ if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu))
+ return 0;
+ return 1;
+}
+
+static int acpi_thermal_cpufreq_increase(unsigned int cpu)
+{
+ if (!cpu_has_cpufreq(cpu))
+ return -ENODEV;
+
+ if (per_cpu(cpufreq_thermal_reduction_pctg, cpu) <
+ CPUFREQ_THERMAL_MAX_STEP) {
+ per_cpu(cpufreq_thermal_reduction_pctg, cpu)++;
+ cpufreq_update_policy(cpu);
+ return 0;
+ }
+
+ return -ERANGE;
+}
+
+static int acpi_thermal_cpufreq_decrease(unsigned int cpu)
+{
+ if (!cpu_has_cpufreq(cpu))
+ return -ENODEV;
+
+ if (per_cpu(cpufreq_thermal_reduction_pctg, cpu) >
+ (CPUFREQ_THERMAL_MIN_STEP + 1))
+ per_cpu(cpufreq_thermal_reduction_pctg, cpu)--;
+ else
+ per_cpu(cpufreq_thermal_reduction_pctg, cpu) = 0;
+ cpufreq_update_policy(cpu);
+ /* We reached max freq again and can leave passive mode */
+ return !per_cpu(cpufreq_thermal_reduction_pctg, cpu);
+}
+
+static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct cpufreq_policy *policy = data;
+ unsigned long max_freq = 0;
+
+ if (event != CPUFREQ_ADJUST)
+ goto out;
+
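+ /*
+ * Each thermal reduction step lowers the allowed maximum
+ * frequency by 20% of cpuinfo.max_freq, so steps 0-3 give
+ * 0%, 20%, 40% and 60% reduction respectively.
+ */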
+ max_freq = (
+ policy->cpuinfo.max_freq *
+ (100 - per_cpu(cpufreq_thermal_reduction_pctg, policy->cpu) * 20)
+ ) / 100;
+
+ cpufreq_verify_within_limits(policy, 0, max_freq);
+
+ out:
+ return 0;
+}
+
+static struct notifier_block acpi_thermal_cpufreq_notifier_block = {
+ .notifier_call = acpi_thermal_cpufreq_notifier,
+};
+
+static int cpufreq_get_max_state(unsigned int cpu)
+{
+ if (!cpu_has_cpufreq(cpu))
+ return 0;
+
+ return CPUFREQ_THERMAL_MAX_STEP;
+}
+
+static int cpufreq_get_cur_state(unsigned int cpu)
+{
+ if (!cpu_has_cpufreq(cpu))
+ return 0;
+
+ return per_cpu(cpufreq_thermal_reduction_pctg, cpu);
+}
+
+static int cpufreq_set_cur_state(unsigned int cpu, int state)
+{
+ if (!cpu_has_cpufreq(cpu))
+ return 0;
+
+ per_cpu(cpufreq_thermal_reduction_pctg, cpu) = state;
+ cpufreq_update_policy(cpu);
+ return 0;
+}
+
+void acpi_thermal_cpufreq_init(void)
+{
+ int i;
+
+ for (i = 0; i < nr_cpu_ids; i++)
+ if (cpu_present(i))
+ per_cpu(cpufreq_thermal_reduction_pctg, i) = 0;
+
+ i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
+ CPUFREQ_POLICY_NOTIFIER);
+ if (!i)
+ acpi_thermal_cpufreq_is_init = 1;
+}
+
+void acpi_thermal_cpufreq_exit(void)
+{
+ if (acpi_thermal_cpufreq_is_init)
+ cpufreq_unregister_notifier
+ (&acpi_thermal_cpufreq_notifier_block,
+ CPUFREQ_POLICY_NOTIFIER);
+
+ acpi_thermal_cpufreq_is_init = 0;
+}
+
+#else /* ! CONFIG_CPU_FREQ */
+static int cpufreq_get_max_state(unsigned int cpu)
+{
+ return 0;
+}
+
+static int cpufreq_get_cur_state(unsigned int cpu)
+{
+ return 0;
+}
+
+static int cpufreq_set_cur_state(unsigned int cpu, int state)
+{
+ return 0;
+}
+
+static int acpi_thermal_cpufreq_increase(unsigned int cpu)
+{
+ return -ENODEV;
+}
+static int acpi_thermal_cpufreq_decrease(unsigned int cpu)
+{
+ return -ENODEV;
+}
+
+#endif
+
+int acpi_processor_set_thermal_limit(acpi_handle handle, int type)
+{
+ int result = 0;
+ struct acpi_processor *pr = NULL;
+ struct acpi_device *device = NULL;
+ int tx = 0, max_tx_px = 0;
+
+
+ if ((type < ACPI_PROCESSOR_LIMIT_NONE)
+ || (type > ACPI_PROCESSOR_LIMIT_DECREMENT))
+ return -EINVAL;
+
+ result = acpi_bus_get_device(handle, &device);
+ if (result)
+ return result;
+
+ pr = acpi_driver_data(device);
+ if (!pr)
+ return -ENODEV;
+
+ /* Thermal limits are always relative to the current Px/Tx state. */
+ if (pr->flags.throttling)
+ pr->limit.thermal.tx = pr->throttling.state;
+
+ /*
+ * Our default policy is to only use throttling at the lowest
+ * performance state.
+ */
+
+ tx = pr->limit.thermal.tx;
+
+ switch (type) {
+
+ case ACPI_PROCESSOR_LIMIT_NONE:
+ do {
+ result = acpi_thermal_cpufreq_decrease(pr->id);
+ } while (!result);
+ tx = 0;
+ break;
+
+ case ACPI_PROCESSOR_LIMIT_INCREMENT:
+ /* if going up: P-states first, T-states later */
+
+ result = acpi_thermal_cpufreq_increase(pr->id);
+ if (!result)
+ goto end;
+ else if (result == -ERANGE)
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "At maximum performance state\n"));
+
+ if (pr->flags.throttling) {
+ if (tx == (pr->throttling.state_count - 1))
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "At maximum throttling state\n"));
+ else
+ tx++;
+ }
+ break;
+
+ case ACPI_PROCESSOR_LIMIT_DECREMENT:
+ /* if going down: T-states first, P-states later */
+
+ if (pr->flags.throttling) {
+ if (tx == 0) {
+ max_tx_px = 1;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "At minimum throttling state\n"));
+ } else {
+ tx--;
+ goto end;
+ }
+ }
+
+ result = acpi_thermal_cpufreq_decrease(pr->id);
+ if (result) {
+ /*
+ * acpi_thermal_cpufreq_decrease() returns -ENODEV, 1 or 0.
+ * A non-zero result means there is no cpufreq-based reduction
+ * left to undo (back at maximum frequency, or no cpufreq).
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "At minimum performance state\n"));
+ max_tx_px = 1;
+ } else
+ max_tx_px = 0;
+
+ break;
+ }
+
+ end:
+ if (pr->flags.throttling) {
+ pr->limit.thermal.px = 0;
+ pr->limit.thermal.tx = tx;
+
+ result = acpi_processor_apply_limit(pr);
+ if (result)
+ printk(KERN_ERR PREFIX "Unable to set thermal limit\n");
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Thermal limit now (P%d:T%d)\n",
+ pr->limit.thermal.px, pr->limit.thermal.tx));
+ } else
+ result = 0;
+ if (max_tx_px)
+ return 1;
+ else
+ return result;
+}
+
+int acpi_processor_get_limit_info(struct acpi_processor *pr)
+{
+
+ if (!pr)
+ return -EINVAL;
+
+ if (pr->flags.throttling)
+ pr->flags.limit = 1;
+
+ return 0;
+}
+
+/* thermal cooling device callbacks */
+static int acpi_processor_max_state(struct acpi_processor *pr)
+{
+ int max_state = 0;
+
+ /*
+ * There are four cpufreq thermal reduction states
+ * (cpufreq_thermal_reduction_pctg): 0, 1, 2 and 3.
+ */
+ max_state += cpufreq_get_max_state(pr->id);
+ if (pr->flags.throttling)
+ max_state += (pr->throttling.state_count - 1);
+
+ return max_state;
+}
+static int
+processor_get_max_state(struct thermal_cooling_device *cdev, char *buf)
+{
+ struct acpi_device *device = cdev->devdata;
+ struct acpi_processor *pr;
+
+ if (!device)
+ return -EINVAL;
+
+ pr = acpi_driver_data(device);
+ if (!pr)
+ return -EINVAL;
+
+ return sprintf(buf, "%d\n", acpi_processor_max_state(pr));
+}
+
+static int
+processor_get_cur_state(struct thermal_cooling_device *cdev, char *buf)
+{
+ struct acpi_device *device = cdev->devdata;
+ struct acpi_processor *pr;
+ int cur_state;
+
+ if (!device)
+ return -EINVAL;
+
+ pr = acpi_driver_data(device);
+ if (!pr)
+ return -EINVAL;
+
+ cur_state = cpufreq_get_cur_state(pr->id);
+ if (pr->flags.throttling)
+ cur_state += pr->throttling.state;
+
+ return sprintf(buf, "%d\n", cur_state);
+}
+
+static int
+processor_set_cur_state(struct thermal_cooling_device *cdev, unsigned int state)
+{
+ struct acpi_device *device = cdev->devdata;
+ struct acpi_processor *pr;
+ int result = 0;
+ int max_pstate;
+
+ if (!device)
+ return -EINVAL;
+
+ pr = acpi_driver_data(device);
+ if (!pr)
+ return -EINVAL;
+
+ max_pstate = cpufreq_get_max_state(pr->id);
+
+ if (state > acpi_processor_max_state(pr))
+ return -EINVAL;
+
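+ /*
+ * Cooling states up to max_pstate are served by cpufreq
+ * frequency reduction; higher states are mapped onto ACPI
+ * T-states (throttling state = state - max_pstate).
+ */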
+ if (state <= max_pstate) {
+ if (pr->flags.throttling && pr->throttling.state)
+ result = acpi_processor_set_throttling(pr, 0);
+ cpufreq_set_cur_state(pr->id, state);
+ } else {
+ cpufreq_set_cur_state(pr->id, max_pstate);
+ result = acpi_processor_set_throttling(pr,
+ state - max_pstate);
+ }
+ return result;
+}
+
+struct thermal_cooling_device_ops processor_cooling_ops = {
+ .get_max_state = processor_get_max_state,
+ .get_cur_state = processor_get_cur_state,
+ .set_cur_state = processor_set_cur_state,
+};
+
+/* /proc interface */
+
+static int acpi_processor_limit_seq_show(struct seq_file *seq, void *offset)
+{
+ struct acpi_processor *pr = (struct acpi_processor *)seq->private;
+
+
+ if (!pr)
+ goto end;
+
+ if (!pr->flags.limit) {
+ seq_puts(seq, "<not supported>\n");
+ goto end;
+ }
+
+ seq_printf(seq, "active limit: P%d:T%d\n"
+ "user limit: P%d:T%d\n"
+ "thermal limit: P%d:T%d\n",
+ pr->limit.state.px, pr->limit.state.tx,
+ pr->limit.user.px, pr->limit.user.tx,
+ pr->limit.thermal.px, pr->limit.thermal.tx);
+
+ end:
+ return 0;
+}
+
+static int acpi_processor_limit_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_processor_limit_seq_show,
+ PDE(inode)->data);
+}
+
+static ssize_t acpi_processor_write_limit(struct file * file,
+ const char __user * buffer,
+ size_t count, loff_t * data)
+{
+ int result = 0;
+ struct seq_file *m = file->private_data;
+ struct acpi_processor *pr = m->private;
+ char limit_string[25] = { '\0' };
+ int px = 0;
+ int tx = 0;
+
+
+ if (!pr || (count > sizeof(limit_string) - 1)) {
+ return -EINVAL;
+ }
+
+ if (copy_from_user(limit_string, buffer, count)) {
+ return -EFAULT;
+ }
+
+ limit_string[count] = '\0';
+
+ if (sscanf(limit_string, "%d:%d", &px, &tx) != 2) {
+ printk(KERN_ERR PREFIX "Invalid data format\n");
+ return -EINVAL;
+ }
+
+ if (pr->flags.throttling) {
+ if ((tx < 0) || (tx > (pr->throttling.state_count - 1))) {
+ printk(KERN_ERR PREFIX "Invalid tx\n");
+ return -EINVAL;
+ }
+ pr->limit.user.tx = tx;
+ }
+
+ result = acpi_processor_apply_limit(pr);
+ if (result)
+ return result;
+
+ return count;
+}
+
+struct file_operations acpi_processor_limit_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_processor_limit_open_fs,
+ .read = seq_read,
+ .write = acpi_processor_write_limit,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
new file mode 100644
index 0000000..a0c38c9
--- /dev/null
+++ b/drivers/acpi/processor_throttling.c
@@ -0,0 +1,1277 @@
+/*
+ * processor_throttling.c - Throttling submodule of the ACPI processor driver
+ *
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
+ * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ * - Added processor hotplug support
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/processor.h>
+
+#define ACPI_PROCESSOR_CLASS "processor"
+#define _COMPONENT ACPI_PROCESSOR_COMPONENT
+ACPI_MODULE_NAME("processor_throttling");
+
+struct throttling_tstate {
+ unsigned int cpu; /* cpu nr */
+ int target_state; /* target T-state */
+};
+
+#define THROTTLING_PRECHANGE (1)
+#define THROTTLING_POSTCHANGE (2)
+
+static int acpi_processor_get_throttling(struct acpi_processor *pr);
+int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
+
+static int acpi_processor_update_tsd_coord(void)
+{
+ int count, count_target;
+ int retval = 0;
+ unsigned int i, j;
+ cpumask_t covered_cpus;
+ struct acpi_processor *pr, *match_pr;
+ struct acpi_tsd_package *pdomain, *match_pdomain;
+ struct acpi_processor_throttling *pthrottling, *match_pthrottling;
+
+ /*
+ * Now that we have _TSD data from all CPUs, let's set up T-state
+ * coordination between all CPUs.
+ */
+ for_each_possible_cpu(i) {
+ pr = per_cpu(processors, i);
+ if (!pr)
+ continue;
+
+ /* Basic validity check for domain info */
+ pthrottling = &(pr->throttling);
+
+ /*
+ * If the _TSD package for any CPU is invalid, the coordination
+ * among all CPUs is treated as invalid. Crude, but safe.
+ */
+ if (!pthrottling->tsd_valid_flag) {
+ retval = -EINVAL;
+ break;
+ }
+ }
+ if (retval)
+ goto err_ret;
+
+ cpus_clear(covered_cpus);
+ for_each_possible_cpu(i) {
+ pr = per_cpu(processors, i);
+ if (!pr)
+ continue;
+
+ if (cpu_isset(i, covered_cpus))
+ continue;
+ pthrottling = &pr->throttling;
+
+ pdomain = &(pthrottling->domain_info);
+ cpu_set(i, pthrottling->shared_cpu_map);
+ cpu_set(i, covered_cpus);
+ /*
+ * If the number of processors in the _TSD domain is 1, it is
+ * unnecessary to parse the coordination for this CPU.
+ */
+ if (pdomain->num_processors <= 1)
+ continue;
+
+ /* Validate the Domain info */
+ count_target = pdomain->num_processors;
+ count = 1;
+
+ for_each_possible_cpu(j) {
+ if (i == j)
+ continue;
+
+ match_pr = per_cpu(processors, j);
+ if (!match_pr)
+ continue;
+
+ match_pthrottling = &(match_pr->throttling);
+ match_pdomain = &(match_pthrottling->domain_info);
+ if (match_pdomain->domain != pdomain->domain)
+ continue;
+
+ /* Here i and j are in the same domain.
+ * If two _TSD packages have the same domain, they
+ * must report the same num_processors and
+ * coordination type; otherwise the domain info is
+ * regarded as invalid.
+ */
+ if (match_pdomain->num_processors != count_target) {
+ retval = -EINVAL;
+ goto err_ret;
+ }
+
+ if (pdomain->coord_type != match_pdomain->coord_type) {
+ retval = -EINVAL;
+ goto err_ret;
+ }
+
+ cpu_set(j, covered_cpus);
+ cpu_set(j, pthrottling->shared_cpu_map);
+ count++;
+ }
+ for_each_possible_cpu(j) {
+ if (i == j)
+ continue;
+
+ match_pr = per_cpu(processors, j);
+ if (!match_pr)
+ continue;
+
+ match_pthrottling = &(match_pr->throttling);
+ match_pdomain = &(match_pthrottling->domain_info);
+ if (match_pdomain->domain != pdomain->domain)
+ continue;
+
+ /*
+ * If some CPUS have the same domain, they
+ * will have the same shared_cpu_map.
+ */
+ match_pthrottling->shared_cpu_map =
+ pthrottling->shared_cpu_map;
+ }
+ }
+
+err_ret:
+ for_each_possible_cpu(i) {
+ pr = per_cpu(processors, i);
+ if (!pr)
+ continue;
+
+ /*
+ * Assume no coordination on any error parsing domain info.
+ * The coordination type will be forced as SW_ALL.
+ */
+ if (retval) {
+ pthrottling = &(pr->throttling);
+ cpus_clear(pthrottling->shared_cpu_map);
+ cpu_set(i, pthrottling->shared_cpu_map);
+ pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
+ }
+ }
+
+ return retval;
+}
+
+/*
+ * Update the T-state coordination after the _TSD
+ * data for all cpus is obtained.
+ */
+void acpi_processor_throttling_init(void)
+{
+ if (acpi_processor_update_tsd_coord())
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Assume no T-state coordination\n"));
+
+ return;
+}
+
+static int acpi_processor_throttling_notifier(unsigned long event, void *data)
+{
+ struct throttling_tstate *p_tstate = data;
+ struct acpi_processor *pr;
+ unsigned int cpu;
+ int target_state;
+ struct acpi_processor_limit *p_limit;
+ struct acpi_processor_throttling *p_throttling;
+
+ cpu = p_tstate->cpu;
+ pr = per_cpu(processors, cpu);
+ if (!pr) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n"));
+ return 0;
+ }
+ if (!pr->flags.throttling) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is "
+ "unsupported on CPU %d\n", cpu));
+ return 0;
+ }
+ target_state = p_tstate->target_state;
+ p_throttling = &(pr->throttling);
+ switch (event) {
+ case THROTTLING_PRECHANGE:
+ /*
+ * The prechange event is used to choose a proper T-state
+ * that meets the thermal, user and _TPC limits.
+ */
+ p_limit = &pr->limit;
+ if (p_limit->thermal.tx > target_state)
+ target_state = p_limit->thermal.tx;
+ if (p_limit->user.tx > target_state)
+ target_state = p_limit->user.tx;
+ if (pr->throttling_platform_limit > target_state)
+ target_state = pr->throttling_platform_limit;
+ if (target_state >= p_throttling->state_count) {
+ printk(KERN_WARNING
+ "Exceed the limit of T-state \n");
+ target_state = p_throttling->state_count - 1;
+ }
+ p_tstate->target_state = target_state;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event:"
+ "target T-state of CPU %d is T%d\n",
+ cpu, target_state));
+ break;
+ case THROTTLING_POSTCHANGE:
+ /*
+ * Postchange event is only used to update the
+ * T-state flag of acpi_processor_throttling.
+ */
+ p_throttling->state = target_state;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event:"
+ "CPU %d is switched to T%d\n",
+ cpu, target_state));
+ break;
+ default:
+ printk(KERN_WARNING
+ "Unsupported Throttling notifier event\n");
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * _TPC - Throttling Present Capabilities
+ */
+static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
+{
+ acpi_status status = 0;
+ unsigned long long tpc = 0;
+
+ if (!pr)
+ return -EINVAL;
+ status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
+ }
+ return -ENODEV;
+ }
+ pr->throttling_platform_limit = (int)tpc;
+ return 0;
+}
+
+int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
+{
+ int result = 0;
+ int throttling_limit;
+ int current_state;
+ struct acpi_processor_limit *limit;
+ int target_state;
+
+ result = acpi_processor_get_platform_limit(pr);
+ if (result) {
+ /* Throttling Limit is unsupported */
+ return result;
+ }
+
+ throttling_limit = pr->throttling_platform_limit;
+ if (throttling_limit >= pr->throttling.state_count) {
+ /* Incorrect throttling limit */
+ return -EINVAL;
+ }
+
+ current_state = pr->throttling.state;
+ if (current_state > throttling_limit) {
+ /*
+ * The current state already satisfies the _TPC limit,
+ * but OSPM may still change T-states from high to low
+ * for better performance, as long as the thermal and
+ * user limits are respected.
+ */
+ limit = &pr->limit;
+ target_state = throttling_limit;
+ if (limit->thermal.tx > target_state)
+ target_state = limit->thermal.tx;
+ if (limit->user.tx > target_state)
+ target_state = limit->user.tx;
+ } else if (current_state == throttling_limit) {
+ /*
+ * Unnecessary to change the throttling state
+ */
+ return 0;
+ } else {
+ /*
+ * If the current state is lower than the _TPC limit, force a
+ * switch to the throttling state defined by
+ * throttling_platform_limit.
+ * The previous state already met the thermal and user limit
+ * conditions, so there is no need to check them again.
+ */
+ target_state = throttling_limit;
+ }
+ return acpi_processor_set_throttling(pr, target_state);
+}
+
+/*
+ * _PTC - Processor Throttling Control (and status) register location
+ */
+static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
+{
+ int result = 0;
+ acpi_status status = 0;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *ptc = NULL;
+ union acpi_object obj = { 0 };
+ struct acpi_processor_throttling *throttling;
+
+ status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
+ }
+ return -ENODEV;
+ }
+
+ ptc = (union acpi_object *)buffer.pointer;
+ if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
+ || (ptc->package.count != 2)) {
+ printk(KERN_ERR PREFIX "Invalid _PTC data\n");
+ result = -EFAULT;
+ goto end;
+ }
+
+ /*
+ * control_register
+ */
+
+ obj = ptc->package.elements[0];
+
+ if ((obj.type != ACPI_TYPE_BUFFER)
+ || (obj.buffer.length < sizeof(struct acpi_ptc_register))
+ || (obj.buffer.pointer == NULL)) {
+ printk(KERN_ERR PREFIX
+ "Invalid _PTC data (control_register)\n");
+ result = -EFAULT;
+ goto end;
+ }
+ memcpy(&pr->throttling.control_register, obj.buffer.pointer,
+ sizeof(struct acpi_ptc_register));
+
+ /*
+ * status_register
+ */
+
+ obj = ptc->package.elements[1];
+
+ if ((obj.type != ACPI_TYPE_BUFFER)
+ || (obj.buffer.length < sizeof(struct acpi_ptc_register))
+ || (obj.buffer.pointer == NULL)) {
+ printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
+ result = -EFAULT;
+ goto end;
+ }
+
+ memcpy(&pr->throttling.status_register, obj.buffer.pointer,
+ sizeof(struct acpi_ptc_register));
+
+ throttling = &pr->throttling;
+
+ if ((throttling->control_register.bit_width +
+ throttling->control_register.bit_offset) > 32) {
+ printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
+ result = -EFAULT;
+ goto end;
+ }
+
+ if ((throttling->status_register.bit_width +
+ throttling->status_register.bit_offset) > 32) {
+ printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
+ result = -EFAULT;
+ goto end;
+ }
+
+ end:
+ kfree(buffer.pointer);
+
+ return result;
+}
+
+/*
+ * _TSS - Throttling Supported States
+ */
+static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
+{
+ int result = 0;
+ acpi_status status = AE_OK;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
+ struct acpi_buffer state = { 0, NULL };
+ union acpi_object *tss = NULL;
+ int i;
+
+ status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
+ }
+ return -ENODEV;
+ }
+
+ tss = buffer.pointer;
+ if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
+ printk(KERN_ERR PREFIX "Invalid _TSS data\n");
+ result = -EFAULT;
+ goto end;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
+ tss->package.count));
+
+ pr->throttling.state_count = tss->package.count;
+ pr->throttling.states_tss =
+ kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
+ GFP_KERNEL);
+ if (!pr->throttling.states_tss) {
+ result = -ENOMEM;
+ goto end;
+ }
+
+ for (i = 0; i < pr->throttling.state_count; i++) {
+
+ struct acpi_processor_tx_tss *tx =
+ (struct acpi_processor_tx_tss *)&(pr->throttling.
+ states_tss[i]);
+
+ state.length = sizeof(struct acpi_processor_tx_tss);
+ state.pointer = tx;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));
+
+ status = acpi_extract_package(&(tss->package.elements[i]),
+ &format, &state);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
+ result = -EFAULT;
+ kfree(pr->throttling.states_tss);
+ goto end;
+ }
+
+ if (!tx->freqpercentage) {
+ printk(KERN_ERR PREFIX
+ "Invalid _TSS data: freq is zero\n");
+ result = -EFAULT;
+ kfree(pr->throttling.states_tss);
+ goto end;
+ }
+ }
+
+ end:
+ kfree(buffer.pointer);
+
+ return result;
+}
+
+/*
+ * _TSD - T-State Dependencies
+ */
+static int acpi_processor_get_tsd(struct acpi_processor *pr)
+{
+ int result = 0;
+ acpi_status status = AE_OK;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
+ struct acpi_buffer state = { 0, NULL };
+ union acpi_object *tsd = NULL;
+ struct acpi_tsd_package *pdomain;
+ struct acpi_processor_throttling *pthrottling;
+
+ pthrottling = &pr->throttling;
+ pthrottling->tsd_valid_flag = 0;
+
+ status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
+ }
+ return -ENODEV;
+ }
+
+ tsd = buffer.pointer;
+ if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
+ printk(KERN_ERR PREFIX "Invalid _TSD data\n");
+ result = -EFAULT;
+ goto end;
+ }
+
+ if (tsd->package.count != 1) {
+ printk(KERN_ERR PREFIX "Invalid _TSD data\n");
+ result = -EFAULT;
+ goto end;
+ }
+
+ pdomain = &(pr->throttling.domain_info);
+
+ state.length = sizeof(struct acpi_tsd_package);
+ state.pointer = pdomain;
+
+ status = acpi_extract_package(&(tsd->package.elements[0]),
+ &format, &state);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX "Invalid _TSD data\n");
+ result = -EFAULT;
+ goto end;
+ }
+
+ if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
+ printk(KERN_ERR PREFIX "Unknown _TSD:num_entries\n");
+ result = -EFAULT;
+ goto end;
+ }
+
+ if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
+ printk(KERN_ERR PREFIX "Unknown _TSD:revision\n");
+ result = -EFAULT;
+ goto end;
+ }
+
+ pthrottling = &pr->throttling;
+ pthrottling->tsd_valid_flag = 1;
+ pthrottling->shared_type = pdomain->coord_type;
+ cpu_set(pr->id, pthrottling->shared_cpu_map);
+ /*
+ * If the coordination type is not defined in the ACPI spec,
+ * tsd_valid_flag is cleared and the coordination type is
+ * forced to DOMAIN_COORD_TYPE_SW_ALL.
+ */
+ if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
+ pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
+ pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
+ pthrottling->tsd_valid_flag = 0;
+ pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
+ }
+
+ end:
+ kfree(buffer.pointer);
+ return result;
+}
+
+/* --------------------------------------------------------------------------
+ Throttling Control
+ -------------------------------------------------------------------------- */
+static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
+{
+ int state = 0;
+ u32 value = 0;
+ u32 duty_mask = 0;
+ u32 duty_value = 0;
+
+ if (!pr)
+ return -EINVAL;
+
+ if (!pr->flags.throttling)
+ return -ENODEV;
+
+ pr->throttling.state = 0;
+
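+ /*
+ * state_count is 2^duty_width, so (state_count - 1) is an
+ * all-ones mask for the duty cycle field, which lives at
+ * duty_offset within the register at pr->throttling.address.
+ */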
+ duty_mask = pr->throttling.state_count - 1;
+
+ duty_mask <<= pr->throttling.duty_offset;
+
+ local_irq_disable();
+
+ value = inl(pr->throttling.address);
+
+ /*
+ * Compute the current throttling state when throttling is enabled
+ * (bit 4 is on).
+ */
+ if (value & 0x10) {
+ duty_value = value & duty_mask;
+ duty_value >>= pr->throttling.duty_offset;
+
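+ /*
+ * A duty_value of N means the clock runs for N out of
+ * state_count intervals, which corresponds to throttling
+ * state T(state_count - N); duty_value 0 leaves the state at T0.
+ */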
+ if (duty_value)
+ state = pr->throttling.state_count - duty_value;
+ }
+
+ pr->throttling.state = state;
+
+ local_irq_enable();
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Throttling state is T%d (%d%% throttling applied)\n",
+ state, pr->throttling.states[state].performance));
+
+ return 0;
+}
+
+#ifdef CONFIG_X86
+static int acpi_throttling_rdmsr(struct acpi_processor *pr,
+ acpi_integer * value)
+{
+ struct cpuinfo_x86 *c;
+ u64 msr_high, msr_low;
+ unsigned int cpu;
+ u64 msr = 0;
+ int ret = -1;
+
+ cpu = pr->id;
+ c = &cpu_data(cpu);
+
+ if ((c->x86_vendor != X86_VENDOR_INTEL) ||
+ !cpu_has(c, X86_FEATURE_ACPI)) {
+ printk(KERN_ERR PREFIX
+ "HARDWARE addr space,NOT supported yet\n");
+ } else {
+ msr_low = 0;
+ msr_high = 0;
+ rdmsr_safe(MSR_IA32_THERM_CONTROL,
+ (u32 *)&msr_low, (u32 *)&msr_high);
+ msr = (msr_high << 32) | msr_low;
+ *value = (acpi_integer) msr;
+ ret = 0;
+ }
+ return ret;
+}
+
+static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
+{
+ struct cpuinfo_x86 *c;
+ unsigned int cpu;
+ int ret = -1;
+ u64 msr;
+
+ cpu = pr->id;
+ c = &cpu_data(cpu);
+
+ if ((c->x86_vendor != X86_VENDOR_INTEL) ||
+ !cpu_has(c, X86_FEATURE_ACPI)) {
+ printk(KERN_ERR PREFIX
+ "HARDWARE addr space,NOT supported yet\n");
+ } else {
+ msr = value;
+ wrmsr_safe(MSR_IA32_THERM_CONTROL,
+ msr & 0xffffffff, msr >> 32);
+ ret = 0;
+ }
+ return ret;
+}
+#else
+static int acpi_throttling_rdmsr(struct acpi_processor *pr,
+ acpi_integer * value)
+{
+ printk(KERN_ERR PREFIX
+ "HARDWARE addr space,NOT supported yet\n");
+ return -1;
+}
+
+static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
+{
+ printk(KERN_ERR PREFIX
+ "HARDWARE addr space,NOT supported yet\n");
+ return -1;
+}
+#endif
+
+static int acpi_read_throttling_status(struct acpi_processor *pr,
+ acpi_integer *value)
+{
+ u32 bit_width, bit_offset;
+ u64 ptc_value;
+ u64 ptc_mask;
+ struct acpi_processor_throttling *throttling;
+ int ret = -1;
+
+ throttling = &pr->throttling;
+ switch (throttling->status_register.space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ ptc_value = 0;
+ bit_width = throttling->status_register.bit_width;
+ bit_offset = throttling->status_register.bit_offset;
+
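+ /* Read bit_offset + bit_width bits from the status port and
+ * shift/mask to isolate the throttling status field. */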
+ acpi_os_read_port((acpi_io_address) throttling->status_register.
+ address, (u32 *) &ptc_value,
+ (u32) (bit_width + bit_offset));
+ ptc_mask = (1 << bit_width) - 1;
+ *value = (acpi_integer) ((ptc_value >> bit_offset) & ptc_mask);
+ ret = 0;
+ break;
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+ ret = acpi_throttling_rdmsr(pr, value);
+ break;
+ default:
+ printk(KERN_ERR PREFIX "Unknown addr space %d\n",
+ (u32) (throttling->status_register.space_id));
+ }
+ return ret;
+}
+
+static int acpi_write_throttling_state(struct acpi_processor *pr,
+ acpi_integer value)
+{
+ u32 bit_width, bit_offset;
+ u64 ptc_value;
+ u64 ptc_mask;
+ struct acpi_processor_throttling *throttling;
+ int ret = -1;
+
+ throttling = &pr->throttling;
+ switch (throttling->control_register.space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ bit_width = throttling->control_register.bit_width;
+ bit_offset = throttling->control_register.bit_offset;
+ ptc_mask = (1 << bit_width) - 1;
+ ptc_value = value & ptc_mask;
+
+ acpi_os_write_port((acpi_io_address) throttling->
+ control_register.address,
+ (u32) (ptc_value << bit_offset),
+ (u32) (bit_width + bit_offset));
+ ret = 0;
+ break;
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+ ret = acpi_throttling_wrmsr(pr, value);
+ break;
+ default:
+ printk(KERN_ERR PREFIX "Unknown addr space %d\n",
+ (u32) (throttling->control_register.space_id));
+ }
+ return ret;
+}
+
+static int acpi_get_throttling_state(struct acpi_processor *pr,
+ acpi_integer value)
+{
+ int i;
+
+ for (i = 0; i < pr->throttling.state_count; i++) {
+ struct acpi_processor_tx_tss *tx =
+ (struct acpi_processor_tx_tss *)&(pr->throttling.
+ states_tss[i]);
+ if (tx->control == value)
+ break;
+ }
+ if (i >= pr->throttling.state_count)
+ i = -1;
+ return i;
+}
+
+static int acpi_get_throttling_value(struct acpi_processor *pr,
+ int state, acpi_integer *value)
+{
+ int ret = -1;
+
+ if (state >= 0 && state < pr->throttling.state_count) {
+ struct acpi_processor_tx_tss *tx =
+ (struct acpi_processor_tx_tss *)&(pr->throttling.
+ states_tss[state]);
+ *value = tx->control;
+ ret = 0;
+ }
+ return ret;
+}
+
+static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
+{
+ int state = 0;
+ int ret;
+ acpi_integer value;
+
+ if (!pr)
+ return -EINVAL;
+
+ if (!pr->flags.throttling)
+ return -ENODEV;
+
+ pr->throttling.state = 0;
+
+ value = 0;
+ ret = acpi_read_throttling_status(pr, &value);
+ if (ret >= 0) {
+ state = acpi_get_throttling_state(pr, value);
+ pr->throttling.state = state;
+ }
+
+ return 0;
+}
+
+static int acpi_processor_get_throttling(struct acpi_processor *pr)
+{
+ cpumask_t saved_mask;
+ int ret;
+
+ if (!pr)
+ return -EINVAL;
+
+ if (!pr->flags.throttling)
+ return -ENODEV;
+ /*
+ * Migrate task to the cpu pointed by pr.
+ */
+ saved_mask = current->cpus_allowed;
+ set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+ ret = pr->throttling.acpi_processor_get_throttling(pr);
+ /* restore the previous state */
+ set_cpus_allowed_ptr(current, &saved_mask);
+
+ return ret;
+}
+
+static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
+{
+ int i, step;
+
+ if (!pr->throttling.address) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
+ return -EINVAL;
+ } else if (!pr->throttling.duty_width) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
+ return -EINVAL;
+ }
+ /* TBD: Support duty_cycle values that span bit 4. */
+ else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
+ printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
+ return -EINVAL;
+ }
+
+ pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;
+
+ /*
+ * Compute state values. Note that throttling displays a linear power
+ * performance relationship (at 50% performance the CPU will consume
+ * 50% power). Values are in 1/10th of a percent to preserve accuracy.
+ */
+
+ step = (1000 / pr->throttling.state_count);
+
+ for (i = 0; i < pr->throttling.state_count; i++) {
+ pr->throttling.states[i].performance = 1000 - step * i;
+ pr->throttling.states[i].power = 1000 - step * i;
+ }
+ return 0;
+}
+
+static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
+ int state)
+{
+ u32 value = 0;
+ u32 duty_mask = 0;
+ u32 duty_value = 0;
+
+ if (!pr)
+ return -EINVAL;
+
+ if ((state < 0) || (state > (pr->throttling.state_count - 1)))
+ return -EINVAL;
+
+ if (!pr->flags.throttling)
+ return -ENODEV;
+
+ if (state == pr->throttling.state)
+ return 0;
+
+ if (state < pr->throttling_platform_limit)
+ return -EPERM;
+ /*
+ * Calculate the duty_value and duty_mask.
+ */
+ if (state) {
+ duty_value = pr->throttling.state_count - state;
+
+ duty_value <<= pr->throttling.duty_offset;
+
+ /* Used to clear all duty_value bits */
+ duty_mask = pr->throttling.state_count - 1;
+
+ duty_mask <<= acpi_gbl_FADT.duty_offset;
+ duty_mask = ~duty_mask;
+ }
+
+ local_irq_disable();
+
+ /*
+ * Disable throttling by writing a 0 to bit 4. Note that we must
+ * turn it off before we can change the duty_value.
+ */
+ value = inl(pr->throttling.address);
+ if (value & 0x10) {
+ value &= 0xFFFFFFEF;
+ outl(value, pr->throttling.address);
+ }
+
+ /*
+ * Write the new duty_value and then enable throttling. Note
+ * that a state value of 0 leaves throttling disabled.
+ */
+ if (state) {
+ value &= duty_mask;
+ value |= duty_value;
+ outl(value, pr->throttling.address);
+
+ value |= 0x00000010;
+ outl(value, pr->throttling.address);
+ }
+
+ pr->throttling.state = state;
+
+ local_irq_enable();
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Throttling state set to T%d (%d%%)\n", state,
+ (pr->throttling.states[state].performance ? pr->
+ throttling.states[state].performance / 10 : 0)));
+
+ return 0;
+}
+
+static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
+ int state)
+{
+ int ret;
+ acpi_integer value;
+
+ if (!pr)
+ return -EINVAL;
+
+ if ((state < 0) || (state > (pr->throttling.state_count - 1)))
+ return -EINVAL;
+
+ if (!pr->flags.throttling)
+ return -ENODEV;
+
+ if (state == pr->throttling.state)
+ return 0;
+
+ if (state < pr->throttling_platform_limit)
+ return -EPERM;
+
+ value = 0;
+ ret = acpi_get_throttling_value(pr, state, &value);
+ if (ret >= 0) {
+ acpi_write_throttling_state(pr, value);
+ pr->throttling.state = state;
+ }
+
+ return 0;
+}
+
+int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
+{
+ cpumask_t saved_mask;
+ int ret = 0;
+ unsigned int i;
+ struct acpi_processor *match_pr;
+ struct acpi_processor_throttling *p_throttling;
+ struct throttling_tstate t_state;
+ cpumask_t online_throttling_cpus;
+
+ if (!pr)
+ return -EINVAL;
+
+ if (!pr->flags.throttling)
+ return -ENODEV;
+
+ if ((state < 0) || (state > (pr->throttling.state_count - 1)))
+ return -EINVAL;
+
+ saved_mask = current->cpus_allowed;
+ t_state.target_state = state;
+ p_throttling = &(pr->throttling);
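+ /*
+ * Only CPUs that are online and share this T-state domain
+ * (shared_cpu_map) need to be notified and updated.
+ */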
+ cpus_and(online_throttling_cpus, cpu_online_map,
+ p_throttling->shared_cpu_map);
+ /*
+ * The throttling notifier will be called for every
+ * affected cpu in order to get one proper T-state.
+ * The notifier event is THROTTLING_PRECHANGE.
+ */
+ for_each_cpu_mask_nr(i, online_throttling_cpus) {
+ t_state.cpu = i;
+ acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
+ &t_state);
+ }
+ /*
+ * acpi_processor_set_throttling() is called to switch the
+ * T-state. If the coordination type is SW_ALL or HW_ALL, it
+ * must be called for every affected CPU; otherwise calling it
+ * only for the CPU pointed to by pr is sufficient.
+ */
+ if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
+ set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+ ret = p_throttling->acpi_processor_set_throttling(pr,
+ t_state.target_state);
+ } else {
+ /*
+ * When the T-state coordination is SW_ALL or HW_ALL,
+ * the T-state must be set for every affected CPU.
+ */
+ for_each_cpu_mask_nr(i, online_throttling_cpus) {
+ match_pr = per_cpu(processors, i);
+ /*
+ * If the pointer is invalid, we will report the
+ * error message and continue.
+ */
+ if (!match_pr) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Invalid Pointer for CPU %d\n", i));
+ continue;
+ }
+ /*
+ * If the throttling control is unsupported on CPU i,
+ * we will report the error message and continue.
+ */
+ if (!match_pr->flags.throttling) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Throttling Controll is unsupported "
+ "on CPU %d\n", i));
+ continue;
+ }
+ t_state.cpu = i;
+ set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
+ ret = match_pr->throttling.
+ acpi_processor_set_throttling(
+ match_pr, t_state.target_state);
+ }
+ }
+ /*
+ * After the set_throttling is called, the
+ * throttling notifier is called for every
+ * affected cpu to update the T-states.
+ * The notifier event is THROTTLING_POSTCHANGE
+ */
+ for_each_cpu_mask_nr(i, online_throttling_cpus) {
+ t_state.cpu = i;
+ acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
+ &t_state);
+ }
+ /* restore the previous state */
+ set_cpus_allowed_ptr(current, &saved_mask);
+ return ret;
+}
+
+int acpi_processor_get_throttling_info(struct acpi_processor *pr)
+{
+ int result = 0;
+ struct acpi_processor_throttling *pthrottling;
+
+ if (!pr)
+ return -EINVAL;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
+ pr->throttling.address,
+ pr->throttling.duty_offset,
+ pr->throttling.duty_width));
+
+ /*
+ * Evaluate _PTC, _TSS and _TPC
+ * They must all be present or none of them can be used.
+ */
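+ /*
+ * If any of them is missing, fall back to the legacy FADT
+ * duty-cycle throttling interface below.
+ */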
+ if (acpi_processor_get_throttling_control(pr) ||
+ acpi_processor_get_throttling_states(pr) ||
+ acpi_processor_get_platform_limit(pr))
+ {
+ pr->throttling.acpi_processor_get_throttling =
+ &acpi_processor_get_throttling_fadt;
+ pr->throttling.acpi_processor_set_throttling =
+ &acpi_processor_set_throttling_fadt;
+ if (acpi_processor_get_fadt_info(pr))
+ return 0;
+ } else {
+ pr->throttling.acpi_processor_get_throttling =
+ &acpi_processor_get_throttling_ptc;
+ pr->throttling.acpi_processor_set_throttling =
+ &acpi_processor_set_throttling_ptc;
+ }
+
+ /*
+ * If the _TSD package for a CPU cannot be parsed, that CPU is
+ * assumed to have no T-state coordination with other CPUs.
+ */
+ if (acpi_processor_get_tsd(pr)) {
+ pthrottling = &pr->throttling;
+ pthrottling->tsd_valid_flag = 0;
+ cpu_set(pr->id, pthrottling->shared_cpu_map);
+ pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
+ }
+
+ /*
+ * PIIX4 Errata: We don't support throttling on the original PIIX4.
+ * This shouldn't be an issue as few (if any) mobile systems ever
+ * used this part.
+ */
+ if (errata.piix4.throttle) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Throttling not supported on PIIX4 A- or B-step\n"));
+ return 0;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
+ pr->throttling.state_count));
+
+ pr->flags.throttling = 1;
+
+ /*
+ * Disable throttling (if enabled). We'll let subsequent policy (e.g.
+ * thermal) decide to lower performance if it so chooses, but for now
+ * we'll crank up the speed.
+ */
+
+ result = acpi_processor_get_throttling(pr);
+ if (result)
+ goto end;
+
+ if (pr->throttling.state) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Disabling throttling (was T%d)\n",
+ pr->throttling.state));
+ result = acpi_processor_set_throttling(pr, 0);
+ if (result)
+ goto end;
+ }
+
+ end:
+ if (result)
+ pr->flags.throttling = 0;
+
+ return result;
+}
+
+/* proc interface */
+
+static int acpi_processor_throttling_seq_show(struct seq_file *seq,
+ void *offset)
+{
+ struct acpi_processor *pr = seq->private;
+ int i = 0;
+ int result = 0;
+
+ if (!pr)
+ goto end;
+
+ if (!(pr->throttling.state_count > 0)) {
+ seq_puts(seq, "<not supported>\n");
+ goto end;
+ }
+
+ result = acpi_processor_get_throttling(pr);
+
+ if (result) {
+ seq_puts(seq,
+ "Could not determine current throttling state.\n");
+ goto end;
+ }
+
+ seq_printf(seq, "state count: %d\n"
+ "active state: T%d\n"
+ "state available: T%d to T%d\n",
+ pr->throttling.state_count, pr->throttling.state,
+ pr->throttling_platform_limit,
+ pr->throttling.state_count - 1);
+
+ seq_puts(seq, "states:\n");
+ if (pr->throttling.acpi_processor_get_throttling ==
+ acpi_processor_get_throttling_fadt) {
+ for (i = 0; i < pr->throttling.state_count; i++)
+ seq_printf(seq, " %cT%d: %02d%%\n",
+ (i == pr->throttling.state ? '*' : ' '), i,
+ (pr->throttling.states[i].performance ? pr->
+ throttling.states[i].performance / 10 : 0));
+ } else {
+ for (i = 0; i < pr->throttling.state_count; i++)
+ seq_printf(seq, " %cT%d: %02d%%\n",
+ (i == pr->throttling.state ? '*' : ' '), i,
+ (int)pr->throttling.states_tss[i].
+ freqpercentage);
+ }
+
+ end:
+ return 0;
+}
+
+static int acpi_processor_throttling_open_fs(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, acpi_processor_throttling_seq_show,
+ PDE(inode)->data);
+}
+
+static ssize_t acpi_processor_write_throttling(struct file *file,
+ const char __user * buffer,
+ size_t count, loff_t * data)
+{
+ int result = 0;
+ struct seq_file *m = file->private_data;
+ struct acpi_processor *pr = m->private;
+ char state_string[5] = "";
+ char *charp = NULL;
+ size_t state_val = 0;
+ char tmpbuf[5] = "";
+
+ if (!pr || (count > sizeof(state_string) - 1))
+ return -EINVAL;
+
+ if (copy_from_user(state_string, buffer, count))
+ return -EFAULT;
+
+ state_string[count] = '\0';
+ if ((count > 0) && (state_string[count-1] == '\n'))
+ state_string[count-1] = '\0';
+
+ charp = state_string;
+ if ((state_string[0] == 't') || (state_string[0] == 'T'))
+ charp++;
+
+ state_val = simple_strtoul(charp, NULL, 0);
+ if (state_val >= pr->throttling.state_count)
+ return -EINVAL;
+
+ snprintf(tmpbuf, 5, "%zu", state_val);
+
+ if (strcmp(tmpbuf, charp) != 0)
+ return -EINVAL;
+
+ result = acpi_processor_set_throttling(pr, state_val);
+ if (result)
+ return result;
+
+ return count;
+}
+
+struct file_operations acpi_processor_throttling_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_processor_throttling_open_fs,
+ .read = seq_read,
+ .write = acpi_processor_write_throttling,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
diff --git a/drivers/acpi/reboot.c b/drivers/acpi/reboot.c
new file mode 100644
index 0000000..a6b662c
--- /dev/null
+++ b/drivers/acpi/reboot.c
@@ -0,0 +1,50 @@
+
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <acpi/reboot.h>
+
+void acpi_reboot(void)
+{
+ struct acpi_generic_address *rr;
+ struct pci_bus *bus0;
+ u8 reset_value;
+ unsigned int devfn;
+
+ if (acpi_disabled)
+ return;
+
+ rr = &acpi_gbl_FADT.reset_register;
+
+ /* Is the reset register supported? */
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) ||
+ rr->bit_width != 8 || rr->bit_offset != 0)
+ return;
+
+ reset_value = acpi_gbl_FADT.reset_value;
+
+ /* The reset register can only exist in I/O, Memory or PCI config space
+ * on a device on bus 0. */
+ switch (rr->space_id) {
+ case ACPI_ADR_SPACE_PCI_CONFIG:
+ /* The reset register can only live on bus 0. */
+ bus0 = pci_find_bus(0, 0);
+ if (!bus0)
+ return;
+ /* Form PCI device/function pair. */
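+ /* rr->address encodes the device in bits 47:32, the function
+ * in bits 31:16 and the register offset in bits 15:0. */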
+ devfn = PCI_DEVFN((rr->address >> 32) & 0xffff,
+ (rr->address >> 16) & 0xffff);
+ printk(KERN_DEBUG "Resetting with ACPI PCI RESET_REG.");
+ /* Write the value that resets us. */
+ pci_bus_write_config_byte(bus0, devfn,
+ (rr->address & 0xffff), reset_value);
+ break;
+
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ printk(KERN_DEBUG "ACPI MEMORY or I/O RESET_REG.\n");
+ acpi_hw_low_level_write(8, reset_value, rr);
+ break;
+ }
+ /* Wait ten seconds */
+ acpi_os_stall(10000000);
+}
diff --git a/drivers/acpi/resources/Makefile b/drivers/acpi/resources/Makefile
new file mode 100644
index 0000000..8de4f69
--- /dev/null
+++ b/drivers/acpi/resources/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for all Linux ACPI interpreter subdirectories
+#
+
+obj-y := rsaddr.o rscreate.o rsinfo.o rsio.o rslist.o rsmisc.o rsxface.o \
+ rscalc.o rsirq.o rsmemory.o rsutils.o
+
+obj-$(ACPI_FUTURE_USAGE) += rsdump.o
+
+EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/resources/rsaddr.c b/drivers/acpi/resources/rsaddr.c
new file mode 100644
index 0000000..7f96332
--- /dev/null
+++ b/drivers/acpi/resources/rsaddr.c
@@ -0,0 +1,380 @@
+/*******************************************************************************
+ *
+ * Module Name: rsaddr - Address resource descriptors (16/32/64)
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acresrc.h>
+
+#define _COMPONENT ACPI_RESOURCES
+ACPI_MODULE_NAME("rsaddr")
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_address16 - All WORD (16-bit) address resources
+ *
+ ******************************************************************************/
+struct acpi_rsconvert_info acpi_rs_convert_address16[5] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_ADDRESS16,
+ ACPI_RS_SIZE(struct acpi_resource_address16),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_address16)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_ADDRESS16,
+ sizeof(struct aml_resource_address16),
+ 0},
+
+ /* Resource Type, General Flags, and Type-Specific Flags */
+
+ {ACPI_RSC_ADDRESS, 0, 0, 0},
+
+ /*
+ * These fields are contiguous in both the source and destination:
+ * Address Granularity
+ * Address Range Minimum
+ * Address Range Maximum
+ * Address Translation Offset
+ * Address Length
+ */
+ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.address16.granularity),
+ AML_OFFSET(address16.granularity),
+ 5},
+
+ /* Optional resource_source (Index and String) */
+
+ {ACPI_RSC_SOURCE, ACPI_RS_OFFSET(data.address16.resource_source),
+ 0,
+ sizeof(struct aml_resource_address16)}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_address32 - All DWORD (32-bit) address resources
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_address32[5] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_ADDRESS32,
+ ACPI_RS_SIZE(struct acpi_resource_address32),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_address32)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_ADDRESS32,
+ sizeof(struct aml_resource_address32),
+ 0},
+
+ /* Resource Type, General Flags, and Type-Specific Flags */
+
+ {ACPI_RSC_ADDRESS, 0, 0, 0},
+
+ /*
+ * These fields are contiguous in both the source and destination:
+ * Address Granularity
+ * Address Range Minimum
+ * Address Range Maximum
+ * Address Translation Offset
+ * Address Length
+ */
+ {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.address32.granularity),
+ AML_OFFSET(address32.granularity),
+ 5},
+
+ /* Optional resource_source (Index and String) */
+
+ {ACPI_RSC_SOURCE, ACPI_RS_OFFSET(data.address32.resource_source),
+ 0,
+ sizeof(struct aml_resource_address32)}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_address64 - All QWORD (64-bit) address resources
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_address64[5] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_ADDRESS64,
+ ACPI_RS_SIZE(struct acpi_resource_address64),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_address64)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_ADDRESS64,
+ sizeof(struct aml_resource_address64),
+ 0},
+
+ /* Resource Type, General Flags, and Type-Specific Flags */
+
+ {ACPI_RSC_ADDRESS, 0, 0, 0},
+
+ /*
+ * These fields are contiguous in both the source and destination:
+ * Address Granularity
+ * Address Range Minimum
+ * Address Range Maximum
+ * Address Translation Offset
+ * Address Length
+ */
+ {ACPI_RSC_MOVE64, ACPI_RS_OFFSET(data.address64.granularity),
+ AML_OFFSET(address64.granularity),
+ 5},
+
+ /* Optional resource_source (Index and String) */
+
+ {ACPI_RSC_SOURCE, ACPI_RS_OFFSET(data.address64.resource_source),
+ 0,
+ sizeof(struct aml_resource_address64)}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_ext_address64 - All Extended (64-bit) address resources
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_ext_address64[5] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64,
+ ACPI_RS_SIZE(struct acpi_resource_extended_address64),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_ext_address64)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64,
+ sizeof(struct aml_resource_extended_address64),
+ 0},
+
+ /* Resource Type, General Flags, and Type-Specific Flags */
+
+ {ACPI_RSC_ADDRESS, 0, 0, 0},
+
+ /* Revision ID */
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.ext_address64.revision_iD),
+ AML_OFFSET(ext_address64.revision_iD),
+ 1},
+ /*
+ * These fields are contiguous in both the source and destination:
+ * Address Granularity
+ * Address Range Minimum
+ * Address Range Maximum
+ * Address Translation Offset
+ * Address Length
+ * Type-Specific Attribute
+ */
+ {ACPI_RSC_MOVE64, ACPI_RS_OFFSET(data.ext_address64.granularity),
+ AML_OFFSET(ext_address64.granularity),
+ 6}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_general_flags - Flags common to all address descriptors
+ *
+ ******************************************************************************/
+
+static struct acpi_rsconvert_info acpi_rs_convert_general_flags[6] = {
+ {ACPI_RSC_FLAGINIT, 0, AML_OFFSET(address.flags),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_general_flags)},
+
+ /* Resource Type (Memory, Io, bus_number, etc.) */
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.address.resource_type),
+ AML_OFFSET(address.resource_type),
+ 1},
+
+ /* General Flags - Consume, Decode, min_fixed, max_fixed */
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.address.producer_consumer),
+ AML_OFFSET(address.flags),
+ 0},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.address.decode),
+ AML_OFFSET(address.flags),
+ 1},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.address.min_address_fixed),
+ AML_OFFSET(address.flags),
+ 2},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.address.max_address_fixed),
+ AML_OFFSET(address.flags),
+ 3}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_mem_flags - Flags common to Memory address descriptors
+ *
+ ******************************************************************************/
+
+static struct acpi_rsconvert_info acpi_rs_convert_mem_flags[5] = {
+ {ACPI_RSC_FLAGINIT, 0, AML_OFFSET(address.specific_flags),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_mem_flags)},
+
+ /* Memory-specific flags */
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.address.info.mem.write_protect),
+ AML_OFFSET(address.specific_flags),
+ 0},
+
+ {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.address.info.mem.caching),
+ AML_OFFSET(address.specific_flags),
+ 1},
+
+ {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.address.info.mem.range_type),
+ AML_OFFSET(address.specific_flags),
+ 3},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.address.info.mem.translation),
+ AML_OFFSET(address.specific_flags),
+ 5}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_io_flags - Flags common to I/O address descriptors
+ *
+ ******************************************************************************/
+
+static struct acpi_rsconvert_info acpi_rs_convert_io_flags[4] = {
+ {ACPI_RSC_FLAGINIT, 0, AML_OFFSET(address.specific_flags),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_io_flags)},
+
+ /* I/O-specific flags */
+
+ {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.address.info.io.range_type),
+ AML_OFFSET(address.specific_flags),
+ 0},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.address.info.io.translation),
+ AML_OFFSET(address.specific_flags),
+ 4},
+
+ {ACPI_RSC_1BITFLAG,
+ ACPI_RS_OFFSET(data.address.info.io.translation_type),
+ AML_OFFSET(address.specific_flags),
+ 5}
+};
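
The 1-bit and 2-bit flag entries above carry, in their last member, the bit position of the flag inside the AML General Flags or Type-Specific Flags byte. As a minimal standalone sketch of the extraction such an entry implies (the helper name and the sample flag byte are illustrative, not ACPICA code):

#include <assert.h>
#include <stdint.h>

/* Pull a 1-bit or 2-bit flag out of an AML flags byte at a given bit position */
static uint8_t get_flag(uint8_t flags, uint8_t position, uint8_t width_bits)
{
	uint8_t mask = (width_bits == 2) ? 0x03 : 0x01;

	return (uint8_t)((flags >> position) & mask);
}

int main(void)
{
	/* Sample memory Type-Specific Flags byte: write_protect = 1 (bit 0),
	 * caching = 2 (2-bit field starting at bit 1), per the table above */
	uint8_t specific_flags = 0x05;

	assert(get_flag(specific_flags, 0, 1) == 1);	/* write_protect */
	assert(get_flag(specific_flags, 1, 2) == 2);	/* caching */
	return 0;
}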
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_get_address_common
+ *
+ * PARAMETERS: Resource - Pointer to the internal resource struct
+ * Aml - Pointer to the AML resource descriptor
+ *
+ * RETURN: TRUE if the resource_type field is OK, FALSE otherwise
+ *
+ * DESCRIPTION: Convert common flag fields from a raw AML resource descriptor
+ * to an internal resource descriptor
+ *
+ ******************************************************************************/
+
+u8
+acpi_rs_get_address_common(struct acpi_resource *resource,
+ union aml_resource *aml)
+{
+ ACPI_FUNCTION_ENTRY();
+
+ /* Validate the Resource Type */
+
+ if ((aml->address.resource_type > 2)
+ && (aml->address.resource_type < 0xC0)) {
+ return (FALSE);
+ }
+
+ /* Get the Resource Type and General Flags */
+
+ (void)acpi_rs_convert_aml_to_resource(resource, aml,
+ acpi_rs_convert_general_flags);
+
+ /* Get the Type-Specific Flags (Memory and I/O descriptors only) */
+
+ if (resource->data.address.resource_type == ACPI_MEMORY_RANGE) {
+ (void)acpi_rs_convert_aml_to_resource(resource, aml,
+ acpi_rs_convert_mem_flags);
+ } else if (resource->data.address.resource_type == ACPI_IO_RANGE) {
+ (void)acpi_rs_convert_aml_to_resource(resource, aml,
+ acpi_rs_convert_io_flags);
+ } else {
+ /* Generic resource type, just grab the type_specific byte */
+
+ resource->data.address.info.type_specific =
+ aml->address.specific_flags;
+ }
+
+ return (TRUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_set_address_common
+ *
+ * PARAMETERS: Aml - Pointer to the AML resource descriptor
+ * Resource - Pointer to the internal resource struct
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Convert common flag fields from a resource descriptor to an
+ * AML descriptor
+ *
+ ******************************************************************************/
+
+void
+acpi_rs_set_address_common(union aml_resource *aml,
+ struct acpi_resource *resource)
+{
+ ACPI_FUNCTION_ENTRY();
+
+ /* Set the Resource Type and General Flags */
+
+ (void)acpi_rs_convert_resource_to_aml(resource, aml,
+ acpi_rs_convert_general_flags);
+
+ /* Set the Type-Specific Flags (Memory and I/O descriptors only) */
+
+ if (resource->data.address.resource_type == ACPI_MEMORY_RANGE) {
+ (void)acpi_rs_convert_resource_to_aml(resource, aml,
+ acpi_rs_convert_mem_flags);
+ } else if (resource->data.address.resource_type == ACPI_IO_RANGE) {
+ (void)acpi_rs_convert_resource_to_aml(resource, aml,
+ acpi_rs_convert_io_flags);
+ } else {
+ /* Generic resource type, just copy the type_specific byte */
+
+ aml->address.specific_flags =
+ resource->data.address.info.type_specific;
+ }
+}
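
Taken together, the tables in this file are a declarative description of how each AML address descriptor maps onto the internal struct acpi_resource; they are interpreted by acpi_rs_convert_aml_to_resource() and acpi_rs_convert_resource_to_aml(), called at the end of this file. As a rough, hedged sketch of what a MOVE16-style entry with a count of 5 amounts to (the struct and function below are the editor's stand-ins, and the real interpreter moves each item through ACPI_MOVE_* macros so unaligned or byte-swapped sources are handled correctly):

#include <stdint.h>
#include <string.h>

/* Hypothetical table entry; the real one is struct acpi_rsconvert_info */
struct rsc_entry {
	uint8_t  opcode;		/* e.g. a MOVE16-style opcode */
	uint16_t resource_offset;	/* byte offset in the internal struct */
	uint16_t aml_offset;		/* byte offset in the AML descriptor */
	uint8_t  count;			/* number of contiguous items to move */
};

/* Copy 'count' contiguous 16-bit fields from the AML image to the
 * internal resource, as directed by one table entry */
static void rsc_move16(void *resource, const void *aml,
		       const struct rsc_entry *entry)
{
	memcpy((uint8_t *)resource + entry->resource_offset,
	       (const uint8_t *)aml + entry->aml_offset,
	       (size_t)entry->count * sizeof(uint16_t));
}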
diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/resources/rscalc.c
new file mode 100644
index 0000000..8eaaecf
--- /dev/null
+++ b/drivers/acpi/resources/rscalc.c
@@ -0,0 +1,617 @@
+/*******************************************************************************
+ *
+ * Module Name: rscalc - Calculate stream and list lengths
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acresrc.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_RESOURCES
+ACPI_MODULE_NAME("rscalc")
+
+/* Local prototypes */
+static u8 acpi_rs_count_set_bits(u16 bit_field);
+
+static acpi_rs_length
+acpi_rs_struct_option_length(struct acpi_resource_source *resource_source);
+
+static u32
+acpi_rs_stream_option_length(u32 resource_length,
+ u32 minimum_aml_resource_length);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_count_set_bits
+ *
+ * PARAMETERS: bit_field - Field in which to count bits
+ *
+ * RETURN: Number of bits set within the field
+ *
+ * DESCRIPTION: Count the number of bits set in a resource field. Used for
+ * (Short descriptor) interrupt and DMA lists.
+ *
+ ******************************************************************************/
+
+static u8 acpi_rs_count_set_bits(u16 bit_field)
+{
+ u8 bits_set;
+
+ ACPI_FUNCTION_ENTRY();
+
+ for (bits_set = 0; bit_field; bits_set++) {
+
+ /* Zero the least significant bit that is set */
+
+ bit_field &= (u16) (bit_field - 1);
+ }
+
+ return bits_set;
+}
+
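
The loop above is the classic clear-the-lowest-set-bit trick: bit_field &= bit_field - 1 removes exactly one set bit per iteration, so the loop runs once per set bit rather than once per bit position. A standalone illustration (not part of the patch):

#include <assert.h>
#include <stdint.h>

static uint8_t count_set_bits(uint16_t bit_field)
{
	uint8_t bits_set;

	for (bits_set = 0; bit_field; bits_set++)
		bit_field &= (uint16_t)(bit_field - 1);	/* clear lowest set bit */

	return bits_set;
}

int main(void)
{
	/* 0x0A29 has bits 0, 3, 5, 9 and 11 set */
	assert(count_set_bits(0x0A29) == 5);
	assert(count_set_bits(0x0000) == 0);
	return 0;
}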
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_struct_option_length
+ *
+ * PARAMETERS: resource_source - Pointer to optional descriptor field
+ *
+ * RETURN: Length of the optional resource_source (0 if not present)
+ *
+ * DESCRIPTION: Common code to handle optional resource_source_index and
+ * resource_source fields in some Large descriptors. Used during
+ * list-to-stream conversion
+ *
+ ******************************************************************************/
+
+static acpi_rs_length
+acpi_rs_struct_option_length(struct acpi_resource_source *resource_source)
+{
+ ACPI_FUNCTION_ENTRY();
+
+ /*
+ * If the resource_source string is valid, return the size of the string
+ * (string_length includes the NULL terminator) plus the size of the
+ * resource_source_index (1).
+ */
+ if (resource_source->string_ptr) {
+ return ((acpi_rs_length) (resource_source->string_length + 1));
+ }
+
+ return (0);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_stream_option_length
+ *
+ * PARAMETERS: resource_length - Length from the resource header
+ * minimum_aml_resource_length - Minimum length of this resource,
+ * before any optional fields. Includes header size
+ *
+ * RETURN: Length of optional string (0 if no string present)
+ *
+ * DESCRIPTION: Common code to handle optional resource_source_index and
+ * resource_source fields in some Large descriptors. Used during
+ * stream-to-list conversion
+ *
+ ******************************************************************************/
+
+static u32
+acpi_rs_stream_option_length(u32 resource_length,
+ u32 minimum_aml_resource_length)
+{
+ u32 string_length = 0;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /*
+ * The resource_source_index and resource_source are optional elements of some
+ * Large-type resource descriptors.
+ */
+
+ /*
+ * If the length of the actual resource descriptor is greater than the ACPI
+ * spec-defined minimum length, it means that a resource_source_index exists
+ * and is followed by a (required) null terminated string. The string length
+ * (including the null terminator) is the resource length minus the minimum
+ * length, minus one byte for the resource_source_index itself.
+ */
+ if (resource_length > minimum_aml_resource_length) {
+
+ /* Compute the length of the optional string */
+
+ string_length =
+ resource_length - minimum_aml_resource_length - 1;
+ }
+
+ /*
+ * Round the length up to a multiple of the native word in order to
+ * guarantee that the entire resource descriptor is native word aligned
+ */
+ return ((u32) ACPI_ROUND_UP_TO_NATIVE_WORD(string_length));
+}
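
A worked example of the arithmetic above, as a hedged standalone sketch: suppose a descriptor carries a resource_length of 0x19 while its minimum body length is 0x0D (the real minimums come from acpi_gbl_resource_aml_sizes; 0x0D is only a plausible value used for illustration). The optional part is then one resource_source_index byte plus an 11-byte string including its terminator, and the 11 is rounded up to the native word size so the internal descriptor stays aligned.

#include <assert.h>
#include <stdint.h>

/* Round 'value' up to the next multiple of the native word size */
static uint32_t round_to_native_word(uint32_t value)
{
	uint32_t word = (uint32_t)sizeof(void *);

	return (value + word - 1) & ~(word - 1);
}

static uint32_t stream_option_length(uint32_t resource_length,
				     uint32_t minimum_aml_length)
{
	uint32_t string_length = 0;

	/* Anything beyond the minimum is the index byte plus the string */
	if (resource_length > minimum_aml_length)
		string_length = resource_length - minimum_aml_length - 1;

	return round_to_native_word(string_length);
}

int main(void)
{
	/* 0x19 total, 0x0D minimum: 11-byte string (terminator included) */
	assert(stream_option_length(0x19, 0x0D) == round_to_native_word(11));
	assert(stream_option_length(0x0D, 0x0D) == 0);	/* no optional fields */
	return 0;
}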
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_get_aml_length
+ *
+ * PARAMETERS: Resource - Pointer to the resource linked list
+ * size_needed - Where the required size is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Takes a linked list of internal resource descriptors and
+ * calculates the size buffer needed to hold the corresponding
+ * external resource byte stream.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed)
+{
+ acpi_size aml_size_needed = 0;
+ acpi_rs_length total_size;
+
+ ACPI_FUNCTION_TRACE(rs_get_aml_length);
+
+ /* Traverse entire list of internal resource descriptors */
+
+ while (resource) {
+
+ /* Validate the descriptor type */
+
+ if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
+ return_ACPI_STATUS(AE_AML_INVALID_RESOURCE_TYPE);
+ }
+
+ /* Get the base size of the (external stream) resource descriptor */
+
+ total_size = acpi_gbl_aml_resource_sizes[resource->type];
+
+ /*
+ * Augment the base size for descriptors with optional and/or
+ * variable-length fields
+ */
+ switch (resource->type) {
+ case ACPI_RESOURCE_TYPE_IRQ:
+
+ /* Length can be 3 or 2 */
+
+ if (resource->data.irq.descriptor_length == 2) {
+ total_size--;
+ }
+ break;
+
+ case ACPI_RESOURCE_TYPE_START_DEPENDENT:
+
+ /* Length can be 1 or 0 */
+
+ if (resource->data.irq.descriptor_length == 0) {
+ total_size--;
+ }
+ break;
+
+ case ACPI_RESOURCE_TYPE_VENDOR:
+ /*
+ * Vendor Defined Resource:
+ * For a Vendor Specific resource, if the Length is between 1 and 7
+ * it will be created as a Small Resource data type, otherwise it
+ * is a Large Resource data type.
+ */
+ if (resource->data.vendor.byte_length > 7) {
+
+ /* Base size of a Large resource descriptor */
+
+ total_size =
+ sizeof(struct aml_resource_large_header);
+ }
+
+ /* Add the size of the vendor-specific data */
+
+ total_size = (acpi_rs_length)
+ (total_size + resource->data.vendor.byte_length);
+ break;
+
+ case ACPI_RESOURCE_TYPE_END_TAG:
+ /*
+ * End Tag:
+ * We are done -- return the accumulated total size.
+ */
+ *size_needed = aml_size_needed + total_size;
+
+ /* Normal exit */
+
+ return_ACPI_STATUS(AE_OK);
+
+ case ACPI_RESOURCE_TYPE_ADDRESS16:
+ /*
+ * 16-Bit Address Resource:
+ * Add the size of the optional resource_source info
+ */
+ total_size = (acpi_rs_length)
+ (total_size +
+ acpi_rs_struct_option_length(&resource->data.
+ address16.
+ resource_source));
+ break;
+
+ case ACPI_RESOURCE_TYPE_ADDRESS32:
+ /*
+ * 32-Bit Address Resource:
+ * Add the size of the optional resource_source info
+ */
+ total_size = (acpi_rs_length)
+ (total_size +
+ acpi_rs_struct_option_length(&resource->data.
+ address32.
+ resource_source));
+ break;
+
+ case ACPI_RESOURCE_TYPE_ADDRESS64:
+ /*
+ * 64-Bit Address Resource:
+ * Add the size of the optional resource_source info
+ */
+ total_size = (acpi_rs_length)
+ (total_size +
+ acpi_rs_struct_option_length(&resource->data.
+ address64.
+ resource_source));
+ break;
+
+ case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+ /*
+ * Extended IRQ Resource:
+ * Add the size of each additional optional interrupt beyond the
+ * required 1 (4 bytes for each u32 interrupt number)
+ */
+ total_size = (acpi_rs_length)
+ (total_size +
+ ((resource->data.extended_irq.interrupt_count -
+ 1) * 4) +
+ /* Add the size of the optional resource_source info */
+ acpi_rs_struct_option_length(&resource->data.
+ extended_irq.
+ resource_source));
+ break;
+
+ default:
+ break;
+ }
+
+ /* Update the total */
+
+ aml_size_needed += total_size;
+
+ /* Point to the next object */
+
+ resource =
+ ACPI_ADD_PTR(struct acpi_resource, resource,
+ resource->length);
+ }
+
+ /* Did not find an end_tag resource descriptor */
+
+ return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_get_list_length
+ *
+ * PARAMETERS: aml_buffer - Pointer to the resource byte stream
+ * aml_buffer_length - Size of aml_buffer
+ * size_needed - Where the size needed is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Takes an external resource byte stream and calculates the size
+ * buffer needed to hold the corresponding internal resource
+ * descriptor linked list.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_get_list_length(u8 * aml_buffer,
+ u32 aml_buffer_length, acpi_size * size_needed)
+{
+ acpi_status status;
+ u8 *end_aml;
+ u8 *buffer;
+ u32 buffer_size;
+ u16 temp16;
+ u16 resource_length;
+ u32 extra_struct_bytes;
+ u8 resource_index;
+ u8 minimum_aml_resource_length;
+
+ ACPI_FUNCTION_TRACE(rs_get_list_length);
+
+ *size_needed = 0;
+ end_aml = aml_buffer + aml_buffer_length;
+
+ /* Walk the list of AML resource descriptors */
+
+ while (aml_buffer < end_aml) {
+
+ /* Validate the Resource Type and Resource Length */
+
+ status = acpi_ut_validate_resource(aml_buffer, &resource_index);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Get the resource length and base (minimum) AML size */
+
+ resource_length = acpi_ut_get_resource_length(aml_buffer);
+ minimum_aml_resource_length =
+ acpi_gbl_resource_aml_sizes[resource_index];
+
+ /*
+ * Augment the size for descriptors with optional
+ * and/or variable length fields
+ */
+ extra_struct_bytes = 0;
+ buffer =
+ aml_buffer + acpi_ut_get_resource_header_length(aml_buffer);
+
+ switch (acpi_ut_get_resource_type(aml_buffer)) {
+ case ACPI_RESOURCE_NAME_IRQ:
+ /*
+ * IRQ Resource:
+ * Get the number of bits set in the 16-bit IRQ mask
+ */
+ ACPI_MOVE_16_TO_16(&temp16, buffer);
+ extra_struct_bytes = acpi_rs_count_set_bits(temp16);
+ break;
+
+ case ACPI_RESOURCE_NAME_DMA:
+ /*
+ * DMA Resource:
+ * Get the number of bits set in the 8-bit DMA mask
+ */
+ extra_struct_bytes = acpi_rs_count_set_bits(*buffer);
+ break;
+
+ case ACPI_RESOURCE_NAME_VENDOR_SMALL:
+ case ACPI_RESOURCE_NAME_VENDOR_LARGE:
+ /*
+ * Vendor Resource:
+ * Get the number of vendor data bytes
+ */
+ extra_struct_bytes = resource_length;
+ break;
+
+ case ACPI_RESOURCE_NAME_END_TAG:
+ /*
+ * End Tag:
+ * This is the normal exit, add size of end_tag
+ */
+ *size_needed += ACPI_RS_SIZE_MIN;
+ return_ACPI_STATUS(AE_OK);
+
+ case ACPI_RESOURCE_NAME_ADDRESS32:
+ case ACPI_RESOURCE_NAME_ADDRESS16:
+ case ACPI_RESOURCE_NAME_ADDRESS64:
+ /*
+ * Address Resource:
+ * Add the size of the optional resource_source
+ */
+ extra_struct_bytes =
+ acpi_rs_stream_option_length(resource_length,
+ minimum_aml_resource_length);
+ break;
+
+ case ACPI_RESOURCE_NAME_EXTENDED_IRQ:
+ /*
+ * Extended IRQ Resource:
+ * Using the interrupt_table_length, add 4 bytes for each additional
+ * interrupt. Note: at least one interrupt is required and is
+ * included in the minimum descriptor size (reason for the -1)
+ */
+ extra_struct_bytes = (buffer[1] - 1) * sizeof(u32);
+
+ /* Add the size of the optional resource_source */
+
+ extra_struct_bytes +=
+ acpi_rs_stream_option_length(resource_length -
+ extra_struct_bytes,
+ minimum_aml_resource_length);
+ break;
+
+ default:
+ break;
+ }
+
+ /*
+ * Update the required buffer size for the internal descriptor structs
+ *
+ * Important: Round the size up for the appropriate alignment. This
+ * is a requirement on IA64.
+ */
+ buffer_size = acpi_gbl_resource_struct_sizes[resource_index] +
+ extra_struct_bytes;
+ buffer_size = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size);
+
+ *size_needed += buffer_size;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
+ "Type %.2X, AmlLength %.2X InternalLength %.2X\n",
+ acpi_ut_get_resource_type(aml_buffer),
+ acpi_ut_get_descriptor_length(aml_buffer),
+ buffer_size));
+
+ /*
+ * Point to the next resource within the AML stream using the length
+ * contained in the resource descriptor header
+ */
+ aml_buffer += acpi_ut_get_descriptor_length(aml_buffer);
+ }
+
+ /* Did not find an end_tag resource descriptor */
+
+ return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_get_pci_routing_table_length
+ *
+ * PARAMETERS: package_object - Pointer to the package object
+ * buffer_size_needed - Where the buffer size needed to
+ * return the parsed data is
+ * returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Given a package representing a PCI routing table, this
+ * calculates the size of the corresponding linked list of
+ * descriptions.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
+ acpi_size * buffer_size_needed)
+{
+ u32 number_of_elements;
+ acpi_size temp_size_needed = 0;
+ union acpi_operand_object **top_object_list;
+ u32 index;
+ union acpi_operand_object *package_element;
+ union acpi_operand_object **sub_object_list;
+ u8 name_found;
+ u32 table_index;
+
+ ACPI_FUNCTION_TRACE(rs_get_pci_routing_table_length);
+
+ number_of_elements = package_object->package.count;
+
+ /*
+ * Calculate the size of the return buffer.
+ * The base size is the number of elements * the size of the
+ * structure. Additional space for the strings is added below.
+ * The minus four subtracts the size of the u8 Source[4] member,
+ * because the actual source string length is added below.
+ *
+ * Each entry also carries a source name string, and the size of
+ * that string must be found.
+ */
+ top_object_list = package_object->package.elements;
+
+ for (index = 0; index < number_of_elements; index++) {
+
+ /* Dereference the sub-package */
+
+ package_element = *top_object_list;
+
+ /*
+ * The sub_object_list will now point to an array of the
+ * four IRQ elements: Address, Pin, Source and source_index
+ */
+ sub_object_list = package_element->package.elements;
+
+ /* Scan the irq_table_elements for the Source Name String */
+
+ name_found = FALSE;
+
+ for (table_index = 0; table_index < 4 && !name_found;
+ table_index++) {
+ if (*sub_object_list && /* Null object allowed */
+ ((ACPI_TYPE_STRING ==
+ ACPI_GET_OBJECT_TYPE(*sub_object_list)) ||
+ ((ACPI_TYPE_LOCAL_REFERENCE ==
+ ACPI_GET_OBJECT_TYPE(*sub_object_list)) &&
+ ((*sub_object_list)->reference.class ==
+ ACPI_REFCLASS_NAME)))) {
+ name_found = TRUE;
+ } else {
+ /* Look at the next element */
+
+ sub_object_list++;
+ }
+ }
+
+ temp_size_needed += (sizeof(struct acpi_pci_routing_table) - 4);
+
+ /* Was a String type found? */
+
+ if (name_found) {
+ if (ACPI_GET_OBJECT_TYPE(*sub_object_list) ==
+ ACPI_TYPE_STRING) {
+ /*
+ * The String.Length field does not include the
+ * terminating NULL, so add 1
+ */
+ temp_size_needed += ((acpi_size)
+ (*sub_object_list)->string.
+ length + 1);
+ } else {
+ temp_size_needed +=
+ acpi_ns_get_pathname_length((*sub_object_list)->reference.node);
+ if (!temp_size_needed) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+ }
+ } else {
+ /*
+ * If no name was found, then this is a NULL, which is
+ * translated as a u32 zero.
+ */
+ temp_size_needed += sizeof(u32);
+ }
+
+ /* Round up the size since each element must be aligned */
+
+ temp_size_needed = ACPI_ROUND_UP_TO_64BIT(temp_size_needed);
+
+ /* Point to the next union acpi_operand_object */
+
+ top_object_list++;
+ }
+
+ /*
+ * Add an extra element to the end of the list, essentially a
+ * NULL terminator
+ */
+ *buffer_size_needed =
+ temp_size_needed + sizeof(struct acpi_pci_routing_table);
+ return_ACPI_STATUS(AE_OK);
+}
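
To make the per-entry sizing in acpi_rs_get_pci_routing_table_length concrete, here is a hedged standalone sketch. The struct below is an editor's stand-in with the same general layout as struct acpi_pci_routing_table, and the sample source path is made up; the point is the base size minus the 4-byte Source pad, plus the string and its terminator, rounded up to a 64-bit boundary.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for struct acpi_pci_routing_table (illustrative layout) */
struct prt_entry {
	uint32_t length;	/* size of this entry, including source[] */
	uint32_t pin;
	uint64_t address;
	uint32_t source_index;
	char source[4];		/* pad; the real string may extend past it */
};

static size_t round_to_64bit(size_t value)
{
	return (value + 7) & ~(size_t)7;
}

int main(void)
{
	const char *source = "\\_SB_.LNKA";	/* sample source path */
	size_t entry_size;

	/* Base size minus the 4-byte source pad, plus string and its NUL */
	entry_size = sizeof(struct prt_entry) - 4 + strlen(source) + 1;
	entry_size = round_to_64bit(entry_size);

	printf("one _PRT entry needs %zu bytes\n", entry_size);
	return 0;
}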
diff --git a/drivers/acpi/resources/rscreate.c b/drivers/acpi/resources/rscreate.c
new file mode 100644
index 0000000..c0bbfa2
--- /dev/null
+++ b/drivers/acpi/resources/rscreate.c
@@ -0,0 +1,467 @@
+/*******************************************************************************
+ *
+ * Module Name: rscreate - Create resource lists/tables
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acresrc.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_RESOURCES
+ACPI_MODULE_NAME("rscreate")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_create_resource_list
+ *
+ * PARAMETERS: aml_buffer - Pointer to the resource byte stream
+ * output_buffer - Pointer to the user's buffer
+ *
+ * RETURN: Status: AE_OK if okay, else a valid acpi_status code
+ * If output_buffer is not large enough, output_buffer->length
+ * indicates how large output_buffer should be; otherwise it
+ * indicates how many bytes of output_buffer are valid.
+ *
+ * DESCRIPTION: Takes the byte stream returned from a _CRS or _PRS control
+ * method execution and parses the stream to create a linked
+ * list of device resources.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
+ struct acpi_buffer *output_buffer)
+{
+
+ acpi_status status;
+ u8 *aml_start;
+ acpi_size list_size_needed = 0;
+ u32 aml_buffer_length;
+ void *resource;
+
+ ACPI_FUNCTION_TRACE(rs_create_resource_list);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlBuffer = %p\n", aml_buffer));
+
+ /* Params already validated, so we don't re-validate here */
+
+ aml_buffer_length = aml_buffer->buffer.length;
+ aml_start = aml_buffer->buffer.pointer;
+
+ /*
+ * Pass the aml_buffer to acpi_rs_get_list_length, which calculates
+ * the buffer size needed for the linked list
+ */
+ status = acpi_rs_get_list_length(aml_start, aml_buffer_length,
+ &list_size_needed);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Status=%X ListSizeNeeded=%X\n",
+ status, (u32) list_size_needed));
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Validate/Allocate/Clear caller buffer */
+
+ status = acpi_ut_initialize_buffer(output_buffer, list_size_needed);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Do the conversion */
+
+ resource = output_buffer->pointer;
+ status = acpi_ut_walk_aml_resources(aml_start, aml_buffer_length,
+ acpi_rs_convert_aml_to_resources,
+ &resource);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "OutputBuffer %p Length %X\n",
+ output_buffer->pointer, (u32) output_buffer->length));
+ return_ACPI_STATUS(AE_OK);
+}
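
From a driver's point of view, this routine sits underneath the public acpi_get_current_resources() interface, which relies on the two-pass sizing above to fill a caller-supplied struct acpi_buffer. A minimal, hedged caller-side sketch (the function is the editor's example, and the printed format is illustrative):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <acpi/acpi.h>

/* Sketch: print the type of each resource in a device's _CRS */
static acpi_status example_dump_crs(acpi_handle handle)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_resource *res;
	acpi_status status;

	status = acpi_get_current_resources(handle, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	/* The returned list is terminated by an END_TAG resource */
	for (res = buffer.pointer;
	     res->type != ACPI_RESOURCE_TYPE_END_TAG;
	     res = ACPI_ADD_PTR(struct acpi_resource, res, res->length))
		printk(KERN_DEBUG "resource type %u, length %u\n",
		       res->type, res->length);

	kfree(buffer.pointer);
	return AE_OK;
}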
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_create_pci_routing_table
+ *
+ * PARAMETERS: package_object - Pointer to an union acpi_operand_object
+ * package
+ * output_buffer - Pointer to the user's buffer
+ *
+ * RETURN: Status AE_OK if okay, else a valid acpi_status code.
+ * If the output_buffer is too small, the error will be
+ * AE_BUFFER_OVERFLOW and output_buffer->length will contain
+ * the required buffer size.
+ *
+ * DESCRIPTION: Takes the union acpi_operand_object package and creates a
+ * linked list of PCI interrupt descriptions
+ *
+ * NOTE: It is the caller's responsibility to ensure that the start of the
+ * output buffer is aligned properly (if necessary).
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
+ struct acpi_buffer *output_buffer)
+{
+ u8 *buffer;
+ union acpi_operand_object **top_object_list;
+ union acpi_operand_object **sub_object_list;
+ union acpi_operand_object *obj_desc;
+ acpi_size buffer_size_needed = 0;
+ u32 number_of_elements;
+ u32 index;
+ struct acpi_pci_routing_table *user_prt;
+ struct acpi_namespace_node *node;
+ acpi_status status;
+ struct acpi_buffer path_buffer;
+
+ ACPI_FUNCTION_TRACE(rs_create_pci_routing_table);
+
+ /* Params already validated, so we don't re-validate here */
+
+ /* Get the required buffer length */
+
+ status = acpi_rs_get_pci_routing_table_length(package_object,
+ &buffer_size_needed);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "BufferSizeNeeded = %X\n",
+ (u32) buffer_size_needed));
+
+ /* Validate/Allocate/Clear caller buffer */
+
+ status = acpi_ut_initialize_buffer(output_buffer, buffer_size_needed);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Loop through the ACPI_INTERNAL_OBJECTS - Each object should be a
+ * package that in turn contains an acpi_integer Address, a u8 Pin,
+ * a Name, and a u8 source_index.
+ */
+ top_object_list = package_object->package.elements;
+ number_of_elements = package_object->package.count;
+ buffer = output_buffer->pointer;
+ user_prt = ACPI_CAST_PTR(struct acpi_pci_routing_table, buffer);
+
+ for (index = 0; index < number_of_elements; index++) {
+ int source_name_index = 2;
+ int source_index_index = 3;
+
+ /*
+ * Point user_prt past this current structure
+ *
+ * NOTE: On the first iteration, user_prt->Length will
+ * be zero because we cleared the return buffer earlier
+ */
+ buffer += user_prt->length;
+ user_prt = ACPI_CAST_PTR(struct acpi_pci_routing_table, buffer);
+
+ /*
+ * Fill in the Length field with the information we have at this point.
+ * The minus four is to subtract the size of the u8 Source[4] member
+ * because it is added below.
+ */
+ user_prt->length = (sizeof(struct acpi_pci_routing_table) - 4);
+
+ /* Each element of the top-level package must also be a package */
+
+ if (ACPI_GET_OBJECT_TYPE(*top_object_list) != ACPI_TYPE_PACKAGE) {
+ ACPI_ERROR((AE_INFO,
+ "(PRT[%X]) Need sub-package, found %s",
+ index,
+ acpi_ut_get_object_type_name
+ (*top_object_list)));
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ /* Each sub-package must be of length 4 */
+
+ if ((*top_object_list)->package.count != 4) {
+ ACPI_ERROR((AE_INFO,
+ "(PRT[%X]) Need package of length 4, found length %d",
+ index, (*top_object_list)->package.count));
+ return_ACPI_STATUS(AE_AML_PACKAGE_LIMIT);
+ }
+
+ /*
+ * Dereference the sub-package.
+ * The sub_object_list will now point to an array of the four IRQ
+ * elements: [Address, Pin, Source, source_index]
+ */
+ sub_object_list = (*top_object_list)->package.elements;
+
+ /* 1) First subobject: Dereference the PRT.Address */
+
+ obj_desc = sub_object_list[0];
+ if (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER) {
+ ACPI_ERROR((AE_INFO,
+ "(PRT[%X].Address) Need Integer, found %s",
+ index,
+ acpi_ut_get_object_type_name(obj_desc)));
+ return_ACPI_STATUS(AE_BAD_DATA);
+ }
+
+ user_prt->address = obj_desc->integer.value;
+
+ /* 2) Second subobject: Dereference the PRT.Pin */
+
+ obj_desc = sub_object_list[1];
+ if (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER) {
+ ACPI_ERROR((AE_INFO,
+ "(PRT[%X].Pin) Need Integer, found %s",
+ index,
+ acpi_ut_get_object_type_name(obj_desc)));
+ return_ACPI_STATUS(AE_BAD_DATA);
+ }
+
+ /*
+ * If the BIOS erroneously reversed the _PRT source_name and source_index,
+ * swap the indices used to read them (interpreter slack mode only).
+ */
+ if (ACPI_GET_OBJECT_TYPE(sub_object_list[3]) !=
+ ACPI_TYPE_INTEGER) {
+ if (acpi_gbl_enable_interpreter_slack) {
+ source_name_index = 3;
+ source_index_index = 2;
+ printk(KERN_WARNING
+ "ACPI: Handling Garbled _PRT entry\n");
+ } else {
+ ACPI_ERROR((AE_INFO,
+ "(PRT[%X].source_index) Need Integer, found %s",
+ index,
+ acpi_ut_get_object_type_name
+ (sub_object_list[3])));
+ return_ACPI_STATUS(AE_BAD_DATA);
+ }
+ }
+
+ user_prt->pin = (u32) obj_desc->integer.value;
+
+ /*
+ * If the BIOS has erroneously reversed the _PRT source_name (index 2)
+ * and the source_index (index 3), fix it. _PRT is important enough to
+ * work around this BIOS error. This also provides compatibility with
+ * other ACPI implementations.
+ */
+ obj_desc = sub_object_list[3];
+ if (!obj_desc
+ || (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER)) {
+ sub_object_list[3] = sub_object_list[2];
+ sub_object_list[2] = obj_desc;
+
+ ACPI_WARNING((AE_INFO,
+ "(PRT[%X].Source) SourceName and SourceIndex are reversed, fixed",
+ index));
+ }
+
+ /*
+ * 3) Third subobject: Dereference the PRT.source_name
+ * The name may be unresolved (slack mode), so allow a null object
+ */
+ obj_desc = sub_object_list[source_name_index];
+ if (obj_desc) {
+ switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+ case ACPI_TYPE_LOCAL_REFERENCE:
+
+ if (obj_desc->reference.class !=
+ ACPI_REFCLASS_NAME) {
+ ACPI_ERROR((AE_INFO,
+ "(PRT[%X].Source) Need name, found Reference Class %X",
+ index,
+ obj_desc->reference.class));
+ return_ACPI_STATUS(AE_BAD_DATA);
+ }
+
+ node = obj_desc->reference.node;
+
+ /* Use *remaining* length of the buffer as max for pathname */
+
+ path_buffer.length = output_buffer->length -
+ (u32) ((u8 *) user_prt->source -
+ (u8 *) output_buffer->pointer);
+ path_buffer.pointer = user_prt->source;
+
+ status =
+ acpi_ns_handle_to_pathname((acpi_handle)
+ node,
+ &path_buffer);
+
+ /* +1 to include null terminator */
+
+ user_prt->length +=
+ (u32) ACPI_STRLEN(user_prt->source) + 1;
+ break;
+
+ case ACPI_TYPE_STRING:
+
+ ACPI_STRCPY(user_prt->source,
+ obj_desc->string.pointer);
+
+ /*
+ * Add to the Length field the length of the string
+ * (add 1 for terminator)
+ */
+ user_prt->length += obj_desc->string.length + 1;
+ break;
+
+ case ACPI_TYPE_INTEGER:
+ /*
+ * If this is a number, then the Source Name is NULL. Since the
+ * entire buffer was zeroed out, we can leave this field alone.
+ *
+ * Add the length of the u32 NULL to the Length field
+ */
+ user_prt->length += sizeof(u32);
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO,
+ "(PRT[%X].Source) Need Ref/String/Integer, found %s",
+ index,
+ acpi_ut_get_object_type_name
+ (obj_desc)));
+ return_ACPI_STATUS(AE_BAD_DATA);
+ }
+ }
+
+ /* Now align the current length */
+
+ user_prt->length =
+ (u32) ACPI_ROUND_UP_TO_64BIT(user_prt->length);
+
+ /* 4) Fourth subobject: Dereference the PRT.source_index */
+
+ obj_desc = sub_object_list[source_index_index];
+ if (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER) {
+ ACPI_ERROR((AE_INFO,
+ "(PRT[%X].SourceIndex) Need Integer, found %s",
+ index,
+ acpi_ut_get_object_type_name(obj_desc)));
+ return_ACPI_STATUS(AE_BAD_DATA);
+ }
+
+ user_prt->source_index = (u32) obj_desc->integer.value;
+
+ /* Point to the next union acpi_operand_object in the top level package */
+
+ top_object_list++;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "OutputBuffer %p Length %X\n",
+ output_buffer->pointer, (u32) output_buffer->length));
+ return_ACPI_STATUS(AE_OK);
+}
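
On the consumer side, the table built here is walked entry by entry using the Length field written above, and the zero-filled terminator entry ends the walk. A hedged sketch of that pattern via the public acpi_get_irq_routing_table() entry point, which funnels into this routine (the function and output format are the editor's illustration):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <acpi/acpi.h>

/* Sketch: walk the _PRT table returned for a PCI bridge handle */
static acpi_status example_walk_prt(acpi_handle handle)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_pci_routing_table *entry;
	acpi_status status;

	status = acpi_get_irq_routing_table(handle, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	/* Entries are variable length; a zero-length entry terminates */
	for (entry = buffer.pointer; entry->length;
	     entry = ACPI_ADD_PTR(struct acpi_pci_routing_table, entry,
				  entry->length))
		printk(KERN_DEBUG "PRT %04llx pin %u -> %s[%u]\n",
		       (unsigned long long)entry->address, entry->pin,
		       entry->source[0] ? entry->source : "(GSI)",
		       entry->source_index);

	kfree(buffer.pointer);
	return AE_OK;
}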
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_create_aml_resources
+ *
+ * PARAMETERS: linked_list_buffer - Pointer to the resource linked list
+ * output_buffer - Pointer to the user's buffer
+ *
+ * RETURN: Status AE_OK if okay, else a valid acpi_status code.
+ * If the output_buffer is too small, the error will be
+ * AE_BUFFER_OVERFLOW and output_buffer->length will contain
+ * the required buffer size.
+ *
+ * DESCRIPTION: Takes the linked list of device resources and
+ * creates a bytestream to be used as input for the
+ * _SRS control method.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer,
+ struct acpi_buffer *output_buffer)
+{
+ acpi_status status;
+ acpi_size aml_size_needed = 0;
+
+ ACPI_FUNCTION_TRACE(rs_create_aml_resources);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "LinkedListBuffer = %p\n",
+ linked_list_buffer));
+
+ /*
+ * Params already validated, so we don't re-validate here
+ *
+ * Pass the linked_list_buffer to acpi_rs_get_aml_length, which
+ * calculates the buffer size needed for the byte stream.
+ */
+ status = acpi_rs_get_aml_length(linked_list_buffer, &aml_size_needed);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlSizeNeeded=%X, %s\n",
+ (u32) aml_size_needed,
+ acpi_format_exception(status)));
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Validate/Allocate/Clear caller buffer */
+
+ status = acpi_ut_initialize_buffer(output_buffer, aml_size_needed);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Do the conversion */
+
+ status =
+ acpi_rs_convert_resources_to_aml(linked_list_buffer,
+ aml_size_needed,
+ output_buffer->pointer);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "OutputBuffer %p Length %X\n",
+ output_buffer->pointer, (u32) output_buffer->length));
+ return_ACPI_STATUS(AE_OK);
+}
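
All three creators in this file share the same shape: measure the output size first, let acpi_ut_initialize_buffer() validate, allocate and zero the caller's buffer, then run the actual conversion into it. A library-agnostic sketch of that two-pass pattern, with every name below hypothetical:

#include <stdlib.h>

/* Pass 1 computes the size; pass 2 fills a zeroed buffer of that size */
typedef int (*measure_fn)(const void *in, size_t *size_needed);
typedef int (*convert_fn)(const void *in, void *out, size_t size);

static int two_pass_convert(const void *in, void **out, size_t *out_size,
			    measure_fn measure, convert_fn convert)
{
	size_t needed = 0;
	int rc;

	rc = measure(in, &needed);	/* pass 1: size only */
	if (rc)
		return rc;

	*out = calloc(1, needed);	/* validate/allocate/clear */
	if (!*out)
		return -1;

	rc = convert(in, *out, needed);	/* pass 2: do the conversion */
	if (rc) {
		free(*out);
		*out = NULL;
		return rc;
	}

	*out_size = needed;
	return 0;
}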
diff --git a/drivers/acpi/resources/rsdump.c b/drivers/acpi/resources/rsdump.c
new file mode 100644
index 0000000..6bbbb7b
--- /dev/null
+++ b/drivers/acpi/resources/rsdump.c
@@ -0,0 +1,770 @@
+/*******************************************************************************
+ *
+ * Module Name: rsdump - Functions to display the resource structures
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acresrc.h>
+
+#define _COMPONENT ACPI_RESOURCES
+ACPI_MODULE_NAME("rsdump")
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+/* Local prototypes */
+static void acpi_rs_out_string(char *title, char *value);
+
+static void acpi_rs_out_integer8(char *title, u8 value);
+
+static void acpi_rs_out_integer16(char *title, u16 value);
+
+static void acpi_rs_out_integer32(char *title, u32 value);
+
+static void acpi_rs_out_integer64(char *title, u64 value);
+
+static void acpi_rs_out_title(char *title);
+
+static void acpi_rs_dump_byte_list(u16 length, u8 * data);
+
+static void acpi_rs_dump_dword_list(u8 length, u32 * data);
+
+static void acpi_rs_dump_short_byte_list(u8 length, u8 * data);
+
+static void
+acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source);
+
+static void acpi_rs_dump_address_common(union acpi_resource_data *resource);
+
+static void
+acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table);
+
+#define ACPI_RSD_OFFSET(f) (u8) ACPI_OFFSET (union acpi_resource_data,f)
+#define ACPI_PRT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_pci_routing_table,f)
+#define ACPI_RSD_TABLE_SIZE(name) (sizeof(name) / sizeof (struct acpi_rsdump_info))
+
+/*******************************************************************************
+ *
+ * Resource Descriptor info tables
+ *
+ * Note: The first table entry must be a Title or Literal and must contain
+ * the table length (number of table entries)
+ *
+ ******************************************************************************/
+
+struct acpi_rsdump_info acpi_rs_dump_irq[7] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_irq), "IRQ", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.descriptor_length),
+ "Descriptor Length", NULL},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.triggering), "Triggering",
+ acpi_gbl_he_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.polarity), "Polarity",
+ acpi_gbl_ll_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.sharable), "Sharing",
+ acpi_gbl_shr_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.interrupt_count),
+ "Interrupt Count", NULL},
+ {ACPI_RSD_SHORTLIST, ACPI_RSD_OFFSET(irq.interrupts[0]),
+ "Interrupt List", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_dma[6] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_dma), "DMA", NULL},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(dma.type), "Speed",
+ acpi_gbl_typ_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(dma.bus_master), "Mastering",
+ acpi_gbl_bm_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(dma.transfer), "Transfer Type",
+ acpi_gbl_siz_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(dma.channel_count), "Channel Count",
+ NULL},
+ {ACPI_RSD_SHORTLIST, ACPI_RSD_OFFSET(dma.channels[0]), "Channel List",
+ NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_start_dpf[4] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_start_dpf),
+ "Start-Dependent-Functions", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(start_dpf.descriptor_length),
+ "Descriptor Length", NULL},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(start_dpf.compatibility_priority),
+ "Compatibility Priority", acpi_gbl_config_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(start_dpf.performance_robustness),
+ "Performance/Robustness", acpi_gbl_config_decode}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_end_dpf[1] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_end_dpf),
+ "End-Dependent-Functions", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_io[6] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_io), "I/O", NULL},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(io.io_decode), "Address Decoding",
+ acpi_gbl_io_decode},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(io.minimum), "Address Minimum", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(io.maximum), "Address Maximum", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(io.alignment), "Alignment", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(io.address_length), "Address Length",
+ NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_fixed_io[3] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_io),
+ "Fixed I/O", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_io.address), "Address", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(fixed_io.address_length),
+ "Address Length", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_vendor[3] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_vendor),
+ "Vendor Specific", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(vendor.byte_length), "Length", NULL},
+ {ACPI_RSD_LONGLIST, ACPI_RSD_OFFSET(vendor.byte_data[0]), "Vendor Data",
+ NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_end_tag[1] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_end_tag), "EndTag",
+ NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_memory24[6] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory24),
+ "24-Bit Memory Range", NULL},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(memory24.write_protect),
+ "Write Protect", acpi_gbl_rw_decode},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.minimum), "Address Minimum",
+ NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.maximum), "Address Maximum",
+ NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.alignment), "Alignment",
+ NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.address_length),
+ "Address Length", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_memory32[6] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory32),
+ "32-Bit Memory Range", NULL},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(memory32.write_protect),
+ "Write Protect", acpi_gbl_rw_decode},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.minimum), "Address Minimum",
+ NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.maximum), "Address Maximum",
+ NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.alignment), "Alignment",
+ NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.address_length),
+ "Address Length", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_fixed_memory32[4] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_memory32),
+ "32-Bit Fixed Memory Range", NULL},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(fixed_memory32.write_protect),
+ "Write Protect", acpi_gbl_rw_decode},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(fixed_memory32.address), "Address",
+ NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(fixed_memory32.address_length),
+ "Address Length", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_address16[8] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address16),
+ "16-Bit WORD Address Space", NULL},
+ {ACPI_RSD_ADDRESS, 0, NULL, NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.granularity), "Granularity",
+ NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.minimum), "Address Minimum",
+ NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.maximum), "Address Maximum",
+ NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.translation_offset),
+ "Translation Offset", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address_length),
+ "Address Length", NULL},
+ {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address16.resource_source), NULL, NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_address32[8] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address32),
+ "32-Bit DWORD Address Space", NULL},
+ {ACPI_RSD_ADDRESS, 0, NULL, NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.granularity), "Granularity",
+ NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.minimum), "Address Minimum",
+ NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.maximum), "Address Maximum",
+ NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.translation_offset),
+ "Translation Offset", NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address_length),
+ "Address Length", NULL},
+ {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address32.resource_source), NULL, NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_address64[8] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address64),
+ "64-Bit QWORD Address Space", NULL},
+ {ACPI_RSD_ADDRESS, 0, NULL, NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.granularity), "Granularity",
+ NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.minimum), "Address Minimum",
+ NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.maximum), "Address Maximum",
+ NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.translation_offset),
+ "Translation Offset", NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address_length),
+ "Address Length", NULL},
+ {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address64.resource_source), NULL, NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_ext_address64[8] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_address64),
+ "64-Bit Extended Address Space", NULL},
+ {ACPI_RSD_ADDRESS, 0, NULL, NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.granularity),
+ "Granularity", NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.minimum),
+ "Address Minimum", NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.maximum),
+ "Address Maximum", NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.translation_offset),
+ "Translation Offset", NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address_length),
+ "Address Length", NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.type_specific),
+ "Type-Specific Attribute", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_ext_irq[8] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_irq),
+ "Extended IRQ", NULL},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.producer_consumer),
+ "Type", acpi_gbl_consume_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.triggering),
+ "Triggering", acpi_gbl_he_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.polarity), "Polarity",
+ acpi_gbl_ll_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.sharable), "Sharing",
+ acpi_gbl_shr_decode},
+ {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(extended_irq.resource_source), NULL,
+ NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(extended_irq.interrupt_count),
+ "Interrupt Count", NULL},
+ {ACPI_RSD_DWORDLIST, ACPI_RSD_OFFSET(extended_irq.interrupts[0]),
+ "Interrupt List", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_generic_reg[6] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_generic_reg),
+ "Generic Register", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.space_id), "Space ID",
+ NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.bit_width), "Bit Width",
+ NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.bit_offset), "Bit Offset",
+ NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.access_size),
+ "Access Size", NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(generic_reg.address), "Address", NULL}
+};
+
+/*
+ * Tables used for common address descriptor flag fields
+ */
+static struct acpi_rsdump_info acpi_rs_dump_general_flags[5] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_general_flags), NULL,
+ NULL},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.producer_consumer),
+ "Consumer/Producer", acpi_gbl_consume_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.decode), "Address Decode",
+ acpi_gbl_dec_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.min_address_fixed),
+ "Min Relocatability", acpi_gbl_min_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.max_address_fixed),
+ "Max Relocatability", acpi_gbl_max_decode}
+};
+
+static struct acpi_rsdump_info acpi_rs_dump_memory_flags[5] = {
+ {ACPI_RSD_LITERAL, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory_flags),
+ "Resource Type", (void *)"Memory Range"},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.mem.write_protect),
+ "Write Protect", acpi_gbl_rw_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.mem.caching),
+ "Caching", acpi_gbl_mem_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.mem.range_type),
+ "Range Type", acpi_gbl_mtp_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.mem.translation),
+ "Translation", acpi_gbl_ttp_decode}
+};
+
+static struct acpi_rsdump_info acpi_rs_dump_io_flags[4] = {
+ {ACPI_RSD_LITERAL, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_io_flags),
+ "Resource Type", (void *)"I/O Range"},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.io.range_type),
+ "Range Type", acpi_gbl_rng_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.io.translation),
+ "Translation", acpi_gbl_ttp_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.io.translation_type),
+ "Translation Type", acpi_gbl_trs_decode}
+};
+
+/*
+ * Table used to dump _PRT contents
+ */
+static struct acpi_rsdump_info acpi_rs_dump_prt[5] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_prt), NULL, NULL},
+ {ACPI_RSD_UINT64, ACPI_PRT_OFFSET(address), "Address", NULL},
+ {ACPI_RSD_UINT32, ACPI_PRT_OFFSET(pin), "Pin", NULL},
+ {ACPI_RSD_STRING, ACPI_PRT_OFFSET(source[0]), "Source", NULL},
+ {ACPI_RSD_UINT32, ACPI_PRT_OFFSET(source_index), "Source Index", NULL}
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_dump_descriptor
+ *
+ * PARAMETERS: Resource - Pointer to the resource structure to dump
+ * Table - Pointer to the dump format table
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Dump a resource descriptor, one line per entry in the
+ * opcode-driven format table
+ *
+ ******************************************************************************/
+
+static void
+acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
+{
+ u8 *target = NULL;
+ u8 *previous_target;
+ char *name;
+ u8 count;
+
+ /* First table entry must contain the table length (# of table entries) */
+
+ count = table->offset;
+
+ while (count) {
+ previous_target = target;
+ target = ACPI_ADD_PTR(u8, resource, table->offset);
+ name = table->name;
+
+ switch (table->opcode) {
+ case ACPI_RSD_TITLE:
+ /*
+ * Optional resource title
+ */
+ if (table->name) {
+ acpi_os_printf("%s Resource\n", name);
+ }
+ break;
+
+ /* Strings */
+
+ case ACPI_RSD_LITERAL:
+ acpi_rs_out_string(name,
+ ACPI_CAST_PTR(char, table->pointer));
+ break;
+
+ case ACPI_RSD_STRING:
+ acpi_rs_out_string(name, ACPI_CAST_PTR(char, target));
+ break;
+
+ /* Data items, 8/16/32/64 bit */
+
+ case ACPI_RSD_UINT8:
+ acpi_rs_out_integer8(name, ACPI_GET8(target));
+ break;
+
+ case ACPI_RSD_UINT16:
+ acpi_rs_out_integer16(name, ACPI_GET16(target));
+ break;
+
+ case ACPI_RSD_UINT32:
+ acpi_rs_out_integer32(name, ACPI_GET32(target));
+ break;
+
+ case ACPI_RSD_UINT64:
+ acpi_rs_out_integer64(name, ACPI_GET64(target));
+ break;
+
+ /* Flags: 1-bit and 2-bit flags supported */
+
+ case ACPI_RSD_1BITFLAG:
+ acpi_rs_out_string(name,
+ ACPI_CAST_PTR(char, table->pointer[*target & 0x01]));
+ break;
+
+ case ACPI_RSD_2BITFLAG:
+ acpi_rs_out_string(name,
+ ACPI_CAST_PTR(char, table->pointer[*target & 0x03]));
+ break;
+
+ case ACPI_RSD_SHORTLIST:
+ /*
+ * Short byte list (single line output) for DMA and IRQ resources
+ * Note: The list length is obtained from the previous table entry
+ */
+ if (previous_target) {
+ acpi_rs_out_title(name);
+ acpi_rs_dump_short_byte_list(*previous_target,
+ target);
+ }
+ break;
+
+ case ACPI_RSD_LONGLIST:
+ /*
+ * Long byte list for Vendor resource data
+ * Note: The list length is obtained from the previous table entry
+ */
+ if (previous_target) {
+ acpi_rs_dump_byte_list(ACPI_GET16
+ (previous_target),
+ target);
+ }
+ break;
+
+ case ACPI_RSD_DWORDLIST:
+ /*
+ * Dword list for Extended Interrupt resources
+ * Note: The list length is obtained from the previous table entry
+ */
+ if (previous_target) {
+ acpi_rs_dump_dword_list(*previous_target,
+ ACPI_CAST_PTR(u32,
+ target));
+ }
+ break;
+
+ case ACPI_RSD_ADDRESS:
+ /*
+ * Common flags for all Address resources
+ */
+ acpi_rs_dump_address_common(ACPI_CAST_PTR
+ (union acpi_resource_data,
+ target));
+ break;
+
+ case ACPI_RSD_SOURCE:
+ /*
+ * Optional resource_source for Address resources
+ */
+ acpi_rs_dump_resource_source(ACPI_CAST_PTR(struct
+ acpi_resource_source,
+ target));
+ break;
+
+ default:
+ acpi_os_printf("**** Invalid table opcode [%X] ****\n",
+ table->opcode);
+ return;
+ }
+
+ table++;
+ count--;
+ }
+}
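
The routine above is entirely table driven: entry 0 of each acpi_rsdump_info table stores the number of entries in its offset field, and every later entry pairs an opcode with a field offset, a label, and an optional decode-string table. A standalone, userspace miniature of that idea (illustrative only; all mini_* names are hypothetical, and the real code additionally handles flags, lists, and resource sources) could look like this:

/* Minimal sketch of a table-driven dumper, not part of this patch */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

enum mini_opcode { MINI_TITLE, MINI_UINT8, MINI_UINT16 };

struct mini_rsdump_info {
	enum mini_opcode opcode;
	size_t offset;		/* field offset, or entry count in entry 0 */
	const char *name;
};

struct mini_io {
	uint8_t decode;
	uint16_t minimum;
};

/* Entry 0 carries the table length (3), exactly like acpi_rsdump_info */

static const struct mini_rsdump_info mini_dump_io[3] = {
	{MINI_TITLE, 3, "IO"},
	{MINI_UINT8, offsetof(struct mini_io, decode), "Decode"},
	{MINI_UINT16, offsetof(struct mini_io, minimum), "Minimum"},
};

static void mini_dump(const void *resource, const struct mini_rsdump_info *table)
{
	size_t count = table->offset;	/* first entry holds the count */

	while (count--) {
		const uint8_t *target = (const uint8_t *)resource + table->offset;

		switch (table->opcode) {
		case MINI_TITLE:
			printf("%s Resource\n", table->name);
			break;
		case MINI_UINT8:
			printf("%27s : %2.2X\n", table->name, (unsigned)*target);
			break;
		case MINI_UINT16:
			printf("%27s : %4.4X\n", table->name,
			       (unsigned)*(const uint16_t *)target);
			break;
		}
		table++;
	}
}

int main(void)
{
	struct mini_io io = { 1, 0x3F8 };

	mini_dump(&io, mini_dump_io);
	return 0;
}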
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_dump_resource_source
+ *
+ * PARAMETERS: resource_source - Pointer to a Resource Source struct
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Common routine for dumping the optional resource_source and the
+ * corresponding resource_source_index.
+ *
+ ******************************************************************************/
+
+static void
+acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source)
+{
+ ACPI_FUNCTION_ENTRY();
+
+ if (resource_source->index == 0xFF) {
+ return;
+ }
+
+ acpi_rs_out_integer8("Resource Source Index", resource_source->index);
+
+ acpi_rs_out_string("Resource Source",
+ resource_source->string_ptr ?
+ resource_source->string_ptr : "[Not Specified]");
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_dump_address_common
+ *
+ * PARAMETERS: Resource - Pointer to an internal resource descriptor
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Dump the fields that are common to all Address resource
+ * descriptors
+ *
+ ******************************************************************************/
+
+static void acpi_rs_dump_address_common(union acpi_resource_data *resource)
+{
+ ACPI_FUNCTION_ENTRY();
+
+ /* Decode the type-specific flags */
+
+ switch (resource->address.resource_type) {
+ case ACPI_MEMORY_RANGE:
+
+ acpi_rs_dump_descriptor(resource, acpi_rs_dump_memory_flags);
+ break;
+
+ case ACPI_IO_RANGE:
+
+ acpi_rs_dump_descriptor(resource, acpi_rs_dump_io_flags);
+ break;
+
+ case ACPI_BUS_NUMBER_RANGE:
+
+ acpi_rs_out_string("Resource Type", "Bus Number Range");
+ break;
+
+ default:
+
+ acpi_rs_out_integer8("Resource Type",
+ (u8) resource->address.resource_type);
+ break;
+ }
+
+ /* Decode the general flags */
+
+ acpi_rs_dump_descriptor(resource, acpi_rs_dump_general_flags);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_dump_resource_list
+ *
+ * PARAMETERS: resource_list - Pointer to a resource descriptor list
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Walk a resource descriptor list, dispatching each descriptor
+ * to its dump routine. Terminates on an END_TAG descriptor.
+ *
+ ******************************************************************************/
+
+void acpi_rs_dump_resource_list(struct acpi_resource *resource_list)
+{
+ u32 count = 0;
+ u32 type;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (!(acpi_dbg_level & ACPI_LV_RESOURCES)
+ || !(_COMPONENT & acpi_dbg_layer)) {
+ return;
+ }
+
+ /* Walk list and dump all resource descriptors (END_TAG terminates) */
+
+ do {
+ acpi_os_printf("\n[%02X] ", count);
+ count++;
+
+ /* Validate Type before dispatch */
+
+ type = resource_list->type;
+ if (type > ACPI_RESOURCE_TYPE_MAX) {
+ acpi_os_printf
+ ("Invalid descriptor type (%X) in resource list\n",
+ resource_list->type);
+ return;
+ }
+
+ /* Dump the resource descriptor */
+
+ acpi_rs_dump_descriptor(&resource_list->data,
+ acpi_gbl_dump_resource_dispatch[type]);
+
+ /* Point to the next resource structure */
+
+ resource_list =
+ ACPI_ADD_PTR(struct acpi_resource, resource_list,
+ resource_list->length);
+
+ /* Exit when END_TAG descriptor is reached */
+
+ } while (type != ACPI_RESOURCE_TYPE_END_TAG);
+}
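
Callers that hold the same kind of END_TAG-terminated buffer (for example, one produced from a _CRS evaluation) can walk it with the identical pattern. A hypothetical helper, assuming the usual <acpi/acpi.h> definitions and not part of this patch, might count the descriptors like this:

/* Illustrative sketch only: count descriptors in an END_TAG-terminated list */
static u32 count_resources(struct acpi_resource *resource)
{
	u32 count = 0;

	while (resource->type != ACPI_RESOURCE_TYPE_END_TAG) {
		if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
			return count;	/* corrupt list, stop walking */
		}
		count++;

		/* resource->length already includes any alignment padding */

		resource = ACPI_ADD_PTR(struct acpi_resource, resource,
					resource->length);
	}
	return count;
}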
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_dump_irq_list
+ *
+ * PARAMETERS: route_table - Pointer to the routing table to dump.
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print IRQ routing table
+ *
+ ******************************************************************************/
+
+void acpi_rs_dump_irq_list(u8 * route_table)
+{
+ struct acpi_pci_routing_table *prt_element;
+ u8 count;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (!(acpi_dbg_level & ACPI_LV_RESOURCES)
+ || !(_COMPONENT & acpi_dbg_layer)) {
+ return;
+ }
+
+ prt_element = ACPI_CAST_PTR(struct acpi_pci_routing_table, route_table);
+
+ /* Dump all table elements; exit on a zero-length element */
+
+ for (count = 0; prt_element->length; count++) {
+ acpi_os_printf("\n[%02X] PCI IRQ Routing Table Package\n",
+ count);
+ acpi_rs_dump_descriptor(prt_element, acpi_rs_dump_prt);
+
+ prt_element = ACPI_ADD_PTR(struct acpi_pci_routing_table,
+ prt_element, prt_element->length);
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_out*
+ *
+ * PARAMETERS: Title - Name of the resource field
+ * Value - Value of the resource field
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Miscellaneous helper functions to consistently format the
+ * output of the resource dump routines
+ *
+ ******************************************************************************/
+
+static void acpi_rs_out_string(char *title, char *value)
+{
+ acpi_os_printf("%27s : %s", title, value);
+ if (!*value) {
+ acpi_os_printf("[NULL NAMESTRING]");
+ }
+ acpi_os_printf("\n");
+}
+
+static void acpi_rs_out_integer8(char *title, u8 value)
+{
+ acpi_os_printf("%27s : %2.2X\n", title, value);
+}
+
+static void acpi_rs_out_integer16(char *title, u16 value)
+{
+ acpi_os_printf("%27s : %4.4X\n", title, value);
+}
+
+static void acpi_rs_out_integer32(char *title, u32 value)
+{
+ acpi_os_printf("%27s : %8.8X\n", title, value);
+}
+
+static void acpi_rs_out_integer64(char *title, u64 value)
+{
+ acpi_os_printf("%27s : %8.8X%8.8X\n", title, ACPI_FORMAT_UINT64(value));
+}
+
+static void acpi_rs_out_title(char *title)
+{
+ acpi_os_printf("%27s : ", title);
+}
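
As a worked example of the formatting these helpers enforce (illustrative only; the functions are static to this file), two calls of the kind made throughout the dump tables above produce right-aligned "name : value" pairs:

	acpi_rs_out_string("Resource Type", "I/O Range");
	acpi_rs_out_integer16("Minimum", 0x03F8);
	/*
	 * With the "%27s : ..." formats above, each title is right-aligned
	 * in a 27-character field, e.g.:
	 *     ... Resource Type : I/O Range
	 *     ...       Minimum : 03F8
	 */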
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_dump*_list
+ *
+ * PARAMETERS: Length - Number of elements in the list
+ * Data - Start of the list
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Miscellaneous functions to dump lists of raw data
+ *
+ ******************************************************************************/
+
+static void acpi_rs_dump_byte_list(u16 length, u8 * data)
+{
+ u8 i;
+
+ for (i = 0; i < length; i++) {
+ acpi_os_printf("%25s%2.2X : %2.2X\n", "Byte", i, data[i]);
+ }
+}
+
+static void acpi_rs_dump_short_byte_list(u8 length, u8 * data)
+{
+ u8 i;
+
+ for (i = 0; i < length; i++) {
+ acpi_os_printf("%X ", data[i]);
+ }
+ acpi_os_printf("\n");
+}
+
+static void acpi_rs_dump_dword_list(u8 length, u32 * data)
+{
+ u8 i;
+
+ for (i = 0; i < length; i++) {
+ acpi_os_printf("%25s%2.2X : %8.8X\n", "Dword", i, data[i]);
+ }
+}
+
+#endif
diff --git a/drivers/acpi/resources/rsinfo.c b/drivers/acpi/resources/rsinfo.c
new file mode 100644
index 0000000..3f0a1fe
--- /dev/null
+++ b/drivers/acpi/resources/rsinfo.c
@@ -0,0 +1,205 @@
+/*******************************************************************************
+ *
+ * Module Name: rsinfo - Dispatch and Info tables
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acresrc.h>
+
+#define _COMPONENT ACPI_RESOURCES
+ACPI_MODULE_NAME("rsinfo")
+
+/*
+ * Resource dispatch and information tables. Any new resource types (either
+ * Large or Small) must be reflected in each of these tables, so they are here
+ * in one place.
+ *
+ * The tables for Large descriptors are indexed by bits 6:0 of the AML
+ * descriptor type byte. The tables for Small descriptors are indexed by
+ * bits 6:3 of the descriptor byte. The tables for internal resource
+ * descriptors are indexed by the acpi_resource_type field.
+ */
+/* Dispatch table for resource-to-AML (Set Resource) conversion functions */
+struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[] = {
+ acpi_rs_set_irq, /* 0x00, ACPI_RESOURCE_TYPE_IRQ */
+ acpi_rs_convert_dma, /* 0x01, ACPI_RESOURCE_TYPE_DMA */
+ acpi_rs_set_start_dpf, /* 0x02, ACPI_RESOURCE_TYPE_START_DEPENDENT */
+ acpi_rs_convert_end_dpf, /* 0x03, ACPI_RESOURCE_TYPE_END_DEPENDENT */
+ acpi_rs_convert_io, /* 0x04, ACPI_RESOURCE_TYPE_IO */
+ acpi_rs_convert_fixed_io, /* 0x05, ACPI_RESOURCE_TYPE_FIXED_IO */
+ acpi_rs_set_vendor, /* 0x06, ACPI_RESOURCE_TYPE_VENDOR */
+ acpi_rs_convert_end_tag, /* 0x07, ACPI_RESOURCE_TYPE_END_TAG */
+ acpi_rs_convert_memory24, /* 0x08, ACPI_RESOURCE_TYPE_MEMORY24 */
+ acpi_rs_convert_memory32, /* 0x09, ACPI_RESOURCE_TYPE_MEMORY32 */
+ acpi_rs_convert_fixed_memory32, /* 0x0A, ACPI_RESOURCE_TYPE_FIXED_MEMORY32 */
+ acpi_rs_convert_address16, /* 0x0B, ACPI_RESOURCE_TYPE_ADDRESS16 */
+ acpi_rs_convert_address32, /* 0x0C, ACPI_RESOURCE_TYPE_ADDRESS32 */
+ acpi_rs_convert_address64, /* 0x0D, ACPI_RESOURCE_TYPE_ADDRESS64 */
+ acpi_rs_convert_ext_address64, /* 0x0E, ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
+ acpi_rs_convert_ext_irq, /* 0x0F, ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
+ acpi_rs_convert_generic_reg /* 0x10, ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+};
+
+/* Dispatch tables for AML-to-resource (Get Resource) conversion functions */
+
+struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[] = {
+ /* Small descriptors */
+
+ NULL, /* 0x00, Reserved */
+ NULL, /* 0x01, Reserved */
+ NULL, /* 0x02, Reserved */
+ NULL, /* 0x03, Reserved */
+ acpi_rs_get_irq, /* 0x04, ACPI_RESOURCE_NAME_IRQ */
+ acpi_rs_convert_dma, /* 0x05, ACPI_RESOURCE_NAME_DMA */
+ acpi_rs_get_start_dpf, /* 0x06, ACPI_RESOURCE_NAME_START_DEPENDENT */
+ acpi_rs_convert_end_dpf, /* 0x07, ACPI_RESOURCE_NAME_END_DEPENDENT */
+ acpi_rs_convert_io, /* 0x08, ACPI_RESOURCE_NAME_IO */
+ acpi_rs_convert_fixed_io, /* 0x09, ACPI_RESOURCE_NAME_FIXED_IO */
+ NULL, /* 0x0A, Reserved */
+ NULL, /* 0x0B, Reserved */
+ NULL, /* 0x0C, Reserved */
+ NULL, /* 0x0D, Reserved */
+ acpi_rs_get_vendor_small, /* 0x0E, ACPI_RESOURCE_NAME_VENDOR_SMALL */
+ acpi_rs_convert_end_tag, /* 0x0F, ACPI_RESOURCE_NAME_END_TAG */
+
+ /* Large descriptors */
+
+ NULL, /* 0x00, Reserved */
+ acpi_rs_convert_memory24, /* 0x01, ACPI_RESOURCE_NAME_MEMORY24 */
+ acpi_rs_convert_generic_reg, /* 0x02, ACPI_RESOURCE_NAME_GENERIC_REGISTER */
+ NULL, /* 0x03, Reserved */
+ acpi_rs_get_vendor_large, /* 0x04, ACPI_RESOURCE_NAME_VENDOR_LARGE */
+ acpi_rs_convert_memory32, /* 0x05, ACPI_RESOURCE_NAME_MEMORY32 */
+ acpi_rs_convert_fixed_memory32, /* 0x06, ACPI_RESOURCE_NAME_FIXED_MEMORY32 */
+ acpi_rs_convert_address32, /* 0x07, ACPI_RESOURCE_NAME_ADDRESS32 */
+ acpi_rs_convert_address16, /* 0x08, ACPI_RESOURCE_NAME_ADDRESS16 */
+ acpi_rs_convert_ext_irq, /* 0x09, ACPI_RESOURCE_NAME_EXTENDED_IRQ */
+ acpi_rs_convert_address64, /* 0x0A, ACPI_RESOURCE_NAME_ADDRESS64 */
+ acpi_rs_convert_ext_address64 /* 0x0B, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 */
+};
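
The comment at the top of this file describes how the AML descriptor type byte selects one of these entries: Small descriptors use bits 6:3, while Large descriptors use bits 6:0 and follow the sixteen Small entries. A sketch of that index calculation (illustrative only; the real index is produced by the acpi_ut_* resource-walking code and may differ in detail) is:

/* Hypothetical example, not part of this patch */
static u8 example_resource_index(u8 descriptor_type)
{
	if (descriptor_type & 0x80) {
		/*
		 * Large descriptor: bits 6:0 hold the resource name.
		 * Large entries follow the 16 Small entries in the table.
		 */
		return (u8)(16 + (descriptor_type & 0x7F));
	}

	/* Small descriptor: bits 6:3 hold the name, bits 2:0 the length */

	return (u8)((descriptor_type >> 3) & 0x0F);
}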
+
+#ifdef ACPI_FUTURE_USAGE
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
+/* Dispatch table for resource dump functions */
+
+struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[] = {
+ acpi_rs_dump_irq, /* ACPI_RESOURCE_TYPE_IRQ */
+ acpi_rs_dump_dma, /* ACPI_RESOURCE_TYPE_DMA */
+ acpi_rs_dump_start_dpf, /* ACPI_RESOURCE_TYPE_START_DEPENDENT */
+ acpi_rs_dump_end_dpf, /* ACPI_RESOURCE_TYPE_END_DEPENDENT */
+ acpi_rs_dump_io, /* ACPI_RESOURCE_TYPE_IO */
+ acpi_rs_dump_fixed_io, /* ACPI_RESOURCE_TYPE_FIXED_IO */
+ acpi_rs_dump_vendor, /* ACPI_RESOURCE_TYPE_VENDOR */
+ acpi_rs_dump_end_tag, /* ACPI_RESOURCE_TYPE_END_TAG */
+ acpi_rs_dump_memory24, /* ACPI_RESOURCE_TYPE_MEMORY24 */
+ acpi_rs_dump_memory32, /* ACPI_RESOURCE_TYPE_MEMORY32 */
+ acpi_rs_dump_fixed_memory32, /* ACPI_RESOURCE_TYPE_FIXED_MEMORY32 */
+ acpi_rs_dump_address16, /* ACPI_RESOURCE_TYPE_ADDRESS16 */
+ acpi_rs_dump_address32, /* ACPI_RESOURCE_TYPE_ADDRESS32 */
+ acpi_rs_dump_address64, /* ACPI_RESOURCE_TYPE_ADDRESS64 */
+ acpi_rs_dump_ext_address64, /* ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
+ acpi_rs_dump_ext_irq, /* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
+ acpi_rs_dump_generic_reg, /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+};
+#endif
+
+#endif /* ACPI_FUTURE_USAGE */
+
+/*
+ * Base sizes for external AML resource descriptors, indexed by internal type.
+ * Includes size of the descriptor header (1 byte for small descriptors,
+ * 3 bytes for large descriptors)
+ */
+const u8 acpi_gbl_aml_resource_sizes[] = {
+ sizeof(struct aml_resource_irq), /* ACPI_RESOURCE_TYPE_IRQ (optional Byte 3 always created) */
+ sizeof(struct aml_resource_dma), /* ACPI_RESOURCE_TYPE_DMA */
+ sizeof(struct aml_resource_start_dependent), /* ACPI_RESOURCE_TYPE_START_DEPENDENT (optional Byte 1 always created) */
+ sizeof(struct aml_resource_end_dependent), /* ACPI_RESOURCE_TYPE_END_DEPENDENT */
+ sizeof(struct aml_resource_io), /* ACPI_RESOURCE_TYPE_IO */
+ sizeof(struct aml_resource_fixed_io), /* ACPI_RESOURCE_TYPE_FIXED_IO */
+ sizeof(struct aml_resource_vendor_small), /* ACPI_RESOURCE_TYPE_VENDOR */
+ sizeof(struct aml_resource_end_tag), /* ACPI_RESOURCE_TYPE_END_TAG */
+ sizeof(struct aml_resource_memory24), /* ACPI_RESOURCE_TYPE_MEMORY24 */
+ sizeof(struct aml_resource_memory32), /* ACPI_RESOURCE_TYPE_MEMORY32 */
+ sizeof(struct aml_resource_fixed_memory32), /* ACPI_RESOURCE_TYPE_FIXED_MEMORY32 */
+ sizeof(struct aml_resource_address16), /* ACPI_RESOURCE_TYPE_ADDRESS16 */
+ sizeof(struct aml_resource_address32), /* ACPI_RESOURCE_TYPE_ADDRESS32 */
+ sizeof(struct aml_resource_address64), /* ACPI_RESOURCE_TYPE_ADDRESS64 */
+ sizeof(struct aml_resource_extended_address64), /* ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
+ sizeof(struct aml_resource_extended_irq), /* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
+ sizeof(struct aml_resource_generic_register) /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+};
+
+const u8 acpi_gbl_resource_struct_sizes[] = {
+ /* Small descriptors */
+
+ 0,
+ 0,
+ 0,
+ 0,
+ ACPI_RS_SIZE(struct acpi_resource_irq),
+ ACPI_RS_SIZE(struct acpi_resource_dma),
+ ACPI_RS_SIZE(struct acpi_resource_start_dependent),
+ ACPI_RS_SIZE_MIN,
+ ACPI_RS_SIZE(struct acpi_resource_io),
+ ACPI_RS_SIZE(struct acpi_resource_fixed_io),
+ 0,
+ 0,
+ 0,
+ 0,
+ ACPI_RS_SIZE(struct acpi_resource_vendor),
+ ACPI_RS_SIZE_MIN,
+
+ /* Large descriptors */
+
+ 0,
+ ACPI_RS_SIZE(struct acpi_resource_memory24),
+ ACPI_RS_SIZE(struct acpi_resource_generic_register),
+ 0,
+ ACPI_RS_SIZE(struct acpi_resource_vendor),
+ ACPI_RS_SIZE(struct acpi_resource_memory32),
+ ACPI_RS_SIZE(struct acpi_resource_fixed_memory32),
+ ACPI_RS_SIZE(struct acpi_resource_address32),
+ ACPI_RS_SIZE(struct acpi_resource_address16),
+ ACPI_RS_SIZE(struct acpi_resource_extended_irq),
+ ACPI_RS_SIZE(struct acpi_resource_address64),
+ ACPI_RS_SIZE(struct acpi_resource_extended_address64)
+};
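
These two size tables are consulted when converting between the two representations: acpi_gbl_aml_resource_sizes gives the base external (AML) size for each internal resource type, while acpi_gbl_resource_struct_sizes appears to be laid out like the Get dispatch table above (Small entries followed by Large). A hypothetical lookup helper for the first table (illustrative only, not part of this patch) might be:

/* Base AML size for an internal descriptor; variable-length data adds to it */
static u8 example_aml_base_size(struct acpi_resource *resource)
{
	if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
		return 0;
	}
	return acpi_gbl_aml_resource_sizes[resource->type];
}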
diff --git a/drivers/acpi/resources/rsio.c b/drivers/acpi/resources/rsio.c
new file mode 100644
index 0000000..b66d42e
--- /dev/null
+++ b/drivers/acpi/resources/rsio.c
@@ -0,0 +1,289 @@
+/*******************************************************************************
+ *
+ * Module Name: rsio - IO and DMA resource descriptors
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acresrc.h>
+
+#define _COMPONENT ACPI_RESOURCES
+ACPI_MODULE_NAME("rsio")
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_io
+ *
+ ******************************************************************************/
+struct acpi_rsconvert_info acpi_rs_convert_io[5] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_IO,
+ ACPI_RS_SIZE(struct acpi_resource_io),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_io)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_IO,
+ sizeof(struct aml_resource_io),
+ 0},
+
+ /* Decode flag */
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.io.io_decode),
+ AML_OFFSET(io.flags),
+ 0},
+ /*
+ * These fields are contiguous in both the source and destination:
+ * Address Alignment
+ * Length
+ * Minimum Base Address
+ * Maximum Base Address
+ */
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.io.alignment),
+ AML_OFFSET(io.alignment),
+ 2},
+
+ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.io.minimum),
+ AML_OFFSET(io.minimum),
+ 2}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_fixed_io
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_fixed_io[4] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_FIXED_IO,
+ ACPI_RS_SIZE(struct acpi_resource_fixed_io),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_fixed_io)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_FIXED_IO,
+ sizeof(struct aml_resource_fixed_io),
+ 0},
+ /*
+ * These fields are contiguous in both the source and destination:
+ * Base Address
+ * Length
+ */
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.fixed_io.address_length),
+ AML_OFFSET(fixed_io.address_length),
+ 1},
+
+ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.fixed_io.address),
+ AML_OFFSET(fixed_io.address),
+ 1}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_generic_reg
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_generic_reg[4] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_GENERIC_REGISTER,
+ ACPI_RS_SIZE(struct acpi_resource_generic_register),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_generic_reg)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_GENERIC_REGISTER,
+ sizeof(struct aml_resource_generic_register),
+ 0},
+ /*
+ * These fields are contiguous in both the source and destination:
+ * Address Space ID
+ * Register Bit Width
+ * Register Bit Offset
+ * Access Size
+ */
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.generic_reg.space_id),
+ AML_OFFSET(generic_reg.address_space_id),
+ 4},
+
+ /* Get the Register Address */
+
+ {ACPI_RSC_MOVE64, ACPI_RS_OFFSET(data.generic_reg.address),
+ AML_OFFSET(generic_reg.address),
+ 1}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_end_dpf
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_end_dpf[2] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_END_DEPENDENT,
+ ACPI_RS_SIZE_MIN,
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_end_dpf)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_END_DEPENDENT,
+ sizeof(struct aml_resource_end_dependent),
+ 0}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_end_tag
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_end_tag[2] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_END_TAG,
+ ACPI_RS_SIZE_MIN,
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_end_tag)},
+
+ /*
+ * Note: The checksum field is set to zero, meaning that the resource
+ * data is treated as if the checksum operation succeeded.
+ * (ACPI Spec 1.0b Section 6.4.2.8)
+ */
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_END_TAG,
+ sizeof(struct aml_resource_end_tag),
+ 0}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_get_start_dpf
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_get_start_dpf[6] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_START_DEPENDENT,
+ ACPI_RS_SIZE(struct acpi_resource_start_dependent),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_get_start_dpf)},
+
+ /* Defaults for Compatibility and Performance priorities */
+
+ {ACPI_RSC_SET8, ACPI_RS_OFFSET(data.start_dpf.compatibility_priority),
+ ACPI_ACCEPTABLE_CONFIGURATION,
+ 2},
+
+ /* Get the descriptor length (0 or 1 for Start Dpf descriptor) */
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.start_dpf.descriptor_length),
+ AML_OFFSET(start_dpf.descriptor_type),
+ 0},
+
+ /* All done if there is no flag byte present in the descriptor */
+
+ {ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_AML_LENGTH, 0, 1},
+
+ /* Flag byte is present, get the flags */
+
+ {ACPI_RSC_2BITFLAG,
+ ACPI_RS_OFFSET(data.start_dpf.compatibility_priority),
+ AML_OFFSET(start_dpf.flags),
+ 0},
+
+ {ACPI_RSC_2BITFLAG,
+ ACPI_RS_OFFSET(data.start_dpf.performance_robustness),
+ AML_OFFSET(start_dpf.flags),
+ 2}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_set_start_dpf
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_set_start_dpf[10] = {
+ /* Start with a default descriptor of length 1 */
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_START_DEPENDENT,
+ sizeof(struct aml_resource_start_dependent),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_set_start_dpf)},
+
+ /* Set the default flag values */
+
+ {ACPI_RSC_2BITFLAG,
+ ACPI_RS_OFFSET(data.start_dpf.compatibility_priority),
+ AML_OFFSET(start_dpf.flags),
+ 0},
+
+ {ACPI_RSC_2BITFLAG,
+ ACPI_RS_OFFSET(data.start_dpf.performance_robustness),
+ AML_OFFSET(start_dpf.flags),
+ 2},
+ /*
+ * All done if the output descriptor length is required to be 1
+ * (i.e., optimization to 0 bytes cannot be attempted)
+ */
+ {ACPI_RSC_EXIT_EQ, ACPI_RSC_COMPARE_VALUE,
+ ACPI_RS_OFFSET(data.start_dpf.descriptor_length),
+ 1},
+
+ /* Set length to 0 bytes (no flags byte) */
+
+ {ACPI_RSC_LENGTH, 0, 0,
+ sizeof(struct aml_resource_start_dependent_noprio)},
+
+ /*
+ * All done if the output descriptor length is required to be 0.
+ *
+ * TBD: Perhaps we should check for error if input flags are not
+ * compatible with a 0-byte descriptor.
+ */
+ {ACPI_RSC_EXIT_EQ, ACPI_RSC_COMPARE_VALUE,
+ ACPI_RS_OFFSET(data.start_dpf.descriptor_length),
+ 0},
+
+ /* Reset length to 1 byte (descriptor with flags byte) */
+
+ {ACPI_RSC_LENGTH, 0, 0, sizeof(struct aml_resource_start_dependent)},
+
+ /*
+ * All done if flags byte is necessary -- if either priority value
+ * is not ACPI_ACCEPTABLE_CONFIGURATION
+ */
+ {ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_VALUE,
+ ACPI_RS_OFFSET(data.start_dpf.compatibility_priority),
+ ACPI_ACCEPTABLE_CONFIGURATION},
+
+ {ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_VALUE,
+ ACPI_RS_OFFSET(data.start_dpf.performance_robustness),
+ ACPI_ACCEPTABLE_CONFIGURATION},
+
+ /* Flag byte is not necessary */
+
+ {ACPI_RSC_LENGTH, 0, 0,
+ sizeof(struct aml_resource_start_dependent_noprio)}
+};
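
Read top to bottom, the opcode stream above encodes a simple decision about whether the optional priority/flags byte is emitted for a StartDependentFn descriptor. The same logic written as plain C (wants_flags_byte() is a hypothetical helper, not part of this patch, assuming the start_dpf field names used in the table):

static u8 wants_flags_byte(struct acpi_resource_start_dependent *dpf)
{
	if (dpf->descriptor_length == 1) {
		return TRUE;	/* caller explicitly asked for the flags byte */
	}
	if (dpf->descriptor_length == 0) {
		return FALSE;	/* caller explicitly asked for the short form */
	}

	/* Otherwise emit the flags byte only if either priority is non-default */

	return (dpf->compatibility_priority != ACPI_ACCEPTABLE_CONFIGURATION ||
		dpf->performance_robustness != ACPI_ACCEPTABLE_CONFIGURATION);
}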
diff --git a/drivers/acpi/resources/rsirq.c b/drivers/acpi/resources/rsirq.c
new file mode 100644
index 0000000..a8805ef
--- /dev/null
+++ b/drivers/acpi/resources/rsirq.c
@@ -0,0 +1,265 @@
+/*******************************************************************************
+ *
+ * Module Name: rsirq - IRQ resource descriptors
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acresrc.h>
+
+#define _COMPONENT ACPI_RESOURCES
+ACPI_MODULE_NAME("rsirq")
+
+/*******************************************************************************
+ *
+ * acpi_rs_get_irq
+ *
+ ******************************************************************************/
+struct acpi_rsconvert_info acpi_rs_get_irq[8] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_IRQ,
+ ACPI_RS_SIZE(struct acpi_resource_irq),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_get_irq)},
+
+ /* Get the IRQ mask (bytes 1:2) */
+
+ {ACPI_RSC_BITMASK16, ACPI_RS_OFFSET(data.irq.interrupts[0]),
+ AML_OFFSET(irq.irq_mask),
+ ACPI_RS_OFFSET(data.irq.interrupt_count)},
+
+ /* Set default flags (others are zero) */
+
+ {ACPI_RSC_SET8, ACPI_RS_OFFSET(data.irq.triggering),
+ ACPI_EDGE_SENSITIVE,
+ 1},
+
+ /* Get the descriptor length (2 or 3 for IRQ descriptor) */
+
+ {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.irq.descriptor_length),
+ AML_OFFSET(irq.descriptor_type),
+ 0},
+
+ /* All done if no flag byte present in descriptor */
+
+ {ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_AML_LENGTH, 0, 3},
+
+ /* Get flags: Triggering[0], Polarity[3], Sharing[4] */
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.triggering),
+ AML_OFFSET(irq.flags),
+ 0},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.polarity),
+ AML_OFFSET(irq.flags),
+ 3},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.sharable),
+ AML_OFFSET(irq.flags),
+ 4}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_set_irq
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_set_irq[13] = {
+ /* Start with a default descriptor of length 3 */
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_IRQ,
+ sizeof(struct aml_resource_irq),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_set_irq)},
+
+ /* Convert interrupt list to 16-bit IRQ bitmask */
+
+ {ACPI_RSC_BITMASK16, ACPI_RS_OFFSET(data.irq.interrupts[0]),
+ AML_OFFSET(irq.irq_mask),
+ ACPI_RS_OFFSET(data.irq.interrupt_count)},
+
+ /* Set the flags byte */
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.triggering),
+ AML_OFFSET(irq.flags),
+ 0},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.polarity),
+ AML_OFFSET(irq.flags),
+ 3},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.sharable),
+ AML_OFFSET(irq.flags),
+ 4},
+
+ /*
+ * All done if the output descriptor length is required to be 3
+ * (i.e., optimization to 2 bytes cannot be attempted)
+ */
+ {ACPI_RSC_EXIT_EQ, ACPI_RSC_COMPARE_VALUE,
+ ACPI_RS_OFFSET(data.irq.descriptor_length),
+ 3},
+
+ /* Set length to 2 bytes (no flags byte) */
+
+ {ACPI_RSC_LENGTH, 0, 0, sizeof(struct aml_resource_irq_noflags)},
+
+ /*
+ * All done if the output descriptor length is required to be 2.
+ *
+ * TBD: Perhaps we should check for error if input flags are not
+ * compatible with a 2-byte descriptor.
+ */
+ {ACPI_RSC_EXIT_EQ, ACPI_RSC_COMPARE_VALUE,
+ ACPI_RS_OFFSET(data.irq.descriptor_length),
+ 2},
+
+ /* Reset length to 3 bytes (descriptor with flags byte) */
+
+ {ACPI_RSC_LENGTH, 0, 0, sizeof(struct aml_resource_irq)},
+
+ /*
+ * Check if the flags byte is necessary. Not needed if the flags are:
+ * ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_HIGH, ACPI_EXCLUSIVE
+ */
+ {ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_VALUE,
+ ACPI_RS_OFFSET(data.irq.triggering),
+ ACPI_EDGE_SENSITIVE},
+
+ {ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_VALUE,
+ ACPI_RS_OFFSET(data.irq.polarity),
+ ACPI_ACTIVE_HIGH},
+
+ {ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_VALUE,
+ ACPI_RS_OFFSET(data.irq.sharable),
+ ACPI_EXCLUSIVE},
+
+ /* We can optimize to a 2-byte irq_no_flags() descriptor */
+
+ {ACPI_RSC_LENGTH, 0, 0, sizeof(struct aml_resource_irq_noflags)}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_ext_irq
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_ext_irq[9] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_EXTENDED_IRQ,
+ ACPI_RS_SIZE(struct acpi_resource_extended_irq),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_ext_irq)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_EXTENDED_IRQ,
+ sizeof(struct aml_resource_extended_irq),
+ 0},
+
+ /* Flag bits */
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.producer_consumer),
+ AML_OFFSET(extended_irq.flags),
+ 0},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.triggering),
+ AML_OFFSET(extended_irq.flags),
+ 1},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.polarity),
+ AML_OFFSET(extended_irq.flags),
+ 2},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.sharable),
+ AML_OFFSET(extended_irq.flags),
+ 3},
+
+ /* IRQ Table length (Byte4) */
+
+ {ACPI_RSC_COUNT, ACPI_RS_OFFSET(data.extended_irq.interrupt_count),
+ AML_OFFSET(extended_irq.interrupt_count),
+ sizeof(u32)},
+
+ /* Copy every IRQ in the table, each is 32 bits */
+
+ {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.extended_irq.interrupts[0]),
+ AML_OFFSET(extended_irq.interrupts[0]),
+ 0},
+
+ /* Optional resource_source (Index and String) */
+
+ {ACPI_RSC_SOURCEX, ACPI_RS_OFFSET(data.extended_irq.resource_source),
+ ACPI_RS_OFFSET(data.extended_irq.interrupts[0]),
+ sizeof(struct aml_resource_extended_irq)}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_dma
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_dma[6] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_DMA,
+ ACPI_RS_SIZE(struct acpi_resource_dma),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_dma)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_DMA,
+ sizeof(struct aml_resource_dma),
+ 0},
+
+ /* Flags: transfer preference, bus mastering, channel speed */
+
+ {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.dma.transfer),
+ AML_OFFSET(dma.flags),
+ 0},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.dma.bus_master),
+ AML_OFFSET(dma.flags),
+ 2},
+
+ {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.dma.type),
+ AML_OFFSET(dma.flags),
+ 5},
+
+ /* DMA channel mask bits */
+
+ {ACPI_RSC_BITMASK, ACPI_RS_OFFSET(data.dma.channels[0]),
+ AML_OFFSET(dma.dma_channel_mask),
+ ACPI_RS_OFFSET(data.dma.channel_count)}
+};
diff --git a/drivers/acpi/resources/rslist.c b/drivers/acpi/resources/rslist.c
new file mode 100644
index 0000000..b78c7e7
--- /dev/null
+++ b/drivers/acpi/resources/rslist.c
@@ -0,0 +1,202 @@
+/*******************************************************************************
+ *
+ * Module Name: rslist - Linked list utilities
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acresrc.h>
+
+#define _COMPONENT ACPI_RESOURCES
+ACPI_MODULE_NAME("rslist")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_convert_aml_to_resources
+ *
+ * PARAMETERS: Standard acpi_walk_aml_callback parameters (Aml, Length,
+ * Offset, resource_index, Context), where Context points to
+ * the buffer that will contain the output structures
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Convert an AML resource to an internal representation of the
+ * resource that is aligned and easier to access.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_rs_convert_aml_to_resources(u8 * aml,
+ u32 length,
+ u32 offset, u8 resource_index, void **context)
+{
+ struct acpi_resource **resource_ptr =
+ ACPI_CAST_INDIRECT_PTR(struct acpi_resource, context);
+ struct acpi_resource *resource;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(rs_convert_aml_to_resources);
+
+ /*
+ * Check that the input buffer and all subsequent pointers into it
+ * are aligned on a native word boundary. Most important on IA64
+ */
+ resource = *resource_ptr;
+ if (ACPI_IS_MISALIGNED(resource)) {
+ ACPI_WARNING((AE_INFO,
+ "Misaligned resource pointer %p", resource));
+ }
+
+ /* Convert the AML byte stream resource to a local resource struct */
+
+ status =
+ acpi_rs_convert_aml_to_resource(resource,
+ ACPI_CAST_PTR(union aml_resource,
+ aml),
+ acpi_gbl_get_resource_dispatch
+ [resource_index]);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not convert AML resource (Type %X)",
+ *aml));
+ return_ACPI_STATUS(status);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
+ "Type %.2X, AmlLength %.2X InternalLength %.2X\n",
+ acpi_ut_get_resource_type(aml), length,
+ resource->length));
+
+ /* Point to the next structure in the output buffer */
+
+ *resource_ptr = ACPI_ADD_PTR(void, resource, resource->length);
+ return_ACPI_STATUS(AE_OK);
+}
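
This function has the shape of an acpi_walk_aml_callback: the walker threads a caller-owned cursor through the Context double pointer, and each invocation fills one struct acpi_resource and advances that cursor. A hypothetical callback with the same signature (illustrative only, not part of this patch) that merely counts descriptors:

static acpi_status
example_count_aml_resources(u8 * aml,
			    u32 length,
			    u32 offset, u8 resource_index, void **context)
{
	u32 *counter = ACPI_CAST_PTR(u32, *context);

	(*counter)++;
	return (AE_OK);
}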
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_convert_resources_to_aml
+ *
+ * PARAMETERS: Resource - Pointer to the resource linked list
+ * aml_size_needed - Calculated size of the byte stream
+ * needed from calling acpi_rs_get_aml_length()
+ * The size of the output_buffer is
+ * guaranteed to be >= aml_size_needed
+ * output_buffer - Pointer to the buffer that will
+ * contain the byte stream
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Takes the resource linked list and parses it, creating a
+ * byte stream of resources in the caller's output buffer
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
+ acpi_size aml_size_needed, u8 * output_buffer)
+{
+ u8 *aml = output_buffer;
+ u8 *end_aml = output_buffer + aml_size_needed;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(rs_convert_resources_to_aml);
+
+ /* Walk the resource descriptor list, convert each descriptor */
+
+ while (aml < end_aml) {
+
+ /* Validate the (internal) Resource Type */
+
+ if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid descriptor type (%X) in resource list",
+ resource->type));
+ return_ACPI_STATUS(AE_BAD_DATA);
+ }
+
+ /* Perform the conversion */
+
+ status = acpi_rs_convert_resource_to_aml(resource, ACPI_CAST_PTR(union
+ aml_resource,
+ aml),
+ acpi_gbl_set_resource_dispatch
+ [resource->type]);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not convert resource (type %X) to AML",
+ resource->type));
+ return_ACPI_STATUS(status);
+ }
+
+ /* Perform final sanity check on the new AML resource descriptor */
+
+ status =
+ acpi_ut_validate_resource(ACPI_CAST_PTR
+ (union aml_resource, aml), NULL);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Check for end-of-list, normal exit */
+
+ if (resource->type == ACPI_RESOURCE_TYPE_END_TAG) {
+
+ /* An End Tag indicates the end of the input Resource Template */
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Extract the total length of the new descriptor and set the
+ * Aml to point to the next (output) resource descriptor
+ */
+ aml += acpi_ut_get_descriptor_length(aml);
+
+ /* Point to the next input resource descriptor */
+
+ resource =
+ ACPI_ADD_PTR(struct acpi_resource, resource,
+ resource->length);
+ }
+
+ /* Completed buffer, but did not find an end_tag resource descriptor */
+
+ return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
+}
diff --git a/drivers/acpi/resources/rsmemory.c b/drivers/acpi/resources/rsmemory.c
new file mode 100644
index 0000000..63b21ab
--- /dev/null
+++ b/drivers/acpi/resources/rsmemory.c
@@ -0,0 +1,235 @@
+/*******************************************************************************
+ *
+ * Module Name: rsmemory - Memory resource descriptors
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acresrc.h>
+
+#define _COMPONENT ACPI_RESOURCES
+ACPI_MODULE_NAME("rsmemory")
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_memory24
+ *
+ ******************************************************************************/
+struct acpi_rsconvert_info acpi_rs_convert_memory24[4] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_MEMORY24,
+ ACPI_RS_SIZE(struct acpi_resource_memory24),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_memory24)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_MEMORY24,
+ sizeof(struct aml_resource_memory24),
+ 0},
+
+ /* Read/Write bit */
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.memory24.write_protect),
+ AML_OFFSET(memory24.flags),
+ 0},
+ /*
+ * These fields are contiguous in both the source and destination:
+ * Minimum Base Address
+ * Maximum Base Address
+ * Address Base Alignment
+ * Range Length
+ */
+ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.memory24.minimum),
+ AML_OFFSET(memory24.minimum),
+ 4}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_memory32
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_memory32[4] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_MEMORY32,
+ ACPI_RS_SIZE(struct acpi_resource_memory32),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_memory32)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_MEMORY32,
+ sizeof(struct aml_resource_memory32),
+ 0},
+
+ /* Read/Write bit */
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.memory32.write_protect),
+ AML_OFFSET(memory32.flags),
+ 0},
+ /*
+ * These fields are contiguous in both the source and destination:
+ * Minimum Base Address
+ * Maximum Base Address
+ * Address Base Alignment
+ * Range Length
+ */
+ {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.memory32.minimum),
+ AML_OFFSET(memory32.minimum),
+ 4}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_fixed_memory32
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_fixed_memory32[4] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_FIXED_MEMORY32,
+ ACPI_RS_SIZE(struct acpi_resource_fixed_memory32),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_fixed_memory32)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_FIXED_MEMORY32,
+ sizeof(struct aml_resource_fixed_memory32),
+ 0},
+
+ /* Read/Write bit */
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.fixed_memory32.write_protect),
+ AML_OFFSET(fixed_memory32.flags),
+ 0},
+ /*
+ * These fields are contiguous in both the source and destination:
+ * Base Address
+ * Range Length
+ */
+ {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.fixed_memory32.address),
+ AML_OFFSET(fixed_memory32.address),
+ 2}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_get_vendor_small
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_get_vendor_small[3] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_VENDOR,
+ ACPI_RS_SIZE(struct acpi_resource_vendor),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_get_vendor_small)},
+
+ /* Length of the vendor data (byte count) */
+
+ {ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
+ 0,
+ sizeof(u8)},
+
+ /* Vendor data */
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.vendor.byte_data[0]),
+ sizeof(struct aml_resource_small_header),
+ 0}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_get_vendor_large
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_get_vendor_large[3] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_VENDOR,
+ ACPI_RS_SIZE(struct acpi_resource_vendor),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_get_vendor_large)},
+
+ /* Length of the vendor data (byte count) */
+
+ {ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
+ 0,
+ sizeof(u8)},
+
+ /* Vendor data */
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.vendor.byte_data[0]),
+ sizeof(struct aml_resource_large_header),
+ 0}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_set_vendor
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_set_vendor[7] = {
+ /* Default is a small vendor descriptor */
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_VENDOR_SMALL,
+ sizeof(struct aml_resource_small_header),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_set_vendor)},
+
+ /* Get the length and copy the data */
+
+ {ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
+ 0,
+ 0},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.vendor.byte_data[0]),
+ sizeof(struct aml_resource_small_header),
+ 0},
+
+ /*
+ * All done if the Vendor byte length is 7 or less, meaning that it will
+ * fit within a small descriptor
+ */
+ {ACPI_RSC_EXIT_LE, 0, 0, 7},
+
+ /* Must create a large vendor descriptor */
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_VENDOR_LARGE,
+ sizeof(struct aml_resource_large_header),
+ 0},
+
+ {ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
+ 0,
+ 0},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.vendor.byte_data[0]),
+ sizeof(struct aml_resource_large_header),
+ 0}
+};
diff --git a/drivers/acpi/resources/rsmisc.c b/drivers/acpi/resources/rsmisc.c
new file mode 100644
index 0000000..96a6c03
--- /dev/null
+++ b/drivers/acpi/resources/rsmisc.c
@@ -0,0 +1,560 @@
+/*******************************************************************************
+ *
+ * Module Name: rsmisc - Miscellaneous resource descriptors
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acresrc.h>
+
+#define _COMPONENT ACPI_RESOURCES
+ACPI_MODULE_NAME("rsmisc")
+#define INIT_RESOURCE_TYPE(i) i->resource_offset
+#define INIT_RESOURCE_LENGTH(i) i->aml_offset
+#define INIT_TABLE_LENGTH(i) i->value
+#define COMPARE_OPCODE(i) i->resource_offset
+#define COMPARE_TARGET(i) i->aml_offset
+#define COMPARE_VALUE(i) i->value
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_convert_aml_to_resource
+ *
+ * PARAMETERS: Resource - Where the internal resource descriptor is returned
+ * Aml - Pointer to the source AML resource descriptor
+ * Info - Pointer to appropriate conversion table
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Convert an external AML resource descriptor to the corresponding
+ * internal resource descriptor
+ *
+ ******************************************************************************/
+acpi_status
+acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
+ union aml_resource *aml,
+ struct acpi_rsconvert_info *info)
+{
+ acpi_rs_length aml_resource_length;
+ void *source;
+ void *destination;
+ char *target;
+ u8 count;
+ u8 flags_mode = FALSE;
+ u16 item_count = 0;
+ u16 temp16 = 0;
+
+ ACPI_FUNCTION_TRACE(rs_convert_aml_to_resource);
+
+ if (((acpi_size) resource) & 0x3) {
+
+ /* Each internal resource struct is expected to be 32-bit aligned */
+
+ ACPI_WARNING((AE_INFO,
+ "Misaligned resource pointer (get): %p Type %2.2X Len %X",
+ resource, resource->type, resource->length));
+ }
+
+ /* Extract the resource Length field (does not include header length) */
+
+ aml_resource_length = acpi_ut_get_resource_length(aml);
+
+ /*
+ * First table entry must be ACPI_RSC_INITxxx and must contain the
+ * table length (# of table entries)
+ */
+ count = INIT_TABLE_LENGTH(info);
+
+ while (count) {
+ /*
+ * Source is the external AML byte stream buffer,
+ * destination is the internal resource descriptor
+ */
+ source = ACPI_ADD_PTR(void, aml, info->aml_offset);
+ destination =
+ ACPI_ADD_PTR(void, resource, info->resource_offset);
+
+ switch (info->opcode) {
+ case ACPI_RSC_INITGET:
+ /*
+ * Get the resource type and the initial (minimum) length
+ */
+ ACPI_MEMSET(resource, 0, INIT_RESOURCE_LENGTH(info));
+ resource->type = INIT_RESOURCE_TYPE(info);
+ resource->length = INIT_RESOURCE_LENGTH(info);
+ break;
+
+ case ACPI_RSC_INITSET:
+ break;
+
+ case ACPI_RSC_FLAGINIT:
+
+ flags_mode = TRUE;
+ break;
+
+ case ACPI_RSC_1BITFLAG:
+ /*
+ * Mask and shift the flag bit
+ */
+ ACPI_SET8(destination) = (u8)
+ ((ACPI_GET8(source) >> info->value) & 0x01);
+ break;
+
+ case ACPI_RSC_2BITFLAG:
+ /*
+ * Mask and shift the flag bits
+ */
+ ACPI_SET8(destination) = (u8)
+ ((ACPI_GET8(source) >> info->value) & 0x03);
+ break;
+
+ case ACPI_RSC_COUNT:
+
+ item_count = ACPI_GET8(source);
+ ACPI_SET8(destination) = (u8) item_count;
+
+ resource->length = resource->length +
+ (info->value * (item_count - 1));
+ break;
+
+ case ACPI_RSC_COUNT16:
+
+ item_count = aml_resource_length;
+ ACPI_SET16(destination) = item_count;
+
+ resource->length = resource->length +
+ (info->value * (item_count - 1));
+ break;
+
+ case ACPI_RSC_LENGTH:
+
+ resource->length = resource->length + info->value;
+ break;
+
+ case ACPI_RSC_MOVE8:
+ case ACPI_RSC_MOVE16:
+ case ACPI_RSC_MOVE32:
+ case ACPI_RSC_MOVE64:
+ /*
+ * Raw data move. Use the Info value field unless item_count has
+ * been previously initialized via a COUNT opcode
+ */
+ if (info->value) {
+ item_count = info->value;
+ }
+ acpi_rs_move_data(destination, source, item_count,
+ info->opcode);
+ break;
+
+ case ACPI_RSC_SET8:
+
+ ACPI_MEMSET(destination, info->aml_offset, info->value);
+ break;
+
+ case ACPI_RSC_DATA8:
+
+ target = ACPI_ADD_PTR(char, resource, info->value);
+ ACPI_MEMCPY(destination, source, ACPI_GET16(target));
+ break;
+
+ case ACPI_RSC_ADDRESS:
+ /*
+ * Common handler for address descriptor flags
+ */
+ if (!acpi_rs_get_address_common(resource, aml)) {
+ return_ACPI_STATUS
+ (AE_AML_INVALID_RESOURCE_TYPE);
+ }
+ break;
+
+ case ACPI_RSC_SOURCE:
+ /*
+ * Optional resource_source (Index and String)
+ */
+ resource->length +=
+ acpi_rs_get_resource_source(aml_resource_length,
+ info->value,
+ destination, aml, NULL);
+ break;
+
+ case ACPI_RSC_SOURCEX:
+ /*
+ * Optional resource_source (Index and String). This is the more
+ * complicated case used by the Interrupt() macro
+ */
+ target =
+ ACPI_ADD_PTR(char, resource,
+ info->aml_offset + (item_count * 4));
+
+ resource->length +=
+ acpi_rs_get_resource_source(aml_resource_length,
+ (acpi_rs_length) (((item_count - 1) * sizeof(u32)) + info->value), destination, aml, target);
+ break;
+
+ case ACPI_RSC_BITMASK:
+ /*
+ * 8-bit encoded bitmask (DMA macro)
+ */
+ item_count =
+ acpi_rs_decode_bitmask(ACPI_GET8(source),
+ destination);
+ if (item_count) {
+ resource->length += (item_count - 1);
+ }
+
+ target = ACPI_ADD_PTR(char, resource, info->value);
+ ACPI_SET8(target) = (u8) item_count;
+ break;
+
+ case ACPI_RSC_BITMASK16:
+ /*
+ * 16-bit encoded bitmask (IRQ macro)
+ */
+ ACPI_MOVE_16_TO_16(&temp16, source);
+
+ item_count =
+ acpi_rs_decode_bitmask(temp16, destination);
+ if (item_count) {
+ resource->length += (item_count - 1);
+ }
+
+ target = ACPI_ADD_PTR(char, resource, info->value);
+ ACPI_SET8(target) = (u8) item_count;
+ break;
+
+ case ACPI_RSC_EXIT_NE:
+ /*
+ * Control - Exit conversion if not equal
+ */
+ switch (info->resource_offset) {
+ case ACPI_RSC_COMPARE_AML_LENGTH:
+ if (aml_resource_length != info->value) {
+ goto exit;
+ }
+ break;
+
+ case ACPI_RSC_COMPARE_VALUE:
+ if (ACPI_GET8(source) != info->value) {
+ goto exit;
+ }
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO,
+ "Invalid conversion sub-opcode"));
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO, "Invalid conversion opcode"));
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ count--;
+ info++;
+ }
+
+ exit:
+ if (!flags_mode) {
+
+ /* Round the resource struct length up to the next boundary (32 or 64) */
+
+ resource->length =
+ (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(resource->length);
+ }
+ return_ACPI_STATUS(AE_OK);
+}
+
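The conversion above is table driven: each struct acpi_rsconvert_info entry carries an opcode plus offsets into the internal descriptor and the raw AML stream, and the first entry's value field holds the number of table entries. The following is a minimal, hypothetical user-space sketch of that dispatch pattern; the types, opcodes, and table contents are simplified stand-ins, not the real ACPICA definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum { OP_INIT, OP_MOVE8, OP_1BITFLAG };

struct convert_info {
	uint8_t opcode;
	uint8_t resource_offset;   /* offset into the internal struct */
	uint8_t aml_offset;        /* offset into the raw byte stream */
	uint8_t value;             /* table length, bit position, etc. */
};

struct demo_resource {
	uint8_t irq;
	uint8_t sharable;
};

static const struct convert_info demo_table[] = {
	{ OP_INIT,     0,                                        0, 3 },
	{ OP_MOVE8,    offsetof(struct demo_resource, irq),      0, 0 },
	{ OP_1BITFLAG, offsetof(struct demo_resource, sharable), 1, 4 },
};

static void demo_convert(struct demo_resource *res, const uint8_t *aml,
			 const struct convert_info *info)
{
	uint8_t count = info->value;      /* first entry holds the table length */

	while (count--) {
		uint8_t *dst = (uint8_t *)res + info->resource_offset;
		const uint8_t *src = aml + info->aml_offset;

		switch (info->opcode) {
		case OP_INIT:
			memset(res, 0, sizeof(*res));
			break;
		case OP_MOVE8:
			*dst = *src;                           /* raw byte copy */
			break;
		case OP_1BITFLAG:
			*dst = (uint8_t)((*src >> info->value) & 0x01);
			break;
		}
		info++;
	}
}

int main(void)
{
	const uint8_t aml[] = { 0x09, 0x10 };  /* byte 0: IRQ 9, byte 1: flag bit 4 set */
	struct demo_resource res;

	demo_convert(&res, aml, demo_table);
	printf("irq=%u sharable=%u\n", res.irq, res.sharable);
	return 0;
}

Because each converter simply ignores the other's INIT opcode (INITGET is a no-op in the resource-to-AML direction and INITSET is a no-op above), a single table can drive both conversion directions.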
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_convert_resource_to_aml
+ *
+ * PARAMETERS: Resource - Pointer to the resource descriptor
+ * Aml - Where the AML descriptor is returned
+ * Info - Pointer to appropriate conversion table
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Convert an internal resource descriptor to the corresponding
+ * external AML resource descriptor.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
+ union aml_resource *aml,
+ struct acpi_rsconvert_info *info)
+{
+ void *source = NULL;
+ void *destination;
+ acpi_rsdesc_size aml_length = 0;
+ u8 count;
+ u16 temp16 = 0;
+ u16 item_count = 0;
+
+ ACPI_FUNCTION_TRACE(rs_convert_resource_to_aml);
+
+ /*
+ * First table entry must be ACPI_RSC_INITxxx and must contain the
+ * table length (# of table entries)
+ */
+ count = INIT_TABLE_LENGTH(info);
+
+ while (count) {
+ /*
+ * Source is the internal resource descriptor,
+ * destination is the external AML byte stream buffer
+ */
+ source = ACPI_ADD_PTR(void, resource, info->resource_offset);
+ destination = ACPI_ADD_PTR(void, aml, info->aml_offset);
+
+ switch (info->opcode) {
+ case ACPI_RSC_INITSET:
+
+ ACPI_MEMSET(aml, 0, INIT_RESOURCE_LENGTH(info));
+ aml_length = INIT_RESOURCE_LENGTH(info);
+ acpi_rs_set_resource_header(INIT_RESOURCE_TYPE(info),
+ aml_length, aml);
+ break;
+
+ case ACPI_RSC_INITGET:
+ break;
+
+ case ACPI_RSC_FLAGINIT:
+ /*
+ * Clear the flag byte
+ */
+ ACPI_SET8(destination) = 0;
+ break;
+
+ case ACPI_RSC_1BITFLAG:
+ /*
+ * Mask and shift the flag bit
+ */
+ ACPI_SET8(destination) |= (u8)
+ ((ACPI_GET8(source) & 0x01) << info->value);
+ break;
+
+ case ACPI_RSC_2BITFLAG:
+ /*
+ * Mask and shift the flag bits
+ */
+ ACPI_SET8(destination) |= (u8)
+ ((ACPI_GET8(source) & 0x03) << info->value);
+ break;
+
+ case ACPI_RSC_COUNT:
+
+ item_count = ACPI_GET8(source);
+ ACPI_SET8(destination) = (u8) item_count;
+
+ aml_length =
+ (u16) (aml_length +
+ (info->value * (item_count - 1)));
+ break;
+
+ case ACPI_RSC_COUNT16:
+
+ item_count = ACPI_GET16(source);
+ aml_length = (u16) (aml_length + item_count);
+ acpi_rs_set_resource_length(aml_length, aml);
+ break;
+
+ case ACPI_RSC_LENGTH:
+
+ acpi_rs_set_resource_length(info->value, aml);
+ break;
+
+ case ACPI_RSC_MOVE8:
+ case ACPI_RSC_MOVE16:
+ case ACPI_RSC_MOVE32:
+ case ACPI_RSC_MOVE64:
+
+ if (info->value) {
+ item_count = info->value;
+ }
+ acpi_rs_move_data(destination, source, item_count,
+ info->opcode);
+ break;
+
+ case ACPI_RSC_ADDRESS:
+
+ /* Set the Resource Type, General Flags, and Type-Specific Flags */
+
+ acpi_rs_set_address_common(aml, resource);
+ break;
+
+ case ACPI_RSC_SOURCEX:
+ /*
+ * Optional resource_source (Index and String). This is the more
+ * complicated case used by the Interrupt() macro
+ */
+ aml_length =
+ acpi_rs_set_resource_source(aml, (acpi_rs_length)
+ aml_length, source);
+ acpi_rs_set_resource_length(aml_length, aml);
+ break;
+
+ case ACPI_RSC_SOURCE:
+ /*
+ * Optional resource_source (Index and String)
+ */
+ aml_length =
+ acpi_rs_set_resource_source(aml, info->value,
+ source);
+ acpi_rs_set_resource_length(aml_length, aml);
+ break;
+
+ case ACPI_RSC_BITMASK:
+ /*
+ * 8-bit encoded bitmask (DMA macro)
+ */
+ ACPI_SET8(destination) = (u8)
+ acpi_rs_encode_bitmask(source,
+ *ACPI_ADD_PTR(u8, resource,
+ info->value));
+ break;
+
+ case ACPI_RSC_BITMASK16:
+ /*
+ * 16-bit encoded bitmask (IRQ macro)
+ */
+ temp16 = acpi_rs_encode_bitmask(source,
+ *ACPI_ADD_PTR(u8,
+ resource,
+ info->
+ value));
+ ACPI_MOVE_16_TO_16(destination, &temp16);
+ break;
+
+ case ACPI_RSC_EXIT_LE:
+ /*
+ * Control - Exit conversion if less than or equal
+ */
+ if (item_count <= info->value) {
+ goto exit;
+ }
+ break;
+
+ case ACPI_RSC_EXIT_NE:
+ /*
+ * Control - Exit conversion if not equal
+ */
+ switch (COMPARE_OPCODE(info)) {
+ case ACPI_RSC_COMPARE_VALUE:
+
+ if (*ACPI_ADD_PTR(u8, resource,
+ COMPARE_TARGET(info)) !=
+ COMPARE_VALUE(info)) {
+ goto exit;
+ }
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO,
+ "Invalid conversion sub-opcode"));
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+ break;
+
+ case ACPI_RSC_EXIT_EQ:
+ /*
+ * Control - Exit conversion if equal
+ */
+ if (*ACPI_ADD_PTR(u8, resource,
+ COMPARE_TARGET(info)) ==
+ COMPARE_VALUE(info)) {
+ goto exit;
+ }
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO, "Invalid conversion opcode"));
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ count--;
+ info++;
+ }
+
+ exit:
+ return_ACPI_STATUS(AE_OK);
+}
+
+#if 0
+/* Previous resource validations */
+
+if (aml->ext_address64.revision_iD != AML_RESOURCE_EXTENDED_ADDRESS_REVISION) {
+ return_ACPI_STATUS(AE_SUPPORT);
+}
+
+if (resource->data.start_dpf.performance_robustness >= 3) {
+ return_ACPI_STATUS(AE_AML_BAD_RESOURCE_VALUE);
+}
+
+if (((aml->irq.flags & 0x09) == 0x00) || ((aml->irq.flags & 0x09) == 0x09)) {
+ /*
+ * Only [active_high, edge_sensitive] or [active_low, level_sensitive]
+ * polarity/trigger interrupts are allowed (ACPI spec, section
+ * "IRQ Format"), so 0x00 and 0x09 are illegal.
+ */
+ ACPI_ERROR((AE_INFO,
+ "Invalid interrupt polarity/trigger in resource list, %X",
+ aml->irq.flags));
+ return_ACPI_STATUS(AE_BAD_DATA);
+}
+
+resource->data.extended_irq.interrupt_count = temp8;
+if (temp8 < 1) {
+
+ /* Must have at least one IRQ */
+
+ return_ACPI_STATUS(AE_AML_BAD_RESOURCE_LENGTH);
+}
+
+if (resource->data.dma.transfer == 0x03) {
+ ACPI_ERROR((AE_INFO, "Invalid DMA.Transfer preference (3)"));
+ return_ACPI_STATUS(AE_BAD_DATA);
+}
+#endif
diff --git a/drivers/acpi/resources/rsutils.c b/drivers/acpi/resources/rsutils.c
new file mode 100644
index 0000000..f7b3bcd
--- /dev/null
+++ b/drivers/acpi/resources/rsutils.c
@@ -0,0 +1,726 @@
+/*******************************************************************************
+ *
+ * Module Name: rsutils - Utilities for the resource manager
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acresrc.h>
+
+#define _COMPONENT ACPI_RESOURCES
+ACPI_MODULE_NAME("rsutils")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_decode_bitmask
+ *
+ * PARAMETERS: Mask - Bitmask to decode
+ * List - Where the converted list is returned
+ *
+ * RETURN: Count of bits set (length of list)
+ *
+ * DESCRIPTION: Convert a bit mask into a list of values
+ *
+ ******************************************************************************/
+u8 acpi_rs_decode_bitmask(u16 mask, u8 * list)
+{
+ u8 i;
+ u8 bit_count;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* Decode the mask bits */
+
+ for (i = 0, bit_count = 0; mask; i++) {
+ if (mask & 0x0001) {
+ list[bit_count] = i;
+ bit_count++;
+ }
+
+ mask >>= 1;
+ }
+
+ return (bit_count);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_encode_bitmask
+ *
+ * PARAMETERS: List - List of values to encode
+ * Count - Length of list
+ *
+ * RETURN: Encoded bitmask
+ *
+ * DESCRIPTION: Convert a list of values to an encoded bitmask
+ *
+ ******************************************************************************/
+
+u16 acpi_rs_encode_bitmask(u8 * list, u8 count)
+{
+ u32 i;
+ u16 mask;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* Encode the list into a single bitmask */
+
+ for (i = 0, mask = 0; i < count; i++) {
+ mask |= (0x1 << list[i]);
+ }
+
+ return mask;
+}
+
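For illustration, a standalone round trip through the same decode/encode logic (copied into local helpers so it builds outside the kernel); the hypothetical IRQ mask 0x0028 has bits 3 and 5 set.

#include <stdint.h>
#include <stdio.h>

static uint8_t decode_bitmask(uint16_t mask, uint8_t *list)
{
	uint8_t i, count = 0;

	/* Record the index of every set bit */
	for (i = 0; mask; i++, mask >>= 1)
		if (mask & 0x0001)
			list[count++] = i;
	return count;
}

static uint16_t encode_bitmask(const uint8_t *list, uint8_t count)
{
	uint16_t mask = 0;
	uint8_t i;

	for (i = 0; i < count; i++)
		mask |= (uint16_t)(1u << list[i]);
	return mask;
}

int main(void)
{
	uint8_t irqs[16];
	uint8_t n = decode_bitmask(0x0028, irqs);  /* IRQs 3 and 5 */

	printf("count=%u first=%u second=%u roundtrip=0x%04x\n",
	       n, irqs[0], irqs[1], encode_bitmask(irqs, n));
	return 0;
}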
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_move_data
+ *
+ * PARAMETERS: Destination - Pointer to the destination descriptor
+ * Source - Pointer to the source descriptor
+ * item_count - How many items to move
+ * move_type - Byte width
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Move multiple data items from one descriptor to another. Handles
+ * alignment issues and endian issues if necessary, as configured
+ * via the ACPI_MOVE_* macros. (This is why a memcpy is not used)
+ *
+ ******************************************************************************/
+
+void
+acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
+{
+ u32 i;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* One move per item */
+
+ for (i = 0; i < item_count; i++) {
+ switch (move_type) {
+ /*
+ * For the 8-bit case, we can perform the move all at once
+ * since there are no alignment or endian issues
+ */
+ case ACPI_RSC_MOVE8:
+ ACPI_MEMCPY(destination, source, item_count);
+ return;
+
+ /*
+ * 16-, 32-, and 64-bit cases must use the move macros that perform
+ * endian conversion and/or accommodate hardware that cannot perform
+ * misaligned memory transfers
+ */
+ case ACPI_RSC_MOVE16:
+ ACPI_MOVE_16_TO_16(&ACPI_CAST_PTR(u16, destination)[i],
+ &ACPI_CAST_PTR(u16, source)[i]);
+ break;
+
+ case ACPI_RSC_MOVE32:
+ ACPI_MOVE_32_TO_32(&ACPI_CAST_PTR(u32, destination)[i],
+ &ACPI_CAST_PTR(u32, source)[i]);
+ break;
+
+ case ACPI_RSC_MOVE64:
+ ACPI_MOVE_64_TO_64(&ACPI_CAST_PTR(u64, destination)[i],
+ &ACPI_CAST_PTR(u64, source)[i]);
+ break;
+
+ default:
+ return;
+ }
+ }
+}
+
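To show why the per-item move matters, here is a small standalone sketch that copies 16-bit items out of a deliberately misaligned byte stream one item at a time instead of casting and dereferencing; the helper name is a local stand-in, and the kernel macros additionally handle byte order on big-endian hosts.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void move16(uint16_t *dst, const uint8_t *src, unsigned int items)
{
	unsigned int i;

	/* memcpy per item is safe for any source alignment */
	for (i = 0; i < items; i++)
		memcpy(&dst[i], src + 2 * i, sizeof(uint16_t));
}

int main(void)
{
	const uint8_t stream[] = { 0xff, 0x34, 0x12, 0x78, 0x56 };  /* payload starts at +1 */
	uint16_t out[2];

	move16(out, stream + 1, 2);
	printf("0x%04x 0x%04x\n", out[0], out[1]);  /* 0x1234 0x5678 on little-endian hosts */
	return 0;
}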
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_set_resource_length
+ *
+ * PARAMETERS: total_length - Length of the AML descriptor, including
+ * the header and length fields.
+ * Aml - Pointer to the raw AML descriptor
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Set the resource_length field of an AML
+ * resource descriptor; both Large and Small descriptors are
+ * supported automatically. Note: the descriptor Type field must
+ * already be valid.
+ *
+ ******************************************************************************/
+
+void
+acpi_rs_set_resource_length(acpi_rsdesc_size total_length,
+ union aml_resource *aml)
+{
+ acpi_rs_length resource_length;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* Length is the total descriptor length minus the header length */
+
+ resource_length = (acpi_rs_length)
+ (total_length - acpi_ut_get_resource_header_length(aml));
+
+ /* Length is stored differently for large and small descriptors */
+
+ if (aml->small_header.descriptor_type & ACPI_RESOURCE_NAME_LARGE) {
+
+ /* Large descriptor -- bytes 1-2 contain the 16-bit length */
+
+ ACPI_MOVE_16_TO_16(&aml->large_header.resource_length,
+ &resource_length);
+ } else {
+ /* Small descriptor -- bits 2:0 of byte 0 contain the length */
+
+ aml->small_header.descriptor_type = (u8)
+
+ /* Clear any existing length, preserving descriptor type bits */
+ ((aml->small_header.
+ descriptor_type & ~ACPI_RESOURCE_NAME_SMALL_LENGTH_MASK)
+
+ | resource_length);
+ }
+}
+
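A standalone sketch of the two encodings handled above, under the usual ACPI layout assumptions (small descriptors keep the length in bits 2:0 of the tag byte; large descriptors store a 16-bit little-endian length in bytes 1-2). Unlike the function above, this stand-in takes the body length directly instead of subtracting the header length from a total.

#include <stdint.h>
#include <stdio.h>

#define NAME_LARGE 0x80		/* bit 7 of the tag byte marks a large descriptor */

static void set_resource_length(uint8_t *desc, uint16_t body_len)
{
	if (desc[0] & NAME_LARGE) {
		desc[1] = (uint8_t)(body_len & 0xff);	/* 16-bit length, little-endian */
		desc[2] = (uint8_t)(body_len >> 8);
	} else {
		/* Preserve the tag bits, replace only the 3-bit length */
		desc[0] = (uint8_t)((desc[0] & ~0x07) | (body_len & 0x07));
	}
}

int main(void)
{
	uint8_t small_irq[4] = { 0x22 };	/* small IRQ tag, current length 2 */
	uint8_t large_desc[8] = { 0x8a };	/* a large tag (bit 7 set) */

	set_resource_length(small_irq, 3);
	set_resource_length(large_desc, 0x002b);
	printf("small tag byte: 0x%02x, large length: %u\n",
	       small_irq[0], large_desc[1] | (large_desc[2] << 8));
	return 0;
}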
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_set_resource_header
+ *
+ * PARAMETERS: descriptor_type - Byte to be inserted as the type
+ * total_length - Length of the AML descriptor, including
+ * the header and length fields.
+ * Aml - Pointer to the raw AML descriptor
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Set the descriptor_type and resource_length fields of an AML
+ * resource descriptor; both Large and Small descriptors are
+ * supported automatically
+ *
+ ******************************************************************************/
+
+void
+acpi_rs_set_resource_header(u8 descriptor_type,
+ acpi_rsdesc_size total_length,
+ union aml_resource *aml)
+{
+ ACPI_FUNCTION_ENTRY();
+
+ /* Set the Resource Type */
+
+ aml->small_header.descriptor_type = descriptor_type;
+
+ /* Set the Resource Length */
+
+ acpi_rs_set_resource_length(total_length, aml);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_strcpy
+ *
+ * PARAMETERS: Destination - Pointer to the destination string
+ * Source - Pointer to the source string
+ *
+ * RETURN: String length, including NULL terminator
+ *
+ * DESCRIPTION: Local string copy that returns the string length, saving a
+ * strcpy followed by a strlen.
+ *
+ ******************************************************************************/
+
+static u16 acpi_rs_strcpy(char *destination, char *source)
+{
+ u16 i;
+
+ ACPI_FUNCTION_ENTRY();
+
+ for (i = 0; source[i]; i++) {
+ destination[i] = source[i];
+ }
+
+ destination[i] = 0;
+
+ /* Return string length including the NULL terminator */
+
+ return ((u16) (i + 1));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_get_resource_source
+ *
+ * PARAMETERS: resource_length - Length field of the descriptor
+ * minimum_length - Minimum length of the descriptor (minus
+ * any optional fields)
+ * resource_source - Where the resource_source is returned
+ * Aml - Pointer to the raw AML descriptor
+ * string_ptr - (optional) where to store the actual
+ * resource_source string
+ *
+ * RETURN: Length of the string plus NULL terminator, rounded up to native
+ * word boundary
+ *
+ * DESCRIPTION: Copy the optional resource_source data from a raw AML descriptor
+ * to an internal resource descriptor
+ *
+ ******************************************************************************/
+
+acpi_rs_length
+acpi_rs_get_resource_source(acpi_rs_length resource_length,
+ acpi_rs_length minimum_length,
+ struct acpi_resource_source * resource_source,
+ union aml_resource * aml, char *string_ptr)
+{
+ acpi_rsdesc_size total_length;
+ u8 *aml_resource_source;
+
+ ACPI_FUNCTION_ENTRY();
+
+ total_length =
+ resource_length + sizeof(struct aml_resource_large_header);
+ aml_resource_source = ACPI_ADD_PTR(u8, aml, minimum_length);
+
+ /*
+ * resource_source is present if the length of the descriptor is longer than
+ * the minimum length.
+ *
+ * Note: Some resource descriptors will have an additional null, so
+ * we add 1 to the minimum length.
+ */
+ if (total_length > (acpi_rsdesc_size) (minimum_length + 1)) {
+
+ /* Get the resource_source_index */
+
+ resource_source->index = aml_resource_source[0];
+
+ resource_source->string_ptr = string_ptr;
+ if (!string_ptr) {
+ /*
+ * String destination pointer is not specified; Set the String
+ * pointer to the end of the current resource_source structure.
+ */
+ resource_source->string_ptr =
+ ACPI_ADD_PTR(char, resource_source,
+ sizeof(struct acpi_resource_source));
+ }
+
+ /*
+ * In order for the Resource length to be a multiple of the native
+ * word, calculate the length of the string (+1 for NULL terminator)
+ * and expand to the next word multiple.
+ *
+ * Zero the entire area of the buffer.
+ */
+ total_length = (u32)
+ ACPI_STRLEN(ACPI_CAST_PTR(char, &aml_resource_source[1])) + 1;
+ total_length = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(total_length);
+
+ ACPI_MEMSET(resource_source->string_ptr, 0, total_length);
+
+ /* Copy the resource_source string to the destination */
+
+ resource_source->string_length =
+ acpi_rs_strcpy(resource_source->string_ptr,
+ ACPI_CAST_PTR(char,
+ &aml_resource_source[1]));
+
+ return ((acpi_rs_length) total_length);
+ }
+
+ /* resource_source is not present */
+
+ resource_source->index = 0;
+ resource_source->string_length = 0;
+ resource_source->string_ptr = NULL;
+ return (0);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_set_resource_source
+ *
+ * PARAMETERS: Aml - Pointer to the raw AML descriptor
+ * minimum_length - Minimum length of the descriptor (minus
+ * any optional fields)
+ * resource_source - Internal resource_source
+ *
+ * RETURN: Total length of the AML descriptor
+ *
+ * DESCRIPTION: Convert an optional resource_source from internal format to a
+ * raw AML resource descriptor
+ *
+ ******************************************************************************/
+
+acpi_rsdesc_size
+acpi_rs_set_resource_source(union aml_resource * aml,
+ acpi_rs_length minimum_length,
+ struct acpi_resource_source * resource_source)
+{
+ u8 *aml_resource_source;
+ acpi_rsdesc_size descriptor_length;
+
+ ACPI_FUNCTION_ENTRY();
+
+ descriptor_length = minimum_length;
+
+ /* Non-zero string length indicates presence of a resource_source */
+
+ if (resource_source->string_length) {
+
+ /* Point to the end of the AML descriptor */
+
+ aml_resource_source = ACPI_ADD_PTR(u8, aml, minimum_length);
+
+ /* Copy the resource_source_index */
+
+ aml_resource_source[0] = (u8) resource_source->index;
+
+ /* Copy the resource_source string */
+
+ ACPI_STRCPY(ACPI_CAST_PTR(char, &aml_resource_source[1]),
+ resource_source->string_ptr);
+
+ /*
+ * Add the length of the string (+ 1 for null terminator) to the
+ * final descriptor length
+ */
+ descriptor_length +=
+ ((acpi_rsdesc_size) resource_source->string_length + 1);
+ }
+
+ /* Return the new total length of the AML descriptor */
+
+ return (descriptor_length);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_get_prt_method_data
+ *
+ * PARAMETERS: Node - Device node
+ * ret_buffer - Pointer to a buffer structure for the
+ * results
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function is called to get the _PRT value for the device
+ * specified by the Node passed in
+ *
+ * If the function fails, an appropriate status will be returned
+ * and the contents of the caller's buffer are undefined.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_get_prt_method_data(struct acpi_namespace_node * node,
+ struct acpi_buffer * ret_buffer)
+{
+ union acpi_operand_object *obj_desc;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(rs_get_prt_method_data);
+
+ /* Parameters guaranteed valid by caller */
+
+ /* Execute the method, no parameters */
+
+ status = acpi_ut_evaluate_object(node, METHOD_NAME__PRT,
+ ACPI_BTYPE_PACKAGE, &obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Create a PCI routing table from the Package object returned by
+ * the _PRT method execution.
+ */
+ status = acpi_rs_create_pci_routing_table(obj_desc, ret_buffer);
+
+ /* On exit, we must delete the object returned by evaluate_object */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_get_crs_method_data
+ *
+ * PARAMETERS: Node - Device node
+ * ret_buffer - Pointer to a buffer structure for the
+ * results
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function is called to get the _CRS value for the device
+ * specified by the Node passed in
+ *
+ * If the function fails, an appropriate status will be returned
+ * and the contents of the caller's buffer are undefined.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_get_crs_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer)
+{
+ union acpi_operand_object *obj_desc;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(rs_get_crs_method_data);
+
+ /* Parameters guaranteed valid by caller */
+
+ /* Execute the method, no parameters */
+
+ status = acpi_ut_evaluate_object(node, METHOD_NAME__CRS,
+ ACPI_BTYPE_BUFFER, &obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Make the call to create a resource linked list from the
+ * byte stream buffer that comes back from the _CRS method
+ * execution.
+ */
+ status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
+
+ /* On exit, we must delete the object returned by evaluate_object */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_get_prs_method_data
+ *
+ * PARAMETERS: Node - Device node
+ * ret_buffer - Pointer to a buffer structure for the
+ * results
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function is called to get the _PRS value for the device
+ * specified by the Node passed in
+ *
+ * If the function fails, an appropriate status will be returned
+ * and the contents of the caller's buffer are undefined.
+ *
+ ******************************************************************************/
+
+#ifdef ACPI_FUTURE_USAGE
+acpi_status
+acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer)
+{
+ union acpi_operand_object *obj_desc;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(rs_get_prs_method_data);
+
+ /* Parameters guaranteed valid by caller */
+
+ /* Execute the method, no parameters */
+
+ status = acpi_ut_evaluate_object(node, METHOD_NAME__PRS,
+ ACPI_BTYPE_BUFFER, &obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Make the call to create a resource linked list from the
+ * byte stream buffer that comes back from the _PRS method
+ * execution.
+ */
+ status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
+
+ /* On exit, we must delete the object returned by evaluate_object */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
+#endif /* ACPI_FUTURE_USAGE */
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_get_method_data
+ *
+ * PARAMETERS: Handle - Handle to the containing object
+ * Path - Path to method, relative to Handle
+ * ret_buffer - Pointer to a buffer structure for the
+ * results
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function is called to get the _CRS or _PRS value of the
+ * object specified by the handle and path passed in
+ *
+ * If the function fails, an appropriate status will be returned
+ * and the contents of the caller's buffer are undefined.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_get_method_data(acpi_handle handle,
+ char *path, struct acpi_buffer *ret_buffer)
+{
+ union acpi_operand_object *obj_desc;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(rs_get_method_data);
+
+ /* Parameters guaranteed valid by caller */
+
+ /* Execute the method, no parameters */
+
+ status =
+ acpi_ut_evaluate_object(handle, path, ACPI_BTYPE_BUFFER, &obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Make the call to create a resource linked list from the
+ * byte stream buffer that comes back from the method
+ * execution.
+ */
+ status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
+
+ /* On exit, we must delete the object returned by evaluate_object */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_set_srs_method_data
+ *
+ * PARAMETERS: Node - Device node
+ * in_buffer - Pointer to a buffer structure of the
+ * parameter
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function is called to set the _SRS of the device
+ * specified by the Node passed in
+ *
+ * If the function fails, an appropriate status will be returned
+ * and the contents of the caller's buffer are undefined.
+ *
+ * Note: Parameters guaranteed valid by caller
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *in_buffer)
+{
+ struct acpi_evaluate_info *info;
+ union acpi_operand_object *args[2];
+ acpi_status status;
+ struct acpi_buffer buffer;
+
+ ACPI_FUNCTION_TRACE(rs_set_srs_method_data);
+
+ /* Allocate and initialize the evaluation information block */
+
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ info->prefix_node = node;
+ info->pathname = METHOD_NAME__SRS;
+ info->parameters = args;
+ info->flags = ACPI_IGNORE_RETURN_VALUE;
+
+ /*
+ * The in_buffer parameter will point to a linked list of
+ * resource parameters. It needs to be formatted into a
+ * byte stream to be sent in as an input parameter to _SRS
+ *
+ * Convert the linked list into a byte stream
+ */
+ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+ status = acpi_rs_create_aml_resources(in_buffer->pointer, &buffer);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ /* Create and initialize the method parameter object */
+
+ args[0] = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER);
+ if (!args[0]) {
+ /*
+ * Must free the buffer allocated above (otherwise it is freed
+ * later)
+ */
+ ACPI_FREE(buffer.pointer);
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ args[0]->buffer.length = (u32) buffer.length;
+ args[0]->buffer.pointer = buffer.pointer;
+ args[0]->common.flags = AOPOBJ_DATA_VALID;
+ args[1] = NULL;
+
+ /* Execute the method, no return value is expected */
+
+ status = acpi_ns_evaluate(info);
+
+ /* Clean up and return the status from acpi_ns_evaluate */
+
+ acpi_ut_remove_reference(args[0]);
+
+ cleanup:
+ ACPI_FREE(info);
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/resources/rsxface.c b/drivers/acpi/resources/rsxface.c
new file mode 100644
index 0000000..f59f4c4
--- /dev/null
+++ b/drivers/acpi/resources/rsxface.c
@@ -0,0 +1,570 @@
+/*******************************************************************************
+ *
+ * Module Name: rsxface - Public interfaces to the resource manager
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acresrc.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_RESOURCES
+ACPI_MODULE_NAME("rsxface")
+
+/* Local macros for 16,32-bit to 64-bit conversion */
+#define ACPI_COPY_FIELD(out, in, field) ((out)->field = (in)->field)
+#define ACPI_COPY_ADDRESS(out, in) \
+ ACPI_COPY_FIELD(out, in, resource_type); \
+ ACPI_COPY_FIELD(out, in, producer_consumer); \
+ ACPI_COPY_FIELD(out, in, decode); \
+ ACPI_COPY_FIELD(out, in, min_address_fixed); \
+ ACPI_COPY_FIELD(out, in, max_address_fixed); \
+ ACPI_COPY_FIELD(out, in, info); \
+ ACPI_COPY_FIELD(out, in, granularity); \
+ ACPI_COPY_FIELD(out, in, minimum); \
+ ACPI_COPY_FIELD(out, in, maximum); \
+ ACPI_COPY_FIELD(out, in, translation_offset); \
+ ACPI_COPY_FIELD(out, in, address_length); \
+ ACPI_COPY_FIELD(out, in, resource_source);
+/* Local prototypes */
+static acpi_status
+acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context);
+
+static acpi_status
+acpi_rs_validate_parameters(acpi_handle device_handle,
+ struct acpi_buffer *buffer,
+ struct acpi_namespace_node **return_node);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_validate_parameters
+ *
+ * PARAMETERS: device_handle - Handle to a device
+ * Buffer - Pointer to a data buffer
+ * return_node - Pointer to where the device node is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Common parameter validation for resource interfaces
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_rs_validate_parameters(acpi_handle device_handle,
+ struct acpi_buffer *buffer,
+ struct acpi_namespace_node **return_node)
+{
+ acpi_status status;
+ struct acpi_namespace_node *node;
+
+ ACPI_FUNCTION_TRACE(rs_validate_parameters);
+
+ /*
+ * Must have a valid handle to an ACPI device
+ */
+ if (!device_handle) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ node = acpi_ns_map_handle_to_node(device_handle);
+ if (!node) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ if (node->type != ACPI_TYPE_DEVICE) {
+ return_ACPI_STATUS(AE_TYPE);
+ }
+
+ /*
+ * Validate the user buffer object
+ *
+ * If there is a non-zero buffer length, we also need a valid pointer in
+ * the buffer. If it's a zero buffer length, we'll be returning the
+ * needed buffer size (later), so keep going.
+ */
+ status = acpi_ut_validate_buffer(buffer);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ *return_node = node;
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_irq_routing_table
+ *
+ * PARAMETERS: device_handle - Handle to the Bus device we are querying
+ * ret_buffer - Pointer to a buffer to receive the
+ * current resources for the device
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function is called to get the IRQ routing table for a
+ * specific bus. The caller must first acquire a handle for the
+ * desired bus. The routing table is placed in the buffer pointed
+ * to by the ret_buffer parameter.
+ *
+ * If the function fails an appropriate status will be returned
+ * and the value of ret_buffer is undefined.
+ *
+ * This function attempts to execute the _PRT method contained in
+ * the object indicated by the passed device_handle.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_get_irq_routing_table(acpi_handle device_handle,
+ struct acpi_buffer *ret_buffer)
+{
+ acpi_status status;
+ struct acpi_namespace_node *node;
+
+ ACPI_FUNCTION_TRACE(acpi_get_irq_routing_table);
+
+ /* Validate parameters then dispatch to internal routine */
+
+ status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_rs_get_prt_method_data(node, ret_buffer);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_irq_routing_table)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_current_resources
+ *
+ * PARAMETERS: device_handle - Handle to the device object for the
+ * device we are querying
+ * ret_buffer - Pointer to a buffer to receive the
+ * current resources for the device
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function is called to get the current resources for a
+ * specific device. The caller must first acquire a handle for
+ * the desired device. The resource data is placed in the buffer
+ * pointed to by the ret_buffer parameter.
+ *
+ * If the function fails an appropriate status will be returned
+ * and the value of ret_buffer is undefined.
+ *
+ * This function attempts to execute the _CRS method contained in
+ * the object indicated by the passed device_handle.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_current_resources(acpi_handle device_handle,
+ struct acpi_buffer *ret_buffer)
+{
+ acpi_status status;
+ struct acpi_namespace_node *node;
+
+ ACPI_FUNCTION_TRACE(acpi_get_current_resources);
+
+ /* Validate parameters then dispatch to internal routine */
+
+ status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_rs_get_crs_method_data(node, ret_buffer);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_current_resources)
+#ifdef ACPI_FUTURE_USAGE
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_possible_resources
+ *
+ * PARAMETERS: device_handle - Handle to the device object for the
+ * device we are querying
+ * ret_buffer - Pointer to a buffer to receive the
+ * resources for the device
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function is called to get a list of the possible resources
+ * for a specific device. The caller must first acquire a handle
+ * for the desired device. The resource data is placed in the
+ * buffer pointed to by the ret_buffer parameter.
+ *
+ * If the function fails an appropriate status will be returned
+ * and the value of ret_buffer is undefined.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_possible_resources(acpi_handle device_handle,
+ struct acpi_buffer *ret_buffer)
+{
+ acpi_status status;
+ struct acpi_namespace_node *node;
+
+ ACPI_FUNCTION_TRACE(acpi_get_possible_resources);
+
+ /* Validate parameters then dispatch to internal routine */
+
+ status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_rs_get_prs_method_data(node, ret_buffer);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_possible_resources)
+#endif /* ACPI_FUTURE_USAGE */
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_set_current_resources
+ *
+ * PARAMETERS: device_handle - Handle to the device object for the
+ * device we are setting resources
+ * in_buffer - Pointer to a buffer containing the
+ * resources to be set for the device
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function is called to set the current resources for a
+ * specific device. The caller must first acquire a handle for
+ * the desired device. The resource data is passed to the routine
+ * in the buffer pointed to by the in_buffer parameter.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_set_current_resources(acpi_handle device_handle,
+ struct acpi_buffer *in_buffer)
+{
+ acpi_status status;
+ struct acpi_namespace_node *node;
+
+ ACPI_FUNCTION_TRACE(acpi_set_current_resources);
+
+ /* Validate the buffer, don't allow zero length */
+
+ if ((!in_buffer) || (!in_buffer->pointer) || (!in_buffer->length)) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Validate parameters then dispatch to internal routine */
+
+ status = acpi_rs_validate_parameters(device_handle, in_buffer, &node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_rs_set_srs_method_data(node, in_buffer);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_set_current_resources)
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_resource_to_address64
+ *
+ * PARAMETERS: Resource - Pointer to a resource
+ * Out - Pointer to the user's return buffer
+ * (a struct acpi_resource_address64)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: If the resource is an address16, address32, or address64,
+ * copy it to the address64 return buffer. This saves the
+ * caller from having to duplicate code for different-sized
+ * addresses.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_resource_to_address64(struct acpi_resource *resource,
+ struct acpi_resource_address64 *out)
+{
+ struct acpi_resource_address16 *address16;
+ struct acpi_resource_address32 *address32;
+
+ if (!resource || !out) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /* Convert 16 or 32 address descriptor to 64 */
+
+ switch (resource->type) {
+ case ACPI_RESOURCE_TYPE_ADDRESS16:
+
+ address16 = (struct acpi_resource_address16 *)&resource->data;
+ ACPI_COPY_ADDRESS(out, address16);
+ break;
+
+ case ACPI_RESOURCE_TYPE_ADDRESS32:
+
+ address32 = (struct acpi_resource_address32 *)&resource->data;
+ ACPI_COPY_ADDRESS(out, address32);
+ break;
+
+ case ACPI_RESOURCE_TYPE_ADDRESS64:
+
+ /* Simple copy for 64 bit source */
+
+ ACPI_MEMCPY(out, &resource->data,
+ sizeof(struct acpi_resource_address64));
+ break;
+
+ default:
+ return (AE_BAD_PARAMETER);
+ }
+
+ return (AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_resource_to_address64)
+
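A hypothetical caller, shown only as a sketch: inside a resource-walk callback, any of the three address descriptor sizes can be normalized to 64 bits before the window is examined. The callback name is made up; the field names follow struct acpi_resource_address64 as used in this file.

/* Illustrative only: log memory windows found while walking resources */
static acpi_status check_window(struct acpi_resource *resource, void *context)
{
	struct acpi_resource_address64 addr;

	/* Skip descriptors that are not Address16/32/64 */
	if (ACPI_FAILURE(acpi_resource_to_address64(resource, &addr)))
		return AE_OK;

	if (addr.resource_type == ACPI_MEMORY_RANGE)
		printk(KERN_DEBUG "memory window %llx-%llx\n",
		       (unsigned long long)addr.minimum,
		       (unsigned long long)addr.maximum);
	return AE_OK;
}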
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_vendor_resource
+ *
+ * PARAMETERS: device_handle - Handle for the parent device object
+ * Name - Method name for the parent resource
+ * (METHOD_NAME__CRS or METHOD_NAME__PRS)
+ * Uuid - Pointer to the UUID to be matched.
+ * includes both subtype and 16-byte UUID
+ * ret_buffer - Where the vendor resource is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Walk a resource template for the specified device to find a
+ * vendor-defined resource that matches the supplied UUID and
+ * UUID subtype. Returns a struct acpi_resource of type Vendor.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_vendor_resource(acpi_handle device_handle,
+ char *name,
+ struct acpi_vendor_uuid * uuid,
+ struct acpi_buffer * ret_buffer)
+{
+ struct acpi_vendor_walk_info info;
+ acpi_status status;
+
+ /* Other parameters are validated by acpi_walk_resources */
+
+ if (!uuid || !ret_buffer) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ info.uuid = uuid;
+ info.buffer = ret_buffer;
+ info.status = AE_NOT_EXIST;
+
+ /* Walk the _CRS or _PRS resource list for this device */
+
+ status =
+ acpi_walk_resources(device_handle, name,
+ acpi_rs_match_vendor_resource, &info);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ return (info.status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_vendor_resource)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_match_vendor_resource
+ *
+ * PARAMETERS: acpi_walk_resource_callback
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Match a vendor resource via the ACPI 3.0 UUID
+ *
+ ******************************************************************************/
+static acpi_status
+acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
+{
+ struct acpi_vendor_walk_info *info = context;
+ struct acpi_resource_vendor_typed *vendor;
+ struct acpi_buffer *buffer;
+ acpi_status status;
+
+ /* Ignore all descriptors except Vendor */
+
+ if (resource->type != ACPI_RESOURCE_TYPE_VENDOR) {
+ return (AE_OK);
+ }
+
+ vendor = &resource->data.vendor_typed;
+
+ /*
+ * For a valid match, these conditions must hold:
+ *
+ * 1) Length of descriptor data must be at least as long as a UUID struct
+ * 2) The UUID subtypes must match
+ * 3) The UUID data must match
+ */
+ if ((vendor->byte_length < (ACPI_UUID_LENGTH + 1)) ||
+ (vendor->uuid_subtype != info->uuid->subtype) ||
+ (ACPI_MEMCMP(vendor->uuid, info->uuid->data, ACPI_UUID_LENGTH))) {
+ return (AE_OK);
+ }
+
+ /* Validate/Allocate/Clear caller buffer */
+
+ buffer = info->buffer;
+ status = acpi_ut_initialize_buffer(buffer, resource->length);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Found the correct resource, copy and return it */
+
+ ACPI_MEMCPY(buffer->pointer, resource, resource->length);
+ buffer->length = resource->length;
+
+ /* Found the desired descriptor, terminate resource walk */
+
+ info->status = AE_OK;
+ return (AE_CTRL_TERMINATE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_walk_resources
+ *
+ * PARAMETERS: device_handle - Handle to the device object for the
+ * device we are querying
+ * Name - Method name of the resources we want
+ * (METHOD_NAME__CRS or METHOD_NAME__PRS)
+ * user_function - Called for each resource
+ * Context - Passed to user_function
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Retrieves the current or possible resource list for the
+ * specified device. The user_function is called once for
+ * each resource in the list.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_walk_resources(acpi_handle device_handle,
+ char *name,
+ acpi_walk_resource_callback user_function, void *context)
+{
+ acpi_status status;
+ struct acpi_buffer buffer;
+ struct acpi_resource *resource;
+ struct acpi_resource *resource_end;
+
+ ACPI_FUNCTION_TRACE(acpi_walk_resources);
+
+ /* Parameter validation */
+
+ if (!device_handle || !user_function || !name ||
+ (!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
+ !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS))) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Get the _CRS or _PRS resource list */
+
+ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+ status = acpi_rs_get_method_data(device_handle, name, &buffer);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Buffer now contains the resource list */
+
+ resource = ACPI_CAST_PTR(struct acpi_resource, buffer.pointer);
+ resource_end =
+ ACPI_ADD_PTR(struct acpi_resource, buffer.pointer, buffer.length);
+
+ /* Walk the resource list until the end_tag is found (or buffer end) */
+
+ while (resource < resource_end) {
+
+ /* Sanity check the resource */
+
+ if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
+ status = AE_AML_INVALID_RESOURCE_TYPE;
+ break;
+ }
+
+ /* Invoke the user function, abort on any error returned */
+
+ status = user_function(resource, context);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_CTRL_TERMINATE) {
+
+ /* This is an OK termination by the user function */
+
+ status = AE_OK;
+ }
+ break;
+ }
+
+ /* end_tag indicates end-of-list */
+
+ if (resource->type == ACPI_RESOURCE_TYPE_END_TAG) {
+ break;
+ }
+
+ /* Get the next resource descriptor */
+
+ resource =
+ ACPI_ADD_PTR(struct acpi_resource, resource,
+ resource->length);
+ }
+
+ ACPI_FREE(buffer.pointer);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_walk_resources)
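As a usage sketch (hypothetical driver code, error handling abbreviated), a caller passes a callback that acpi_walk_resources invokes once per descriptor until the end tag is reached or the callback returns AE_CTRL_TERMINATE:

/* Count the IRQ descriptors in a device's _CRS (illustrative only) */
static acpi_status count_irqs(struct acpi_resource *resource, void *context)
{
	int *count = context;

	if (resource->type == ACPI_RESOURCE_TYPE_IRQ ||
	    resource->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ)
		(*count)++;
	return AE_OK;
}

static int example_count_irqs(acpi_handle device)
{
	int count = 0;
	acpi_status status;

	status = acpi_walk_resources(device, METHOD_NAME__CRS,
				     count_irqs, &count);
	return ACPI_FAILURE(status) ? -EINVAL : count;
}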
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
new file mode 100644
index 0000000..6050ce4
--- /dev/null
+++ b/drivers/acpi/sbs.c
@@ -0,0 +1,1043 @@
+/*
+ * sbs.c - ACPI Smart Battery System Driver ($Revision: 2.0 $)
+ *
+ * Copyright (c) 2007 Alexey Starikovskiy <astarikovskiy@suse.de>
+ * Copyright (c) 2005-2007 Vladimir Lebedev <vladimir.p.lebedev@intel.com>
+ * Copyright (c) 2005 Rich Townsend <rhdt@bartol.udel.edu>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+
+#ifdef CONFIG_ACPI_PROCFS_POWER
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/uaccess.h>
+#endif
+
+#include <linux/acpi.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+
+#ifdef CONFIG_ACPI_SYSFS_POWER
+#include <linux/power_supply.h>
+#endif
+
+#include "sbshc.h"
+
+#define ACPI_SBS_CLASS "sbs"
+#define ACPI_AC_CLASS "ac_adapter"
+#define ACPI_BATTERY_CLASS "battery"
+#define ACPI_SBS_DEVICE_NAME "Smart Battery System"
+#define ACPI_SBS_FILE_INFO "info"
+#define ACPI_SBS_FILE_STATE "state"
+#define ACPI_SBS_FILE_ALARM "alarm"
+#define ACPI_BATTERY_DIR_NAME "BAT%i"
+#define ACPI_AC_DIR_NAME "AC0"
+
+#define ACPI_SBS_NOTIFY_STATUS 0x80
+#define ACPI_SBS_NOTIFY_INFO 0x81
+
+MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>");
+MODULE_DESCRIPTION("Smart Battery System ACPI interface driver");
+MODULE_LICENSE("GPL");
+
+static unsigned int cache_time = 1000;
+module_param(cache_time, uint, 0644);
+MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
+
+extern struct proc_dir_entry *acpi_lock_ac_dir(void);
+extern struct proc_dir_entry *acpi_lock_battery_dir(void);
+extern void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
+extern void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
+
+#define MAX_SBS_BAT 4
+#define ACPI_SBS_BLOCK_MAX 32
+
+static const struct acpi_device_id sbs_device_ids[] = {
+ {"ACPI0002", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, sbs_device_ids);
+
+struct acpi_battery {
+#ifdef CONFIG_ACPI_SYSFS_POWER
+ struct power_supply bat;
+#endif
+ struct acpi_sbs *sbs;
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ struct proc_dir_entry *proc_entry;
+#endif
+ unsigned long update_time;
+ char name[8];
+ char manufacturer_name[ACPI_SBS_BLOCK_MAX];
+ char device_name[ACPI_SBS_BLOCK_MAX];
+ char device_chemistry[ACPI_SBS_BLOCK_MAX];
+ u16 alarm_capacity;
+ u16 full_charge_capacity;
+ u16 design_capacity;
+ u16 design_voltage;
+ u16 serial_number;
+ u16 cycle_count;
+ u16 temp_now;
+ u16 voltage_now;
+ s16 current_now;
+ s16 current_avg;
+ u16 capacity_now;
+ u16 state_of_charge;
+ u16 state;
+ u16 mode;
+ u16 spec;
+ u8 id;
+ u8 present:1;
+ u8 have_sysfs_alarm:1;
+};
+
+#define to_acpi_battery(x) container_of(x, struct acpi_battery, bat);
+
+struct acpi_sbs {
+#ifdef CONFIG_ACPI_SYSFS_POWER
+ struct power_supply charger;
+#endif
+ struct acpi_device *device;
+ struct acpi_smb_hc *hc;
+ struct mutex lock;
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ struct proc_dir_entry *charger_entry;
+#endif
+ struct acpi_battery battery[MAX_SBS_BAT];
+ u8 batteries_supported:4;
+ u8 manager_present:1;
+ u8 charger_present:1;
+};
+
+#define to_acpi_sbs(x) container_of(x, struct acpi_sbs, charger)
+
+static inline int battery_scale(int log)
+{
+ int scale = 1;
+ while (log--)
+ scale *= 10;
+ return scale;
+}
+
+static inline int acpi_battery_vscale(struct acpi_battery *battery)
+{
+ return battery_scale((battery->spec & 0x0f00) >> 8);
+}
+
+static inline int acpi_battery_ipscale(struct acpi_battery *battery)
+{
+ return battery_scale((battery->spec & 0xf000) >> 12);
+}
+
+static inline int acpi_battery_mode(struct acpi_battery *battery)
+{
+ return (battery->mode & 0x8000);
+}
+
+static inline int acpi_battery_scale(struct acpi_battery *battery)
+{
+ return (acpi_battery_mode(battery) ? 10 : 1) *
+ acpi_battery_ipscale(battery);
+}
+
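The scaling helpers above decode two nibbles of the SpecificationInfo word (bits 11:8 scale voltages, bits 15:12 scale currents) plus bit 15 of the mode word (CAPACITY_MODE in the Smart Battery spec, which switches capacity reporting to units of 10 mWh). A standalone arithmetic check using a hypothetical spec value of 0x2100:

#include <stdio.h>

/* Same power-of-ten helper as battery_scale() above */
static int battery_scale(int log)
{
	int scale = 1;

	while (log--)
		scale *= 10;
	return scale;
}

int main(void)
{
	unsigned int spec = 0x2100;	/* hypothetical: VScale=1, IPScale=2 */
	unsigned int mode = 0x8000;	/* CAPACITY_MODE bit set */
	int vscale = battery_scale((spec & 0x0f00) >> 8);	/* 10 */
	int ipscale = battery_scale((spec & 0xf000) >> 12);	/* 100 */
	int cap_scale = ((mode & 0x8000) ? 10 : 1) * ipscale;	/* 1000 */

	printf("vscale=%d ipscale=%d capacity_scale=%d\n",
	       vscale, ipscale, cap_scale);
	return 0;
}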
+#ifdef CONFIG_ACPI_SYSFS_POWER
+static int sbs_get_ac_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct acpi_sbs *sbs = to_acpi_sbs(psy);
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = sbs->charger_present;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int acpi_battery_technology(struct acpi_battery *battery)
+{
+ if (!strcasecmp("NiCd", battery->device_chemistry))
+ return POWER_SUPPLY_TECHNOLOGY_NiCd;
+ if (!strcasecmp("NiMH", battery->device_chemistry))
+ return POWER_SUPPLY_TECHNOLOGY_NiMH;
+ if (!strcasecmp("LION", battery->device_chemistry))
+ return POWER_SUPPLY_TECHNOLOGY_LION;
+ if (!strcasecmp("LiP", battery->device_chemistry))
+ return POWER_SUPPLY_TECHNOLOGY_LIPO;
+ return POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
+}
+
+static int acpi_sbs_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct acpi_battery *battery = to_acpi_battery(psy);
+
+ if ((!battery->present) && psp != POWER_SUPPLY_PROP_PRESENT)
+ return -ENODEV;
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (battery->current_now < 0)
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ else if (battery->current_now > 0)
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = battery->present;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = acpi_battery_technology(battery);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = battery->design_voltage *
+ acpi_battery_vscale(battery) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = battery->voltage_now *
+ acpi_battery_vscale(battery) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = abs(battery->current_now) *
+ acpi_battery_ipscale(battery) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ val->intval = abs(battery->current_avg) *
+ acpi_battery_ipscale(battery) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = battery->state_of_charge;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ val->intval = battery->design_capacity *
+ acpi_battery_scale(battery) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ case POWER_SUPPLY_PROP_ENERGY_FULL:
+ val->intval = battery->full_charge_capacity *
+ acpi_battery_scale(battery) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ case POWER_SUPPLY_PROP_ENERGY_NOW:
+ val->intval = battery->capacity_now *
+ acpi_battery_scale(battery) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = battery->temp_now - 2730; // dK -> dC
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = battery->device_name;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = battery->manufacturer_name;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static enum power_supply_property sbs_ac_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static enum power_supply_property sbs_charge_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static enum power_supply_property sbs_energy_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
+ POWER_SUPPLY_PROP_ENERGY_FULL,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+#endif
+
+/* --------------------------------------------------------------------------
+ Smart Battery System Management
+ -------------------------------------------------------------------------- */
+
+struct acpi_battery_reader {
+ u8 command; /* command for battery */
+ u8 mode; /* word or block? */
+ size_t offset; /* offset inside struct acpi_sbs_battery */
+};
+
+static struct acpi_battery_reader info_readers[] = {
+ {0x01, SMBUS_READ_WORD, offsetof(struct acpi_battery, alarm_capacity)},
+ {0x03, SMBUS_READ_WORD, offsetof(struct acpi_battery, mode)},
+ {0x10, SMBUS_READ_WORD, offsetof(struct acpi_battery, full_charge_capacity)},
+ {0x17, SMBUS_READ_WORD, offsetof(struct acpi_battery, cycle_count)},
+ {0x18, SMBUS_READ_WORD, offsetof(struct acpi_battery, design_capacity)},
+ {0x19, SMBUS_READ_WORD, offsetof(struct acpi_battery, design_voltage)},
+ {0x1a, SMBUS_READ_WORD, offsetof(struct acpi_battery, spec)},
+ {0x1c, SMBUS_READ_WORD, offsetof(struct acpi_battery, serial_number)},
+ {0x20, SMBUS_READ_BLOCK, offsetof(struct acpi_battery, manufacturer_name)},
+ {0x21, SMBUS_READ_BLOCK, offsetof(struct acpi_battery, device_name)},
+ {0x22, SMBUS_READ_BLOCK, offsetof(struct acpi_battery, device_chemistry)},
+};
+
+static struct acpi_battery_reader state_readers[] = {
+ {0x08, SMBUS_READ_WORD, offsetof(struct acpi_battery, temp_now)},
+ {0x09, SMBUS_READ_WORD, offsetof(struct acpi_battery, voltage_now)},
+ {0x0a, SMBUS_READ_WORD, offsetof(struct acpi_battery, current_now)},
+ {0x0b, SMBUS_READ_WORD, offsetof(struct acpi_battery, current_avg)},
+ {0x0f, SMBUS_READ_WORD, offsetof(struct acpi_battery, capacity_now)},
+ {0x0e, SMBUS_READ_WORD, offsetof(struct acpi_battery, state_of_charge)},
+ {0x16, SMBUS_READ_WORD, offsetof(struct acpi_battery, state)},
+};
+
+static int acpi_manager_get_info(struct acpi_sbs *sbs)
+{
+ int result = 0;
+ u16 battery_system_info;
+
+ result = acpi_smbus_read(sbs->hc, SMBUS_READ_WORD, ACPI_SBS_MANAGER,
+ 0x04, (u8 *)&battery_system_info);
+ if (!result)
+ sbs->batteries_supported = battery_system_info & 0x000f;
+ return result;
+}
+
+static int acpi_battery_get_info(struct acpi_battery *battery)
+{
+ int i, result = 0;
+
+ for (i = 0; i < ARRAY_SIZE(info_readers); ++i) {
+ result = acpi_smbus_read(battery->sbs->hc,
+ info_readers[i].mode,
+ ACPI_SBS_BATTERY,
+ info_readers[i].command,
+ (u8 *) battery +
+ info_readers[i].offset);
+ if (result)
+ break;
+ }
+ return result;
+}
+
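+/*
+ * Refresh the volatile state registers. Re-reads are rate-limited by the
+ * driver's cache_time parameter (milliseconds); within that window the
+ * previously read values are returned unchanged.
+ */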
+static int acpi_battery_get_state(struct acpi_battery *battery)
+{
+ int i, result = 0;
+
+ if (battery->update_time &&
+ time_before(jiffies, battery->update_time +
+ msecs_to_jiffies(cache_time)))
+ return 0;
+ for (i = 0; i < ARRAY_SIZE(state_readers); ++i) {
+ result = acpi_smbus_read(battery->sbs->hc,
+ state_readers[i].mode,
+ ACPI_SBS_BATTERY,
+ state_readers[i].command,
+ (u8 *)battery +
+ state_readers[i].offset);
+ if (result)
+ goto end;
+ }
+ end:
+ battery->update_time = jiffies;
+ return result;
+}
+
+static int acpi_battery_get_alarm(struct acpi_battery *battery)
+{
+ return acpi_smbus_read(battery->sbs->hc, SMBUS_READ_WORD,
+ ACPI_SBS_BATTERY, 0x01,
+ (u8 *)&battery->alarm_capacity);
+}
+
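+/*
+ * Write the alarm threshold to the battery. On systems with a battery
+ * manager, bits 12-15 of manager register 0x01 select the battery that
+ * subsequent SMBus commands address, so the selector is updated first if
+ * it does not already point at this battery.
+ */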
+static int acpi_battery_set_alarm(struct acpi_battery *battery)
+{
+ struct acpi_sbs *sbs = battery->sbs;
+ u16 value, sel = 1 << (battery->id + 12);
+ int ret;
+
+ if (sbs->manager_present) {
+ ret = acpi_smbus_read(sbs->hc, SMBUS_READ_WORD, ACPI_SBS_MANAGER,
+ 0x01, (u8 *)&value);
+ if (ret)
+ goto end;
+ if ((value & 0xf000) != sel) {
+ value &= 0x0fff;
+ value |= sel;
+ ret = acpi_smbus_write(sbs->hc, SMBUS_WRITE_WORD,
+ ACPI_SBS_MANAGER,
+ 0x01, (u8 *)&value, 2);
+ if (ret)
+ goto end;
+ }
+ }
+ ret = acpi_smbus_write(sbs->hc, SMBUS_WRITE_WORD, ACPI_SBS_BATTERY,
+ 0x01, (u8 *)&battery->alarm_capacity, 2);
+ end:
+ return ret;
+}
+
+static int acpi_ac_get_present(struct acpi_sbs *sbs)
+{
+ int result;
+ u16 status;
+
+ result = acpi_smbus_read(sbs->hc, SMBUS_READ_WORD, ACPI_SBS_CHARGER,
+ 0x13, (u8 *)&status);
+ if (!result)
+ sbs->charger_present = (status >> 15) & 0x1;
+ return result;
+}
+
+#ifdef CONFIG_ACPI_SYSFS_POWER
+static ssize_t acpi_battery_alarm_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
+ acpi_battery_get_alarm(battery);
+ return sprintf(buf, "%d\n", battery->alarm_capacity *
+ acpi_battery_scale(battery) * 1000);
+}
+
+static ssize_t acpi_battery_alarm_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long x;
+ struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
+ if (sscanf(buf, "%ld\n", &x) == 1)
+ battery->alarm_capacity = x /
+ (1000 * acpi_battery_scale(battery));
+ if (battery->present)
+ acpi_battery_set_alarm(battery);
+ return count;
+}
+
+static struct device_attribute alarm_attr = {
+ .attr = {.name = "alarm", .mode = 0644},
+ .show = acpi_battery_alarm_show,
+ .store = acpi_battery_alarm_store,
+};
+#endif
+
+/* --------------------------------------------------------------------------
+ FS Interface (/proc/acpi)
+ -------------------------------------------------------------------------- */
+
+#ifdef CONFIG_ACPI_PROCFS_POWER
+/* Generic Routines */
+static int
+acpi_sbs_add_fs(struct proc_dir_entry **dir,
+ struct proc_dir_entry *parent_dir,
+ char *dir_name,
+ struct file_operations *info_fops,
+ struct file_operations *state_fops,
+ struct file_operations *alarm_fops, void *data)
+{
+ if (!*dir) {
+ *dir = proc_mkdir(dir_name, parent_dir);
+ if (!*dir) {
+ return -ENODEV;
+ }
+ (*dir)->owner = THIS_MODULE;
+ }
+
+ /* 'info' [R] */
+ if (info_fops)
+ proc_create_data(ACPI_SBS_FILE_INFO, S_IRUGO, *dir,
+ info_fops, data);
+
+ /* 'state' [R] */
+ if (state_fops)
+ proc_create_data(ACPI_SBS_FILE_STATE, S_IRUGO, *dir,
+ state_fops, data);
+
+ /* 'alarm' [R/W] */
+ if (alarm_fops)
+ proc_create_data(ACPI_SBS_FILE_ALARM, S_IRUGO | S_IWUSR, *dir,
+ alarm_fops, data);
+ return 0;
+}
+
+static void
+acpi_sbs_remove_fs(struct proc_dir_entry **dir,
+ struct proc_dir_entry *parent_dir)
+{
+ if (*dir) {
+ remove_proc_entry(ACPI_SBS_FILE_INFO, *dir);
+ remove_proc_entry(ACPI_SBS_FILE_STATE, *dir);
+ remove_proc_entry(ACPI_SBS_FILE_ALARM, *dir);
+ remove_proc_entry((*dir)->name, parent_dir);
+ *dir = NULL;
+ }
+}
+
+/* Smart Battery Interface */
+static struct proc_dir_entry *acpi_battery_dir = NULL;
+
+static inline char *acpi_battery_units(struct acpi_battery *battery)
+{
+ return acpi_battery_mode(battery) ? " mW" : " mA";
+}
+
+
+static int acpi_battery_read_info(struct seq_file *seq, void *offset)
+{
+ struct acpi_battery *battery = seq->private;
+ struct acpi_sbs *sbs = battery->sbs;
+ int result = 0;
+
+ mutex_lock(&sbs->lock);
+
+ seq_printf(seq, "present: %s\n",
+ (battery->present) ? "yes" : "no");
+ if (!battery->present)
+ goto end;
+
+ seq_printf(seq, "design capacity: %i%sh\n",
+ battery->design_capacity * acpi_battery_scale(battery),
+ acpi_battery_units(battery));
+ seq_printf(seq, "last full capacity: %i%sh\n",
+ battery->full_charge_capacity * acpi_battery_scale(battery),
+ acpi_battery_units(battery));
+ seq_printf(seq, "battery technology: rechargeable\n");
+ seq_printf(seq, "design voltage: %i mV\n",
+ battery->design_voltage * acpi_battery_vscale(battery));
+ seq_printf(seq, "design capacity warning: unknown\n");
+ seq_printf(seq, "design capacity low: unknown\n");
+ seq_printf(seq, "capacity granularity 1: unknown\n");
+ seq_printf(seq, "capacity granularity 2: unknown\n");
+ seq_printf(seq, "model number: %s\n", battery->device_name);
+ seq_printf(seq, "serial number: %i\n",
+ battery->serial_number);
+ seq_printf(seq, "battery type: %s\n",
+ battery->device_chemistry);
+ seq_printf(seq, "OEM info: %s\n",
+ battery->manufacturer_name);
+ end:
+ mutex_unlock(&sbs->lock);
+ return result;
+}
+
+static int acpi_battery_info_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_battery_read_info, PDE(inode)->data);
+}
+
+static int acpi_battery_read_state(struct seq_file *seq, void *offset)
+{
+ struct acpi_battery *battery = seq->private;
+ struct acpi_sbs *sbs = battery->sbs;
+ int rate;
+
+ mutex_lock(&sbs->lock);
+ seq_printf(seq, "present: %s\n",
+ (battery->present) ? "yes" : "no");
+ if (!battery->present)
+ goto end;
+
+ acpi_battery_get_state(battery);
+ seq_printf(seq, "capacity state: %s\n",
+ (battery->state & 0x0010) ? "critical" : "ok");
+ seq_printf(seq, "charging state: %s\n",
+ (battery->current_now < 0) ? "discharging" :
+ ((battery->current_now > 0) ? "charging" : "charged"));
+ rate = abs(battery->current_now) * acpi_battery_ipscale(battery);
+ rate *= acpi_battery_mode(battery) ?
+ (battery->voltage_now * acpi_battery_vscale(battery) / 1000) : 1;
+ seq_printf(seq, "present rate: %d%s\n", rate,
+ acpi_battery_units(battery));
+ seq_printf(seq, "remaining capacity: %i%sh\n",
+ battery->capacity_now * acpi_battery_scale(battery),
+ acpi_battery_units(battery));
+ seq_printf(seq, "present voltage: %i mV\n",
+ battery->voltage_now * acpi_battery_vscale(battery));
+
+ end:
+ mutex_unlock(&sbs->lock);
+ return 0;
+}
+
+static int acpi_battery_state_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_battery_read_state, PDE(inode)->data);
+}
+
+static int acpi_battery_read_alarm(struct seq_file *seq, void *offset)
+{
+ struct acpi_battery *battery = seq->private;
+ struct acpi_sbs *sbs = battery->sbs;
+ int result = 0;
+
+ mutex_lock(&sbs->lock);
+
+ if (!battery->present) {
+ seq_printf(seq, "present: no\n");
+ goto end;
+ }
+
+ acpi_battery_get_alarm(battery);
+ seq_printf(seq, "alarm: ");
+ if (battery->alarm_capacity)
+ seq_printf(seq, "%i%sh\n",
+ battery->alarm_capacity *
+ acpi_battery_scale(battery),
+ acpi_battery_units(battery));
+ else
+ seq_printf(seq, "disabled\n");
+ end:
+ mutex_unlock(&sbs->lock);
+ return result;
+}
+
+static ssize_t
+acpi_battery_write_alarm(struct file *file, const char __user * buffer,
+ size_t count, loff_t * ppos)
+{
+ struct seq_file *seq = file->private_data;
+ struct acpi_battery *battery = seq->private;
+ struct acpi_sbs *sbs = battery->sbs;
+ char alarm_string[12] = { '\0' };
+ int result = 0;
+ mutex_lock(&sbs->lock);
+ if (!battery->present) {
+ result = -ENODEV;
+ goto end;
+ }
+ if (count > sizeof(alarm_string) - 1) {
+ result = -EINVAL;
+ goto end;
+ }
+ if (copy_from_user(alarm_string, buffer, count)) {
+ result = -EFAULT;
+ goto end;
+ }
+ alarm_string[count] = 0;
+ battery->alarm_capacity = simple_strtoul(alarm_string, NULL, 0) /
+ acpi_battery_scale(battery);
+ acpi_battery_set_alarm(battery);
+ end:
+ mutex_unlock(&sbs->lock);
+ if (result)
+ return result;
+ return count;
+}
+
+static int acpi_battery_alarm_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_battery_read_alarm, PDE(inode)->data);
+}
+
+static struct file_operations acpi_battery_info_fops = {
+ .open = acpi_battery_info_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static struct file_operations acpi_battery_state_fops = {
+ .open = acpi_battery_state_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static struct file_operations acpi_battery_alarm_fops = {
+ .open = acpi_battery_alarm_open_fs,
+ .read = seq_read,
+ .write = acpi_battery_write_alarm,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+/* Legacy AC Adapter Interface */
+
+static struct proc_dir_entry *acpi_ac_dir = NULL;
+
+static int acpi_ac_read_state(struct seq_file *seq, void *offset)
+{
+ struct acpi_sbs *sbs = seq->private;
+
+ mutex_lock(&sbs->lock);
+
+ seq_printf(seq, "state: %s\n",
+ sbs->charger_present ? "on-line" : "off-line");
+
+ mutex_unlock(&sbs->lock);
+ return 0;
+}
+
+static int acpi_ac_state_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_ac_read_state, PDE(inode)->data);
+}
+
+static struct file_operations acpi_ac_state_fops = {
+ .open = acpi_ac_state_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+#endif
+
+/* --------------------------------------------------------------------------
+ Driver Interface
+ -------------------------------------------------------------------------- */
+static int acpi_battery_read(struct acpi_battery *battery)
+{
+ int result = 0, saved_present = battery->present;
+ u16 state;
+
+ if (battery->sbs->manager_present) {
+ result = acpi_smbus_read(battery->sbs->hc, SMBUS_READ_WORD,
+ ACPI_SBS_MANAGER, 0x01, (u8 *)&state);
+ if (!result)
+ battery->present = state & (1 << battery->id);
+ state &= 0x0fff;
+ state |= 1 << (battery->id + 12);
+ acpi_smbus_write(battery->sbs->hc, SMBUS_WRITE_WORD,
+ ACPI_SBS_MANAGER, 0x01, (u8 *)&state, 2);
+ } else if (battery->id == 0)
+ battery->present = 1;
+ if (result || !battery->present)
+ return result;
+
+ if (saved_present != battery->present) {
+ battery->update_time = 0;
+ result = acpi_battery_get_info(battery);
+ if (result)
+ return result;
+ }
+ result = acpi_battery_get_state(battery);
+ return result;
+}
+
+/* Smart Battery */
+static int acpi_battery_add(struct acpi_sbs *sbs, int id)
+{
+ struct acpi_battery *battery = &sbs->battery[id];
+ int result;
+
+ battery->id = id;
+ battery->sbs = sbs;
+ result = acpi_battery_read(battery);
+ if (result)
+ return result;
+
+ sprintf(battery->name, ACPI_BATTERY_DIR_NAME, id);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_sbs_add_fs(&battery->proc_entry, acpi_battery_dir,
+ battery->name, &acpi_battery_info_fops,
+ &acpi_battery_state_fops, &acpi_battery_alarm_fops,
+ battery);
+#endif
+#ifdef CONFIG_ACPI_SYSFS_POWER
+ battery->bat.name = battery->name;
+ battery->bat.type = POWER_SUPPLY_TYPE_BATTERY;
+ if (!acpi_battery_mode(battery)) {
+ battery->bat.properties = sbs_charge_battery_props;
+ battery->bat.num_properties =
+ ARRAY_SIZE(sbs_charge_battery_props);
+ } else {
+ battery->bat.properties = sbs_energy_battery_props;
+ battery->bat.num_properties =
+ ARRAY_SIZE(sbs_energy_battery_props);
+ }
+ battery->bat.get_property = acpi_sbs_battery_get_property;
+ result = power_supply_register(&sbs->device->dev, &battery->bat);
+ if (result)
+ goto end;
+ result = device_create_file(battery->bat.dev, &alarm_attr);
+ if (result)
+ goto end;
+ battery->have_sysfs_alarm = 1;
+ end:
+#endif
+ printk(KERN_INFO PREFIX "%s [%s]: Battery Slot [%s] (battery %s)\n",
+ ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device),
+ battery->name, battery->present ? "present" : "absent");
+ return result;
+}
+
+static void acpi_battery_remove(struct acpi_sbs *sbs, int id)
+{
+ struct acpi_battery *battery = &sbs->battery[id];
+#ifdef CONFIG_ACPI_SYSFS_POWER
+ if (battery->bat.dev) {
+ if (battery->have_sysfs_alarm)
+ device_remove_file(battery->bat.dev, &alarm_attr);
+ power_supply_unregister(&battery->bat);
+ }
+#endif
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ if (battery->proc_entry)
+ acpi_sbs_remove_fs(&battery->proc_entry, acpi_battery_dir);
+#endif
+}
+
+static int acpi_charger_add(struct acpi_sbs *sbs)
+{
+ int result;
+
+ result = acpi_ac_get_present(sbs);
+ if (result)
+ goto end;
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ result = acpi_sbs_add_fs(&sbs->charger_entry, acpi_ac_dir,
+ ACPI_AC_DIR_NAME, NULL,
+ &acpi_ac_state_fops, NULL, sbs);
+ if (result)
+ goto end;
+#endif
+#ifdef CONFIG_ACPI_SYSFS_POWER
+ sbs->charger.name = "sbs-charger";
+ sbs->charger.type = POWER_SUPPLY_TYPE_MAINS;
+ sbs->charger.properties = sbs_ac_props;
+ sbs->charger.num_properties = ARRAY_SIZE(sbs_ac_props);
+ sbs->charger.get_property = sbs_get_ac_property;
+ power_supply_register(&sbs->device->dev, &sbs->charger);
+#endif
+ printk(KERN_INFO PREFIX "%s [%s]: AC Adapter [%s] (%s)\n",
+ ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device),
+ ACPI_AC_DIR_NAME, sbs->charger_present ? "on-line" : "off-line");
+ end:
+ return result;
+}
+
+static void acpi_charger_remove(struct acpi_sbs *sbs)
+{
+#ifdef CONFIG_ACPI_SYSFS_POWER
+ if (sbs->charger.dev)
+ power_supply_unregister(&sbs->charger);
+#endif
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ if (sbs->charger_entry)
+ acpi_sbs_remove_fs(&sbs->charger_entry, acpi_ac_dir);
+#endif
+}
+
+static void acpi_sbs_callback(void *context)
+{
+ int id;
+ struct acpi_sbs *sbs = context;
+ struct acpi_battery *bat;
+ u8 saved_charger_state = sbs->charger_present;
+ u8 saved_battery_state;
+ acpi_ac_get_present(sbs);
+ if (sbs->charger_present != saved_charger_state) {
+#ifdef CONFIG_ACPI_PROC_EVENT
+ acpi_bus_generate_proc_event4(ACPI_AC_CLASS, ACPI_AC_DIR_NAME,
+ ACPI_SBS_NOTIFY_STATUS,
+ sbs->charger_present);
+#endif
+#ifdef CONFIG_ACPI_SYSFS_POWER
+ kobject_uevent(&sbs->charger.dev->kobj, KOBJ_CHANGE);
+#endif
+ }
+ if (sbs->manager_present) {
+ for (id = 0; id < MAX_SBS_BAT; ++id) {
+ if (!(sbs->batteries_supported & (1 << id)))
+ continue;
+ bat = &sbs->battery[id];
+ saved_battery_state = bat->present;
+ acpi_battery_read(bat);
+ if (saved_battery_state == bat->present)
+ continue;
+#ifdef CONFIG_ACPI_PROC_EVENT
+ acpi_bus_generate_proc_event4(ACPI_BATTERY_CLASS,
+ bat->name,
+ ACPI_SBS_NOTIFY_STATUS,
+ bat->present);
+#endif
+#ifdef CONFIG_ACPI_SYSFS_POWER
+ kobject_uevent(&bat->bat.dev->kobj, KOBJ_CHANGE);
+#endif
+ }
+ }
+}
+
+static int acpi_sbs_remove(struct acpi_device *device, int type);
+
+static int acpi_sbs_add(struct acpi_device *device)
+{
+ struct acpi_sbs *sbs;
+ int result = 0;
+ int id;
+
+ sbs = kzalloc(sizeof(struct acpi_sbs), GFP_KERNEL);
+ if (!sbs) {
+ result = -ENOMEM;
+ goto end;
+ }
+
+ mutex_init(&sbs->lock);
+
+ sbs->hc = acpi_driver_data(device->parent);
+ sbs->device = device;
+ strcpy(acpi_device_name(device), ACPI_SBS_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_SBS_CLASS);
+ device->driver_data = sbs;
+
+ result = acpi_charger_add(sbs);
+ if (result)
+ goto end;
+
+ result = acpi_manager_get_info(sbs);
+ if (!result) {
+ sbs->manager_present = 1;
+ for (id = 0; id < MAX_SBS_BAT; ++id)
+ if ((sbs->batteries_supported & (1 << id)))
+ acpi_battery_add(sbs, id);
+ } else
+ acpi_battery_add(sbs, 0);
+ acpi_smbus_register_callback(sbs->hc, acpi_sbs_callback, sbs);
+ end:
+ if (result)
+ acpi_sbs_remove(device, 0);
+ return result;
+}
+
+static int acpi_sbs_remove(struct acpi_device *device, int type)
+{
+ struct acpi_sbs *sbs;
+ int id;
+
+ if (!device)
+ return -EINVAL;
+ sbs = acpi_driver_data(device);
+ if (!sbs)
+ return -EINVAL;
+ mutex_lock(&sbs->lock);
+ acpi_smbus_unregister_callback(sbs->hc);
+ for (id = 0; id < MAX_SBS_BAT; ++id)
+ acpi_battery_remove(sbs, id);
+ acpi_charger_remove(sbs);
+ mutex_unlock(&sbs->lock);
+ mutex_destroy(&sbs->lock);
+ kfree(sbs);
+ return 0;
+}
+
+static void acpi_sbs_rmdirs(void)
+{
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ if (acpi_ac_dir) {
+ acpi_unlock_ac_dir(acpi_ac_dir);
+ acpi_ac_dir = NULL;
+ }
+ if (acpi_battery_dir) {
+ acpi_unlock_battery_dir(acpi_battery_dir);
+ acpi_battery_dir = NULL;
+ }
+#endif
+}
+
+static int acpi_sbs_resume(struct acpi_device *device)
+{
+ struct acpi_sbs *sbs;
+ if (!device)
+ return -EINVAL;
+ sbs = device->driver_data;
+ acpi_sbs_callback(sbs);
+ return 0;
+}
+
+static struct acpi_driver acpi_sbs_driver = {
+ .name = "sbs",
+ .class = ACPI_SBS_CLASS,
+ .ids = sbs_device_ids,
+ .ops = {
+ .add = acpi_sbs_add,
+ .remove = acpi_sbs_remove,
+ .resume = acpi_sbs_resume,
+ },
+};
+
+static int __init acpi_sbs_init(void)
+{
+ int result = 0;
+
+ if (acpi_disabled)
+ return -ENODEV;
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_ac_dir = acpi_lock_ac_dir();
+ if (!acpi_ac_dir)
+ return -ENODEV;
+ acpi_battery_dir = acpi_lock_battery_dir();
+ if (!acpi_battery_dir) {
+ acpi_sbs_rmdirs();
+ return -ENODEV;
+ }
+#endif
+ result = acpi_bus_register_driver(&acpi_sbs_driver);
+ if (result < 0) {
+ acpi_sbs_rmdirs();
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void __exit acpi_sbs_exit(void)
+{
+ acpi_bus_unregister_driver(&acpi_sbs_driver);
+ acpi_sbs_rmdirs();
+ return;
+}
+
+module_init(acpi_sbs_init);
+module_exit(acpi_sbs_exit);
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
new file mode 100644
index 0000000..e53e590
--- /dev/null
+++ b/drivers/acpi/sbshc.c
@@ -0,0 +1,330 @@
+/*
+ * SMBus driver for ACPI Embedded Controller (v0.1)
+ *
+ * Copyright (c) 2007 Alexey Starikovskiy
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ */
+
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/actypes.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include "sbshc.h"
+
+#define ACPI_SMB_HC_CLASS "smbus_host_controller"
+#define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
+
+struct acpi_smb_hc {
+ struct acpi_ec *ec;
+ struct mutex lock;
+ wait_queue_head_t wait;
+ u8 offset;
+ u8 query_bit;
+ smbus_alarm_callback callback;
+ void *context;
+};
+
+static int acpi_smbus_hc_add(struct acpi_device *device);
+static int acpi_smbus_hc_remove(struct acpi_device *device, int type);
+
+static const struct acpi_device_id sbs_device_ids[] = {
+ {"ACPI0001", 0},
+ {"ACPI0005", 0},
+ {"", 0},
+};
+
+MODULE_DEVICE_TABLE(acpi, sbs_device_ids);
+
+static struct acpi_driver acpi_smb_hc_driver = {
+ .name = "smbus_hc",
+ .class = ACPI_SMB_HC_CLASS,
+ .ids = sbs_device_ids,
+ .ops = {
+ .add = acpi_smbus_hc_add,
+ .remove = acpi_smbus_hc_remove,
+ },
+};
+
+union acpi_smb_status {
+ u8 raw;
+ struct {
+ u8 status:5;
+ u8 reserved:1;
+ u8 alarm:1;
+ u8 done:1;
+ } fields;
+};
+
+enum acpi_smb_status_codes {
+ SMBUS_OK = 0,
+ SMBUS_UNKNOWN_FAILURE = 0x07,
+ SMBUS_DEVICE_ADDRESS_NACK = 0x10,
+ SMBUS_DEVICE_ERROR = 0x11,
+ SMBUS_DEVICE_COMMAND_ACCESS_DENIED = 0x12,
+ SMBUS_UNKNOWN_ERROR = 0x13,
+ SMBUS_DEVICE_ACCESS_DENIED = 0x17,
+ SMBUS_TIMEOUT = 0x18,
+ SMBUS_HOST_UNSUPPORTED_PROTOCOL = 0x19,
+ SMBUS_BUSY = 0x1a,
+ SMBUS_PEC_ERROR = 0x1f,
+};
+
+enum acpi_smb_offset {
+ ACPI_SMB_PROTOCOL = 0, /* protocol, PEC */
+ ACPI_SMB_STATUS = 1, /* status */
+ ACPI_SMB_ADDRESS = 2, /* address */
+ ACPI_SMB_COMMAND = 3, /* command */
+ ACPI_SMB_DATA = 4, /* 32 data registers */
+ ACPI_SMB_BLOCK_COUNT = 0x24, /* number of data bytes */
+ ACPI_SMB_ALARM_ADDRESS = 0x25, /* alarm address */
+ ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */
+};
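+
+/*
+ * These offsets describe the SMBus host controller register block that the
+ * firmware embeds in EC address space; hc->offset (taken from the upper
+ * byte of _EC, see acpi_smbus_hc_add() below) is added to each of them by
+ * smb_hc_read()/smb_hc_write().
+ */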
+
+static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data)
+{
+ return ec_read(hc->offset + address, data);
+}
+
+static inline int smb_hc_write(struct acpi_smb_hc *hc, u8 address, u8 data)
+{
+ return ec_write(hc->offset + address, data);
+}
+
+static inline int smb_check_done(struct acpi_smb_hc *hc)
+{
+ union acpi_smb_status status = {.raw = 0};
+ smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw);
+ return status.fields.done && (status.fields.status == SMBUS_OK);
+}
+
+static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout)
+{
+ if (wait_event_timeout(hc->wait, smb_check_done(hc),
+ msecs_to_jiffies(timeout)))
+ return 0;
+ /*
+ * The wait timed out; check the SMBus status once more. If the
+ * transaction has in fact completed, treat the timeout as spurious.
+ */
+ if (smb_check_done(hc))
+ return 0;
+ else
+ return -ETIME;
+}
+
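+/*
+ * Run one SMBus transaction through the EC-based host controller: the
+ * command, any write data and the (shifted) slave address are written
+ * first, and writing the protocol byte starts the transfer. Completion is
+ * signalled through the EC query handler (smbus_alarm), which wakes
+ * wait_transaction_complete().
+ */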
+static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
+ u8 address, u8 command, u8 *data, u8 length)
+{
+ int ret = -EFAULT, i;
+ u8 temp, sz = 0;
+
+ if (!hc) {
+ printk(KERN_ERR PREFIX "host controller is not configured\n");
+ return ret;
+ }
+
+ mutex_lock(&hc->lock);
+ if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp))
+ goto end;
+ if (temp) {
+ ret = -EBUSY;
+ goto end;
+ }
+ smb_hc_write(hc, ACPI_SMB_COMMAND, command);
+ if (!(protocol & 0x01)) {
+ smb_hc_write(hc, ACPI_SMB_BLOCK_COUNT, length);
+ for (i = 0; i < length; ++i)
+ smb_hc_write(hc, ACPI_SMB_DATA + i, data[i]);
+ }
+ smb_hc_write(hc, ACPI_SMB_ADDRESS, address << 1);
+ smb_hc_write(hc, ACPI_SMB_PROTOCOL, protocol);
+ /*
+ * Wait for completion. For read protocols, copy the returned data
+ * (and, for block reads, the byte count) back to the caller's buffer.
+ */
+ ret = wait_transaction_complete(hc, 1000);
+ if (ret || !(protocol & 0x01))
+ goto end;
+ switch (protocol) {
+ case SMBUS_RECEIVE_BYTE:
+ case SMBUS_READ_BYTE:
+ sz = 1;
+ break;
+ case SMBUS_READ_WORD:
+ sz = 2;
+ break;
+ case SMBUS_READ_BLOCK:
+ if (smb_hc_read(hc, ACPI_SMB_BLOCK_COUNT, &sz)) {
+ ret = -EFAULT;
+ goto end;
+ }
+ sz &= 0x1f;
+ break;
+ }
+ for (i = 0; i < sz; ++i)
+ smb_hc_read(hc, ACPI_SMB_DATA + i, &data[i]);
+ end:
+ mutex_unlock(&hc->lock);
+ return ret;
+}
+
+int acpi_smbus_read(struct acpi_smb_hc *hc, u8 protocol, u8 address,
+ u8 command, u8 *data)
+{
+ return acpi_smbus_transaction(hc, protocol, address, command, data, 0);
+}
+
+EXPORT_SYMBOL_GPL(acpi_smbus_read);
+
+int acpi_smbus_write(struct acpi_smb_hc *hc, u8 protocol, u8 address,
+ u8 command, u8 *data, u8 length)
+{
+ return acpi_smbus_transaction(hc, protocol, address, command, data, length);
+}
+
+EXPORT_SYMBOL_GPL(acpi_smbus_write);
+
+int acpi_smbus_register_callback(struct acpi_smb_hc *hc,
+ smbus_alarm_callback callback, void *context)
+{
+ mutex_lock(&hc->lock);
+ hc->callback = callback;
+ hc->context = context;
+ mutex_unlock(&hc->lock);
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(acpi_smbus_register_callback);
+
+int acpi_smbus_unregister_callback(struct acpi_smb_hc *hc)
+{
+ mutex_lock(&hc->lock);
+ hc->callback = NULL;
+ hc->context = NULL;
+ mutex_unlock(&hc->lock);
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(acpi_smbus_unregister_callback);
+
+static inline void acpi_smbus_callback(void *context)
+{
+ struct acpi_smb_hc *hc = context;
+ if (hc->callback)
+ hc->callback(hc->context);
+}
+
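+/*
+ * EC query handler for the SBS query bit: wakes any waiter when the 'done'
+ * bit is set and, for alarm notifications from a known device (charger,
+ * manager or battery), dispatches the registered callback via
+ * acpi_os_execute().
+ */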
+static int smbus_alarm(void *context)
+{
+ struct acpi_smb_hc *hc = context;
+ union acpi_smb_status status;
+ u8 address;
+ if (smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw))
+ return 0;
+ /* A set 'done' bit signals transaction completion; wake up any waiter */
+ if (status.fields.done)
+ wake_up(&hc->wait);
+ if (!status.fields.alarm)
+ return 0;
+ mutex_lock(&hc->lock);
+ smb_hc_read(hc, ACPI_SMB_ALARM_ADDRESS, &address);
+ status.fields.alarm = 0;
+ smb_hc_write(hc, ACPI_SMB_STATUS, status.raw);
+ /* We are only interested in events coming from known devices */
+ switch (address >> 1) {
+ case ACPI_SBS_CHARGER:
+ case ACPI_SBS_MANAGER:
+ case ACPI_SBS_BATTERY:
+ acpi_os_execute(OSL_GPE_HANDLER,
+ acpi_smbus_callback, hc);
+ default:;
+ }
+ mutex_unlock(&hc->lock);
+ return 0;
+}
+
+typedef int (*acpi_ec_query_func) (void *data);
+
+extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
+ acpi_handle handle, acpi_ec_query_func func,
+ void *data);
+
+static int acpi_smbus_hc_add(struct acpi_device *device)
+{
+ acpi_status status;
+ unsigned long long val;
+ struct acpi_smb_hc *hc;
+
+ if (!device)
+ return -EINVAL;
+
+ status = acpi_evaluate_integer(device->handle, "_EC", NULL, &val);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX "error obtaining _EC.\n");
+ return -EIO;
+ }
+
+ strcpy(acpi_device_name(device), ACPI_SMB_HC_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_SMB_HC_CLASS);
+
+ hc = kzalloc(sizeof(struct acpi_smb_hc), GFP_KERNEL);
+ if (!hc)
+ return -ENOMEM;
+ mutex_init(&hc->lock);
+ init_waitqueue_head(&hc->wait);
+
+ hc->ec = acpi_driver_data(device->parent);
+ hc->offset = (val >> 8) & 0xff;
+ hc->query_bit = val & 0xff;
+ device->driver_data = hc;
+
+ acpi_ec_add_query_handler(hc->ec, hc->query_bit, NULL, smbus_alarm, hc);
+ printk(KERN_INFO PREFIX "SBS HC: EC = 0x%p, offset = 0x%0x, query_bit = 0x%0x\n",
+ hc->ec, hc->offset, hc->query_bit);
+
+ return 0;
+}
+
+extern void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
+
+static int acpi_smbus_hc_remove(struct acpi_device *device, int type)
+{
+ struct acpi_smb_hc *hc;
+
+ if (!device)
+ return -EINVAL;
+
+ hc = acpi_driver_data(device);
+ acpi_ec_remove_query_handler(hc->ec, hc->query_bit);
+ kfree(hc);
+ device->driver_data = NULL;
+ return 0;
+}
+
+static int __init acpi_smb_hc_init(void)
+{
+ int result;
+
+ result = acpi_bus_register_driver(&acpi_smb_hc_driver);
+ if (result < 0)
+ return -ENODEV;
+ return 0;
+}
+
+static void __exit acpi_smb_hc_exit(void)
+{
+ acpi_bus_unregister_driver(&acpi_smb_hc_driver);
+}
+
+module_init(acpi_smb_hc_init);
+module_exit(acpi_smb_hc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alexey Starikovskiy");
+MODULE_DESCRIPTION("ACPI SMBus HC driver");
diff --git a/drivers/acpi/sbshc.h b/drivers/acpi/sbshc.h
new file mode 100644
index 0000000..a57b076
--- /dev/null
+++ b/drivers/acpi/sbshc.h
@@ -0,0 +1,33 @@
+struct acpi_smb_hc;
+enum acpi_smb_protocol {
+ SMBUS_WRITE_QUICK = 2,
+ SMBUS_READ_QUICK = 3,
+ SMBUS_SEND_BYTE = 4,
+ SMBUS_RECEIVE_BYTE = 5,
+ SMBUS_WRITE_BYTE = 6,
+ SMBUS_READ_BYTE = 7,
+ SMBUS_WRITE_WORD = 8,
+ SMBUS_READ_WORD = 9,
+ SMBUS_WRITE_BLOCK = 0xa,
+ SMBUS_READ_BLOCK = 0xb,
+ SMBUS_PROCESS_CALL = 0xc,
+ SMBUS_BLOCK_PROCESS_CALL = 0xd,
+};
+
+static const u8 SMBUS_PEC = 0x80;
+
+enum acpi_sbs_device_addr {
+ ACPI_SBS_CHARGER = 0x9,
+ ACPI_SBS_MANAGER = 0xa,
+ ACPI_SBS_BATTERY = 0xb,
+};
+
+typedef void (*smbus_alarm_callback)(void *context);
+
+extern int acpi_smbus_read(struct acpi_smb_hc *hc, u8 protocol, u8 address,
+ u8 command, u8 *data);
+extern int acpi_smbus_write(struct acpi_smb_hc *hc, u8 protocol, u8 slave_address,
+ u8 command, u8 *data, u8 length);
+extern int acpi_smbus_register_callback(struct acpi_smb_hc *hc,
+ smbus_alarm_callback callback, void *context);
+extern int acpi_smbus_unregister_callback(struct acpi_smb_hc *hc);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
new file mode 100644
index 0000000..39b7233
--- /dev/null
+++ b/drivers/acpi/scan.c
@@ -0,0 +1,1571 @@
+/*
+ * scan.c - support for transforming the ACPI namespace into individual objects
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/acpi.h>
+#include <linux/signal.h>
+#include <linux/kthread.h>
+
+#include <acpi/acpi_drivers.h>
+#include <acpi/acinterp.h> /* for acpi_ex_eisa_id_to_string() */
+
+#define _COMPONENT ACPI_BUS_COMPONENT
+ACPI_MODULE_NAME("scan");
+#define STRUCT_TO_INT(s) (*((int*)&s))
+extern struct acpi_device *acpi_root;
+
+#define ACPI_BUS_CLASS "system_bus"
+#define ACPI_BUS_HID "LNXSYBUS"
+#define ACPI_BUS_DEVICE_NAME "System Bus"
+
+static LIST_HEAD(acpi_device_list);
+static LIST_HEAD(acpi_bus_id_list);
+DEFINE_SPINLOCK(acpi_device_lock);
+LIST_HEAD(acpi_wakeup_device_list);
+
+struct acpi_device_bus_id{
+ char bus_id[15];
+ unsigned int instance_no;
+ struct list_head node;
+};
+
+/*
+ * Creates hid/cid(s) string needed for modalias and uevent
+ * e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get:
+ * char *modalias: "acpi:IBM0001:ACPI0001"
+ */
+static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
+ int size)
+{
+ int len;
+ int count;
+
+ if (!acpi_dev->flags.hardware_id && !acpi_dev->flags.compatible_ids)
+ return -ENODEV;
+
+ len = snprintf(modalias, size, "acpi:");
+ size -= len;
+
+ if (acpi_dev->flags.hardware_id) {
+ count = snprintf(&modalias[len], size, "%s:",
+ acpi_dev->pnp.hardware_id);
+ if (count < 0 || count >= size)
+ return -EINVAL;
+ len += count;
+ size -= count;
+ }
+
+ if (acpi_dev->flags.compatible_ids) {
+ struct acpi_compatible_id_list *cid_list;
+ int i;
+
+ cid_list = acpi_dev->pnp.cid_list;
+ for (i = 0; i < cid_list->count; i++) {
+ count = snprintf(&modalias[len], size, "%s:",
+ cid_list->id[i].value);
+ if (count < 0 || count >= size) {
+ printk(KERN_ERR PREFIX "%s cid[%i] exceeds event buffer size\n",
+ acpi_dev->pnp.device_name, i);
+ break;
+ }
+ len += count;
+ size -= count;
+ }
+ }
+
+ modalias[len] = '\0';
+ return len;
+}
+
+static ssize_t
+acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf) {
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ int len;
+
+ /* Device has no HID and no CID or string is >1024 */
+ len = create_modalias(acpi_dev, buf, 1024);
+ if (len <= 0)
+ return 0;
+ buf[len++] = '\n';
+ return len;
+}
+static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
+
+static int acpi_bus_hot_remove_device(void *context)
+{
+ struct acpi_device *device;
+ acpi_handle handle = context;
+ struct acpi_object_list arg_list;
+ union acpi_object arg;
+ acpi_status status = AE_OK;
+
+ if (acpi_bus_get_device(handle, &device))
+ return 0;
+
+ if (!device)
+ return 0;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Hot-removing device %s...\n", dev_name(&device->dev)));
+
+ if (acpi_bus_trim(device, 1)) {
+ printk(KERN_ERR PREFIX
+ "Removing device failed\n");
+ return -1;
+ }
+
+ /* power off device */
+ status = acpi_evaluate_object(handle, "_PS3", NULL, NULL);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
+ printk(KERN_WARNING PREFIX
+ "Power-off device failed\n");
+
+ if (device->flags.lockable) {
+ arg_list.count = 1;
+ arg_list.pointer = &arg;
+ arg.type = ACPI_TYPE_INTEGER;
+ arg.integer.value = 0;
+ acpi_evaluate_object(handle, "_LCK", &arg_list, NULL);
+ }
+
+ arg_list.count = 1;
+ arg_list.pointer = &arg;
+ arg.type = ACPI_TYPE_INTEGER;
+ arg.integer.value = 1;
+
+ /*
+ * TBD: _EJD support.
+ */
+ status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return 0;
+}
+
+static ssize_t
+acpi_eject_store(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret = count;
+ acpi_status status;
+ acpi_object_type type = 0;
+ struct acpi_device *acpi_device = to_acpi_device(d);
+ struct task_struct *task;
+
+ if ((!count) || (buf[0] != '1')) {
+ return -EINVAL;
+ }
+#ifndef FORCE_EJECT
+ if (acpi_device->driver == NULL) {
+ ret = -ENODEV;
+ goto err;
+ }
+#endif
+ status = acpi_get_type(acpi_device->handle, &type);
+ if (ACPI_FAILURE(status) || (!acpi_device->flags.ejectable)) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* remove the device in another thread to fix the deadlock issue */
+ task = kthread_run(acpi_bus_hot_remove_device,
+ acpi_device->handle, "acpi_hot_remove_device");
+ if (IS_ERR(task))
+ ret = PTR_ERR(task);
+err:
+ return ret;
+}
+
+static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
+
+static ssize_t
+acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) {
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+
+ return sprintf(buf, "%s\n", acpi_dev->pnp.hardware_id);
+}
+static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
+
+static ssize_t
+acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) {
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL};
+ int result;
+
+ result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path);
+ if(result)
+ goto end;
+
+ result = sprintf(buf, "%s\n", (char*)path.pointer);
+ kfree(path.pointer);
+ end:
+ return result;
+}
+static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL);
+
+static int acpi_device_setup_files(struct acpi_device *dev)
+{
+ acpi_status status;
+ acpi_handle temp;
+ int result = 0;
+
+ /*
+ * Devices enumerated from the FADT (fixed-feature devices) don't have a "path" attribute
+ */
+ if(dev->handle) {
+ result = device_create_file(&dev->dev, &dev_attr_path);
+ if(result)
+ goto end;
+ }
+
+ if(dev->flags.hardware_id) {
+ result = device_create_file(&dev->dev, &dev_attr_hid);
+ if(result)
+ goto end;
+ }
+
+ if (dev->flags.hardware_id || dev->flags.compatible_ids){
+ result = device_create_file(&dev->dev, &dev_attr_modalias);
+ if(result)
+ goto end;
+ }
+
+ /*
+ * If device has _EJ0, 'eject' file is created that is used to trigger
+ * hot-removal function from userland.
+ */
+ status = acpi_get_handle(dev->handle, "_EJ0", &temp);
+ if (ACPI_SUCCESS(status))
+ result = device_create_file(&dev->dev, &dev_attr_eject);
+ end:
+ return result;
+}
+
+static void acpi_device_remove_files(struct acpi_device *dev)
+{
+ acpi_status status;
+ acpi_handle temp;
+
+ /*
+ * If device has _EJ0, 'eject' file is created that is used to trigger
+ * hot-removal function from userland.
+ */
+ status = acpi_get_handle(dev->handle, "_EJ0", &temp);
+ if (ACPI_SUCCESS(status))
+ device_remove_file(&dev->dev, &dev_attr_eject);
+
+ if (dev->flags.hardware_id || dev->flags.compatible_ids)
+ device_remove_file(&dev->dev, &dev_attr_modalias);
+
+ if(dev->flags.hardware_id)
+ device_remove_file(&dev->dev, &dev_attr_hid);
+ if(dev->handle)
+ device_remove_file(&dev->dev, &dev_attr_path);
+}
+/* --------------------------------------------------------------------------
+ ACPI Bus operations
+ -------------------------------------------------------------------------- */
+
+int acpi_match_device_ids(struct acpi_device *device,
+ const struct acpi_device_id *ids)
+{
+ const struct acpi_device_id *id;
+
+ /*
+ * If the device is not present, it is unnecessary to load device
+ * driver for it.
+ */
+ if (!device->status.present)
+ return -ENODEV;
+
+ if (device->flags.hardware_id) {
+ for (id = ids; id->id[0]; id++) {
+ if (!strcmp((char*)id->id, device->pnp.hardware_id))
+ return 0;
+ }
+ }
+
+ if (device->flags.compatible_ids) {
+ struct acpi_compatible_id_list *cid_list = device->pnp.cid_list;
+ int i;
+
+ for (id = ids; id->id[0]; id++) {
+ /* compare multiple _CID entries against driver ids */
+ for (i = 0; i < cid_list->count; i++) {
+ if (!strcmp((char*)id->id,
+ cid_list->id[i].value))
+ return 0;
+ }
+ }
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(acpi_match_device_ids);
+
+static void acpi_device_release(struct device *dev)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+
+ kfree(acpi_dev->pnp.cid_list);
+ kfree(acpi_dev);
+}
+
+static int acpi_device_suspend(struct device *dev, pm_message_t state)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ struct acpi_driver *acpi_drv = acpi_dev->driver;
+
+ if (acpi_drv && acpi_drv->ops.suspend)
+ return acpi_drv->ops.suspend(acpi_dev, state);
+ return 0;
+}
+
+static int acpi_device_resume(struct device *dev)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ struct acpi_driver *acpi_drv = acpi_dev->driver;
+
+ if (acpi_drv && acpi_drv->ops.resume)
+ return acpi_drv->ops.resume(acpi_dev);
+ return 0;
+}
+
+static int acpi_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ struct acpi_driver *acpi_drv = to_acpi_driver(drv);
+
+ return !acpi_match_device_ids(acpi_dev, acpi_drv->ids);
+}
+
+static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ int len;
+
+ if (add_uevent_var(env, "MODALIAS="))
+ return -ENOMEM;
+ len = create_modalias(acpi_dev, &env->buf[env->buflen - 1],
+ sizeof(env->buf) - env->buflen);
+ if (len >= (sizeof(env->buf) - env->buflen))
+ return -ENOMEM;
+ env->buflen += len;
+ return 0;
+}
+
+static int acpi_bus_driver_init(struct acpi_device *, struct acpi_driver *);
+static int acpi_start_single_object(struct acpi_device *);
+static int acpi_device_probe(struct device * dev)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
+ int ret;
+
+ ret = acpi_bus_driver_init(acpi_dev, acpi_drv);
+ if (!ret) {
+ if (acpi_dev->bus_ops.acpi_op_start)
+ acpi_start_single_object(acpi_dev);
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Found driver [%s] for device [%s]\n",
+ acpi_drv->name, acpi_dev->pnp.bus_id));
+ get_device(dev);
+ }
+ return ret;
+}
+
+static int acpi_device_remove(struct device * dev)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ struct acpi_driver *acpi_drv = acpi_dev->driver;
+
+ if (acpi_drv) {
+ if (acpi_drv->ops.stop)
+ acpi_drv->ops.stop(acpi_dev, acpi_dev->removal_type);
+ if (acpi_drv->ops.remove)
+ acpi_drv->ops.remove(acpi_dev, acpi_dev->removal_type);
+ }
+ acpi_dev->driver = NULL;
+ acpi_dev->driver_data = NULL;
+
+ put_device(dev);
+ return 0;
+}
+
+static void acpi_device_shutdown(struct device *dev)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ struct acpi_driver *acpi_drv = acpi_dev->driver;
+
+ if (acpi_drv && acpi_drv->ops.shutdown)
+ acpi_drv->ops.shutdown(acpi_dev);
+
+ return ;
+}
+
+struct bus_type acpi_bus_type = {
+ .name = "acpi",
+ .suspend = acpi_device_suspend,
+ .resume = acpi_device_resume,
+ .shutdown = acpi_device_shutdown,
+ .match = acpi_bus_match,
+ .probe = acpi_device_probe,
+ .remove = acpi_device_remove,
+ .uevent = acpi_device_uevent,
+};
+
+static int acpi_device_register(struct acpi_device *device,
+ struct acpi_device *parent)
+{
+ int result;
+ struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id;
+ int found = 0;
+ /*
+ * Linkage
+ * -------
+ * Link this device to its parent and siblings.
+ */
+ INIT_LIST_HEAD(&device->children);
+ INIT_LIST_HEAD(&device->node);
+ INIT_LIST_HEAD(&device->g_list);
+ INIT_LIST_HEAD(&device->wakeup_list);
+
+ new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
+ if (!new_bus_id) {
+ printk(KERN_ERR PREFIX "Memory allocation error\n");
+ return -ENOMEM;
+ }
+
+ spin_lock(&acpi_device_lock);
+ /*
+ * Find suitable bus_id and instance number in acpi_bus_id_list
+ * If failed, create one and link it into acpi_bus_id_list
+ */
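+ /* e.g. two devices with hardware ID "PNP0C0A" become "PNP0C0A:00" and "PNP0C0A:01" */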
+ list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
+ if (!strcmp(acpi_device_bus_id->bus_id,
+ device->flags.hardware_id ?
+ device->pnp.hardware_id : "device")) {
+ acpi_device_bus_id->instance_no++;
+ found = 1;
+ kfree(new_bus_id);
+ break;
+ }
+ }
+ if (!found) {
+ acpi_device_bus_id = new_bus_id;
+ strcpy(acpi_device_bus_id->bus_id, device->flags.hardware_id ? device->pnp.hardware_id : "device");
+ acpi_device_bus_id->instance_no = 0;
+ list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
+ }
+ dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no);
+
+ if (device->parent) {
+ list_add_tail(&device->node, &device->parent->children);
+ list_add_tail(&device->g_list, &device->parent->g_list);
+ } else
+ list_add_tail(&device->g_list, &acpi_device_list);
+ if (device->wakeup.flags.valid)
+ list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list);
+ spin_unlock(&acpi_device_lock);
+
+ if (device->parent)
+ device->dev.parent = &parent->dev;
+ device->dev.bus = &acpi_bus_type;
+ device_initialize(&device->dev);
+ device->dev.release = &acpi_device_release;
+ result = device_add(&device->dev);
+ if(result) {
+ dev_err(&device->dev, "Error adding device\n");
+ goto end;
+ }
+
+ result = acpi_device_setup_files(device);
+ if(result)
+ printk(KERN_ERR PREFIX "Error creating sysfs interface for device %s\n",
+ dev_name(&device->dev));
+
+ device->removal_type = ACPI_BUS_REMOVAL_NORMAL;
+ return 0;
+ end:
+ spin_lock(&acpi_device_lock);
+ if (device->parent) {
+ list_del(&device->node);
+ list_del(&device->g_list);
+ } else
+ list_del(&device->g_list);
+ list_del(&device->wakeup_list);
+ spin_unlock(&acpi_device_lock);
+ return result;
+}
+
+static void acpi_device_unregister(struct acpi_device *device, int type)
+{
+ spin_lock(&acpi_device_lock);
+ if (device->parent) {
+ list_del(&device->node);
+ list_del(&device->g_list);
+ } else
+ list_del(&device->g_list);
+
+ list_del(&device->wakeup_list);
+ spin_unlock(&acpi_device_lock);
+
+ acpi_detach_data(device->handle, acpi_bus_data_handler);
+
+ acpi_device_remove_files(device);
+ device_unregister(&device->dev);
+}
+
+/* --------------------------------------------------------------------------
+ Driver Management
+ -------------------------------------------------------------------------- */
+/**
+ * acpi_bus_driver_init - add a device to a driver
+ * @device: the device to add and initialize
+ * @driver: driver for the device
+ *
+ * Used to initialize a device via its device driver. Called whenever a
+ * driver is bound to a device. Invokes the driver's add() ops.
+ */
+static int
+acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver)
+{
+ int result = 0;
+
+
+ if (!device || !driver)
+ return -EINVAL;
+
+ if (!driver->ops.add)
+ return -ENOSYS;
+
+ result = driver->ops.add(device);
+ if (result) {
+ device->driver = NULL;
+ device->driver_data = NULL;
+ return result;
+ }
+
+ device->driver = driver;
+
+ /*
+ * TBD - Configuration Management: Assign resources to device based
+ * upon possible configuration and currently allocated resources.
+ */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Driver successfully bound to device\n"));
+ return 0;
+}
+
+static int acpi_start_single_object(struct acpi_device *device)
+{
+ int result = 0;
+ struct acpi_driver *driver;
+
+
+ if (!(driver = device->driver))
+ return 0;
+
+ if (driver->ops.start) {
+ result = driver->ops.start(device);
+ if (result && driver->ops.remove)
+ driver->ops.remove(device, ACPI_BUS_REMOVAL_NORMAL);
+ }
+
+ return result;
+}
+
+/**
+ * acpi_bus_register_driver - register a driver with the ACPI bus
+ * @driver: driver being registered
+ *
+ * Registers a driver with the ACPI bus. Searches the namespace for all
+ * devices that match the driver's criteria and binds. Returns zero for
+ * success or a negative error status for failure.
+ */
+int acpi_bus_register_driver(struct acpi_driver *driver)
+{
+ int ret;
+
+ if (acpi_disabled)
+ return -ENODEV;
+ driver->drv.name = driver->name;
+ driver->drv.bus = &acpi_bus_type;
+ driver->drv.owner = driver->owner;
+
+ ret = driver_register(&driver->drv);
+ return ret;
+}
+
+EXPORT_SYMBOL(acpi_bus_register_driver);
+
+/**
+ * acpi_bus_unregister_driver - unregisters a driver from the ACPI bus
+ * @driver: driver to unregister
+ *
+ * Unregisters a driver with the ACPI bus. Searches the namespace for all
+ * devices that match the driver's criteria and unbinds.
+ */
+void acpi_bus_unregister_driver(struct acpi_driver *driver)
+{
+ driver_unregister(&driver->drv);
+}
+
+EXPORT_SYMBOL(acpi_bus_unregister_driver);
+
+/* --------------------------------------------------------------------------
+ Device Enumeration
+ -------------------------------------------------------------------------- */
+acpi_status
+acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd)
+{
+ acpi_status status;
+ acpi_handle tmp;
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *obj;
+
+ status = acpi_get_handle(handle, "_EJD", &tmp);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ status = acpi_evaluate_object(handle, "_EJD", NULL, &buffer);
+ if (ACPI_SUCCESS(status)) {
+ obj = buffer.pointer;
+ status = acpi_get_handle(ACPI_ROOT_OBJECT, obj->string.pointer,
+ ejd);
+ kfree(buffer.pointer);
+ }
+ return status;
+}
+EXPORT_SYMBOL_GPL(acpi_bus_get_ejd);
+
+void acpi_bus_data_handler(acpi_handle handle, u32 function, void *context)
+{
+
+ /* TBD */
+
+ return;
+}
+
+static int acpi_bus_get_perf_flags(struct acpi_device *device)
+{
+ device->performance.state = ACPI_STATE_UNKNOWN;
+ return 0;
+}
+
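+/*
+ * _PRW returns a package of the form
+ * Package () { <GPE info>, <deepest sleep state that still supports wake>,
+ *              <power resource references...> }
+ * where <GPE info> is either a GPE index (Integer) or a Package of
+ * { GPE block device reference, GPE index }. This helper unpacks it into
+ * device->wakeup.
+ */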
+static acpi_status
+acpi_bus_extract_wakeup_device_power_package(struct acpi_device *device,
+ union acpi_object *package)
+{
+ int i = 0;
+ union acpi_object *element = NULL;
+
+ if (!device || !package || (package->package.count < 2))
+ return AE_BAD_PARAMETER;
+
+ element = &(package->package.elements[0]);
+ if (!element)
+ return AE_BAD_PARAMETER;
+ if (element->type == ACPI_TYPE_PACKAGE) {
+ if ((element->package.count < 2) ||
+ (element->package.elements[0].type !=
+ ACPI_TYPE_LOCAL_REFERENCE)
+ || (element->package.elements[1].type != ACPI_TYPE_INTEGER))
+ return AE_BAD_DATA;
+ device->wakeup.gpe_device =
+ element->package.elements[0].reference.handle;
+ device->wakeup.gpe_number =
+ (u32) element->package.elements[1].integer.value;
+ } else if (element->type == ACPI_TYPE_INTEGER) {
+ device->wakeup.gpe_number = element->integer.value;
+ } else
+ return AE_BAD_DATA;
+
+ element = &(package->package.elements[1]);
+ if (element->type != ACPI_TYPE_INTEGER) {
+ return AE_BAD_DATA;
+ }
+ device->wakeup.sleep_state = element->integer.value;
+
+ if ((package->package.count - 2) > ACPI_MAX_HANDLES) {
+ return AE_NO_MEMORY;
+ }
+ device->wakeup.resources.count = package->package.count - 2;
+ for (i = 0; i < device->wakeup.resources.count; i++) {
+ element = &(package->package.elements[i + 2]);
+ if (element->type != ACPI_TYPE_LOCAL_REFERENCE)
+ return AE_BAD_DATA;
+
+ device->wakeup.resources.handles[i] = element->reference.handle;
+ }
+
+ return AE_OK;
+}
+
+static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
+{
+ acpi_status status = 0;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *package = NULL;
+ int psw_error;
+
+ struct acpi_device_id button_device_ids[] = {
+ {"PNP0C0D", 0},
+ {"PNP0C0C", 0},
+ {"PNP0C0E", 0},
+ {"", 0},
+ };
+
+ /* _PRW */
+ status = acpi_evaluate_object(device->handle, "_PRW", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRW"));
+ goto end;
+ }
+
+ package = (union acpi_object *)buffer.pointer;
+ status = acpi_bus_extract_wakeup_device_power_package(device, package);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Extracting _PRW package"));
+ goto end;
+ }
+
+ kfree(buffer.pointer);
+
+ device->wakeup.flags.valid = 1;
+ /*
+ * Call the _DSW/_PSW object to disable the device's ability to wake
+ * the sleeping system. _PSW is deprecated in ACPI 3.0 and replaced
+ * by _DSW, so _DSW is evaluated first and _PSW is used only when
+ * _DSW is not present.
+ */
+ psw_error = acpi_device_sleep_wake(device, 0, 0, 0);
+ if (psw_error)
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "error in _DSW or _PSW evaluation\n"));
+
+ /* Power button, sleep button and lid switch always enable wakeup */
+ if (!acpi_match_device_ids(device, button_device_ids))
+ device->wakeup.flags.run_wake = 1;
+
+ end:
+ if (ACPI_FAILURE(status))
+ device->flags.wake_capable = 0;
+ return 0;
+}
+
+static int acpi_bus_get_power_flags(struct acpi_device *device)
+{
+ acpi_status status = 0;
+ acpi_handle handle = NULL;
+ u32 i = 0;
+
+
+ /*
+ * Power Management Flags
+ */
+ status = acpi_get_handle(device->handle, "_PSC", &handle);
+ if (ACPI_SUCCESS(status))
+ device->power.flags.explicit_get = 1;
+ status = acpi_get_handle(device->handle, "_IRC", &handle);
+ if (ACPI_SUCCESS(status))
+ device->power.flags.inrush_current = 1;
+
+ /*
+ * Enumerate supported power management states
+ */
+ for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3; i++) {
+ struct acpi_device_power_state *ps = &device->power.states[i];
+ char object_name[5] = { '_', 'P', 'R', '0' + i, '\0' };
+
+ /* Evaluate "_PRx" to see if power resources are referenced */
+ acpi_evaluate_reference(device->handle, object_name, NULL,
+ &ps->resources);
+ if (ps->resources.count) {
+ device->power.flags.power_resources = 1;
+ ps->flags.valid = 1;
+ }
+
+ /* Evaluate "_PSx" to see if we can do explicit sets */
+ object_name[2] = 'S';
+ status = acpi_get_handle(device->handle, object_name, &handle);
+ if (ACPI_SUCCESS(status)) {
+ ps->flags.explicit_set = 1;
+ ps->flags.valid = 1;
+ }
+
+ /* State is valid if we have some power control */
+ if (ps->resources.count || ps->flags.explicit_set)
+ ps->flags.valid = 1;
+
+ ps->power = -1; /* Unknown - driver assigned */
+ ps->latency = -1; /* Unknown - driver assigned */
+ }
+
+ /* Set defaults for D0 and D3 states (always valid) */
+ device->power.states[ACPI_STATE_D0].flags.valid = 1;
+ device->power.states[ACPI_STATE_D0].power = 100;
+ device->power.states[ACPI_STATE_D3].flags.valid = 1;
+ device->power.states[ACPI_STATE_D3].power = 0;
+
+ /* TBD: System wake support and resource requirements. */
+
+ device->power.state = ACPI_STATE_UNKNOWN;
+ acpi_bus_get_power(device->handle, &(device->power.state));
+
+ return 0;
+}
+
+static int acpi_bus_get_flags(struct acpi_device *device)
+{
+ acpi_status status = AE_OK;
+ acpi_handle temp = NULL;
+
+
+ /* Presence of _STA indicates 'dynamic_status' */
+ status = acpi_get_handle(device->handle, "_STA", &temp);
+ if (ACPI_SUCCESS(status))
+ device->flags.dynamic_status = 1;
+
+ /* Presence of _CID indicates 'compatible_ids' */
+ status = acpi_get_handle(device->handle, "_CID", &temp);
+ if (ACPI_SUCCESS(status))
+ device->flags.compatible_ids = 1;
+
+ /* Presence of _RMV indicates 'removable' */
+ status = acpi_get_handle(device->handle, "_RMV", &temp);
+ if (ACPI_SUCCESS(status))
+ device->flags.removable = 1;
+
+ /* Presence of _EJD|_EJ0 indicates 'ejectable' */
+ status = acpi_get_handle(device->handle, "_EJD", &temp);
+ if (ACPI_SUCCESS(status))
+ device->flags.ejectable = 1;
+ else {
+ status = acpi_get_handle(device->handle, "_EJ0", &temp);
+ if (ACPI_SUCCESS(status))
+ device->flags.ejectable = 1;
+ }
+
+ /* Presence of _LCK indicates 'lockable' */
+ status = acpi_get_handle(device->handle, "_LCK", &temp);
+ if (ACPI_SUCCESS(status))
+ device->flags.lockable = 1;
+
+ /* Presence of _PS0|_PR0 indicates 'power manageable' */
+ status = acpi_get_handle(device->handle, "_PS0", &temp);
+ if (ACPI_FAILURE(status))
+ status = acpi_get_handle(device->handle, "_PR0", &temp);
+ if (ACPI_SUCCESS(status))
+ device->flags.power_manageable = 1;
+
+ /* Presence of _PRW indicates wake capable */
+ status = acpi_get_handle(device->handle, "_PRW", &temp);
+ if (ACPI_SUCCESS(status))
+ device->flags.wake_capable = 1;
+
+ /* TBD: Performance management */
+
+ return 0;
+}
+
+static void acpi_device_get_busid(struct acpi_device *device,
+ acpi_handle handle, int type)
+{
+ char bus_id[5] = { '?', 0 };
+ struct acpi_buffer buffer = { sizeof(bus_id), bus_id };
+ int i = 0;
+
+ /*
+ * Bus ID
+ * ------
+ * The device's Bus ID is simply the object name.
+ * TBD: Shouldn't this value be unique (within the ACPI namespace)?
+ */
+ switch (type) {
+ case ACPI_BUS_TYPE_SYSTEM:
+ strcpy(device->pnp.bus_id, "ACPI");
+ break;
+ case ACPI_BUS_TYPE_POWER_BUTTON:
+ strcpy(device->pnp.bus_id, "PWRF");
+ break;
+ case ACPI_BUS_TYPE_SLEEP_BUTTON:
+ strcpy(device->pnp.bus_id, "SLPF");
+ break;
+ default:
+ acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);
+ /* Clean up trailing underscores (if any) */
+ for (i = 3; i > 1; i--) {
+ if (bus_id[i] == '_')
+ bus_id[i] = '\0';
+ else
+ break;
+ }
+ strcpy(device->pnp.bus_id, bus_id);
+ break;
+ }
+}
+
+/*
+ * acpi_bay_match - see if a device is an ejectable drive bay
+ *
+ * If an acpi object is ejectable and has one of the ACPI ATA methods defined,
+ * then we can safely call it an ejectable drive bay
+ */
+static int acpi_bay_match(struct acpi_device *device){
+ acpi_status status;
+ acpi_handle handle;
+ acpi_handle tmp;
+ acpi_handle phandle;
+
+ handle = device->handle;
+
+ status = acpi_get_handle(handle, "_EJ0", &tmp);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ if ((ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp))) ||
+ (ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp))) ||
+ (ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp))) ||
+ (ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp))))
+ return 0;
+
+ if (acpi_get_parent(handle, &phandle))
+ return -ENODEV;
+
+ if ((ACPI_SUCCESS(acpi_get_handle(phandle, "_GTF", &tmp))) ||
+ (ACPI_SUCCESS(acpi_get_handle(phandle, "_GTM", &tmp))) ||
+ (ACPI_SUCCESS(acpi_get_handle(phandle, "_STM", &tmp))) ||
+ (ACPI_SUCCESS(acpi_get_handle(phandle, "_SDD", &tmp))))
+ return 0;
+
+ return -ENODEV;
+}
+
+/*
+ * acpi_dock_match - see if a device has a _DCK method
+ */
+static int acpi_dock_match(struct acpi_device *device)
+{
+ acpi_handle tmp;
+ return acpi_get_handle(device->handle, "_DCK", &tmp);
+}
+
+static void acpi_device_set_id(struct acpi_device *device,
+ struct acpi_device *parent, acpi_handle handle,
+ int type)
+{
+ struct acpi_device_info *info;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ char *hid = NULL;
+ char *uid = NULL;
+ struct acpi_compatible_id_list *cid_list = NULL;
+ const char *cid_add = NULL;
+ acpi_status status;
+
+ switch (type) {
+ case ACPI_BUS_TYPE_DEVICE:
+ status = acpi_get_object_info(handle, &buffer);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX "%s: Error reading device info\n", __func__);
+ return;
+ }
+
+ info = buffer.pointer;
+ if (info->valid & ACPI_VALID_HID)
+ hid = info->hardware_id.value;
+ if (info->valid & ACPI_VALID_UID)
+ uid = info->unique_id.value;
+ if (info->valid & ACPI_VALID_CID)
+ cid_list = &info->compatibility_id;
+ if (info->valid & ACPI_VALID_ADR) {
+ device->pnp.bus_address = info->address;
+ device->flags.bus_address = 1;
+ }
+
+ /*
+ * If we have a video/bay/dock device, add our self-defined HID
+ * to the CID list. That way the video/bay/dock drivers get
+ * autoloaded and the device may still match against another
+ * driver.
+ */
+ if (acpi_is_video_device(device))
+ cid_add = ACPI_VIDEO_HID;
+ else if (ACPI_SUCCESS(acpi_bay_match(device)))
+ cid_add = ACPI_BAY_HID;
+ else if (ACPI_SUCCESS(acpi_dock_match(device)))
+ cid_add = ACPI_DOCK_HID;
+
+ break;
+ case ACPI_BUS_TYPE_POWER:
+ hid = ACPI_POWER_HID;
+ break;
+ case ACPI_BUS_TYPE_PROCESSOR:
+ hid = ACPI_PROCESSOR_OBJECT_HID;
+ break;
+ case ACPI_BUS_TYPE_SYSTEM:
+ hid = ACPI_SYSTEM_HID;
+ break;
+ case ACPI_BUS_TYPE_THERMAL:
+ hid = ACPI_THERMAL_HID;
+ break;
+ case ACPI_BUS_TYPE_POWER_BUTTON:
+ hid = ACPI_BUTTON_HID_POWERF;
+ break;
+ case ACPI_BUS_TYPE_SLEEP_BUTTON:
+ hid = ACPI_BUTTON_HID_SLEEPF;
+ break;
+ }
+
+ /*
+ * \_SB
+ * ----
+ * Fix for the system root bus device -- the only root-level device.
+ */
+ if (((acpi_handle)parent == ACPI_ROOT_OBJECT) && (type == ACPI_BUS_TYPE_DEVICE)) {
+ hid = ACPI_BUS_HID;
+ strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
+ strcpy(device->pnp.device_class, ACPI_BUS_CLASS);
+ }
+
+ if (hid) {
+ strcpy(device->pnp.hardware_id, hid);
+ device->flags.hardware_id = 1;
+ }
+ if (uid) {
+ strcpy(device->pnp.unique_id, uid);
+ device->flags.unique_id = 1;
+ }
+ if (cid_list || cid_add) {
+ struct acpi_compatible_id_list *list;
+ int size = 0;
+ int count = 0;
+
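+ /*
+ * Build a merged CID list holding the firmware-provided CIDs
+ * (if any) plus the self-defined HID selected above (if any).
+ */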
+ if (cid_list) {
+ size = cid_list->size;
+ } else if (cid_add) {
+ size = sizeof(struct acpi_compatible_id_list);
+ cid_list = ACPI_ALLOCATE_ZEROED((acpi_size) size);
+ if (!cid_list) {
+ printk(KERN_ERR "Memory allocation error\n");
+ kfree(buffer.pointer);
+ return;
+ } else {
+ cid_list->count = 0;
+ cid_list->size = size;
+ }
+ }
+ if (cid_add)
+ size += sizeof(struct acpi_compatible_id);
+ list = kmalloc(size, GFP_KERNEL);
+
+ if (list) {
+ if (cid_list) {
+ memcpy(list, cid_list, cid_list->size);
+ count = cid_list->count;
+ }
+ if (cid_add) {
+ strncpy(list->id[count].value, cid_add,
+ ACPI_MAX_CID_LENGTH);
+ count++;
+ device->flags.compatible_ids = 1;
+ }
+ list->size = size;
+ list->count = count;
+ device->pnp.cid_list = list;
+ } else
+ printk(KERN_ERR PREFIX "Memory allocation error\n");
+ }
+
+ kfree(buffer.pointer);
+}
+
+static int acpi_device_set_context(struct acpi_device *device, int type)
+{
+ acpi_status status = AE_OK;
+ int result = 0;
+ /*
+ * Context
+ * -------
+ * Attach this 'struct acpi_device' to the ACPI object. This makes
+ * resolutions from handle->device very efficient. Note that we need
+ * to be careful with fixed-feature devices as they all attach to the
+ * root object.
+ */
+ if (type != ACPI_BUS_TYPE_POWER_BUTTON &&
+ type != ACPI_BUS_TYPE_SLEEP_BUTTON) {
+ status = acpi_attach_data(device->handle,
+ acpi_bus_data_handler, device);
+
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX "Error attaching device data\n");
+ result = -ENODEV;
+ }
+ }
+ return result;
+}
+
+static int acpi_bus_remove(struct acpi_device *dev, int rmdevice)
+{
+ if (!dev)
+ return -EINVAL;
+
+ dev->removal_type = ACPI_BUS_REMOVAL_EJECT;
+ device_release_driver(&dev->dev);
+
+ if (!rmdevice)
+ return 0;
+
+ /*
+ * Unbind _ADR-based devices on hot removal
+ */
+ if (dev->flags.bus_address) {
+ if ((dev->parent) && (dev->parent->ops.unbind))
+ dev->parent->ops.unbind(dev);
+ }
+ acpi_device_unregister(dev, ACPI_BUS_REMOVAL_EJECT);
+
+ return 0;
+}
+
+static int
+acpi_add_single_object(struct acpi_device **child,
+ struct acpi_device *parent, acpi_handle handle, int type,
+ struct acpi_bus_ops *ops)
+{
+ int result = 0;
+ struct acpi_device *device = NULL;
+
+
+ if (!child)
+ return -EINVAL;
+
+ device = kzalloc(sizeof(struct acpi_device), GFP_KERNEL);
+ if (!device) {
+ printk(KERN_ERR PREFIX "Memory allocation error\n");
+ return -ENOMEM;
+ }
+
+ device->handle = handle;
+ device->parent = parent;
+ device->bus_ops = *ops; /* workaround to avoid calling .start */
+
+
+ acpi_device_get_busid(device, handle, type);
+
+ /*
+ * Flags
+ * -----
+ * Get prior to calling acpi_bus_get_status() so we know whether
+ * or not _STA is present. Note that we only look for object
+ * handles -- cannot evaluate objects until we know the device is
+ * present and properly initialized.
+ */
+ result = acpi_bus_get_flags(device);
+ if (result)
+ goto end;
+
+ /*
+ * Status
+ * ------
+ * See if the device is present. We always assume that non-Device
+ * and non-Processor objects (e.g. thermal zones, power resources,
+ * etc.) are present, functioning, etc. (at least when parent object
+ * is present). Note that _STA has a different meaning for some
+ * objects (e.g. power resources) so we need to be careful how we use
+ * it.
+ */
+ switch (type) {
+ case ACPI_BUS_TYPE_PROCESSOR:
+ case ACPI_BUS_TYPE_DEVICE:
+ result = acpi_bus_get_status(device);
+ if (ACPI_FAILURE(result)) {
+ result = -ENODEV;
+ goto end;
+ }
+ /*
+ * When the device is neither present nor functional, it
+ * should not be added to the Linux ACPI device tree.
+ * When the device is not present but functional, it should
+ * still be added to the Linux ACPI tree (for example, bay
+ * and dock devices). In that case it is unnecessary to check
+ * whether it is a bay device or a dock device.
+ */
+ if (!device->status.present && !device->status.functional) {
+ result = -ENODEV;
+ goto end;
+ }
+ break;
+ default:
+ STRUCT_TO_INT(device->status) =
+ ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED |
+ ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING;
+ break;
+ }
+
+ /*
+ * Initialize Device
+ * -----------------
+ * TBD: Synch with Core's enumeration/initialization process.
+ */
+
+ /*
+ * Hardware ID, Unique ID, & Bus Address
+ * -------------------------------------
+ */
+ acpi_device_set_id(device, parent, handle, type);
+
+ /*
+ * The ACPI device must be attached to the acpi handle before
+ * getting the power/wakeup/performance flags. Otherwise the OS
+ * can't find the corresponding ACPI device by the acpi handle
+ * while getting the power/wakeup/performance flags.
+ */
+ result = acpi_device_set_context(device, type);
+ if (result)
+ goto end;
+
+ /*
+ * Power Management
+ * ----------------
+ */
+ if (device->flags.power_manageable) {
+ result = acpi_bus_get_power_flags(device);
+ if (result)
+ goto end;
+ }
+
+ /*
+ * Wakeup device management
+ * ------------------------
+ */
+ if (device->flags.wake_capable) {
+ result = acpi_bus_get_wakeup_device_flags(device);
+ if (result)
+ goto end;
+ }
+
+ /*
+ * Performance Management
+ * ----------------------
+ */
+ if (device->flags.performance_manageable) {
+ result = acpi_bus_get_perf_flags(device);
+ if (result)
+ goto end;
+ }
+
+
+ result = acpi_device_register(device, parent);
+
+ /*
+ * Bind _ADR-based devices on hot add
+ */
+ if (device->flags.bus_address) {
+ if (device->parent && device->parent->ops.bind)
+ device->parent->ops.bind(device);
+ }
+
+ end:
+ if (!result)
+ *child = device;
+ else {
+ kfree(device->pnp.cid_list);
+ kfree(device);
+ }
+
+ return result;
+}
+
+static int acpi_bus_scan(struct acpi_device *start, struct acpi_bus_ops *ops)
+{
+ acpi_status status = AE_OK;
+ struct acpi_device *parent = NULL;
+ struct acpi_device *child = NULL;
+ acpi_handle phandle = NULL;
+ acpi_handle chandle = NULL;
+ acpi_object_type type = 0;
+ u32 level = 1;
+
+
+ if (!start)
+ return -EINVAL;
+
+ parent = start;
+ phandle = start->handle;
+
+ /*
+ * Parse through the ACPI namespace, identify all 'devices', and
+ * create a new 'struct acpi_device' for each.
+ */
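+ /*
+ * This is an iterative depth-first walk: 'phandle' is the scope
+ * currently being enumerated, 'chandle' the last child returned by
+ * acpi_get_next_object() (NULL when starting a new scope), and
+ * 'level' the current namespace depth.
+ */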
+ while ((level > 0) && parent) {
+
+ status = acpi_get_next_object(ACPI_TYPE_ANY, phandle,
+ chandle, &chandle);
+
+ /*
+ * If this scope is exhausted then move our way back up.
+ */
+ if (ACPI_FAILURE(status)) {
+ level--;
+ chandle = phandle;
+ acpi_get_parent(phandle, &phandle);
+ if (parent->parent)
+ parent = parent->parent;
+ continue;
+ }
+
+ status = acpi_get_type(chandle, &type);
+ if (ACPI_FAILURE(status))
+ continue;
+
+ /*
+ * If this is a scope object then parse it (depth-first).
+ */
+ if (type == ACPI_TYPE_LOCAL_SCOPE) {
+ level++;
+ phandle = chandle;
+ chandle = NULL;
+ continue;
+ }
+
+ /*
+ * We're only interested in objects that we consider 'devices'.
+ */
+ switch (type) {
+ case ACPI_TYPE_DEVICE:
+ type = ACPI_BUS_TYPE_DEVICE;
+ break;
+ case ACPI_TYPE_PROCESSOR:
+ type = ACPI_BUS_TYPE_PROCESSOR;
+ break;
+ case ACPI_TYPE_THERMAL:
+ type = ACPI_BUS_TYPE_THERMAL;
+ break;
+ case ACPI_TYPE_POWER:
+ type = ACPI_BUS_TYPE_POWER;
+ break;
+ default:
+ continue;
+ }
+
+ if (ops->acpi_op_add)
+ status = acpi_add_single_object(&child, parent,
+ chandle, type, ops);
+ else
+ status = acpi_bus_get_device(chandle, &child);
+
+ if (ACPI_FAILURE(status))
+ continue;
+
+ if (ops->acpi_op_start && !(ops->acpi_op_add)) {
+ status = acpi_start_single_object(child);
+ if (ACPI_FAILURE(status))
+ continue;
+ }
+
+ /*
+ * If the device is present, enabled, and functioning then
+ * parse its scope (depth-first). Note that we need to
+ * represent absent devices to facilitate PnP notifications
+ * -- but only the subtree head (not all of its children,
+ * which will be enumerated when the parent is inserted).
+ *
+ * TBD: Need notifications and other detection mechanisms
+ * in place before we can fully implement this.
+ */
+ /*
+ * When the device is not present but functional, it is also
+ * necessary to scan the children of this device.
+ */
+ if (child->status.present || (!child->status.present &&
+ child->status.functional)) {
+ status = acpi_get_next_object(ACPI_TYPE_ANY, chandle,
+ NULL, NULL);
+ if (ACPI_SUCCESS(status)) {
+ level++;
+ phandle = chandle;
+ chandle = NULL;
+ parent = child;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int
+acpi_bus_add(struct acpi_device **child,
+ struct acpi_device *parent, acpi_handle handle, int type)
+{
+ int result;
+ struct acpi_bus_ops ops;
+
+ memset(&ops, 0, sizeof(ops));
+ ops.acpi_op_add = 1;
+
+ result = acpi_add_single_object(child, parent, handle, type, &ops);
+ if (!result)
+ result = acpi_bus_scan(*child, &ops);
+
+ return result;
+}
+
+EXPORT_SYMBOL(acpi_bus_add);
+
+int acpi_bus_start(struct acpi_device *device)
+{
+ int result;
+ struct acpi_bus_ops ops;
+
+
+ if (!device)
+ return -EINVAL;
+
+ result = acpi_start_single_object(device);
+ if (!result) {
+ memset(&ops, 0, sizeof(ops));
+ ops.acpi_op_start = 1;
+ result = acpi_bus_scan(device, &ops);
+ }
+ return result;
+}
+
+EXPORT_SYMBOL(acpi_bus_start);
+
+int acpi_bus_trim(struct acpi_device *start, int rmdevice)
+{
+ acpi_status status;
+ struct acpi_device *parent, *child;
+ acpi_handle phandle, chandle;
+ acpi_object_type type;
+ u32 level = 1;
+ int err = 0;
+
+ parent = start;
+ phandle = start->handle;
+ child = chandle = NULL;
+
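+ /*
+ * Walk the subtree depth-first; devices are removed bottom-up as
+ * each scope is exhausted, so children go away before their parent.
+ */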
+ while ((level > 0) && parent && (!err)) {
+ status = acpi_get_next_object(ACPI_TYPE_ANY, phandle,
+ chandle, &chandle);
+
+ /*
+ * If this scope is exhausted then move our way back up.
+ */
+ if (ACPI_FAILURE(status)) {
+ level--;
+ chandle = phandle;
+ acpi_get_parent(phandle, &phandle);
+ child = parent;
+ parent = parent->parent;
+
+ if (level == 0)
+ err = acpi_bus_remove(child, rmdevice);
+ else
+ err = acpi_bus_remove(child, 1);
+
+ continue;
+ }
+
+ status = acpi_get_type(chandle, &type);
+ if (ACPI_FAILURE(status)) {
+ continue;
+ }
+ /*
+ * If there is a device corresponding to chandle then
+ * parse it (depth-first).
+ */
+ if (acpi_bus_get_device(chandle, &child) == 0) {
+ level++;
+ phandle = chandle;
+ chandle = NULL;
+ parent = child;
+ }
+ continue;
+ }
+ return err;
+}
+EXPORT_SYMBOL_GPL(acpi_bus_trim);
+
+
+static int acpi_bus_scan_fixed(struct acpi_device *root)
+{
+ int result = 0;
+ struct acpi_device *device = NULL;
+ struct acpi_bus_ops ops;
+
+ if (!root)
+ return -ENODEV;
+
+ memset(&ops, 0, sizeof(ops));
+ ops.acpi_op_add = 1;
+ ops.acpi_op_start = 1;
+
+ /*
+ * Enumerate all fixed-feature devices.
+ */
+ if ((acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON) == 0) {
+ result = acpi_add_single_object(&device, acpi_root,
+ NULL,
+ ACPI_BUS_TYPE_POWER_BUTTON,
+ &ops);
+ }
+
+ if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
+ result = acpi_add_single_object(&device, acpi_root,
+ NULL,
+ ACPI_BUS_TYPE_SLEEP_BUTTON,
+ &ops);
+ }
+
+ return result;
+}
+
+
+static int __init acpi_scan_init(void)
+{
+ int result;
+ struct acpi_bus_ops ops;
+
+
+ if (acpi_disabled)
+ return 0;
+
+ memset(&ops, 0, sizeof(ops));
+ ops.acpi_op_add = 1;
+ ops.acpi_op_start = 1;
+
+ result = bus_register(&acpi_bus_type);
+ if (result) {
+ /* We don't want to quit even if we failed to add suspend/resume */
+ printk(KERN_ERR PREFIX "Could not register bus type\n");
+ }
+
+ /*
+ * Create the root device in the bus's device tree
+ */
+ result = acpi_add_single_object(&acpi_root, NULL, ACPI_ROOT_OBJECT,
+ ACPI_BUS_TYPE_SYSTEM, &ops);
+ if (result)
+ goto Done;
+
+ /*
+ * Enumerate devices in the ACPI namespace.
+ */
+ result = acpi_bus_scan_fixed(acpi_root);
+
+ if (!result)
+ result = acpi_bus_scan(acpi_root, &ops);
+
+ if (result)
+ acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL);
+
+ Done:
+ return result;
+}
+
+subsys_initcall(acpi_scan_init);
diff --git a/drivers/acpi/sleep/Makefile b/drivers/acpi/sleep/Makefile
new file mode 100644
index 0000000..f1fb888
--- /dev/null
+++ b/drivers/acpi/sleep/Makefile
@@ -0,0 +1,5 @@
+obj-y := wakeup.o
+obj-y += main.o
+obj-$(CONFIG_ACPI_SLEEP) += proc.o
+
+EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
new file mode 100644
index 0000000..28a691c
--- /dev/null
+++ b/drivers/acpi/sleep/main.c
@@ -0,0 +1,696 @@
+/*
+ * sleep.c - ACPI sleep support.
+ *
+ * Copyright (c) 2005 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
+ * Copyright (c) 2004 David Shaohua Li <shaohua.li@intel.com>
+ * Copyright (c) 2000-2003 Patrick Mochel
+ * Copyright (c) 2003 Open Source Development Lab
+ *
+ * This file is released under the GPLv2.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/dmi.h>
+#include <linux/device.h>
+#include <linux/suspend.h>
+#include <linux/reboot.h>
+
+#include <asm/io.h>
+
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include "sleep.h"
+
+u8 sleep_states[ACPI_S_STATE_COUNT];
+
+static void acpi_sleep_tts_switch(u32 acpi_state)
+{
+ union acpi_object in_arg = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list arg_list = { 1, &in_arg };
+ acpi_status status = AE_OK;
+
+ in_arg.integer.value = acpi_state;
+ status = acpi_evaluate_object(NULL, "\\_TTS", &arg_list, NULL);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ /*
+ * The OS can't evaluate the _TTS object correctly. Print a
+ * warning message, but this won't break anything.
+ */
+ printk(KERN_NOTICE "Failure in evaluating _TTS object\n");
+ }
+}
+
+static int tts_notify_reboot(struct notifier_block *this,
+ unsigned long code, void *x)
+{
+ acpi_sleep_tts_switch(ACPI_STATE_S5);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block tts_notifier = {
+ .notifier_call = tts_notify_reboot,
+ .next = NULL,
+ .priority = 0,
+};
+
+static int acpi_sleep_prepare(u32 acpi_state)
+{
+#ifdef CONFIG_ACPI_SLEEP
+ /* do we have a wakeup address for S2 and S3? */
+ if (acpi_state == ACPI_STATE_S3) {
+ if (!acpi_wakeup_address) {
+ return -EFAULT;
+ }
+ acpi_set_firmware_waking_vector(
+ (acpi_physical_address)acpi_wakeup_address);
+
+ }
+ ACPI_FLUSH_CPU_CACHE();
+ acpi_enable_wakeup_device_prep(acpi_state);
+#endif
+ printk(KERN_INFO PREFIX "Preparing to enter system sleep state S%d\n",
+ acpi_state);
+ acpi_enter_sleep_state_prep(acpi_state);
+ return 0;
+}
+
+#ifdef CONFIG_ACPI_SLEEP
+static u32 acpi_target_sleep_state = ACPI_STATE_S0;
+/*
+ * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
+ * user to request that behavior by using the 'acpi_old_suspend_ordering'
+ * kernel command line option that causes the following variable to be set.
+ */
+static bool old_suspend_ordering;
+
+void __init acpi_old_suspend_ordering(void)
+{
+ old_suspend_ordering = true;
+}
+
+/*
+ * According to the ACPI specification the BIOS should make sure that ACPI is
+ * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
+ * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
+ * on such systems during resume. Unfortunately that doesn't help in
+ * particularly pathological cases in which SCI_EN has to be set directly on
+ * resume, although the specification states very clearly that this flag is
+ * owned by the hardware. The set_sci_en_on_resume variable will be set in such
+ * cases.
+ */
+static bool set_sci_en_on_resume;
+
+/**
+ * acpi_pm_disable_gpes - Disable the GPEs.
+ */
+static int acpi_pm_disable_gpes(void)
+{
+ acpi_hw_disable_all_gpes();
+ return 0;
+}
+
+/**
+ * __acpi_pm_prepare - Prepare the platform to enter the target state.
+ *
+ * If necessary, set the firmware waking vector and do arch-specific
+ * nastiness to get the wakeup code to the waking vector.
+ */
+static int __acpi_pm_prepare(void)
+{
+ int error = acpi_sleep_prepare(acpi_target_sleep_state);
+
+ if (error)
+ acpi_target_sleep_state = ACPI_STATE_S0;
+ return error;
+}
+
+/**
+ * acpi_pm_prepare - Prepare the platform to enter the target sleep
+ * state and disable the GPEs.
+ */
+static int acpi_pm_prepare(void)
+{
+ int error = __acpi_pm_prepare();
+
+ if (!error)
+ acpi_hw_disable_all_gpes();
+ return error;
+}
+
+/**
+ * acpi_pm_finish - Instruct the platform to leave a sleep state.
+ *
+ * This is called after we wake back up (or if entering the sleep state
+ * failed).
+ */
+static void acpi_pm_finish(void)
+{
+ u32 acpi_state = acpi_target_sleep_state;
+
+ if (acpi_state == ACPI_STATE_S0)
+ return;
+
+ printk(KERN_INFO PREFIX "Waking up from system sleep state S%d\n",
+ acpi_state);
+ acpi_disable_wakeup_device(acpi_state);
+ acpi_leave_sleep_state(acpi_state);
+
+ /* reset firmware waking vector */
+ acpi_set_firmware_waking_vector((acpi_physical_address) 0);
+
+ acpi_target_sleep_state = ACPI_STATE_S0;
+}
+
+/**
+ * acpi_pm_end - Finish up suspend sequence.
+ */
+static void acpi_pm_end(void)
+{
+ /*
+ * This is necessary in case acpi_pm_finish() is not called during a
+ * failing transition to a sleep state.
+ */
+ acpi_target_sleep_state = ACPI_STATE_S0;
+ acpi_sleep_tts_switch(acpi_target_sleep_state);
+}
+#else /* !CONFIG_ACPI_SLEEP */
+#define acpi_target_sleep_state ACPI_STATE_S0
+#endif /* CONFIG_ACPI_SLEEP */
+
+#ifdef CONFIG_SUSPEND
+extern void do_suspend_lowlevel(void);
+
+static u32 acpi_suspend_states[] = {
+ [PM_SUSPEND_ON] = ACPI_STATE_S0,
+ [PM_SUSPEND_STANDBY] = ACPI_STATE_S1,
+ [PM_SUSPEND_MEM] = ACPI_STATE_S3,
+ [PM_SUSPEND_MAX] = ACPI_STATE_S5
+};
+
+/**
+ * acpi_suspend_begin - Set the target system sleep state to the state
+ * associated with given @pm_state, if supported.
+ */
+static int acpi_suspend_begin(suspend_state_t pm_state)
+{
+ u32 acpi_state = acpi_suspend_states[pm_state];
+ int error = 0;
+
+ if (sleep_states[acpi_state]) {
+ acpi_target_sleep_state = acpi_state;
+ acpi_sleep_tts_switch(acpi_target_sleep_state);
+ } else {
+ printk(KERN_ERR "ACPI does not support this state: %d\n",
+ pm_state);
+ error = -ENOSYS;
+ }
+ return error;
+}
+
+/**
+ * acpi_suspend_enter - Actually enter a sleep state.
+ * @pm_state: ignored
+ *
+ * Flush caches and go to sleep. For STR we have to call arch-specific
+ * assembly, which in turn calls acpi_enter_sleep_state().
+ * It's unfortunate, but it works. Please fix if you're feeling frisky.
+ */
+static int acpi_suspend_enter(suspend_state_t pm_state)
+{
+ acpi_status status = AE_OK;
+ unsigned long flags = 0;
+ u32 acpi_state = acpi_target_sleep_state;
+
+ ACPI_FLUSH_CPU_CACHE();
+
+ /* Do arch specific saving of state. */
+ if (acpi_state == ACPI_STATE_S3) {
+ int error = acpi_save_state_mem();
+
+ if (error)
+ return error;
+ }
+
+ local_irq_save(flags);
+ acpi_enable_wakeup_device(acpi_state);
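+ /*
+ * S1 can be entered directly; S3 goes through the arch-specific
+ * low-level suspend path, which saves processor context and calls
+ * acpi_enter_sleep_state() itself.
+ */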
+ switch (acpi_state) {
+ case ACPI_STATE_S1:
+ barrier();
+ status = acpi_enter_sleep_state(acpi_state);
+ break;
+
+ case ACPI_STATE_S3:
+ do_suspend_lowlevel();
+ break;
+ }
+
+ /* If ACPI is not enabled by the BIOS, we need to enable it here. */
+ if (set_sci_en_on_resume)
+ acpi_set_register(ACPI_BITREG_SCI_ENABLE, 1);
+ else
+ acpi_enable();
+
+ /* Reprogram control registers and execute _BFS */
+ acpi_leave_sleep_state_prep(acpi_state);
+
+ /* The ACPI 3.0 spec (p. 62) says that it's the responsibility
+ * of the OSPM to clear the status bit [ implying that the
+ * POWER_BUTTON event should not reach userspace ]
+ */
+ if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3))
+ acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
+
+ /*
+ * Disable and clear GPE status before interrupts are enabled.
+ * Some GPEs (like wakeup GPEs) have no handler; this avoids
+ * such GPEs misfiring. acpi_leave_sleep_state() will re-enable
+ * specific GPEs later.
+ */
+ acpi_hw_disable_all_gpes();
+
+ local_irq_restore(flags);
+ printk(KERN_DEBUG "Back to C!\n");
+
+ /* restore processor state */
+ if (acpi_state == ACPI_STATE_S3)
+ acpi_restore_state_mem();
+
+ return ACPI_SUCCESS(status) ? 0 : -EFAULT;
+}
+
+static int acpi_suspend_state_valid(suspend_state_t pm_state)
+{
+ u32 acpi_state;
+
+ switch (pm_state) {
+ case PM_SUSPEND_ON:
+ case PM_SUSPEND_STANDBY:
+ case PM_SUSPEND_MEM:
+ acpi_state = acpi_suspend_states[pm_state];
+
+ return sleep_states[acpi_state];
+ default:
+ return 0;
+ }
+}
+
+static struct platform_suspend_ops acpi_suspend_ops = {
+ .valid = acpi_suspend_state_valid,
+ .begin = acpi_suspend_begin,
+ .prepare = acpi_pm_prepare,
+ .enter = acpi_suspend_enter,
+ .finish = acpi_pm_finish,
+ .end = acpi_pm_end,
+};
+
+/**
+ * acpi_suspend_begin_old - Set the target system sleep state to the
+ * state associated with given @pm_state, if supported, and
+ * execute the _PTS control method. This function is used if the
+ * pre-ACPI 2.0 suspend ordering has been requested.
+ */
+static int acpi_suspend_begin_old(suspend_state_t pm_state)
+{
+ int error = acpi_suspend_begin(pm_state);
+
+ if (!error)
+ error = __acpi_pm_prepare();
+ return error;
+}
+
+/*
+ * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
+ * been requested.
+ */
+static struct platform_suspend_ops acpi_suspend_ops_old = {
+ .valid = acpi_suspend_state_valid,
+ .begin = acpi_suspend_begin_old,
+ .prepare = acpi_pm_disable_gpes,
+ .enter = acpi_suspend_enter,
+ .finish = acpi_pm_finish,
+ .end = acpi_pm_end,
+ .recover = acpi_pm_finish,
+};
+
+static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
+{
+ old_suspend_ordering = true;
+ return 0;
+}
+
+static int __init init_set_sci_en_on_resume(const struct dmi_system_id *d)
+{
+ set_sci_en_on_resume = true;
+ return 0;
+}
+
+static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
+ {
+ .callback = init_old_suspend_ordering,
+ .ident = "Abit KN9 (nForce4 variant)",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
+ DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
+ },
+ },
+ {
+ .callback = init_old_suspend_ordering,
+ .ident = "HP xw4600 Workstation",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
+ },
+ },
+ {
+ .callback = init_set_sci_en_on_resume,
+ .ident = "Apple MacBook 1,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
+ },
+ },
+ {
+ .callback = init_set_sci_en_on_resume,
+ .ident = "Apple MacMini 1,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
+ },
+ },
+ {},
+};
+#endif /* CONFIG_SUSPEND */
+
+#ifdef CONFIG_HIBERNATION
+static unsigned long s4_hardware_signature;
+static struct acpi_table_facs *facs;
+static bool nosigcheck;
+
+void __init acpi_no_s4_hw_signature(void)
+{
+ nosigcheck = true;
+}
+
+static int acpi_hibernation_begin(void)
+{
+ acpi_target_sleep_state = ACPI_STATE_S4;
+ acpi_sleep_tts_switch(acpi_target_sleep_state);
+ return 0;
+}
+
+static int acpi_hibernation_enter(void)
+{
+ acpi_status status = AE_OK;
+ unsigned long flags = 0;
+
+ ACPI_FLUSH_CPU_CACHE();
+
+ local_irq_save(flags);
+ acpi_enable_wakeup_device(ACPI_STATE_S4);
+ /* This shouldn't return. If it returns, we have a problem */
+ status = acpi_enter_sleep_state(ACPI_STATE_S4);
+ /* Reprogram control registers and execute _BFS */
+ acpi_leave_sleep_state_prep(ACPI_STATE_S4);
+ local_irq_restore(flags);
+
+ return ACPI_SUCCESS(status) ? 0 : -EFAULT;
+}
+
+static void acpi_hibernation_leave(void)
+{
+ /*
+ * If ACPI is not enabled by the BIOS and the boot kernel, we need to
+ * enable it here.
+ */
+ acpi_enable();
+ /* Reprogram control registers and execute _BFS */
+ acpi_leave_sleep_state_prep(ACPI_STATE_S4);
+ /* Check the hardware signature */
+ if (facs && s4_hardware_signature != facs->hardware_signature) {
+ printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
+ "cannot resume!\n");
+ panic("ACPI S4 hardware signature mismatch");
+ }
+}
+
+static void acpi_pm_enable_gpes(void)
+{
+ acpi_hw_enable_all_runtime_gpes();
+}
+
+static struct platform_hibernation_ops acpi_hibernation_ops = {
+ .begin = acpi_hibernation_begin,
+ .end = acpi_pm_end,
+ .pre_snapshot = acpi_pm_prepare,
+ .finish = acpi_pm_finish,
+ .prepare = acpi_pm_prepare,
+ .enter = acpi_hibernation_enter,
+ .leave = acpi_hibernation_leave,
+ .pre_restore = acpi_pm_disable_gpes,
+ .restore_cleanup = acpi_pm_enable_gpes,
+};
+
+/**
+ * acpi_hibernation_begin_old - Set the target system sleep state to
+ * ACPI_STATE_S4 and execute the _PTS control method. This
+ * function is used if the pre-ACPI 2.0 suspend ordering has been
+ * requested.
+ */
+static int acpi_hibernation_begin_old(void)
+{
+ int error;
+ /*
+ * The _TTS object should always be evaluated before the _PTS object.
+ * When old_suspend_ordering is true, the _PTS object is evaluated
+ * in acpi_sleep_prepare().
+ */
+ acpi_sleep_tts_switch(ACPI_STATE_S4);
+
+ error = acpi_sleep_prepare(ACPI_STATE_S4);
+
+ if (!error)
+ acpi_target_sleep_state = ACPI_STATE_S4;
+ return error;
+}
+
+/*
+ * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
+ * been requested.
+ */
+static struct platform_hibernation_ops acpi_hibernation_ops_old = {
+ .begin = acpi_hibernation_begin_old,
+ .end = acpi_pm_end,
+ .pre_snapshot = acpi_pm_disable_gpes,
+ .finish = acpi_pm_finish,
+ .prepare = acpi_pm_disable_gpes,
+ .enter = acpi_hibernation_enter,
+ .leave = acpi_hibernation_leave,
+ .pre_restore = acpi_pm_disable_gpes,
+ .restore_cleanup = acpi_pm_enable_gpes,
+ .recover = acpi_pm_finish,
+};
+#endif /* CONFIG_HIBERNATION */
+
+int acpi_suspend(u32 acpi_state)
+{
+ suspend_state_t states[] = {
+ [1] = PM_SUSPEND_STANDBY,
+ [3] = PM_SUSPEND_MEM,
+ [5] = PM_SUSPEND_MAX
+ };
+
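+ /*
+ * Map the requested ACPI S-state to a pm_suspend() state;
+ * S4 is handled by hibernate() instead.
+ */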
+ if (acpi_state < 6 && states[acpi_state])
+ return pm_suspend(states[acpi_state]);
+ if (acpi_state == 4)
+ return hibernate();
+ return -EINVAL;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * acpi_pm_device_sleep_state - return preferred power state of ACPI device
+ * in the system sleep state given by %acpi_target_sleep_state
+ * @dev: device to examine; its driver model wakeup flags control
+ * whether it should be able to wake up the system
+ * @d_min_p: used to store the upper limit of allowed states range
+ * Return value: preferred power state of the device on success, -ENODEV on
+ * failure (ie. if there's no 'struct acpi_device' for @dev)
+ *
+ * Find the lowest power (highest number) ACPI device power state that
+ * device @dev can be in while the system is in the sleep state represented
+ * by %acpi_target_sleep_state. If the device may wake up the system
+ * (as indicated by its driver model wakeup flags, see
+ * device_may_wakeup()), the state returned is chosen so that the
+ * device can still wake up the system from it. If @d_min_p is set,
+ * the highest power (lowest number) device power state of @dev
+ * allowed in this system sleep state is stored at the location
+ * pointed to by it.
+ *
+ * The caller must ensure that @dev is valid before using this function.
+ * Whether the device should be able to wake up the system is taken
+ * from its driver model wakeup flags (device_may_wakeup()).
+ */
+
+int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
+{
+ acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
+ struct acpi_device *adev;
+ char acpi_method[] = "_SxD";
+ unsigned long long d_min, d_max;
+
+ if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
+ printk(KERN_DEBUG "ACPI handle has no context!\n");
+ return -ENODEV;
+ }
+
+ acpi_method[2] = '0' + acpi_target_sleep_state;
+ /*
+ * If the sleep state is S0, we will return D3, but if the device has
+ * _S0W, we will use the value from _S0W
+ */
+ d_min = ACPI_STATE_D0;
+ d_max = ACPI_STATE_D3;
+
+ /*
+ * If present, _SxD methods return the minimum D-state (highest power
+ * state) we can use for the corresponding S-states. Otherwise, the
+ * minimum D-state is D0 (ACPI 3.x).
+ *
+ * NOTE: We rely on acpi_evaluate_integer() not clobbering the integer
+ * provided -- that's our fault recovery, we ignore retval.
+ */
+ if (acpi_target_sleep_state > ACPI_STATE_S0)
+ acpi_evaluate_integer(handle, acpi_method, NULL, &d_min);
+
+ /*
+ * If _PRW says we can wake up the system from the target sleep state,
+ * the D-state returned by _SxD is sufficient for that (we assume a
+ * wakeup-aware driver if wake is set). Still, if _SxW exists
+ * (ACPI 3.x), it should return the maximum (lowest power) D-state that
+ * can wake the system. _S0W may be valid, too.
+ */
+ if (acpi_target_sleep_state == ACPI_STATE_S0 ||
+ (device_may_wakeup(dev) && adev->wakeup.state.enabled &&
+ adev->wakeup.sleep_state <= acpi_target_sleep_state)) {
+ acpi_status status;
+
+ acpi_method[3] = 'W';
+ status = acpi_evaluate_integer(handle, acpi_method, NULL,
+ &d_max);
+ if (ACPI_FAILURE(status)) {
+ d_max = d_min;
+ } else if (d_max < d_min) {
+ /* Warn the user of the broken DSDT */
+ printk(KERN_WARNING "ACPI: Wrong value from %s\n",
+ acpi_method);
+ /* Sanitize it */
+ d_min = d_max;
+ }
+ }
+
+ if (d_min_p)
+ *d_min_p = d_min;
+ return d_max;
+}
+
+/**
+ * acpi_pm_device_sleep_wake - enable or disable the system wake-up
+ * capability of given device
+ * @dev: device to handle
+ * @enable: 'true' - enable, 'false' - disable the wake-up capability
+ */
+int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
+{
+ acpi_handle handle;
+ struct acpi_device *adev;
+
+ if (!device_may_wakeup(dev))
+ return -EINVAL;
+
+ handle = DEVICE_ACPI_HANDLE(dev);
+ if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
+ printk(KERN_DEBUG "ACPI handle has no context!\n");
+ return -ENODEV;
+ }
+
+ return enable ?
+ acpi_enable_wakeup_device_power(adev, acpi_target_sleep_state) :
+ acpi_disable_wakeup_device_power(adev);
+}
+#endif
+
+static void acpi_power_off_prepare(void)
+{
+ /* Prepare to power off the system */
+ acpi_sleep_prepare(ACPI_STATE_S5);
+ acpi_hw_disable_all_gpes();
+}
+
+static void acpi_power_off(void)
+{
+ /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
+ printk("%s called\n", __func__);
+ local_irq_disable();
+ acpi_enable_wakeup_device(ACPI_STATE_S5);
+ acpi_enter_sleep_state(ACPI_STATE_S5);
+}
+
+int __init acpi_sleep_init(void)
+{
+ acpi_status status;
+ u8 type_a, type_b;
+#ifdef CONFIG_SUSPEND
+ int i = 0;
+
+ dmi_check_system(acpisleep_dmi_table);
+#endif
+
+ if (acpi_disabled)
+ return 0;
+
+ sleep_states[ACPI_STATE_S0] = 1;
+ printk(KERN_INFO PREFIX "(supports S0");
+
+#ifdef CONFIG_SUSPEND
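+ /*
+ * Probe S1-S3 support by asking ACPICA for each state's sleep type
+ * data; only the states the firmware supports are advertised.
+ */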
+ for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) {
+ status = acpi_get_sleep_type_data(i, &type_a, &type_b);
+ if (ACPI_SUCCESS(status)) {
+ sleep_states[i] = 1;
+ printk(" S%d", i);
+ }
+ }
+
+ suspend_set_ops(old_suspend_ordering ?
+ &acpi_suspend_ops_old : &acpi_suspend_ops);
+#endif
+
+#ifdef CONFIG_HIBERNATION
+ status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b);
+ if (ACPI_SUCCESS(status)) {
+ hibernation_set_ops(old_suspend_ordering ?
+ &acpi_hibernation_ops_old : &acpi_hibernation_ops);
+ sleep_states[ACPI_STATE_S4] = 1;
+ printk(" S4");
+ if (!nosigcheck) {
+ acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
+ (struct acpi_table_header **)&facs);
+ if (facs)
+ s4_hardware_signature =
+ facs->hardware_signature;
+ }
+ }
+#endif
+ status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
+ if (ACPI_SUCCESS(status)) {
+ sleep_states[ACPI_STATE_S5] = 1;
+ printk(" S5");
+ pm_power_off_prepare = acpi_power_off_prepare;
+ pm_power_off = acpi_power_off;
+ }
+ printk(")\n");
+ /*
+ * Register the tts_notifier to reboot notifier list so that the _TTS
+ * object can also be evaluated when the system enters S5.
+ */
+ register_reboot_notifier(&tts_notifier);
+ return 0;
+}
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c
new file mode 100644
index 0000000..4dbc227
--- /dev/null
+++ b/drivers/acpi/sleep/proc.c
@@ -0,0 +1,526 @@
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/suspend.h>
+#include <linux/bcd.h>
+#include <asm/uaccess.h>
+
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#ifdef CONFIG_X86
+#include <linux/mc146818rtc.h>
+#endif
+
+#include "sleep.h"
+
+#define _COMPONENT ACPI_SYSTEM_COMPONENT
+
+/*
+ * this file provides support for:
+ * /proc/acpi/sleep
+ * /proc/acpi/alarm
+ * /proc/acpi/wakeup
+ */
+
+ACPI_MODULE_NAME("sleep")
+#ifdef CONFIG_ACPI_PROCFS
+static int acpi_system_sleep_seq_show(struct seq_file *seq, void *offset)
+{
+ int i;
+
+ ACPI_FUNCTION_TRACE("acpi_system_sleep_seq_show");
+
+ for (i = 0; i <= ACPI_STATE_S5; i++) {
+ if (sleep_states[i]) {
+ seq_printf(seq, "S%d ", i);
+ }
+ }
+
+ seq_puts(seq, "\n");
+
+ return 0;
+}
+
+static int acpi_system_sleep_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_system_sleep_seq_show, PDE(inode)->data);
+}
+
+static ssize_t
+acpi_system_write_sleep(struct file *file,
+ const char __user * buffer, size_t count, loff_t * ppos)
+{
+ char str[12];
+ u32 state = 0;
+ int error = 0;
+
+ if (count > sizeof(str) - 1)
+ goto Done;
+ memset(str, 0, sizeof(str));
+ if (copy_from_user(str, buffer, count))
+ return -EFAULT;
+
+ /* Check for S4 bios request */
+ if (!strcmp(str, "4b")) {
+ error = acpi_suspend(4);
+ goto Done;
+ }
+ state = simple_strtoul(str, NULL, 0);
+#ifdef CONFIG_HIBERNATION
+ if (state == 4) {
+ error = hibernate();
+ goto Done;
+ }
+#endif
+ error = acpi_suspend(state);
+ Done:
+ return error ? error : count;
+}
+#endif /* CONFIG_ACPI_PROCFS */
+
+#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) || !defined(CONFIG_X86)
+/* use /sys/class/rtc/rtcX/wakealarm instead; it's not ACPI-specific */
+#else
+#define HAVE_ACPI_LEGACY_ALARM
+#endif
+
+#ifdef HAVE_ACPI_LEGACY_ALARM
+
+static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
+{
+ u32 sec, min, hr;
+ u32 day, mo, yr, cent = 0;
+ unsigned char rtc_control = 0;
+ unsigned long flags;
+
+ ACPI_FUNCTION_TRACE("acpi_system_alarm_seq_show");
+
+ spin_lock_irqsave(&rtc_lock, flags);
+
+ sec = CMOS_READ(RTC_SECONDS_ALARM);
+ min = CMOS_READ(RTC_MINUTES_ALARM);
+ hr = CMOS_READ(RTC_HOURS_ALARM);
+ rtc_control = CMOS_READ(RTC_CONTROL);
+
+ /* If we ever get an FACP with proper values... */
+ if (acpi_gbl_FADT.day_alarm)
+ /* ACPI spec: only the low 6 bits matter */
+ day = CMOS_READ(acpi_gbl_FADT.day_alarm) & 0x3F;
+ else
+ day = CMOS_READ(RTC_DAY_OF_MONTH);
+ if (acpi_gbl_FADT.month_alarm)
+ mo = CMOS_READ(acpi_gbl_FADT.month_alarm);
+ else
+ mo = CMOS_READ(RTC_MONTH);
+ if (acpi_gbl_FADT.century)
+ cent = CMOS_READ(acpi_gbl_FADT.century);
+
+ yr = CMOS_READ(RTC_YEAR);
+
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
+ if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ sec = bcd2bin(sec);
+ min = bcd2bin(min);
+ hr = bcd2bin(hr);
+ day = bcd2bin(day);
+ mo = bcd2bin(mo);
+ yr = bcd2bin(yr);
+ cent = bcd2bin(cent);
+ }
+
+ /* we're trusting the FADT (see above) */
+ if (!acpi_gbl_FADT.century)
+ /* If we're not trusting the FADT, we should at least make it
+ * right for _this_ century... ehm, what is _this_ century?
+ *
+ * TBD:
+ * ASAP: find piece of code in the kernel, e.g. star tracker driver,
+ * which we can trust to determine the century correctly. Atom
+ * watch driver would be nice, too...
+ *
+ * if that has not happened, change for first release in 2050:
+ * if (yr<50)
+ * yr += 2100;
+ * else
+ * yr += 2000; // current line of code
+ *
+ * if that has not happened either, please do on 2099/12/31:23:59:59
+ * s/2000/2100
+ *
+ */
+ yr += 2000;
+ else
+ yr += cent * 100;
+
+ seq_printf(seq, "%4.4u-", yr);
+ (mo > 12) ? seq_puts(seq, "**-") : seq_printf(seq, "%2.2u-", mo);
+ (day > 31) ? seq_puts(seq, "** ") : seq_printf(seq, "%2.2u ", day);
+ (hr > 23) ? seq_puts(seq, "**:") : seq_printf(seq, "%2.2u:", hr);
+ (min > 59) ? seq_puts(seq, "**:") : seq_printf(seq, "%2.2u:", min);
+ (sec > 59) ? seq_puts(seq, "**\n") : seq_printf(seq, "%2.2u\n", sec);
+
+ return 0;
+}
+
+static int acpi_system_alarm_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_system_alarm_seq_show, PDE(inode)->data);
+}
+
+static int get_date_field(char **p, u32 * value)
+{
+ char *next = NULL;
+ char *string_end = NULL;
+ int result = -EINVAL;
+
+ /*
+ * Try to find a delimiter, only to insert a null there. The
+ * end of the string won't have one, but it is still valid.
+ */
+ if (*p == NULL)
+ return result;
+
+ next = strpbrk(*p, "- :");
+ if (next)
+ *next++ = '\0';
+
+ *value = simple_strtoul(*p, &string_end, 10);
+
+ /* Signal success if we got a good digit */
+ if (string_end != *p)
+ result = 0;
+
+ if (next)
+ *p = next;
+ else
+ *p = NULL;
+
+ return result;
+}
+
+/* Read a possibly BCD register, always return binary */
+static u32 cmos_bcd_read(int offset, int rtc_control)
+{
+ u32 val = CMOS_READ(offset);
+ if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+ val = bcd2bin(val);
+ return val;
+}
+
+/* Write binary value into possibly BCD register */
+static void cmos_bcd_write(u32 val, int offset, int rtc_control)
+{
+ if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+ val = bin2bcd(val);
+ CMOS_WRITE(val, offset);
+}
+
+static ssize_t
+acpi_system_write_alarm(struct file *file,
+ const char __user * buffer, size_t count, loff_t * ppos)
+{
+ int result = 0;
+ char alarm_string[30] = { '\0' };
+ char *p = alarm_string;
+ u32 sec, min, hr, day, mo, yr;
+ int adjust = 0;
+ unsigned char rtc_control = 0;
+
+ ACPI_FUNCTION_TRACE("acpi_system_write_alarm");
+
+ if (count > sizeof(alarm_string) - 1)
+ return_VALUE(-EINVAL);
+
+ if (copy_from_user(alarm_string, buffer, count))
+ return_VALUE(-EFAULT);
+
+ alarm_string[count] = '\0';
+
+ /* check for time adjustment */
+ if (alarm_string[0] == '+') {
+ p++;
+ adjust = 1;
+ }
+
+ if ((result = get_date_field(&p, &yr)))
+ goto end;
+ if ((result = get_date_field(&p, &mo)))
+ goto end;
+ if ((result = get_date_field(&p, &day)))
+ goto end;
+ if ((result = get_date_field(&p, &hr)))
+ goto end;
+ if ((result = get_date_field(&p, &min)))
+ goto end;
+ if ((result = get_date_field(&p, &sec)))
+ goto end;
+
+ spin_lock_irq(&rtc_lock);
+
+ rtc_control = CMOS_READ(RTC_CONTROL);
+
+ if (adjust) {
+ yr += cmos_bcd_read(RTC_YEAR, rtc_control);
+ mo += cmos_bcd_read(RTC_MONTH, rtc_control);
+ day += cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
+ hr += cmos_bcd_read(RTC_HOURS, rtc_control);
+ min += cmos_bcd_read(RTC_MINUTES, rtc_control);
+ sec += cmos_bcd_read(RTC_SECONDS, rtc_control);
+ }
+
+ spin_unlock_irq(&rtc_lock);
+
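+ /*
+ * Normalize any field overflow caused by the '+' adjustment
+ * (e.g. 70 seconds becomes 1 minute and 10 seconds).
+ */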
+ if (sec > 59) {
+ min += sec/60;
+ sec = sec%60;
+ }
+ if (min > 59) {
+ hr += min/60;
+ min = min%60;
+ }
+ if (hr > 23) {
+ day += hr/24;
+ hr = hr%24;
+ }
+ if (day > 31) {
+ mo += day/32;
+ day = day%32;
+ }
+ if (mo > 12) {
+ yr += mo/13;
+ mo = mo%13;
+ }
+
+ spin_lock_irq(&rtc_lock);
+ /*
+ * Disable alarm interrupt before setting alarm timer or else
+ * when ACPI_EVENT_RTC is enabled, a spurious ACPI interrupt occurs
+ */
+ rtc_control &= ~RTC_AIE;
+ CMOS_WRITE(rtc_control, RTC_CONTROL);
+ CMOS_READ(RTC_INTR_FLAGS);
+
+ /* write the fields the rtc knows about */
+ cmos_bcd_write(hr, RTC_HOURS_ALARM, rtc_control);
+ cmos_bcd_write(min, RTC_MINUTES_ALARM, rtc_control);
+ cmos_bcd_write(sec, RTC_SECONDS_ALARM, rtc_control);
+
+ /*
+ * If the system supports an enhanced alarm it will have non-zero
+ * offsets into the CMOS RAM here -- which for some reason point
+ * to the RTC area of memory.
+ */
+ if (acpi_gbl_FADT.day_alarm)
+ cmos_bcd_write(day, acpi_gbl_FADT.day_alarm, rtc_control);
+ if (acpi_gbl_FADT.month_alarm)
+ cmos_bcd_write(mo, acpi_gbl_FADT.month_alarm, rtc_control);
+ if (acpi_gbl_FADT.century) {
+ if (adjust)
+ yr += cmos_bcd_read(acpi_gbl_FADT.century, rtc_control) * 100;
+ cmos_bcd_write(yr / 100, acpi_gbl_FADT.century, rtc_control);
+ }
+ /* enable the rtc alarm interrupt */
+ rtc_control |= RTC_AIE;
+ CMOS_WRITE(rtc_control, RTC_CONTROL);
+ CMOS_READ(RTC_INTR_FLAGS);
+
+ spin_unlock_irq(&rtc_lock);
+
+ acpi_clear_event(ACPI_EVENT_RTC);
+ acpi_enable_event(ACPI_EVENT_RTC, 0);
+
+ *ppos += count;
+
+ result = 0;
+ end:
+ return_VALUE(result ? result : count);
+}
+#endif /* HAVE_ACPI_LEGACY_ALARM */
+
+extern struct list_head acpi_wakeup_device_list;
+extern spinlock_t acpi_device_lock;
+
+static int
+acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
+{
+ struct list_head *node, *next;
+
+ seq_printf(seq, "Device\tS-state\t Status Sysfs node\n");
+
+ spin_lock(&acpi_device_lock);
+ list_for_each_safe(node, next, &acpi_wakeup_device_list) {
+ struct acpi_device *dev =
+ container_of(node, struct acpi_device, wakeup_list);
+ struct device *ldev;
+
+ if (!dev->wakeup.flags.valid)
+ continue;
+ spin_unlock(&acpi_device_lock);
+
+ ldev = acpi_get_physical_device(dev->handle);
+ seq_printf(seq, "%s\t S%d\t%c%-8s ",
+ dev->pnp.bus_id,
+ (u32) dev->wakeup.sleep_state,
+ dev->wakeup.flags.run_wake ? '*' : ' ',
+ dev->wakeup.state.enabled ? "enabled" : "disabled");
+ if (ldev)
+ seq_printf(seq, "%s:%s",
+ ldev->bus ? ldev->bus->name : "no-bus",
+ dev_name(ldev));
+ seq_printf(seq, "\n");
+ put_device(ldev);
+
+ spin_lock(&acpi_device_lock);
+ }
+ spin_unlock(&acpi_device_lock);
+ return 0;
+}
+
+static void physical_device_enable_wakeup(struct acpi_device *adev)
+{
+ struct device *dev = acpi_get_physical_device(adev->handle);
+
+ if (dev && device_can_wakeup(dev))
+ device_set_wakeup_enable(dev, adev->wakeup.state.enabled);
+}
+
+static ssize_t
+acpi_system_write_wakeup_device(struct file *file,
+ const char __user * buffer,
+ size_t count, loff_t * ppos)
+{
+ struct list_head *node, *next;
+ char strbuf[5];
+ char str[5] = "";
+ int len = count;
+ struct acpi_device *found_dev = NULL;
+
+ if (len > 4)
+ len = 4;
+
+ if (copy_from_user(strbuf, buffer, len))
+ return -EFAULT;
+ strbuf[len] = '\0';
+ sscanf(strbuf, "%s", str);
+
+ spin_lock(&acpi_device_lock);
+ list_for_each_safe(node, next, &acpi_wakeup_device_list) {
+ struct acpi_device *dev =
+ container_of(node, struct acpi_device, wakeup_list);
+ if (!dev->wakeup.flags.valid)
+ continue;
+
+ if (!strncmp(dev->pnp.bus_id, str, 4)) {
+ dev->wakeup.state.enabled =
+ dev->wakeup.state.enabled ? 0 : 1;
+ found_dev = dev;
+ break;
+ }
+ }
+ if (found_dev) {
+ physical_device_enable_wakeup(found_dev);
+ list_for_each_safe(node, next, &acpi_wakeup_device_list) {
+ struct acpi_device *dev = container_of(node,
+ struct
+ acpi_device,
+ wakeup_list);
+
+ if ((dev != found_dev) &&
+ (dev->wakeup.gpe_number ==
+ found_dev->wakeup.gpe_number)
+ && (dev->wakeup.gpe_device ==
+ found_dev->wakeup.gpe_device)) {
+ printk(KERN_WARNING
+ "ACPI: '%s' and '%s' have the same GPE, "
+ "can't disable/enable one seperately\n",
+ dev->pnp.bus_id, found_dev->pnp.bus_id);
+ dev->wakeup.state.enabled =
+ found_dev->wakeup.state.enabled;
+ physical_device_enable_wakeup(dev);
+ }
+ }
+ }
+ spin_unlock(&acpi_device_lock);
+ return count;
+}
+
+static int
+acpi_system_wakeup_device_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_system_wakeup_device_seq_show,
+ PDE(inode)->data);
+}
+
+static const struct file_operations acpi_system_wakeup_device_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_system_wakeup_device_open_fs,
+ .read = seq_read,
+ .write = acpi_system_write_wakeup_device,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#ifdef CONFIG_ACPI_PROCFS
+static const struct file_operations acpi_system_sleep_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_system_sleep_open_fs,
+ .read = seq_read,
+ .write = acpi_system_write_sleep,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif /* CONFIG_ACPI_PROCFS */
+
+#ifdef HAVE_ACPI_LEGACY_ALARM
+static const struct file_operations acpi_system_alarm_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_system_alarm_open_fs,
+ .read = seq_read,
+ .write = acpi_system_write_alarm,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static u32 rtc_handler(void *context)
+{
+ acpi_clear_event(ACPI_EVENT_RTC);
+ acpi_disable_event(ACPI_EVENT_RTC, 0);
+
+ return ACPI_INTERRUPT_HANDLED;
+}
+#endif /* HAVE_ACPI_LEGACY_ALARM */
+
+static int __init acpi_sleep_proc_init(void)
+{
+ if (acpi_disabled)
+ return 0;
+
+#ifdef CONFIG_ACPI_PROCFS
+ /* 'sleep' [R/W] */
+ proc_create("sleep", S_IFREG | S_IRUGO | S_IWUSR,
+ acpi_root_dir, &acpi_system_sleep_fops);
+#endif /* CONFIG_ACPI_PROCFS */
+
+#ifdef HAVE_ACPI_LEGACY_ALARM
+ /* 'alarm' [R/W] */
+ proc_create("alarm", S_IFREG | S_IRUGO | S_IWUSR,
+ acpi_root_dir, &acpi_system_alarm_fops);
+
+ acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
+ /*
+ * Disable the RTC event after installing RTC handler.
+ * Only when RTC alarm is set will it be enabled.
+ */
+ acpi_clear_event(ACPI_EVENT_RTC);
+ acpi_disable_event(ACPI_EVENT_RTC, 0);
+#endif /* HAVE_ACPI_LEGACY_ALARM */
+
+ /* 'wakeup device' [R/W] */
+ proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
+ acpi_root_dir, &acpi_system_wakeup_device_fops);
+
+ return 0;
+}
+
+late_initcall(acpi_sleep_proc_init);
diff --git a/drivers/acpi/sleep/sleep.h b/drivers/acpi/sleep/sleep.h
new file mode 100644
index 0000000..cfaf8f5
--- /dev/null
+++ b/drivers/acpi/sleep/sleep.h
@@ -0,0 +1,7 @@
+
+extern u8 sleep_states[];
+extern int acpi_suspend (u32 state);
+
+extern void acpi_enable_wakeup_device_prep(u8 sleep_state);
+extern void acpi_enable_wakeup_device(u8 sleep_state);
+extern void acpi_disable_wakeup_device(u8 sleep_state);
diff --git a/drivers/acpi/sleep/wakeup.c b/drivers/acpi/sleep/wakeup.c
new file mode 100644
index 0000000..dea4c23
--- /dev/null
+++ b/drivers/acpi/sleep/wakeup.c
@@ -0,0 +1,173 @@
+/*
+ * wakeup.c - support wakeup devices
+ * Copyright (C) 2004 Li Shaohua <shaohua.li@intel.com>
+ */
+
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_drivers.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <acpi/acevents.h>
+#include "sleep.h"
+
+#define _COMPONENT ACPI_SYSTEM_COMPONENT
+ACPI_MODULE_NAME("wakeup_devices")
+
+extern struct list_head acpi_wakeup_device_list;
+extern spinlock_t acpi_device_lock;
+
+/**
+ * acpi_enable_wakeup_device_prep - prepare wakeup devices
+ * @sleep_state: ACPI state
+ * Enable wakeup power for all wakeup devices whose wakeup level
+ * is at least the requested sleep level
+ */
+
+void acpi_enable_wakeup_device_prep(u8 sleep_state)
+{
+ struct list_head *node, *next;
+
+ ACPI_FUNCTION_TRACE("acpi_enable_wakeup_device_prep");
+
+ spin_lock(&acpi_device_lock);
+ list_for_each_safe(node, next, &acpi_wakeup_device_list) {
+ struct acpi_device *dev = container_of(node,
+ struct acpi_device,
+ wakeup_list);
+
+ if (!dev->wakeup.flags.valid ||
+ !dev->wakeup.state.enabled ||
+ (sleep_state > (u32) dev->wakeup.sleep_state))
+ continue;
+
+ spin_unlock(&acpi_device_lock);
+ acpi_enable_wakeup_device_power(dev, sleep_state);
+ spin_lock(&acpi_device_lock);
+ }
+ spin_unlock(&acpi_device_lock);
+}
+
+/**
+ * acpi_enable_wakeup_device - enable wakeup devices
+ * @sleep_state: ACPI state
+ * Enable the GPEs of all wakeup devices
+ */
+void acpi_enable_wakeup_device(u8 sleep_state)
+{
+ struct list_head *node, *next;
+
+ /*
+ * Caution: this routine must be invoked with interrupts disabled.
+ * See ACPI 2.0, p. 212.
+ */
+ ACPI_FUNCTION_TRACE("acpi_enable_wakeup_device");
+ spin_lock(&acpi_device_lock);
+ list_for_each_safe(node, next, &acpi_wakeup_device_list) {
+ struct acpi_device *dev =
+ container_of(node, struct acpi_device, wakeup_list);
+
+ if (!dev->wakeup.flags.valid)
+ continue;
+
+ /* If the user wants to disable a run-wake GPE,
+ * only disable it for wake and leave it configured for runtime
+ */
+ if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared)
+ || sleep_state > (u32) dev->wakeup.sleep_state) {
+ if (dev->wakeup.flags.run_wake) {
+ spin_unlock(&acpi_device_lock);
+ /* set_gpe_type will disable GPE, leave it like that */
+ acpi_set_gpe_type(dev->wakeup.gpe_device,
+ dev->wakeup.gpe_number,
+ ACPI_GPE_TYPE_RUNTIME);
+ spin_lock(&acpi_device_lock);
+ }
+ continue;
+ }
+ spin_unlock(&acpi_device_lock);
+ if (!dev->wakeup.flags.run_wake)
+ acpi_enable_gpe(dev->wakeup.gpe_device,
+ dev->wakeup.gpe_number);
+ spin_lock(&acpi_device_lock);
+ }
+ spin_unlock(&acpi_device_lock);
+}
+
+/**
+ * acpi_disable_wakeup_device - disable devices' wakeup capability
+ * @sleep_state: ACPI state
+ * Disable the GPEs and wakeup capability of all wakeup devices
+ */
+void acpi_disable_wakeup_device(u8 sleep_state)
+{
+ struct list_head *node, *next;
+
+ ACPI_FUNCTION_TRACE("acpi_disable_wakeup_device");
+
+ spin_lock(&acpi_device_lock);
+ list_for_each_safe(node, next, &acpi_wakeup_device_list) {
+ struct acpi_device *dev =
+ container_of(node, struct acpi_device, wakeup_list);
+
+ if (!dev->wakeup.flags.valid)
+ continue;
+
+ if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared)
+ || sleep_state > (u32) dev->wakeup.sleep_state) {
+ if (dev->wakeup.flags.run_wake) {
+ spin_unlock(&acpi_device_lock);
+ acpi_set_gpe_type(dev->wakeup.gpe_device,
+ dev->wakeup.gpe_number,
+ ACPI_GPE_TYPE_WAKE_RUN);
+ /* Re-enable it, since set_gpe_type will disable it */
+ acpi_enable_gpe(dev->wakeup.gpe_device,
+ dev->wakeup.gpe_number);
+ spin_lock(&acpi_device_lock);
+ }
+ continue;
+ }
+
+ spin_unlock(&acpi_device_lock);
+ acpi_disable_wakeup_device_power(dev);
+ /* Never disable run-wake GPE */
+ if (!dev->wakeup.flags.run_wake) {
+ acpi_disable_gpe(dev->wakeup.gpe_device,
+ dev->wakeup.gpe_number);
+ acpi_clear_gpe(dev->wakeup.gpe_device,
+ dev->wakeup.gpe_number, ACPI_NOT_ISR);
+ }
+ spin_lock(&acpi_device_lock);
+ }
+ spin_unlock(&acpi_device_lock);
+}
+
+static int __init acpi_wakeup_device_init(void)
+{
+ struct list_head *node, *next;
+
+ if (acpi_disabled)
+ return 0;
+
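+ /*
+ * Walk the wakeup device list and pre-enable the GPE of every
+ * run-wake device that is not already enabled, so wakeup (e.g.
+ * via the power button) works even if the corresponding driver
+ * is never loaded.
+ */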
+ spin_lock(&acpi_device_lock);
+ list_for_each_safe(node, next, &acpi_wakeup_device_list) {
+ struct acpi_device *dev = container_of(node,
+ struct acpi_device,
+ wakeup_list);
+ /* In case user doesn't load button driver */
+ if (!dev->wakeup.flags.run_wake || dev->wakeup.state.enabled)
+ continue;
+ spin_unlock(&acpi_device_lock);
+ acpi_set_gpe_type(dev->wakeup.gpe_device,
+ dev->wakeup.gpe_number,
+ ACPI_GPE_TYPE_WAKE_RUN);
+ acpi_enable_gpe(dev->wakeup.gpe_device,
+ dev->wakeup.gpe_number);
+ dev->wakeup.state.enabled = 1;
+ spin_lock(&acpi_device_lock);
+ }
+ spin_unlock(&acpi_device_lock);
+ return 0;
+}
+
+late_initcall(acpi_wakeup_device_init);
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
new file mode 100644
index 0000000..6e4107f
--- /dev/null
+++ b/drivers/acpi/system.c
@@ -0,0 +1,649 @@
+/*
+ * acpi_system.c - ACPI System Driver ($Revision: 63 $)
+ *
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <asm/uaccess.h>
+
+#include <acpi/acpi_drivers.h>
+
+#define _COMPONENT ACPI_SYSTEM_COMPONENT
+ACPI_MODULE_NAME("system");
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "acpi."
+
+#define ACPI_SYSTEM_CLASS "system"
+#define ACPI_SYSTEM_DEVICE_NAME "System"
+
+u32 acpi_irq_handled;
+
+/*
+ * Make ACPICA version work as module param
+ */
+static int param_get_acpica_version(char *buffer, struct kernel_param *kp)
+{
+ int result;
+
+ result = sprintf(buffer, "%x", ACPI_CA_VERSION);
+
+ return result;
+}
+
+module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
+
+/* --------------------------------------------------------------------------
+ FS Interface (/sys)
+ -------------------------------------------------------------------------- */
+static LIST_HEAD(acpi_table_attr_list);
+static struct kobject *tables_kobj;
+
+struct acpi_table_attr {
+ struct bin_attribute attr;
+ char name[8];
+ int instance;
+ struct list_head node;
+};
+
+static ssize_t acpi_table_show(struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t offset, size_t count)
+{
+ struct acpi_table_attr *table_attr =
+ container_of(bin_attr, struct acpi_table_attr, attr);
+ struct acpi_table_header *table_header = NULL;
+ acpi_status status;
+ char name[ACPI_NAME_SIZE];
+
+ if (strncmp(table_attr->name, "NULL", 4))
+ memcpy(name, table_attr->name, ACPI_NAME_SIZE);
+ else
+ memcpy(name, "\0\0\0\0", 4);
+
+ status =
+ acpi_get_table(name, table_attr->instance,
+ &table_header);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return memory_read_from_buffer(buf, count, &offset,
+ table_header, table_header->length);
+}
+
+static void acpi_table_attr_init(struct acpi_table_attr *table_attr,
+ struct acpi_table_header *table_header)
+{
+ struct acpi_table_header *header = NULL;
+ struct acpi_table_attr *attr = NULL;
+
+ if (table_header->signature[0] != '\0')
+ memcpy(table_attr->name, table_header->signature,
+ ACPI_NAME_SIZE);
+ else
+ memcpy(table_attr->name, "NULL", 4);
+
+ list_for_each_entry(attr, &acpi_table_attr_list, node) {
+ if (!memcmp(table_attr->name, attr->name, ACPI_NAME_SIZE))
+ if (table_attr->instance < attr->instance)
+ table_attr->instance = attr->instance;
+ }
+ table_attr->instance++;
+
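+ /*
+ * If more than one table shares this signature, append the
+ * instance number to the 4-character name so each sysfs file
+ * is unique.
+ */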
+ if (table_attr->instance > 1 || (table_attr->instance == 1 &&
+ !acpi_get_table
+ (table_header->signature, 2, &header)))
+ sprintf(table_attr->name + ACPI_NAME_SIZE, "%d",
+ table_attr->instance);
+
+ table_attr->attr.size = 0;
+ table_attr->attr.read = acpi_table_show;
+ table_attr->attr.attr.name = table_attr->name;
+ table_attr->attr.attr.mode = 0444;
+
+ return;
+}
+
+static int acpi_system_sysfs_init(void)
+{
+ struct acpi_table_attr *table_attr;
+ struct acpi_table_header *table_header = NULL;
+ int table_index = 0;
+ int result;
+
+ tables_kobj = kobject_create_and_add("tables", acpi_kobj);
+ if (!tables_kobj)
+ return -ENOMEM;
+
+ do {
+ result = acpi_get_table_by_index(table_index, &table_header);
+ if (!result) {
+ table_index++;
+ table_attr = NULL;
+ table_attr =
+ kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL);
+ if (!table_attr)
+ return -ENOMEM;
+
+ acpi_table_attr_init(table_attr, table_header);
+ result =
+ sysfs_create_bin_file(tables_kobj,
+ &table_attr->attr);
+ if (result) {
+ kfree(table_attr);
+ return result;
+ } else
+ list_add_tail(&table_attr->node,
+ &acpi_table_attr_list);
+ }
+ } while (!result);
+ kobject_uevent(tables_kobj, KOBJ_ADD);
+
+ return 0;
+}
+
+/*
+ * Detailed ACPI IRQ counters in /sys/firmware/acpi/interrupts/
+ * See Documentation/ABI/testing/sysfs-firmware-acpi
+ */
+
+#define COUNT_GPE 0
+#define COUNT_SCI 1 /* acpi_irq_handled */
+#define COUNT_ERROR 2 /* other */
+#define NUM_COUNTERS_EXTRA 3
+
+struct event_counter {
+ u32 count;
+ u32 flags;
+};
+
+static struct event_counter *all_counters;
+static u32 num_gpes;
+static u32 num_counters;
+static struct attribute **all_attrs;
+static u32 acpi_gpe_count;
+
+static struct attribute_group interrupt_stats_attr_group = {
+ .name = "interrupts",
+};
+static struct kobj_attribute *counter_attrs;
+
+static int count_num_gpes(void)
+{
+ int count = 0;
+ struct acpi_gpe_xrupt_info *gpe_xrupt_info;
+ struct acpi_gpe_block_info *gpe_block;
+ acpi_cpu_flags flags;
+
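+ /*
+ * Sum register_count * ACPI_GPE_REGISTER_WIDTH over every GPE
+ * block on every GPE interrupt to get the total number of GPEs.
+ */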
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
+ while (gpe_xrupt_info) {
+ gpe_block = gpe_xrupt_info->gpe_block_list_head;
+ while (gpe_block) {
+ count += gpe_block->register_count *
+ ACPI_GPE_REGISTER_WIDTH;
+ gpe_block = gpe_block->next;
+ }
+ gpe_xrupt_info = gpe_xrupt_info->next;
+ }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+ return count;
+}
+
+static int get_gpe_device(int index, acpi_handle *handle)
+{
+ struct acpi_gpe_xrupt_info *gpe_xrupt_info;
+ struct acpi_gpe_block_info *gpe_block;
+ acpi_cpu_flags flags;
+ struct acpi_namespace_node *node;
+
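+ /*
+ * Walk the GPE interrupt and block lists until the block that
+ * contains 'index' is found, then report the owning device node
+ * (NULL for the FADT-defined GPE blocks).
+ */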
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
+ while (gpe_xrupt_info) {
+ gpe_block = gpe_xrupt_info->gpe_block_list_head;
+ node = gpe_block->node;
+ while (gpe_block) {
+ index -= gpe_block->register_count *
+ ACPI_GPE_REGISTER_WIDTH;
+ if (index < 0) {
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ /* return NULL if it's FADT GPE */
+ if (node->type != ACPI_TYPE_DEVICE)
+ *handle = NULL;
+ else
+ *handle = node;
+ return 0;
+ }
+ node = gpe_block->node;
+ gpe_block = gpe_block->next;
+ }
+ gpe_xrupt_info = gpe_xrupt_info->next;
+ }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+ return -ENODEV;
+}
+
+static void delete_gpe_attr_array(void)
+{
+ struct event_counter *tmp = all_counters;
+
+ all_counters = NULL;
+ kfree(tmp);
+
+ if (counter_attrs) {
+ int i;
+
+ for (i = 0; i < num_gpes; i++)
+ kfree(counter_attrs[i].attr.name);
+
+ kfree(counter_attrs);
+ }
+ kfree(all_attrs);
+
+ return;
+}
+
+void acpi_os_gpe_count(u32 gpe_number)
+{
+ acpi_gpe_count++;
+
+ if (!all_counters)
+ return;
+
+ if (gpe_number < num_gpes)
+ all_counters[gpe_number].count++;
+ else
+ all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR].
+ count++;
+
+ return;
+}
+
+void acpi_os_fixed_event_count(u32 event_number)
+{
+ if (!all_counters)
+ return;
+
+ if (event_number < ACPI_NUM_FIXED_EVENTS)
+ all_counters[num_gpes + event_number].count++;
+ else
+ all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR].
+ count++;
+
+ return;
+}
+
+static int get_status(u32 index, acpi_event_status *status, acpi_handle *handle)
+{
+ int result = 0;
+
+ if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
+ goto end;
+
+ if (index < num_gpes) {
+ result = get_gpe_device(index, handle);
+ if (result) {
+ ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
+ "Invalid GPE 0x%x\n", index));
+ goto end;
+ }
+ result = acpi_get_gpe_status(*handle, index,
+ ACPI_NOT_ISR, status);
+ } else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
+ result = acpi_get_event_status(index - num_gpes, status);
+
+end:
+ return result;
+}
+
+static ssize_t counter_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int index = attr - counter_attrs;
+ int size;
+ acpi_handle handle;
+ acpi_event_status status;
+ int result = 0;
+
+ all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count =
+ acpi_irq_handled;
+ all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
+ acpi_gpe_count;
+
+ size = sprintf(buf, "%8d", all_counters[index].count);
+
+ /* "gpe_all" or "sci" */
+ if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
+ goto end;
+
+ result = get_status(index, &status, &handle);
+ if (result)
+ goto end;
+
+ if (!(status & ACPI_EVENT_FLAG_HANDLE))
+ size += sprintf(buf + size, " invalid");
+ else if (status & ACPI_EVENT_FLAG_ENABLED)
+ size += sprintf(buf + size, " enabled");
+ else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED)
+ size += sprintf(buf + size, " wake_enabled");
+ else
+ size += sprintf(buf + size, " disabled");
+
+end:
+ size += sprintf(buf + size, "\n");
+ return result ? result : size;
+}
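+
+/*
+ * For example, counter_show() emits "      42 enabled" for a valid, enabled
+ * GPE or fixed event, and just the raw count for the "gpe_all", "sci" and
+ * "error" totals.
+ */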
+
+/*
+ * counter_set() sets the specified counter.
+ * Writing any value to the total "sci" file clears all counters.
+ * Writing "enable", "disable" or "clear" to a GPE/fixed-event file
+ * enables, disables or clears that event from user space.
+ */
+static ssize_t counter_set(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t size)
+{
+ int index = attr - counter_attrs;
+ acpi_event_status status;
+ acpi_handle handle;
+ int result = 0;
+
+ if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
+ int i;
+ for (i = 0; i < num_counters; ++i)
+ all_counters[i].count = 0;
+ acpi_gpe_count = 0;
+ acpi_irq_handled = 0;
+ goto end;
+ }
+
+ /* show the event status for both GPEs and Fixed Events */
+ result = get_status(index, &status, &handle);
+ if (result)
+ goto end;
+
+ if (!(status & ACPI_EVENT_FLAG_HANDLE)) {
+ printk(KERN_WARNING PREFIX
+ "Can not change Invalid GPE/Fixed Event status\n");
+ return -EINVAL;
+ }
+
+ if (index < num_gpes) {
+ if (!strcmp(buf, "disable\n") &&
+ (status & ACPI_EVENT_FLAG_ENABLED))
+ result = acpi_disable_gpe(handle, index);
+ else if (!strcmp(buf, "enable\n") &&
+ !(status & ACPI_EVENT_FLAG_ENABLED))
+ result = acpi_enable_gpe(handle, index);
+ else if (!strcmp(buf, "clear\n") &&
+ (status & ACPI_EVENT_FLAG_SET))
+ result = acpi_clear_gpe(handle, index, ACPI_NOT_ISR);
+ else
+ all_counters[index].count = strtoul(buf, NULL, 0);
+ } else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
+ int event = index - num_gpes;
+ if (!strcmp(buf, "disable\n") &&
+ (status & ACPI_EVENT_FLAG_ENABLED))
+ result = acpi_disable_event(event, ACPI_NOT_ISR);
+ else if (!strcmp(buf, "enable\n") &&
+ !(status & ACPI_EVENT_FLAG_ENABLED))
+ result = acpi_enable_event(event, ACPI_NOT_ISR);
+ else if (!strcmp(buf, "clear\n") &&
+ (status & ACPI_EVENT_FLAG_SET))
+ result = acpi_clear_event(event);
+ else
+ all_counters[index].count = strtoul(buf, NULL, 0);
+ } else
+ all_counters[index].count = strtoul(buf, NULL, 0);
+
+ if (ACPI_FAILURE(result))
+ result = -EINVAL;
+end:
+ return result ? result : size;
+}
+
+void acpi_irq_stats_init(void)
+{
+ int i;
+
+ if (all_counters)
+ return;
+
+ num_gpes = count_num_gpes();
+ num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;
+
+ all_attrs = kzalloc(sizeof(struct attribute *) * (num_counters + 1),
+ GFP_KERNEL);
+ if (all_attrs == NULL)
+ return;
+
+ all_counters = kzalloc(sizeof(struct event_counter) * (num_counters),
+ GFP_KERNEL);
+ if (all_counters == NULL)
+ goto fail;
+
+ counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters),
+ GFP_KERNEL);
+ if (counter_attrs == NULL)
+ goto fail;
+
+ for (i = 0; i < num_counters; ++i) {
+ char buffer[12];
+ char *name;
+
+ if (i < num_gpes)
+ sprintf(buffer, "gpe%02X", i);
+ else if (i == num_gpes + ACPI_EVENT_PMTIMER)
+ sprintf(buffer, "ff_pmtimer");
+ else if (i == num_gpes + ACPI_EVENT_GLOBAL)
+ sprintf(buffer, "ff_gbl_lock");
+ else if (i == num_gpes + ACPI_EVENT_POWER_BUTTON)
+ sprintf(buffer, "ff_pwr_btn");
+ else if (i == num_gpes + ACPI_EVENT_SLEEP_BUTTON)
+ sprintf(buffer, "ff_slp_btn");
+ else if (i == num_gpes + ACPI_EVENT_RTC)
+ sprintf(buffer, "ff_rt_clk");
+ else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE)
+ sprintf(buffer, "gpe_all");
+ else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI)
+ sprintf(buffer, "sci");
+ else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR)
+ sprintf(buffer, "error");
+ else
+ sprintf(buffer, "bug%02X", i);
+
+ name = kzalloc(strlen(buffer) + 1, GFP_KERNEL);
+ if (name == NULL)
+ goto fail;
+ strncpy(name, buffer, strlen(buffer) + 1);
+
+ counter_attrs[i].attr.name = name;
+ counter_attrs[i].attr.mode = 0644;
+ counter_attrs[i].show = counter_show;
+ counter_attrs[i].store = counter_set;
+
+ all_attrs[i] = &counter_attrs[i].attr;
+ }
+
+ interrupt_stats_attr_group.attrs = all_attrs;
+ if (!sysfs_create_group(acpi_kobj, &interrupt_stats_attr_group))
+ return;
+
+fail:
+ delete_gpe_attr_array();
+ return;
+}
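+
+/*
+ * acpi_irq_stats_init() thus exposes one file per GPE (gpe00, gpe01, ...),
+ * one per fixed event (ff_pmtimer, ff_gbl_lock, ff_pwr_btn, ff_slp_btn,
+ * ff_rt_clk) and the three aggregate files gpe_all, sci and error, all under
+ * /sys/firmware/acpi/interrupts/.
+ */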
+
+static void __exit interrupt_stats_exit(void)
+{
+ sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);
+
+ delete_gpe_attr_array();
+
+ return;
+}
+
+/* --------------------------------------------------------------------------
+ FS Interface (/proc)
+ -------------------------------------------------------------------------- */
+#ifdef CONFIG_ACPI_PROCFS
+#define ACPI_SYSTEM_FILE_INFO "info"
+#define ACPI_SYSTEM_FILE_EVENT "event"
+#define ACPI_SYSTEM_FILE_DSDT "dsdt"
+#define ACPI_SYSTEM_FILE_FADT "fadt"
+
+static int acpi_system_read_info(struct seq_file *seq, void *offset)
+{
+
+ seq_printf(seq, "version: %x\n", ACPI_CA_VERSION);
+ return 0;
+}
+
+static int acpi_system_info_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_system_read_info, PDE(inode)->data);
+}
+
+static const struct file_operations acpi_system_info_ops = {
+ .owner = THIS_MODULE,
+ .open = acpi_system_info_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static ssize_t acpi_system_read_dsdt(struct file *, char __user *, size_t,
+ loff_t *);
+
+static const struct file_operations acpi_system_dsdt_ops = {
+ .owner = THIS_MODULE,
+ .read = acpi_system_read_dsdt,
+};
+
+static ssize_t
+acpi_system_read_dsdt(struct file *file,
+ char __user * buffer, size_t count, loff_t * ppos)
+{
+ acpi_status status = AE_OK;
+ struct acpi_table_header *dsdt = NULL;
+ ssize_t res;
+
+ status = acpi_get_table(ACPI_SIG_DSDT, 1, &dsdt);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ res = simple_read_from_buffer(buffer, count, ppos, dsdt, dsdt->length);
+
+ return res;
+}
+
+static ssize_t acpi_system_read_fadt(struct file *, char __user *, size_t,
+ loff_t *);
+
+static const struct file_operations acpi_system_fadt_ops = {
+ .owner = THIS_MODULE,
+ .read = acpi_system_read_fadt,
+};
+
+static ssize_t
+acpi_system_read_fadt(struct file *file,
+ char __user * buffer, size_t count, loff_t * ppos)
+{
+ acpi_status status = AE_OK;
+ struct acpi_table_header *fadt = NULL;
+ ssize_t res;
+
+ status = acpi_get_table(ACPI_SIG_FADT, 1, &fadt);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ res = simple_read_from_buffer(buffer, count, ppos, fadt, fadt->length);
+
+ return res;
+}
+
+static int acpi_system_procfs_init(void)
+{
+ struct proc_dir_entry *entry;
+ int error = 0;
+
+ /* 'info' [R] */
+ entry = proc_create(ACPI_SYSTEM_FILE_INFO, S_IRUGO, acpi_root_dir,
+ &acpi_system_info_ops);
+ if (!entry)
+ goto Error;
+
+ /* 'dsdt' [R] */
+ entry = proc_create(ACPI_SYSTEM_FILE_DSDT, S_IRUSR, acpi_root_dir,
+ &acpi_system_dsdt_ops);
+ if (!entry)
+ goto Error;
+
+ /* 'fadt' [R] */
+ entry = proc_create(ACPI_SYSTEM_FILE_FADT, S_IRUSR, acpi_root_dir,
+ &acpi_system_fadt_ops);
+ if (!entry)
+ goto Error;
+
+ Done:
+ return error;
+
+ Error:
+ remove_proc_entry(ACPI_SYSTEM_FILE_FADT, acpi_root_dir);
+ remove_proc_entry(ACPI_SYSTEM_FILE_DSDT, acpi_root_dir);
+ remove_proc_entry(ACPI_SYSTEM_FILE_INFO, acpi_root_dir);
+
+ error = -EFAULT;
+ goto Done;
+}
+#else
+static int acpi_system_procfs_init(void)
+{
+ return 0;
+}
+#endif
+
+static int __init acpi_system_init(void)
+{
+ int result = 0;
+
+ if (acpi_disabled)
+ return 0;
+
+ result = acpi_system_procfs_init();
+ if (result)
+ return result;
+
+ result = acpi_system_sysfs_init();
+
+ return result;
+}
+
+subsys_initcall(acpi_system_init);
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
new file mode 100644
index 0000000..a885295
--- /dev/null
+++ b/drivers/acpi/tables.c
@@ -0,0 +1,319 @@
+/*
+ * acpi_tables.c - ACPI Boot-Time Table Parsing
+ *
+ * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/irq.h>
+#include <linux/errno.h>
+#include <linux/acpi.h>
+#include <linux/bootmem.h>
+
+#define PREFIX "ACPI: "
+
+#define ACPI_MAX_TABLES 128
+
+static char *mps_inti_flags_polarity[] = { "dfl", "high", "res", "low" };
+static char *mps_inti_flags_trigger[] = { "dfl", "edge", "res", "level" };
+
+static struct acpi_table_desc initial_tables[ACPI_MAX_TABLES] __initdata;
+
+static int acpi_apic_instance __initdata;
+
+void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
+{
+ if (!header)
+ return;
+
+ switch (header->type) {
+
+ case ACPI_MADT_TYPE_LOCAL_APIC:
+ {
+ struct acpi_madt_local_apic *p =
+ (struct acpi_madt_local_apic *)header;
+ printk(KERN_INFO PREFIX
+ "LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
+ p->processor_id, p->id,
+ (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ }
+ break;
+
+ case ACPI_MADT_TYPE_IO_APIC:
+ {
+ struct acpi_madt_io_apic *p =
+ (struct acpi_madt_io_apic *)header;
+ printk(KERN_INFO PREFIX
+ "IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n",
+ p->id, p->address, p->global_irq_base);
+ }
+ break;
+
+ case ACPI_MADT_TYPE_INTERRUPT_OVERRIDE:
+ {
+ struct acpi_madt_interrupt_override *p =
+ (struct acpi_madt_interrupt_override *)header;
+ printk(KERN_INFO PREFIX
+ "INT_SRC_OVR (bus %d bus_irq %d global_irq %d %s %s)\n",
+ p->bus, p->source_irq, p->global_irq,
+ mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
+ mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2]);
+ if (p->inti_flags &
+ ~(ACPI_MADT_POLARITY_MASK | ACPI_MADT_TRIGGER_MASK))
+ printk(KERN_INFO PREFIX
+ "INT_SRC_OVR unexpected reserved flags: 0x%x\n",
+ p->inti_flags &
+ ~(ACPI_MADT_POLARITY_MASK | ACPI_MADT_TRIGGER_MASK));
+
+ }
+ break;
+
+ case ACPI_MADT_TYPE_NMI_SOURCE:
+ {
+ struct acpi_madt_nmi_source *p =
+ (struct acpi_madt_nmi_source *)header;
+ printk(KERN_INFO PREFIX
+ "NMI_SRC (%s %s global_irq %d)\n",
+ mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
+ mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
+ p->global_irq);
+ }
+ break;
+
+ case ACPI_MADT_TYPE_LOCAL_APIC_NMI:
+ {
+ struct acpi_madt_local_apic_nmi *p =
+ (struct acpi_madt_local_apic_nmi *)header;
+ printk(KERN_INFO PREFIX
+ "LAPIC_NMI (acpi_id[0x%02x] %s %s lint[0x%x])\n",
+ p->processor_id,
+ mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK ],
+ mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
+ p->lint);
+ }
+ break;
+
+ case ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE:
+ {
+ struct acpi_madt_local_apic_override *p =
+ (struct acpi_madt_local_apic_override *)header;
+ printk(KERN_INFO PREFIX
+ "LAPIC_ADDR_OVR (address[%p])\n",
+ (void *)(unsigned long)p->address);
+ }
+ break;
+
+ case ACPI_MADT_TYPE_IO_SAPIC:
+ {
+ struct acpi_madt_io_sapic *p =
+ (struct acpi_madt_io_sapic *)header;
+ printk(KERN_INFO PREFIX
+ "IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n",
+ p->id, (void *)(unsigned long)p->address,
+ p->global_irq_base);
+ }
+ break;
+
+ case ACPI_MADT_TYPE_LOCAL_SAPIC:
+ {
+ struct acpi_madt_local_sapic *p =
+ (struct acpi_madt_local_sapic *)header;
+ printk(KERN_INFO PREFIX
+ "LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
+ p->processor_id, p->id, p->eid,
+ (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ }
+ break;
+
+ case ACPI_MADT_TYPE_INTERRUPT_SOURCE:
+ {
+ struct acpi_madt_interrupt_source *p =
+ (struct acpi_madt_interrupt_source *)header;
+ printk(KERN_INFO PREFIX
+ "PLAT_INT_SRC (%s %s type[0x%x] id[0x%04x] eid[0x%x] iosapic_vector[0x%x] global_irq[0x%x]\n",
+ mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
+ mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
+ p->type, p->id, p->eid, p->io_sapic_vector,
+ p->global_irq);
+ }
+ break;
+
+ default:
+ printk(KERN_WARNING PREFIX
+ "Found unsupported MADT entry (type = 0x%x)\n",
+ header->type);
+ break;
+ }
+}
+
+
+int __init
+acpi_table_parse_entries(char *id,
+ unsigned long table_size,
+ int entry_id,
+ acpi_table_entry_handler handler,
+ unsigned int max_entries)
+{
+ struct acpi_table_header *table_header = NULL;
+ struct acpi_subtable_header *entry;
+ unsigned int count = 0;
+ unsigned long table_end;
+
+ if (!handler)
+ return -EINVAL;
+
+ if (strncmp(id, ACPI_SIG_MADT, 4) == 0)
+ acpi_get_table(id, acpi_apic_instance, &table_header);
+ else
+ acpi_get_table(id, 0, &table_header);
+
+ if (!table_header) {
+ printk(KERN_WARNING PREFIX "%4.4s not present\n", id);
+ return -ENODEV;
+ }
+
+ table_end = (unsigned long)table_header + table_header->length;
+
+ /* Parse all entries looking for a match. */
+
+ entry = (struct acpi_subtable_header *)
+ ((unsigned long)table_header + table_size);
+
+ while (((unsigned long)entry) + sizeof(struct acpi_subtable_header) <
+ table_end) {
+ if (entry->type == entry_id
+ && (!max_entries || count++ < max_entries))
+ if (handler(entry, table_end))
+ return -EINVAL;
+
+ entry = (struct acpi_subtable_header *)
+ ((unsigned long)entry + entry->length);
+ }
+ if (max_entries && count > max_entries) {
+ printk(KERN_WARNING PREFIX "[%4.4s:0x%02x] ignored %i entries of "
+ "%i found\n", id, entry_id, count - max_entries, count);
+ }
+
+ return count;
+}
+
+int __init
+acpi_table_parse_madt(enum acpi_madt_type id,
+ acpi_table_entry_handler handler, unsigned int max_entries)
+{
+ return acpi_table_parse_entries(ACPI_SIG_MADT,
+ sizeof(struct acpi_table_madt), id,
+ handler, max_entries);
+}
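+
+/*
+ * Illustrative sketch (the handler name here is made up): a caller that wants
+ * to visit every local APIC entry in the MADT could do
+ *
+ *	static int __init print_lapic(struct acpi_subtable_header *header,
+ *				      const unsigned long end)
+ *	{
+ *		acpi_table_print_madt_entry(header);
+ *		return 0;
+ *	}
+ *
+ *	acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, print_lapic, 0);
+ */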
+
+/**
+ * acpi_table_parse - find table with @id, run @handler on it
+ *
+ * @id: table id to find
+ * @handler: handler to run
+ *
+ * Scan the ACPI System Description Table (SDT) for a table matching @id and
+ * run @handler on it. Return 0 if the table is found, 1 if not.
+ */
+int __init acpi_table_parse(char *id, acpi_table_handler handler)
+{
+ struct acpi_table_header *table = NULL;
+
+ if (!handler)
+ return -EINVAL;
+
+ if (strncmp(id, ACPI_SIG_MADT, 4) == 0)
+ acpi_get_table(id, acpi_apic_instance, &table);
+ else
+ acpi_get_table(id, 0, &table);
+
+ if (table) {
+ handler(table);
+ return 0;
+ } else
+ return 1;
+}
+
+/*
+ * The BIOS is supposed to supply a single APIC/MADT,
+ * but some report two. Provide a knob to use either.
+ * (don't you wish instance 0 and 1 were not the same?)
+ */
+static void __init check_multiple_madt(void)
+{
+ struct acpi_table_header *table = NULL;
+
+ acpi_get_table(ACPI_SIG_MADT, 2, &table);
+ if (table) {
+ printk(KERN_WARNING PREFIX
+ "BIOS bug: multiple APIC/MADT found,"
+ " using %d\n", acpi_apic_instance);
+ printk(KERN_WARNING PREFIX
+ "If \"acpi_apic_instance=%d\" works better, "
+ "notify linux-acpi@vger.kernel.org\n",
+ acpi_apic_instance ? 0 : 2);
+
+ } else
+ acpi_apic_instance = 0;
+
+ return;
+}
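+
+/*
+ * For example, booting with "acpi_apic_instance=2" (parsed by
+ * acpi_parse_apic_instance() below) makes the MADT lookups above use the
+ * second MADT instance instead of the first.
+ */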
+
+/*
+ * acpi_table_init()
+ *
+ * Find the RSDP, then find and checksum the RSDT/XSDT.
+ * Checksum all tables, print the RSDT/XSDT.
+ *
+ * Result: the root table list (initial_tables[]) is initialized.
+ */
+
+int __init acpi_table_init(void)
+{
+ acpi_status status;
+
+ status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
+ if (ACPI_FAILURE(status))
+ return 1;
+
+ check_multiple_madt();
+ return 0;
+}
+
+static int __init acpi_parse_apic_instance(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ acpi_apic_instance = simple_strtoul(str, NULL, 0);
+
+ printk(KERN_NOTICE PREFIX "Shall use APIC/MADT table %d\n",
+ acpi_apic_instance);
+
+ return 0;
+}
+
+early_param("acpi_apic_instance", acpi_parse_apic_instance);
diff --git a/drivers/acpi/tables/Makefile b/drivers/acpi/tables/Makefile
new file mode 100644
index 0000000..7385efa
--- /dev/null
+++ b/drivers/acpi/tables/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for all Linux ACPI interpreter subdirectories
+#
+
+obj-y := tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o
+
+EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/tables/tbfadt.c b/drivers/acpi/tables/tbfadt.c
new file mode 100644
index 0000000..2817158
--- /dev/null
+++ b/drivers/acpi/tables/tbfadt.c
@@ -0,0 +1,474 @@
+/******************************************************************************
+ *
+ * Module Name: tbfadt - FADT table utilities
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/actables.h>
+
+#define _COMPONENT ACPI_TABLES
+ACPI_MODULE_NAME("tbfadt")
+
+/* Local prototypes */
+static inline void
+acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
+ u8 byte_width, u64 address);
+
+static void acpi_tb_convert_fadt(void);
+
+static void acpi_tb_validate_fadt(void);
+
+/* Table for conversion of FADT to common internal format and FADT validation */
+
+typedef struct acpi_fadt_info {
+ char *name;
+ u8 target;
+ u8 source;
+ u8 length;
+ u8 type;
+
+} acpi_fadt_info;
+
+#define ACPI_FADT_REQUIRED 1
+#define ACPI_FADT_SEPARATE_LENGTH 2
+
+static struct acpi_fadt_info fadt_info_table[] = {
+ {"Pm1aEventBlock", ACPI_FADT_OFFSET(xpm1a_event_block),
+ ACPI_FADT_OFFSET(pm1a_event_block),
+ ACPI_FADT_OFFSET(pm1_event_length), ACPI_FADT_REQUIRED},
+
+ {"Pm1bEventBlock", ACPI_FADT_OFFSET(xpm1b_event_block),
+ ACPI_FADT_OFFSET(pm1b_event_block),
+ ACPI_FADT_OFFSET(pm1_event_length), 0},
+
+ {"Pm1aControlBlock", ACPI_FADT_OFFSET(xpm1a_control_block),
+ ACPI_FADT_OFFSET(pm1a_control_block),
+ ACPI_FADT_OFFSET(pm1_control_length), ACPI_FADT_REQUIRED},
+
+ {"Pm1bControlBlock", ACPI_FADT_OFFSET(xpm1b_control_block),
+ ACPI_FADT_OFFSET(pm1b_control_block),
+ ACPI_FADT_OFFSET(pm1_control_length), 0},
+
+ {"Pm2ControlBlock", ACPI_FADT_OFFSET(xpm2_control_block),
+ ACPI_FADT_OFFSET(pm2_control_block),
+ ACPI_FADT_OFFSET(pm2_control_length), ACPI_FADT_SEPARATE_LENGTH},
+
+ {"PmTimerBlock", ACPI_FADT_OFFSET(xpm_timer_block),
+ ACPI_FADT_OFFSET(pm_timer_block),
+ ACPI_FADT_OFFSET(pm_timer_length), ACPI_FADT_REQUIRED},
+
+ {"Gpe0Block", ACPI_FADT_OFFSET(xgpe0_block),
+ ACPI_FADT_OFFSET(gpe0_block),
+ ACPI_FADT_OFFSET(gpe0_block_length), ACPI_FADT_SEPARATE_LENGTH},
+
+ {"Gpe1Block", ACPI_FADT_OFFSET(xgpe1_block),
+ ACPI_FADT_OFFSET(gpe1_block),
+ ACPI_FADT_OFFSET(gpe1_block_length), ACPI_FADT_SEPARATE_LENGTH}
+};
+
+#define ACPI_FADT_INFO_ENTRIES (sizeof (fadt_info_table) / sizeof (struct acpi_fadt_info))
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_init_generic_address
+ *
+ * PARAMETERS: generic_address - GAS struct to be initialized
+ * byte_width - Width of this register
+ * Address - Address of the register
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Initialize a Generic Address Structure (GAS)
+ * See the ACPI specification for a full description and
+ * definition of this structure.
+ *
+ ******************************************************************************/
+
+static inline void
+acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
+ u8 byte_width, u64 address)
+{
+
+ /*
+ * The 64-bit Address field is non-aligned in the byte packed
+ * GAS struct.
+ */
+ ACPI_MOVE_64_TO_64(&generic_address->address, &address);
+
+ /* All other fields are byte-wide */
+
+ generic_address->space_id = ACPI_ADR_SPACE_SYSTEM_IO;
+ generic_address->bit_width = byte_width << 3;
+ generic_address->bit_offset = 0;
+ generic_address->access_width = 0;
+}
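+
+/*
+ * For example, acpi_tb_init_generic_address(&gas, 4, 0x1008) (an arbitrary
+ * address chosen for illustration) yields a GAS describing a 32-bit register
+ * in SystemIO space at 0x1008, with bit_offset and access_width of zero.
+ */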
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_parse_fadt
+ *
+ * PARAMETERS: table_index - Index for the FADT
+ * Flags - Flags
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Initialize the FADT, DSDT and FACS tables
+ * (FADT contains the addresses of the DSDT and FACS)
+ *
+ ******************************************************************************/
+
+void acpi_tb_parse_fadt(u32 table_index, u8 flags)
+{
+ u32 length;
+ struct acpi_table_header *table;
+
+ /*
+ * The FADT has multiple versions with different lengths,
+ * and it contains pointers to both the DSDT and FACS tables.
+ *
+ * Get a local copy of the FADT and convert it to a common format.
+ * Map the entire FADT; it is assumed to be smaller than one page.
+ */
+ length = acpi_gbl_root_table_list.tables[table_index].length;
+
+ table =
+ acpi_os_map_memory(acpi_gbl_root_table_list.tables[table_index].
+ address, length);
+ if (!table) {
+ return;
+ }
+
+ /*
+ * Validate the FADT checksum before we copy the table. Ignore
+ * checksum error as we want to try to get the DSDT and FACS.
+ */
+ (void)acpi_tb_verify_checksum(table, length);
+
+ /* Obtain a local copy of the FADT in common ACPI 2.0+ format */
+
+ acpi_tb_create_local_fadt(table, length);
+
+ /* All done with the real FADT, unmap it */
+
+ acpi_os_unmap_memory(table, length);
+
+ /* Obtain the DSDT and FACS tables via their addresses within the FADT */
+
+ acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt,
+ flags, ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT);
+
+ acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xfacs,
+ flags, ACPI_SIG_FACS, ACPI_TABLE_INDEX_FACS);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_create_local_fadt
+ *
+ * PARAMETERS: Table - Pointer to BIOS FADT
+ * Length - Length of the table
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Get a local copy of the FADT and convert it to a common format.
+ * Performs validation on some important FADT fields.
+ *
+ * NOTE: We create a local copy of the FADT regardless of the version.
+ *
+ ******************************************************************************/
+
+void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
+{
+
+ /*
+ * Check if the FADT is larger than the largest table that we expect
+ * (the ACPI 2.0/3.0 version). If so, truncate the table, and issue
+ * a warning.
+ */
+ if (length > sizeof(struct acpi_table_fadt)) {
+ ACPI_WARNING((AE_INFO,
+ "FADT (revision %u) is longer than ACPI 2.0 version, truncating length 0x%X to 0x%zX",
+ table->revision, (unsigned)length,
+ sizeof(struct acpi_table_fadt)));
+ }
+
+ /* Clear the entire local FADT */
+
+ ACPI_MEMSET(&acpi_gbl_FADT, 0, sizeof(struct acpi_table_fadt));
+
+ /* Copy the original FADT, up to sizeof (struct acpi_table_fadt) */
+
+ ACPI_MEMCPY(&acpi_gbl_FADT, table,
+ ACPI_MIN(length, sizeof(struct acpi_table_fadt)));
+
+ /*
+ * 1) Convert the local copy of the FADT to the common internal format
+ * 2) Validate some of the important values within the FADT
+ */
+ acpi_tb_convert_fadt();
+ acpi_tb_validate_fadt();
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_convert_fadt
+ *
+ * PARAMETERS: None, uses acpi_gbl_FADT
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Converts all versions of the FADT to a common internal format.
+ * Expand all 32-bit addresses to 64-bit.
+ *
+ * NOTE: acpi_gbl_FADT must be of size (struct acpi_table_fadt),
+ * and must contain a copy of the actual FADT.
+ *
+ * ACPICA will use the "X" fields of the FADT for all addresses.
+ *
+ * "X" fields are optional extensions to the original V1.0 fields. Even if
+ * they are present in the structure, they can be optionally not used by
+ * setting them to zero. Therefore, we must selectively expand V1.0 fields
+ * if the corresponding X field is zero.
+ *
+ * For ACPI 1.0 FADTs, all address fields are expanded to the corresponding
+ * "X" fields.
+ *
+ * For ACPI 2.0 FADTs, any "X" fields that are NULL are filled in by
+ * expanding the corresponding ACPI 1.0 field.
+ *
+ ******************************************************************************/
+
+static void acpi_tb_convert_fadt(void)
+{
+ u8 pm1_register_length;
+ struct acpi_generic_address *target;
+ u32 i;
+
+ /* Update the local FADT table header length */
+
+ acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
+
+ /* Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary */
+
+ if (!acpi_gbl_FADT.Xfacs) {
+ acpi_gbl_FADT.Xfacs = (u64) acpi_gbl_FADT.facs;
+ }
+
+ if (!acpi_gbl_FADT.Xdsdt) {
+ acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt;
+ }
+
+ /*
+ * For ACPI 1.0 FADTs (revision 1 or 2), ensure that reserved fields which
+ * should be zero are indeed zero. This works around BIOSes that
+ * inadvertently place values in these fields.
+ *
+ * The ACPI 1.0 reserved fields that will be zeroed are the bytes located at
+ * offsets 45, 55 and 95, and the word located at offsets 109-110.
+ */
+ if (acpi_gbl_FADT.header.revision < FADT2_REVISION_ID) {
+ acpi_gbl_FADT.preferred_profile = 0;
+ acpi_gbl_FADT.pstate_control = 0;
+ acpi_gbl_FADT.cst_control = 0;
+ acpi_gbl_FADT.boot_flags = 0;
+ }
+
+ /*
+ * Expand the ACPI 1.0 32-bit V1.0 addresses to the ACPI 2.0 64-bit "X"
+ * generic address structures as necessary.
+ */
+ for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
+ target =
+ ACPI_ADD_PTR(struct acpi_generic_address, &acpi_gbl_FADT,
+ fadt_info_table[i].target);
+
+ /* Expand only if the X target is null */
+
+ if (!target->address) {
+ acpi_tb_init_generic_address(target,
+ *ACPI_ADD_PTR(u8,
+ &acpi_gbl_FADT,
+ fadt_info_table
+ [i].length),
+ (u64) * ACPI_ADD_PTR(u32,
+ &acpi_gbl_FADT,
+ fadt_info_table
+ [i].
+ source));
+ }
+ }
+
+ /*
+ * Calculate separate GAS structs for the PM1 Enable registers.
+ * These addresses do not appear (directly) in the FADT, so it is
+ * useful to calculate them once, here.
+ *
+ * The PM event blocks are split into two register blocks, first is the
+ * PM Status Register block, followed immediately by the PM Enable
+ * Register block. Each is of length (xpm1x_event_block.bit_width/2).
+ *
+ * On various systems the v2 fields (and particularly the bit widths)
+ * cannot be relied upon, though. Hence resort to using the v1 length
+ * here (and warn about the inconsistency).
+ */
+ if (acpi_gbl_FADT.xpm1a_event_block.bit_width
+ != acpi_gbl_FADT.pm1_event_length * 8)
+ printk(KERN_WARNING "FADT: "
+ "X_PM1a_EVT_BLK.bit_width (%u) does not match"
+ " PM1_EVT_LEN (%u)\n",
+ acpi_gbl_FADT.xpm1a_event_block.bit_width,
+ acpi_gbl_FADT.pm1_event_length);
+ pm1_register_length = (u8) ACPI_DIV_2(acpi_gbl_FADT.pm1_event_length);
+
+ /* The PM1A register block is required */
+
+ acpi_tb_init_generic_address(&acpi_gbl_xpm1a_enable,
+ pm1_register_length,
+ (acpi_gbl_FADT.xpm1a_event_block.address +
+ pm1_register_length));
+ /* Don't forget to copy space_id of the GAS */
+ acpi_gbl_xpm1a_enable.space_id =
+ acpi_gbl_FADT.xpm1a_event_block.space_id;
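+ /*
+ * For example, with pm1_event_length == 4 each half is 2 bytes, so if
+ * X_PM1a_EVT_BLK starts at (say) I/O port 0x1000, the derived PM1a enable
+ * GAS covers port 0x1002 with a 16-bit width.
+ */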
+
+ /* The PM1B register block is optional, ignore if not present */
+
+ if (acpi_gbl_FADT.xpm1b_event_block.address) {
+ if (acpi_gbl_FADT.xpm1b_event_block.bit_width
+ != acpi_gbl_FADT.pm1_event_length * 8)
+ printk(KERN_WARNING "FADT: "
+ "X_PM1b_EVT_BLK.bit_width (%u) does not match"
+ " PM1_EVT_LEN (%u)\n",
+ acpi_gbl_FADT.xpm1b_event_block.bit_width,
+ acpi_gbl_FADT.pm1_event_length);
+ acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable,
+ pm1_register_length,
+ (acpi_gbl_FADT.xpm1b_event_block.
+ address + pm1_register_length));
+ /* Don't forget to copy space_id of the GAS */
+ acpi_gbl_xpm1b_enable.space_id =
+ acpi_gbl_FADT.xpm1b_event_block.space_id;
+
+ }
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_tb_validate_fadt
+ *
+ * PARAMETERS: Table - Pointer to the FADT to be validated
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Validate various important fields within the FADT. If a problem
+ * is found, issue a message, but no status is returned.
+ * Used by both the table manager and the disassembler.
+ *
+ * Possible additional checks:
+ * (acpi_gbl_FADT.pm1_event_length >= 4)
+ * (acpi_gbl_FADT.pm1_control_length >= 2)
+ * (acpi_gbl_FADT.pm_timer_length >= 4)
+ * GPE block lengths must be multiples of 2
+ *
+ ******************************************************************************/
+
+static void acpi_tb_validate_fadt(void)
+{
+ u32 *address32;
+ struct acpi_generic_address *address64;
+ u8 length;
+ u32 i;
+
+ /* Examine all of the 64-bit extended address fields (X fields) */
+
+ for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
+
+ /* Generate pointers to the 32-bit and 64-bit addresses and get the length */
+
+ address64 =
+ ACPI_ADD_PTR(struct acpi_generic_address, &acpi_gbl_FADT,
+ fadt_info_table[i].target);
+ address32 =
+ ACPI_ADD_PTR(u32, &acpi_gbl_FADT,
+ fadt_info_table[i].source);
+ length =
+ *ACPI_ADD_PTR(u8, &acpi_gbl_FADT,
+ fadt_info_table[i].length);
+
+ if (fadt_info_table[i].type & ACPI_FADT_REQUIRED) {
+ /*
+ * Field is required (Pm1a_event, Pm1a_control, pm_timer).
+ * Both the address and length must be non-zero.
+ */
+ if (!address64->address || !length) {
+ ACPI_ERROR((AE_INFO,
+ "Required field \"%s\" has zero address and/or length: %8.8X%8.8X/%X",
+ fadt_info_table[i].name,
+ ACPI_FORMAT_UINT64(address64->
+ address),
+ length));
+ }
+ } else if (fadt_info_table[i].type & ACPI_FADT_SEPARATE_LENGTH) {
+ /*
+ * Field is optional (PM2Control, GPE0, GPE1) AND has its own
+ * length field. If present, both the address and length must be valid.
+ */
+ if ((address64->address && !length)
+ || (!address64->address && length)) {
+ ACPI_WARNING((AE_INFO,
+ "Optional field \"%s\" has zero address or length: %8.8X%8.8X/%X",
+ fadt_info_table[i].name,
+ ACPI_FORMAT_UINT64(address64->
+ address),
+ length));
+ }
+ }
+
+ /* If both 32- and 64-bit addresses are valid (non-zero), they must match */
+
+ if (address64->address && *address32 &&
+ (address64->address != (u64) * address32)) {
+ ACPI_ERROR((AE_INFO,
+ "32/64X address mismatch in \"%s\": [%8.8X] [%8.8X%8.8X], using 64X",
+ fadt_info_table[i].name, *address32,
+ ACPI_FORMAT_UINT64(address64->address)));
+ }
+ }
+}
diff --git a/drivers/acpi/tables/tbfind.c b/drivers/acpi/tables/tbfind.c
new file mode 100644
index 0000000..531584d
--- /dev/null
+++ b/drivers/acpi/tables/tbfind.c
@@ -0,0 +1,139 @@
+/******************************************************************************
+ *
+ * Module Name: tbfind - find table
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/actables.h>
+
+#define _COMPONENT ACPI_TABLES
+ACPI_MODULE_NAME("tbfind")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_find_table
+ *
+ * PARAMETERS: Signature - String with ACPI table signature
+ * oem_id - String with the table OEM ID
+ * oem_table_id - String with the OEM Table ID
+ * table_index - Where the table index is returned
+ *
+ * RETURN: Status and table index
+ *
+ * DESCRIPTION: Find an ACPI table (in the RSDT/XSDT) that matches the
+ * Signature, OEM ID and OEM Table ID. Returns an index that can
+ * be used to get the table header or entire table.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_tb_find_table(char *signature,
+ char *oem_id, char *oem_table_id, u32 *table_index)
+{
+ u32 i;
+ acpi_status status;
+ struct acpi_table_header header;
+
+ ACPI_FUNCTION_TRACE(tb_find_table);
+
+ /* Normalize the input strings */
+
+ ACPI_MEMSET(&header, 0, sizeof(struct acpi_table_header));
+ ACPI_STRNCPY(header.signature, signature, ACPI_NAME_SIZE);
+ ACPI_STRNCPY(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
+ ACPI_STRNCPY(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
+
+ /* Search for the table */
+
+ for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+ if (ACPI_MEMCMP(&(acpi_gbl_root_table_list.tables[i].signature),
+ header.signature, ACPI_NAME_SIZE)) {
+
+ /* Not the requested table */
+
+ continue;
+ }
+
+ /* Table with matching signature has been found */
+
+ if (!acpi_gbl_root_table_list.tables[i].pointer) {
+
+ /* Table is not currently mapped, map it */
+
+ status =
+ acpi_tb_verify_table(&acpi_gbl_root_table_list.
+ tables[i]);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ if (!acpi_gbl_root_table_list.tables[i].pointer) {
+ continue;
+ }
+ }
+
+ /* Check for table match on all IDs */
+
+ if (!ACPI_MEMCMP
+ (acpi_gbl_root_table_list.tables[i].pointer->signature,
+ header.signature, ACPI_NAME_SIZE) && (!oem_id[0]
+ ||
+ !ACPI_MEMCMP
+ (acpi_gbl_root_table_list.
+ tables[i].pointer->
+ oem_id,
+ header.oem_id,
+ ACPI_OEM_ID_SIZE))
+ && (!oem_table_id[0]
+ || !ACPI_MEMCMP(acpi_gbl_root_table_list.tables[i].
+ pointer->oem_table_id,
+ header.oem_table_id,
+ ACPI_OEM_TABLE_ID_SIZE))) {
+ *table_index = i;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
+ "Found table [%4.4s]\n",
+ header.signature));
+ return_ACPI_STATUS(AE_OK);
+ }
+ }
+
+ return_ACPI_STATUS(AE_NOT_FOUND);
+}
diff --git a/drivers/acpi/tables/tbinstal.c b/drivers/acpi/tables/tbinstal.c
new file mode 100644
index 0000000..18747ce
--- /dev/null
+++ b/drivers/acpi/tables/tbinstal.c
@@ -0,0 +1,573 @@
+/******************************************************************************
+ *
+ * Module Name: tbinstal - ACPI table installation and removal
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+#include <acpi/actables.h>
+
+#define _COMPONENT ACPI_TABLES
+ACPI_MODULE_NAME("tbinstal")
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_tb_verify_table
+ *
+ * PARAMETERS: table_desc - table
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: this function is called to verify and map table
+ *
+ *****************************************************************************/
+acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(tb_verify_table);
+
+ /* Map the table if necessary */
+
+ if (!table_desc->pointer) {
+ if ((table_desc->flags & ACPI_TABLE_ORIGIN_MASK) ==
+ ACPI_TABLE_ORIGIN_MAPPED) {
+ table_desc->pointer =
+ acpi_os_map_memory(table_desc->address,
+ table_desc->length);
+ }
+ if (!table_desc->pointer) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+ }
+
+ /* FACS is the odd table, has no standard ACPI header and no checksum */
+
+ if (!ACPI_COMPARE_NAME(&table_desc->signature, ACPI_SIG_FACS)) {
+
+ /* Always calculate checksum, ignore bad checksum if requested */
+
+ status =
+ acpi_tb_verify_checksum(table_desc->pointer,
+ table_desc->length);
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_add_table
+ *
+ * PARAMETERS: table_desc - Table descriptor
+ * table_index - Where the table index is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function is called to add the ACPI table
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
+{
+ u32 i;
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(tb_add_table);
+
+ if (!table_desc->pointer) {
+ status = acpi_tb_verify_table(table_desc);
+ if (ACPI_FAILURE(status) || !table_desc->pointer) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /*
+ * Originally, we checked the table signature for "SSDT" or "PSDT" here.
+ * Next, we added support for OEMx tables, signature "OEM".
+ * Valid tables were encountered with a null signature, so we've just
+ * given up on validating the signature, since it seems to be a waste
+ * of code. The original code was removed (05/2008).
+ */
+
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+ /* Check if table is already registered */
+
+ for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+ if (!acpi_gbl_root_table_list.tables[i].pointer) {
+ status =
+ acpi_tb_verify_table(&acpi_gbl_root_table_list.
+ tables[i]);
+ if (ACPI_FAILURE(status)
+ || !acpi_gbl_root_table_list.tables[i].pointer) {
+ continue;
+ }
+ }
+
+ /*
+ * Check for a table match on the entire table length,
+ * not just the header.
+ */
+ if (table_desc->length !=
+ acpi_gbl_root_table_list.tables[i].length) {
+ continue;
+ }
+
+ if (ACPI_MEMCMP(table_desc->pointer,
+ acpi_gbl_root_table_list.tables[i].pointer,
+ acpi_gbl_root_table_list.tables[i].length)) {
+ continue;
+ }
+
+ /*
+ * Note: the current mechanism does not unregister a table if it is
+ * dynamically unloaded. The related namespace entries are deleted,
+ * but the table remains in the root table list.
+ *
+ * The assumption here is that the number of different tables that
+ * will be loaded is actually small, and there is minimal overhead
+ * in just keeping the table in case it is needed again.
+ *
+ * If this assumption changes in the future (perhaps on large
+ * machines with many table load/unload operations), tables will
+ * need to be unregistered when they are unloaded, and slots in the
+ * root table list should be reused when empty.
+ */
+
+ /*
+ * Table is already registered.
+ * We can delete the table that was passed as a parameter.
+ */
+ acpi_tb_delete_table(table_desc);
+ *table_index = i;
+
+ if (acpi_gbl_root_table_list.tables[i].
+ flags & ACPI_TABLE_IS_LOADED) {
+
+ /* Table is still loaded, this is an error */
+
+ status = AE_ALREADY_EXISTS;
+ goto release;
+ } else {
+ /* Table was unloaded, allow it to be reloaded */
+
+ table_desc->pointer =
+ acpi_gbl_root_table_list.tables[i].pointer;
+ table_desc->address =
+ acpi_gbl_root_table_list.tables[i].address;
+ status = AE_OK;
+ goto print_header;
+ }
+ }
+
+ /* Add the table to the global root table list */
+
+ status = acpi_tb_store_table(table_desc->address, table_desc->pointer,
+ table_desc->length, table_desc->flags,
+ table_index);
+ if (ACPI_FAILURE(status)) {
+ goto release;
+ }
+
+ print_header:
+ acpi_tb_print_table_header(table_desc->address, table_desc->pointer);
+
+ release:
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_resize_root_table_list
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Expand the size of global table array
+ *
+ ******************************************************************************/
+
+acpi_status acpi_tb_resize_root_table_list(void)
+{
+ struct acpi_table_desc *tables;
+
+ ACPI_FUNCTION_TRACE(tb_resize_root_table_list);
+
+ /* allow_resize flag is a parameter to acpi_initialize_tables */
+
+ if (!(acpi_gbl_root_table_list.flags & ACPI_ROOT_ALLOW_RESIZE)) {
+ ACPI_ERROR((AE_INFO,
+ "Resize of Root Table Array is not allowed"));
+ return_ACPI_STATUS(AE_SUPPORT);
+ }
+
+ /* Increase the Table Array size */
+
+ tables = ACPI_ALLOCATE_ZEROED(((acpi_size) acpi_gbl_root_table_list.
+ size + ACPI_ROOT_TABLE_SIZE_INCREMENT)
+ * sizeof(struct acpi_table_desc));
+ if (!tables) {
+ ACPI_ERROR((AE_INFO,
+ "Could not allocate new root table array"));
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Copy and free the previous table array */
+
+ if (acpi_gbl_root_table_list.tables) {
+ ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables,
+ (acpi_size) acpi_gbl_root_table_list.size *
+ sizeof(struct acpi_table_desc));
+
+ if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
+ ACPI_FREE(acpi_gbl_root_table_list.tables);
+ }
+ }
+
+ acpi_gbl_root_table_list.tables = tables;
+ acpi_gbl_root_table_list.size += ACPI_ROOT_TABLE_SIZE_INCREMENT;
+ acpi_gbl_root_table_list.flags |= (u8) ACPI_ROOT_ORIGIN_ALLOCATED;
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_store_table
+ *
+ * PARAMETERS: Address - Table address
+ * Table - Table header
+ * Length - Table length
+ * Flags - flags
+ *
+ * RETURN: Status and table index.
+ *
+ * DESCRIPTION: Add an ACPI table to the global table list
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_tb_store_table(acpi_physical_address address,
+ struct acpi_table_header *table,
+ u32 length, u8 flags, u32 *table_index)
+{
+ acpi_status status = AE_OK;
+
+ /* Ensure that there is room for the table in the Root Table List */
+
+ if (acpi_gbl_root_table_list.count >= acpi_gbl_root_table_list.size) {
+ status = acpi_tb_resize_root_table_list();
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+
+ /* Initialize added table */
+
+ acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
+ address = address;
+ acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
+ pointer = table;
+ acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].length =
+ length;
+ acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
+ owner_id = 0;
+ acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].flags =
+ flags;
+
+ ACPI_MOVE_32_TO_32(&
+ (acpi_gbl_root_table_list.
+ tables[acpi_gbl_root_table_list.count].signature),
+ table->signature);
+
+ *table_index = acpi_gbl_root_table_list.count;
+ acpi_gbl_root_table_list.count++;
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_delete_table
+ *
+ * PARAMETERS: table_index - Table index
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Delete one internal ACPI table
+ *
+ ******************************************************************************/
+
+void acpi_tb_delete_table(struct acpi_table_desc *table_desc)
+{
+ /* Table must be mapped or allocated */
+ if (!table_desc->pointer) {
+ return;
+ }
+ switch (table_desc->flags & ACPI_TABLE_ORIGIN_MASK) {
+ case ACPI_TABLE_ORIGIN_MAPPED:
+ acpi_os_unmap_memory(table_desc->pointer, table_desc->length);
+ break;
+ case ACPI_TABLE_ORIGIN_ALLOCATED:
+ ACPI_FREE(table_desc->pointer);
+ break;
+ default:;
+ }
+
+ table_desc->pointer = NULL;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_terminate
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Delete all internal ACPI tables
+ *
+ ******************************************************************************/
+
+void acpi_tb_terminate(void)
+{
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(tb_terminate);
+
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+ /* Delete the individual tables */
+
+ for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+ acpi_tb_delete_table(&acpi_gbl_root_table_list.tables[i]);
+ }
+
+ /*
+ * Delete the root table array if allocated locally. Array cannot be
+ * mapped, so we don't need to check for that flag.
+ */
+ if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
+ ACPI_FREE(acpi_gbl_root_table_list.tables);
+ }
+
+ acpi_gbl_root_table_list.tables = NULL;
+ acpi_gbl_root_table_list.flags = 0;
+ acpi_gbl_root_table_list.count = 0;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n"));
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_delete_namespace_by_owner
+ *
+ * PARAMETERS: table_index - Table index
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Delete all namespace objects created when this table was loaded.
+ *
+ ******************************************************************************/
+
+void acpi_tb_delete_namespace_by_owner(u32 table_index)
+{
+ acpi_owner_id owner_id;
+
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+ if (table_index < acpi_gbl_root_table_list.count) {
+ owner_id =
+ acpi_gbl_root_table_list.tables[table_index].owner_id;
+ } else {
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ return;
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ acpi_ns_delete_namespace_by_owner(owner_id);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_allocate_owner_id
+ *
+ * PARAMETERS: table_index - Table index
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Allocates owner_id in table_desc
+ *
+ ******************************************************************************/
+
+acpi_status acpi_tb_allocate_owner_id(u32 table_index)
+{
+ acpi_status status = AE_BAD_PARAMETER;
+
+ ACPI_FUNCTION_TRACE(tb_allocate_owner_id);
+
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+ if (table_index < acpi_gbl_root_table_list.count) {
+ status = acpi_ut_allocate_owner_id
+ (&(acpi_gbl_root_table_list.tables[table_index].owner_id));
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_release_owner_id
+ *
+ * PARAMETERS: table_index - Table index
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Releases owner_id in table_desc
+ *
+ ******************************************************************************/
+
+acpi_status acpi_tb_release_owner_id(u32 table_index)
+{
+ acpi_status status = AE_BAD_PARAMETER;
+
+ ACPI_FUNCTION_TRACE(tb_release_owner_id);
+
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+ if (table_index < acpi_gbl_root_table_list.count) {
+ acpi_ut_release_owner_id(&
+ (acpi_gbl_root_table_list.
+ tables[table_index].owner_id));
+ status = AE_OK;
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_get_owner_id
+ *
+ * PARAMETERS: table_index - Table index
+ * owner_id - Where the table owner_id is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: returns owner_id for the ACPI table
+ *
+ ******************************************************************************/
+
+acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id)
+{
+ acpi_status status = AE_BAD_PARAMETER;
+
+ ACPI_FUNCTION_TRACE(tb_get_owner_id);
+
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+ if (table_index < acpi_gbl_root_table_list.count) {
+ *owner_id =
+ acpi_gbl_root_table_list.tables[table_index].owner_id;
+ status = AE_OK;
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_is_table_loaded
+ *
+ * PARAMETERS: table_index - Table index
+ *
+ * RETURN: Table Loaded Flag
+ *
+ ******************************************************************************/
+
+u8 acpi_tb_is_table_loaded(u32 table_index)
+{
+ u8 is_loaded = FALSE;
+
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+ if (table_index < acpi_gbl_root_table_list.count) {
+ is_loaded = (u8)
+ (acpi_gbl_root_table_list.tables[table_index].
+ flags & ACPI_TABLE_IS_LOADED);
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ return (is_loaded);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_set_table_loaded_flag
+ *
+ * PARAMETERS: table_index - Table index
+ * is_loaded - TRUE if table is loaded, FALSE otherwise
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Sets the table loaded flag to either TRUE or FALSE.
+ *
+ ******************************************************************************/
+
+void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded)
+{
+
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+ if (table_index < acpi_gbl_root_table_list.count) {
+ if (is_loaded) {
+ acpi_gbl_root_table_list.tables[table_index].flags |=
+ ACPI_TABLE_IS_LOADED;
+ } else {
+ acpi_gbl_root_table_list.tables[table_index].flags &=
+ ~ACPI_TABLE_IS_LOADED;
+ }
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+}
diff --git a/drivers/acpi/tables/tbutils.c b/drivers/acpi/tables/tbutils.c
new file mode 100644
index 0000000..f5d7aec
--- /dev/null
+++ b/drivers/acpi/tables/tbutils.c
@@ -0,0 +1,556 @@
+/******************************************************************************
+ *
+ * Module Name: tbutils - table utilities
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/actables.h>
+
+#define _COMPONENT ACPI_TABLES
+ACPI_MODULE_NAME("tbutils")
+
+/* Local prototypes */
+static acpi_physical_address
+acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_check_xsdt
+ *
+ * PARAMETERS: address - Pointer to the XSDT
+ *
+ * RETURN: Status
+ *         AE_OK - XSDT is okay
+ *         AE_NO_MEMORY - cannot map the XSDT
+ *         AE_INVALID_TABLE_LENGTH - invalid table length
+ *         AE_NULL_ENTRY - XSDT contains a NULL entry
+ *
+ * DESCRIPTION: Validate the XSDT
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_tb_check_xsdt(acpi_physical_address address)
+{
+ struct acpi_table_header *table;
+ u32 length;
+ u64 xsdt_entry_address;
+ u8 *table_entry;
+ u32 table_count;
+ int i;
+
+ table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
+ if (!table)
+ return AE_NO_MEMORY;
+
+ length = table->length;
+ acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
+ if (length < sizeof(struct acpi_table_header))
+ return AE_INVALID_TABLE_LENGTH;
+
+ table = acpi_os_map_memory(address, length);
+ if (!table)
+ return AE_NO_MEMORY;
+
+ /* Calculate the number of tables described in XSDT */
+ table_count =
+ (u32) ((table->length -
+ sizeof(struct acpi_table_header)) / sizeof(u64));
+ table_entry =
+ ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header);
+ for (i = 0; i < table_count; i++) {
+ ACPI_MOVE_64_TO_64(&xsdt_entry_address, table_entry);
+ if (!xsdt_entry_address) {
+ /* XSDT has NULL entry */
+ break;
+ }
+ table_entry += sizeof(u64);
+ }
+ acpi_os_unmap_memory(table, length);
+
+ if (i < table_count)
+ return AE_NULL_ENTRY;
+ else
+ return AE_OK;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_tables_loaded
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: TRUE if required ACPI tables are loaded
+ *
+ * DESCRIPTION: Determine if the minimum required ACPI tables are present
+ * (FADT, FACS, DSDT)
+ *
+ ******************************************************************************/
+
+u8 acpi_tb_tables_loaded(void)
+{
+
+ if (acpi_gbl_root_table_list.count >= 3) {
+ return (TRUE);
+ }
+
+ return (FALSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_print_table_header
+ *
+ * PARAMETERS: Address - Table physical address
+ * Header - Table header
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print an ACPI table header. Special cases for FACS and RSDP.
+ *
+ ******************************************************************************/
+
+void
+acpi_tb_print_table_header(acpi_physical_address address,
+ struct acpi_table_header *header)
+{
+
+ if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {
+
+ /* FACS only has signature and length fields of common table header */
+
+ ACPI_INFO((AE_INFO, "%4.4s %08lX, %04X",
+ header->signature, (unsigned long)address,
+ header->length));
+ } else if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_RSDP)) {
+
+ /* RSDP has no common fields */
+
+ ACPI_INFO((AE_INFO, "RSDP %08lX, %04X (r%d %6.6s)",
+ (unsigned long)address,
+ (ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
+ revision >
+ 0) ? ACPI_CAST_PTR(struct acpi_table_rsdp,
+ header)->length : 20,
+ ACPI_CAST_PTR(struct acpi_table_rsdp,
+ header)->revision,
+ ACPI_CAST_PTR(struct acpi_table_rsdp,
+ header)->oem_id));
+ } else {
+ /* Standard ACPI table with full common header */
+
+ ACPI_INFO((AE_INFO,
+ "%4.4s %08lX, %04X (r%d %6.6s %8.8s %8X %4.4s %8X)",
+ header->signature, (unsigned long)address,
+ header->length, header->revision, header->oem_id,
+ header->oem_table_id, header->oem_revision,
+ header->asl_compiler_id,
+ header->asl_compiler_revision));
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_verify_checksum
+ *
+ * PARAMETERS: Table - ACPI table to verify
+ * Length - Length of entire table
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Verifies that the table checksums to zero. Optionally returns
+ * exception on bad checksum.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
+{
+ u8 checksum;
+
+ /* Compute the checksum on the table */
+
+ checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, table), length);
+
+ /* Checksum ok? (should be zero) */
+
+ if (checksum) {
+ ACPI_WARNING((AE_INFO,
+ "Incorrect checksum in table [%4.4s] - %2.2X, should be %2.2X",
+ table->signature, table->checksum,
+ (u8) (table->checksum - checksum)));
+
+#if (ACPI_CHECKSUM_ABORT)
+
+ return (AE_BAD_CHECKSUM);
+#endif
+ }
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_checksum
+ *
+ * PARAMETERS: Buffer - Pointer to memory region to be checked
+ * Length - Length of this memory region
+ *
+ * RETURN: Checksum (u8)
+ *
+ * DESCRIPTION: Calculates circular checksum of memory region.
+ *
+ ******************************************************************************/
+
+u8 acpi_tb_checksum(u8 *buffer, u32 length)
+{
+ u8 sum = 0;
+ u8 *end = buffer + length;
+
+ while (buffer < end) {
+ sum = (u8) (sum + *(buffer++));
+ }
+
+ return sum;
+}
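+
+/*
+ * Editorial sketch (not part of the original patch): the convention used by
+ * acpi_tb_verify_checksum() above is that the byte-wise sum of the complete
+ * table image, including the stored checksum field, wraps to zero.  The
+ * helper below is hypothetical and only illustrates that relationship.
+ */
+static u8 acpi_tb_example_sums_to_zero(struct acpi_table_header *table)
+{
+        /* Non-zero result means the table image checksums to zero (valid) */
+        return (u8) (acpi_tb_checksum(ACPI_CAST_PTR(u8, table),
+                                      table->length) == 0);
+}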
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_install_table
+ *
+ * PARAMETERS: Address - Physical address of DSDT or FACS
+ * Flags - Flags
+ * Signature - Expected table signature, or NULL if no
+ * signature match is required
+ * table_index - Index into root table array
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Install an ACPI table into the global data structure.
+ *
+ ******************************************************************************/
+
+void
+acpi_tb_install_table(acpi_physical_address address,
+ u8 flags, char *signature, u32 table_index)
+{
+ struct acpi_table_header *table;
+
+ if (!address) {
+ ACPI_ERROR((AE_INFO,
+ "Null physical address for ACPI table [%s]",
+ signature));
+ return;
+ }
+
+ /* Map just the table header */
+
+ table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
+ if (!table) {
+ return;
+ }
+
+ /* If a particular signature is expected, signature must match */
+
+ if (signature && !ACPI_COMPARE_NAME(table->signature, signature)) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid signature 0x%X for ACPI table [%s]",
+ *ACPI_CAST_PTR(u32, table->signature), signature));
+ goto unmap_and_exit;
+ }
+
+ /* Initialize the table entry */
+
+ acpi_gbl_root_table_list.tables[table_index].address = address;
+ acpi_gbl_root_table_list.tables[table_index].length = table->length;
+ acpi_gbl_root_table_list.tables[table_index].flags = flags;
+
+ ACPI_MOVE_32_TO_32(&
+ (acpi_gbl_root_table_list.tables[table_index].
+ signature), table->signature);
+
+ acpi_tb_print_table_header(address, table);
+
+ if (table_index == ACPI_TABLE_INDEX_DSDT) {
+
+ /* Global integer width is based upon revision of the DSDT */
+
+ acpi_ut_set_integer_width(table->revision);
+ }
+
+ unmap_and_exit:
+ acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_get_root_table_entry
+ *
+ * PARAMETERS: table_entry - Pointer to the RSDT/XSDT table entry
+ * table_entry_size - sizeof 32 or 64 (RSDT or XSDT)
+ *
+ * RETURN: Physical address extracted from the root table
+ *
+ * DESCRIPTION: Get one root table entry. Handles 32-bit and 64-bit cases on
+ * both 32-bit and 64-bit platforms
+ *
+ * NOTE: acpi_physical_address is 32-bit on 32-bit platforms, 64-bit on
+ * 64-bit platforms.
+ *
+ ******************************************************************************/
+
+static acpi_physical_address
+acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
+{
+ u64 address64;
+
+ /*
+ * Get the table physical address (32-bit for RSDT, 64-bit for XSDT):
+ * Note: Addresses are 32-bit aligned (not 64) in both RSDT and XSDT
+ */
+ if (table_entry_size == sizeof(u32)) {
+ /*
+ * 32-bit platform, RSDT: Return 32-bit table entry
+ * 64-bit platform, RSDT: Expand 32-bit to 64-bit and return
+ */
+ return ((acpi_physical_address)
+ (*ACPI_CAST_PTR(u32, table_entry)));
+ } else {
+ /*
+ * 32-bit platform, XSDT: Truncate 64-bit to 32-bit and return
+ * 64-bit platform, XSDT: Move (unaligned) 64-bit to local, return 64-bit
+ */
+ ACPI_MOVE_64_TO_64(&address64, table_entry);
+
+#if ACPI_MACHINE_WIDTH == 32
+ if (address64 > ACPI_UINT32_MAX) {
+
+ /* Will truncate 64-bit address to 32 bits, issue warning */
+
+ ACPI_WARNING((AE_INFO,
+ "64-bit Physical Address in XSDT is too large (%8.8X%8.8X), truncating",
+ ACPI_FORMAT_UINT64(address64)));
+ }
+#endif
+ return ((acpi_physical_address) (address64));
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_parse_root_table
+ *
+ * PARAMETERS: rsdp_address - Physical address of the RSDP
+ * Flags - Flags
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function is called to parse the Root System Description
+ * Table (RSDT or XSDT)
+ *
+ * NOTE: Tables are mapped (not copied) for efficiency. The FACS must
+ * be mapped and cannot be copied because it contains the actual
+ * memory location of the ACPI Global Lock.
+ *
+ ******************************************************************************/
+
+acpi_status __init
+acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags)
+{
+ struct acpi_table_rsdp *rsdp;
+ u32 table_entry_size;
+ u32 i;
+ u32 table_count;
+ struct acpi_table_header *table;
+ acpi_physical_address address;
+ acpi_physical_address uninitialized_var(rsdt_address);
+ u32 length;
+ u8 *table_entry;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(tb_parse_root_table);
+
+ /*
+ * Map the entire RSDP and extract the address of the RSDT or XSDT
+ */
+ rsdp = acpi_os_map_memory(rsdp_address, sizeof(struct acpi_table_rsdp));
+ if (!rsdp) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ acpi_tb_print_table_header(rsdp_address,
+ ACPI_CAST_PTR(struct acpi_table_header,
+ rsdp));
+
+ /* Differentiate between RSDT and XSDT root tables */
+
+ if (rsdp->revision > 1 && rsdp->xsdt_physical_address) {
+ /*
+ * Root table is an XSDT (64-bit physical addresses). We must use the
+ * XSDT if the revision is > 1 and the XSDT pointer is present, as per
+ * the ACPI specification.
+ */
+ address = (acpi_physical_address) rsdp->xsdt_physical_address;
+ table_entry_size = sizeof(u64);
+ rsdt_address = (acpi_physical_address)
+ rsdp->rsdt_physical_address;
+ } else {
+ /* Root table is an RSDT (32-bit physical addresses) */
+
+ address = (acpi_physical_address) rsdp->rsdt_physical_address;
+ table_entry_size = sizeof(u32);
+ }
+
+ /*
+ * It is not possible to map more than one entry in some environments,
+ * so unmap the RSDP here before mapping other tables
+ */
+ acpi_os_unmap_memory(rsdp, sizeof(struct acpi_table_rsdp));
+
+ if (table_entry_size == sizeof(u64)) {
+ if (acpi_tb_check_xsdt(address) == AE_NULL_ENTRY) {
+ /* XSDT has NULL entry, RSDT is used */
+ address = rsdt_address;
+ table_entry_size = sizeof(u32);
+ ACPI_WARNING((AE_INFO, "BIOS XSDT has NULL entry, "
+ "using RSDT"));
+ }
+ }
+ /* Map the RSDT/XSDT table header to get the full table length */
+
+ table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
+ if (!table) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ acpi_tb_print_table_header(address, table);
+
+ /* Get the length of the full table, verify length and map entire table */
+
+ length = table->length;
+ acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
+
+ if (length < sizeof(struct acpi_table_header)) {
+ ACPI_ERROR((AE_INFO, "Invalid length 0x%X in RSDT/XSDT",
+ length));
+ return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
+ }
+
+ table = acpi_os_map_memory(address, length);
+ if (!table) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Validate the root table checksum */
+
+ status = acpi_tb_verify_checksum(table, length);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_unmap_memory(table, length);
+ return_ACPI_STATUS(status);
+ }
+
+ /* Calculate the number of tables described in the root table */
+
+ table_count =
+ (u32) ((table->length -
+ sizeof(struct acpi_table_header)) / table_entry_size);
+
+ /*
+ * First two entries in the table array are reserved for the DSDT and FACS,
+ * which are not actually present in the RSDT/XSDT - they come from the FADT
+ */
+ table_entry =
+ ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header);
+ acpi_gbl_root_table_list.count = 2;
+
+ /*
+ * Initialize the root table array from the RSDT/XSDT
+ */
+ for (i = 0; i < table_count; i++) {
+ if (acpi_gbl_root_table_list.count >=
+ acpi_gbl_root_table_list.size) {
+
+ /* There is no more room in the root table array, attempt resize */
+
+ status = acpi_tb_resize_root_table_list();
+ if (ACPI_FAILURE(status)) {
+ ACPI_WARNING((AE_INFO,
+ "Truncating %u table entries!",
+ (unsigned) (table_count -
+ (acpi_gbl_root_table_list.
+ count - 2))));
+ break;
+ }
+ }
+
+ /* Get the table physical address (32-bit for RSDT, 64-bit for XSDT) */
+
+ acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
+ address =
+ acpi_tb_get_root_table_entry(table_entry, table_entry_size);
+
+ table_entry += table_entry_size;
+ acpi_gbl_root_table_list.count++;
+ }
+
+ /*
+ * It is not possible to map more than one entry in some environments,
+ * so unmap the root table here before mapping other tables
+ */
+ acpi_os_unmap_memory(table, length);
+
+ /*
+ * Complete the initialization of the root table array by examining
+ * the header of each table
+ */
+ for (i = 2; i < acpi_gbl_root_table_list.count; i++) {
+ acpi_tb_install_table(acpi_gbl_root_table_list.tables[i].
+ address, flags, NULL, i);
+
+ /* Special case for FADT - get the DSDT and FACS */
+
+ if (ACPI_COMPARE_NAME
+ (&acpi_gbl_root_table_list.tables[i].signature,
+ ACPI_SIG_FADT)) {
+ acpi_tb_parse_fadt(i, flags);
+ }
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
diff --git a/drivers/acpi/tables/tbxface.c b/drivers/acpi/tables/tbxface.c
new file mode 100644
index 0000000..fd7770a
--- /dev/null
+++ b/drivers/acpi/tables/tbxface.c
@@ -0,0 +1,734 @@
+/******************************************************************************
+ *
+ * Module Name: tbxface - Public interfaces to the ACPI subsystem
+ * ACPI table oriented interfaces
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+#include <acpi/actables.h>
+
+#define _COMPONENT ACPI_TABLES
+ACPI_MODULE_NAME("tbxface")
+
+/* Local prototypes */
+static acpi_status acpi_tb_load_namespace(void);
+
+static int no_auto_ssdt;
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_allocate_root_table
+ *
+ * PARAMETERS: initial_table_count - Size of initial_table_array, in number of
+ * struct acpi_table_desc structures
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Allocate a root table array. Used by i_aSL compiler and
+ * acpi_initialize_tables.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_allocate_root_table(u32 initial_table_count)
+{
+
+ acpi_gbl_root_table_list.size = initial_table_count;
+ acpi_gbl_root_table_list.flags = ACPI_ROOT_ALLOW_RESIZE;
+
+ return (acpi_tb_resize_root_table_list());
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_initialize_tables
+ *
+ * PARAMETERS: initial_table_array - Pointer to an array of pre-allocated
+ * struct acpi_table_desc structures. If NULL, the
+ * array is dynamically allocated.
+ * initial_table_count - Size of initial_table_array, in number of
+ * struct acpi_table_desc structures
+ * allow_resize - Flag to tell Table Manager if resize of
+ * pre-allocated array is allowed. Ignored
+ * if initial_table_array is NULL.
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initialize the table manager, get the RSDP and RSDT/XSDT.
+ *
+ * NOTE: Allows static allocation of the initial table array in order
+ * to avoid the use of dynamic memory in confined environments
+ * such as the kernel boot sequence where it may not be available.
+ *
+ * If the host OS memory managers are initialized, use NULL for
+ * initial_table_array, and the table will be dynamically allocated.
+ *
+ ******************************************************************************/
+
+acpi_status __init
+acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
+ u32 initial_table_count, u8 allow_resize)
+{
+ acpi_physical_address rsdp_address;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_initialize_tables);
+
+ /*
+ * Set up the Root Table Array
+ * Allocate the table array if requested
+ */
+ if (!initial_table_array) {
+ status = acpi_allocate_root_table(initial_table_count);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ } else {
+ /* Root Table Array has been statically allocated by the host */
+
+ ACPI_MEMSET(initial_table_array, 0,
+ (acpi_size) initial_table_count *
+ sizeof(struct acpi_table_desc));
+
+ acpi_gbl_root_table_list.tables = initial_table_array;
+ acpi_gbl_root_table_list.size = initial_table_count;
+ acpi_gbl_root_table_list.flags = ACPI_ROOT_ORIGIN_UNKNOWN;
+ if (allow_resize) {
+ acpi_gbl_root_table_list.flags |=
+ ACPI_ROOT_ALLOW_RESIZE;
+ }
+ }
+
+ /* Get the address of the RSDP */
+
+ rsdp_address = acpi_os_get_root_pointer();
+ if (!rsdp_address) {
+ return_ACPI_STATUS(AE_NOT_FOUND);
+ }
+
+ /*
+ * Get the root table (RSDT or XSDT) and extract all entries to the local
+ * Root Table Array. This array contains the information of the RSDT/XSDT
+ * in a common, more usable format.
+ */
+ status =
+ acpi_tb_parse_root_table(rsdp_address, ACPI_TABLE_ORIGIN_MAPPED);
+ return_ACPI_STATUS(status);
+}
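+
+/*
+ * Editorial sketch (not part of the original patch): one way a host could
+ * call the interface above during early boot, before dynamic memory is
+ * available.  The array name and size are illustrative only; the real
+ * kernel wiring lives in the architecture/ACPI init code.
+ */
+#define EXAMPLE_INITIAL_TABLE_COUNT     32
+
+static struct acpi_table_desc example_initial_tables[EXAMPLE_INITIAL_TABLE_COUNT];
+
+static acpi_status __init example_acpi_table_init(void)
+{
+        /* Static scratch array; allow a later resize into dynamic memory */
+        return acpi_initialize_tables(example_initial_tables,
+                                      EXAMPLE_INITIAL_TABLE_COUNT, TRUE);
+}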
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_reallocate_root_table
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Reallocate Root Table List into dynamic memory. Copies the
+ * root list from the previously provided scratch area. Should
+ * be called once dynamic memory allocation is available in the
+ * kernel
+ *
+ ******************************************************************************/
+acpi_status acpi_reallocate_root_table(void)
+{
+ struct acpi_table_desc *tables;
+ acpi_size new_size;
+
+ ACPI_FUNCTION_TRACE(acpi_reallocate_root_table);
+
+ /*
+ * Only reallocate the root table if the host provided a static buffer
+ * for the table array in the call to acpi_initialize_tables.
+ */
+ if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
+ return_ACPI_STATUS(AE_SUPPORT);
+ }
+
+ new_size = ((acpi_size) acpi_gbl_root_table_list.count +
+ ACPI_ROOT_TABLE_SIZE_INCREMENT) *
+ sizeof(struct acpi_table_desc);
+
+ /* Create new array and copy the old array */
+
+ tables = ACPI_ALLOCATE_ZEROED(new_size);
+ if (!tables) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, new_size);
+
+ acpi_gbl_root_table_list.size = acpi_gbl_root_table_list.count;
+ acpi_gbl_root_table_list.tables = tables;
+ acpi_gbl_root_table_list.flags =
+ ACPI_ROOT_ORIGIN_ALLOCATED | ACPI_ROOT_ALLOW_RESIZE;
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_load_table
+ *
+ * PARAMETERS: table_ptr - pointer to a buffer containing the entire
+ * table to be loaded
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function is called to load a table from the caller's
+ * buffer. The buffer must contain an entire ACPI Table including
+ * a valid header. The header fields will be verified, and if it
+ * is determined that the table is invalid, the call will fail.
+ *
+ ******************************************************************************/
+acpi_status acpi_load_table(struct acpi_table_header *table_ptr)
+{
+ acpi_status status;
+ u32 table_index;
+ struct acpi_table_desc table_desc;
+
+ if (!table_ptr)
+ return AE_BAD_PARAMETER;
+
+ ACPI_MEMSET(&table_desc, 0, sizeof(struct acpi_table_desc));
+ table_desc.pointer = table_ptr;
+ table_desc.length = table_ptr->length;
+ table_desc.flags = ACPI_TABLE_ORIGIN_UNKNOWN;
+
+ /*
+ * Install the new table into the local data structures
+ */
+ status = acpi_tb_add_table(&table_desc, &table_index);
+ if (ACPI_FAILURE(status)) {
+ return status;
+ }
+ status = acpi_ns_load_table(table_index, acpi_gbl_root_node);
+ return status;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_load_table)
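+
+/*
+ * Editorial sketch (not part of the original patch): loading a table (for
+ * example an SSDT) that the caller already holds in a memory buffer.  The
+ * buffer is hypothetical; it must contain the complete table, header
+ * included, and since the descriptor built above references the buffer
+ * rather than a copy it should normally stay valid while the table is used.
+ */
+static acpi_status example_load_ssdt_image(struct acpi_table_header *buffer)
+{
+        if (!buffer || buffer->length < sizeof(struct acpi_table_header))
+                return AE_BAD_PARAMETER;
+
+        /* Install the table and load its definition block into the namespace */
+        return acpi_load_table(buffer);
+}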
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_get_table_header
+ *
+ * PARAMETERS: Signature - ACPI signature of needed table
+ * Instance - Which instance (for SSDTs)
+ * out_table_header - The pointer to the table header to fill
+ *
+ * RETURN: Status; the table header is copied into out_table_header
+ *
+ * DESCRIPTION: Finds an ACPI table header.
+ *
+ * NOTE: The header is copied into the caller-provided buffer, so no
+ * mapping is left for the caller to release.
+ *
+ *****************************************************************************/
+acpi_status
+acpi_get_table_header(char *signature,
+ u32 instance, struct acpi_table_header *out_table_header)
+{
+ u32 i;
+ u32 j;
+ struct acpi_table_header *header;
+
+ /* Parameter validation */
+
+ if (!signature || !out_table_header) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /*
+ * Walk the root table list
+ */
+ for (i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++) {
+ if (!ACPI_COMPARE_NAME
+ (&(acpi_gbl_root_table_list.tables[i].signature),
+ signature)) {
+ continue;
+ }
+
+ if (++j < instance) {
+ continue;
+ }
+
+ if (!acpi_gbl_root_table_list.tables[i].pointer) {
+ if ((acpi_gbl_root_table_list.tables[i].
+ flags & ACPI_TABLE_ORIGIN_MASK) ==
+ ACPI_TABLE_ORIGIN_MAPPED) {
+ header =
+ acpi_os_map_memory(acpi_gbl_root_table_list.
+ tables[i].address,
+ sizeof(struct
+ acpi_table_header));
+ if (!header) {
+ return AE_NO_MEMORY;
+ }
+ ACPI_MEMCPY(out_table_header, header,
+ sizeof(struct acpi_table_header));
+ acpi_os_unmap_memory(header,
+ sizeof(struct
+ acpi_table_header));
+ } else {
+ return AE_NOT_FOUND;
+ }
+ } else {
+ ACPI_MEMCPY(out_table_header,
+ acpi_gbl_root_table_list.tables[i].pointer,
+ sizeof(struct acpi_table_header));
+ }
+ return (AE_OK);
+ }
+
+ return (AE_NOT_FOUND);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_table_header)
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_unload_table_id
+ *
+ * PARAMETERS: id - Owner ID of the table to be removed.
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This routine is used to force the unload of a table (by id)
+ *
+ ******************************************************************************/
+acpi_status acpi_unload_table_id(acpi_owner_id id)
+{
+ int i;
+ acpi_status status = AE_NOT_EXIST;
+
+ ACPI_FUNCTION_TRACE(acpi_unload_table_id);
+
+ /* Find table in the global table list */
+ for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+ if (id != acpi_gbl_root_table_list.tables[i].owner_id) {
+ continue;
+ }
+ /*
+ * Delete all namespace objects owned by this table. Note that these
+ * objects can appear anywhere in the namespace by virtue of the AML
+ * "Scope" operator. Thus, we need to track ownership by an ID, not
+ * simply a position within the hierarchy
+ */
+ acpi_tb_delete_namespace_by_owner(i);
+ status = acpi_tb_release_owner_id(i);
+ acpi_tb_set_table_loaded_flag(i, FALSE);
+ break;
+ }
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
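+
+/*
+ * Editorial sketch (not part of the original patch): acpi_unload_table_id()
+ * composes naturally with acpi_tb_get_owner_id(), shown earlier in this
+ * patch, to unload a table by its index rather than by owner ID.  The
+ * helper name is illustrative only.
+ */
+static acpi_status example_unload_table_by_index(u32 table_index)
+{
+        acpi_owner_id owner_id;
+        acpi_status status;
+
+        /* Translate the table index into its owner ID, then force the unload */
+        status = acpi_tb_get_owner_id(table_index, &owner_id);
+        if (ACPI_FAILURE(status))
+                return status;
+
+        return acpi_unload_table_id(owner_id);
+}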
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_table
+ *
+ * PARAMETERS: Signature - ACPI signature of needed table
+ * Instance - Which instance (for SSDTs)
+ * out_table - Where the pointer to the table is returned
+ *
+ * RETURN: Status and pointer to table
+ *
+ * DESCRIPTION: Finds and verifies an ACPI table.
+ *
+ *****************************************************************************/
+acpi_status
+acpi_get_table(char *signature,
+ u32 instance, struct acpi_table_header **out_table)
+{
+ u32 i;
+ u32 j;
+ acpi_status status;
+
+ /* Parameter validation */
+
+ if (!signature || !out_table) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /*
+ * Walk the root table list
+ */
+ for (i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++) {
+ if (!ACPI_COMPARE_NAME
+ (&(acpi_gbl_root_table_list.tables[i].signature),
+ signature)) {
+ continue;
+ }
+
+ if (++j < instance) {
+ continue;
+ }
+
+ status =
+ acpi_tb_verify_table(&acpi_gbl_root_table_list.tables[i]);
+ if (ACPI_SUCCESS(status)) {
+ *out_table = acpi_gbl_root_table_list.tables[i].pointer;
+ }
+
+ if (!acpi_gbl_permanent_mmap) {
+ acpi_gbl_root_table_list.tables[i].pointer = NULL;
+ }
+
+ return (status);
+ }
+
+ return (AE_NOT_FOUND);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_table)
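+
+/*
+ * Editorial sketch (not part of the original patch): a typical lookup of a
+ * fixed table by signature.  ACPI_SIG_FADT ("FACP") is assumed to be the
+ * signature constant provided by the ACPICA headers of this release.
+ */
+static acpi_status example_find_fadt(struct acpi_table_header **out_fadt)
+{
+        /* Instance 1 selects the first table carrying this signature */
+        return acpi_get_table(ACPI_SIG_FADT, 1, out_fadt);
+}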
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_table_by_index
+ *
+ * PARAMETERS: table_index - Table index
+ * Table - Where the pointer to the table is returned
+ *
+ * RETURN: Status and pointer to the table
+ *
+ * DESCRIPTION: Obtain a table by an index into the global table list.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_get_table_by_index);
+
+ /* Parameter validation */
+
+ if (!table) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+ /* Validate index */
+
+ if (table_index >= acpi_gbl_root_table_list.count) {
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ if (!acpi_gbl_root_table_list.tables[table_index].pointer) {
+
+ /* Table is not mapped, map it */
+
+ status =
+ acpi_tb_verify_table(&acpi_gbl_root_table_list.
+ tables[table_index]);
+ if (ACPI_FAILURE(status)) {
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ *table = acpi_gbl_root_table_list.tables[table_index].pointer;
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_table_by_index)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_load_namespace
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Load the namespace from the DSDT and all SSDTs/PSDTs found in
+ * the RSDT/XSDT.
+ *
+ ******************************************************************************/
+static acpi_status acpi_tb_load_namespace(void)
+{
+ acpi_status status;
+ struct acpi_table_header *table;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(tb_load_namespace);
+
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+ /*
+ * Load the namespace. The DSDT is required, but any SSDT and PSDT tables
+ * are optional.
+ */
+ if (!acpi_gbl_root_table_list.count ||
+ !ACPI_COMPARE_NAME(&
+ (acpi_gbl_root_table_list.
+ tables[ACPI_TABLE_INDEX_DSDT].signature),
+ ACPI_SIG_DSDT)
+ ||
+ ACPI_FAILURE(acpi_tb_verify_table
+ (&acpi_gbl_root_table_list.
+ tables[ACPI_TABLE_INDEX_DSDT]))) {
+ status = AE_NO_ACPI_TABLES;
+ goto unlock_and_exit;
+ }
+
+ /*
+ * Allow the host OS to override the DSDT (acpi_os_table_override)
+ */
+ status =
+ acpi_os_table_override(acpi_gbl_root_table_list.
+ tables[ACPI_TABLE_INDEX_DSDT].pointer,
+ &table);
+ if (ACPI_SUCCESS(status) && table) {
+ /*
+ * The host has supplied a replacement DSDT
+ */
+ acpi_tb_delete_table(&acpi_gbl_root_table_list.
+ tables[ACPI_TABLE_INDEX_DSDT]);
+ acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer =
+ table;
+ acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].length =
+ table->length;
+ acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].flags =
+ ACPI_TABLE_ORIGIN_UNKNOWN;
+
+ ACPI_INFO((AE_INFO, "Table DSDT replaced by host OS"));
+ acpi_tb_print_table_header(0, table);
+
+ if (no_auto_ssdt == 0) {
+ printk(KERN_WARNING "ACPI: DSDT override uses original SSDTs unless \"acpi_no_auto_ssdt\"\n");
+ }
+ }
+
+ status =
+ acpi_tb_verify_table(&acpi_gbl_root_table_list.
+ tables[ACPI_TABLE_INDEX_DSDT]);
+ if (ACPI_FAILURE(status)) {
+
+ /* A valid DSDT is required */
+
+ status = AE_NO_ACPI_TABLES;
+ goto unlock_and_exit;
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+
+ /*
+ * Load and parse tables.
+ */
+ status = acpi_ns_load_table(ACPI_TABLE_INDEX_DSDT, acpi_gbl_root_node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Load any SSDT or PSDT tables. Note: Loop leaves tables locked
+ */
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+ for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+ if ((!ACPI_COMPARE_NAME
+ (&(acpi_gbl_root_table_list.tables[i].signature),
+ ACPI_SIG_SSDT)
+ &&
+ !ACPI_COMPARE_NAME(&
+ (acpi_gbl_root_table_list.tables[i].
+ signature), ACPI_SIG_PSDT))
+ ||
+ ACPI_FAILURE(acpi_tb_verify_table
+ (&acpi_gbl_root_table_list.tables[i]))) {
+ continue;
+ }
+
+ if (no_auto_ssdt) {
+ printk(KERN_WARNING "ACPI: SSDT ignored due to \"acpi_no_auto_ssdt\"\n");
+ continue;
+ }
+
+ /* Ignore errors while loading tables, get as many as possible */
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ (void)acpi_ns_load_table(i, acpi_gbl_root_node);
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI Tables successfully acquired\n"));
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_load_tables
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Load the ACPI tables from the RSDT/XSDT
+ *
+ ******************************************************************************/
+
+acpi_status acpi_load_tables(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_load_tables);
+
+ /*
+ * Load the namespace from the tables
+ */
+ status = acpi_tb_load_namespace();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "While loading namespace from ACPI tables"));
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_load_tables)
+
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_install_table_handler
+ *
+ * PARAMETERS: Handler - Table event handler
+ * Context - Value passed to the handler on each event
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install table event handler
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_table_handler(acpi_tbl_handler handler, void *context)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_install_table_handler);
+
+ if (!handler) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Don't allow more than one handler */
+
+ if (acpi_gbl_table_handler) {
+ status = AE_ALREADY_EXISTS;
+ goto cleanup;
+ }
+
+ /* Install the handler */
+
+ acpi_gbl_table_handler = handler;
+ acpi_gbl_table_handler_context = context;
+
+ cleanup:
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_table_handler)
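+
+/*
+ * Editorial sketch (not part of the original patch): a minimal table event
+ * handler.  ACPI_TABLE_EVENT_LOAD is assumed to be the load-event code
+ * defined by the ACPICA headers of this release; the handler and the
+ * registration shown in the trailing comment are illustrative only.
+ */
+static acpi_status
+example_table_event_handler(u32 event, void *table, void *context)
+{
+        if (event == ACPI_TABLE_EVENT_LOAD)
+                ACPI_INFO((AE_INFO, "Example handler: table load event"));
+
+        return AE_OK;
+}
+
+/* Registration: acpi_install_table_handler(example_table_event_handler, NULL); */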
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_remove_table_handler
+ *
+ * PARAMETERS: Handler - Table event handler that was installed
+ * previously.
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove table event handler
+ *
+ ******************************************************************************/
+acpi_status acpi_remove_table_handler(acpi_tbl_handler handler)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_remove_table_handler);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Make sure that the installed handler is the same */
+
+ if (!handler || handler != acpi_gbl_table_handler) {
+ status = AE_BAD_PARAMETER;
+ goto cleanup;
+ }
+
+ /* Remove the handler */
+
+ acpi_gbl_table_handler = NULL;
+
+ cleanup:
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_table_handler)
+
+
+static int __init acpi_no_auto_ssdt_setup(char *s)
+{
+ printk(KERN_NOTICE "ACPI: SSDT auto-load disabled\n");
+
+ no_auto_ssdt = 1;
+
+ return 1;
+}
+
+__setup("acpi_no_auto_ssdt", acpi_no_auto_ssdt_setup);
diff --git a/drivers/acpi/tables/tbxfroot.c b/drivers/acpi/tables/tbxfroot.c
new file mode 100644
index 0000000..2d157e0
--- /dev/null
+++ b/drivers/acpi/tables/tbxfroot.c
@@ -0,0 +1,273 @@
+/******************************************************************************
+ *
+ * Module Name: tbxfroot - Find the root ACPI table (RSDT)
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/actables.h>
+
+#define _COMPONENT ACPI_TABLES
+ACPI_MODULE_NAME("tbxfroot")
+
+/* Local prototypes */
+static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length);
+
+static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_validate_rsdp
+ *
+ * PARAMETERS: Rsdp - Pointer to unvalidated RSDP
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Validate the RSDP (ptr)
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
+{
+ ACPI_FUNCTION_ENTRY();
+
+ /*
+ * The signature and checksum must both be correct
+ *
+ * Note: Sometimes there exists more than one RSDP in memory; the valid
+ * RSDP has a valid checksum, all others have an invalid checksum.
+ */
+ if (ACPI_STRNCMP((char *)rsdp, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1)
+ != 0) {
+
+ /* Nope, BAD Signature */
+
+ return (AE_BAD_SIGNATURE);
+ }
+
+ /* Check the standard checksum */
+
+ if (acpi_tb_checksum((u8 *) rsdp, ACPI_RSDP_CHECKSUM_LENGTH) != 0) {
+ return (AE_BAD_CHECKSUM);
+ }
+
+ /* Check extended checksum if table version >= 2 */
+
+ if ((rsdp->revision >= 2) &&
+ (acpi_tb_checksum((u8 *) rsdp, ACPI_RSDP_XCHECKSUM_LENGTH) != 0)) {
+ return (AE_BAD_CHECKSUM);
+ }
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_find_root_pointer
+ *
+ * PARAMETERS: table_address - Where the physical address of the RSDP is
+ * returned
+ *
+ * RETURN: Status, RSDP physical address
+ *
+ * DESCRIPTION: Search lower 1_mbyte of memory for the root system descriptor
+ * pointer (RSDP) structure. If it is found, set *table_address to
+ * its physical address.
+ *
+ * NOTE1: The RSDP must be either in the first 1_k of the Extended
+ * BIOS Data Area or between E0000 and FFFFF (From ACPI Spec.)
+ * Only a 32-bit physical address is necessary.
+ *
+ * NOTE2: This function is always available, regardless of the
+ * initialization state of the rest of ACPI.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_find_root_pointer(acpi_size *table_address)
+{
+ u8 *table_ptr;
+ u8 *mem_rover;
+ u32 physical_address;
+
+ ACPI_FUNCTION_TRACE(acpi_find_root_pointer);
+
+ /* 1a) Get the location of the Extended BIOS Data Area (EBDA) */
+
+ table_ptr = acpi_os_map_memory((acpi_physical_address)
+ ACPI_EBDA_PTR_LOCATION,
+ ACPI_EBDA_PTR_LENGTH);
+ if (!table_ptr) {
+ ACPI_ERROR((AE_INFO,
+ "Could not map memory at %8.8X for length %X",
+ ACPI_EBDA_PTR_LOCATION, ACPI_EBDA_PTR_LENGTH));
+
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ ACPI_MOVE_16_TO_32(&physical_address, table_ptr);
+
+ /* Convert segment part to physical address */
+
+ physical_address <<= 4;
+ acpi_os_unmap_memory(table_ptr, ACPI_EBDA_PTR_LENGTH);
+
+ /* EBDA present? */
+
+ if (physical_address > 0x400) {
+ /*
+ * 1b) Search EBDA paragraphs (EBDA is required to be a
+ * minimum of 1_k length)
+ */
+ table_ptr = acpi_os_map_memory((acpi_physical_address)
+ physical_address,
+ ACPI_EBDA_WINDOW_SIZE);
+ if (!table_ptr) {
+ ACPI_ERROR((AE_INFO,
+ "Could not map memory at %8.8X for length %X",
+ physical_address, ACPI_EBDA_WINDOW_SIZE));
+
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ mem_rover =
+ acpi_tb_scan_memory_for_rsdp(table_ptr,
+ ACPI_EBDA_WINDOW_SIZE);
+ acpi_os_unmap_memory(table_ptr, ACPI_EBDA_WINDOW_SIZE);
+
+ if (mem_rover) {
+
+ /* Return the physical address */
+
+ physical_address +=
+ (u32) ACPI_PTR_DIFF(mem_rover, table_ptr);
+
+ *table_address = physical_address;
+ return_ACPI_STATUS(AE_OK);
+ }
+ }
+
+ /*
+ * 2) Search upper memory: 16-byte boundaries in E0000h-FFFFFh
+ */
+ table_ptr = acpi_os_map_memory((acpi_physical_address)
+ ACPI_HI_RSDP_WINDOW_BASE,
+ ACPI_HI_RSDP_WINDOW_SIZE);
+
+ if (!table_ptr) {
+ ACPI_ERROR((AE_INFO,
+ "Could not map memory at %8.8X for length %X",
+ ACPI_HI_RSDP_WINDOW_BASE,
+ ACPI_HI_RSDP_WINDOW_SIZE));
+
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ mem_rover =
+ acpi_tb_scan_memory_for_rsdp(table_ptr, ACPI_HI_RSDP_WINDOW_SIZE);
+ acpi_os_unmap_memory(table_ptr, ACPI_HI_RSDP_WINDOW_SIZE);
+
+ if (mem_rover) {
+
+ /* Return the physical address */
+
+ physical_address = (u32)
+ (ACPI_HI_RSDP_WINDOW_BASE +
+ ACPI_PTR_DIFF(mem_rover, table_ptr));
+
+ *table_address = physical_address;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* A valid RSDP was not found */
+
+ ACPI_ERROR((AE_INFO, "A valid RSDP was not found"));
+ return_ACPI_STATUS(AE_NOT_FOUND);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_scan_memory_for_rsdp
+ *
+ * PARAMETERS: start_address - Starting pointer for search
+ * Length - Maximum length to search
+ *
+ * RETURN: Pointer to the RSDP if found, otherwise NULL.
+ *
+ * DESCRIPTION: Search a block of memory for the RSDP signature
+ *
+ ******************************************************************************/
+static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length)
+{
+ acpi_status status;
+ u8 *mem_rover;
+ u8 *end_address;
+
+ ACPI_FUNCTION_TRACE(tb_scan_memory_for_rsdp);
+
+ end_address = start_address + length;
+
+ /* Search from given start address for the requested length */
+
+ for (mem_rover = start_address; mem_rover < end_address;
+ mem_rover += ACPI_RSDP_SCAN_STEP) {
+
+ /* The RSDP signature and checksum must both be correct */
+
+ status =
+ acpi_tb_validate_rsdp(ACPI_CAST_PTR
+ (struct acpi_table_rsdp, mem_rover));
+ if (ACPI_SUCCESS(status)) {
+
+ /* Sig and checksum valid, we have found a real RSDP */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "RSDP located at physical address %p\n",
+ mem_rover));
+ return_PTR(mem_rover);
+ }
+
+ /* No sig match or bad checksum, keep searching */
+ }
+
+ /* Searched entire block, no RSDP was found */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Searched entire block from %p, valid RSDP was not found\n",
+ start_address));
+ return_PTR(NULL);
+}
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
new file mode 100644
index 0000000..073ff09
--- /dev/null
+++ b/drivers/acpi/thermal.c
@@ -0,0 +1,1892 @@
+/*
+ * acpi_thermal.c - ACPI Thermal Zone Driver ($Revision: 41 $)
+ *
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This driver fully implements the ACPI thermal policy as described in the
+ * ACPI 2.0 Specification.
+ *
+ * TBD: 1. Implement passive cooling hysteresis.
+ * 2. Enhance passive cooling (CPU) states/limit interface to support
+ * concepts of 'multiple limiters', upper/lower limits, etc.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dmi.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/kmod.h>
+#include <linux/seq_file.h>
+#include <linux/reboot.h>
+#include <asm/uaccess.h>
+#include <linux/thermal.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#define ACPI_THERMAL_CLASS "thermal_zone"
+#define ACPI_THERMAL_DEVICE_NAME "Thermal Zone"
+#define ACPI_THERMAL_FILE_STATE "state"
+#define ACPI_THERMAL_FILE_TEMPERATURE "temperature"
+#define ACPI_THERMAL_FILE_TRIP_POINTS "trip_points"
+#define ACPI_THERMAL_FILE_COOLING_MODE "cooling_mode"
+#define ACPI_THERMAL_FILE_POLLING_FREQ "polling_frequency"
+#define ACPI_THERMAL_NOTIFY_TEMPERATURE 0x80
+#define ACPI_THERMAL_NOTIFY_THRESHOLDS 0x81
+#define ACPI_THERMAL_NOTIFY_DEVICES 0x82
+#define ACPI_THERMAL_NOTIFY_CRITICAL 0xF0
+#define ACPI_THERMAL_NOTIFY_HOT 0xF1
+#define ACPI_THERMAL_MODE_ACTIVE 0x00
+
+#define ACPI_THERMAL_MAX_ACTIVE 10
+#define ACPI_THERMAL_MAX_LIMIT_STR_LEN 65
+
+#define _COMPONENT ACPI_THERMAL_COMPONENT
+ACPI_MODULE_NAME("thermal");
+
+MODULE_AUTHOR("Paul Diefenbaugh");
+MODULE_DESCRIPTION("ACPI Thermal Zone Driver");
+MODULE_LICENSE("GPL");
+
+static int act;
+module_param(act, int, 0644);
+MODULE_PARM_DESC(act, "Disable or override all lowest active trip points.");
+
+static int crt;
+module_param(crt, int, 0644);
+MODULE_PARM_DESC(crt, "Disable or lower all critical trip points.");
+
+static int tzp;
+module_param(tzp, int, 0444);
+MODULE_PARM_DESC(tzp, "Thermal zone polling frequency, in 1/10 seconds.");
+
+static int nocrt;
+module_param(nocrt, int, 0);
+MODULE_PARM_DESC(nocrt, "Set to take no action upon ACPI thermal zone critical trip points.");
+
+static int off;
+module_param(off, int, 0);
+MODULE_PARM_DESC(off, "Set to disable ACPI thermal support.");
+
+static int psv;
+module_param(psv, int, 0644);
+MODULE_PARM_DESC(psv, "Disable or override all passive trip points.");
+
+static int acpi_thermal_add(struct acpi_device *device);
+static int acpi_thermal_remove(struct acpi_device *device, int type);
+static int acpi_thermal_resume(struct acpi_device *device);
+static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file);
+static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file);
+static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file);
+static int acpi_thermal_cooling_open_fs(struct inode *inode, struct file *file);
+static ssize_t acpi_thermal_write_cooling_mode(struct file *,
+ const char __user *, size_t,
+ loff_t *);
+static int acpi_thermal_polling_open_fs(struct inode *inode, struct file *file);
+static ssize_t acpi_thermal_write_polling(struct file *, const char __user *,
+ size_t, loff_t *);
+
+static const struct acpi_device_id thermal_device_ids[] = {
+ {ACPI_THERMAL_HID, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, thermal_device_ids);
+
+static struct acpi_driver acpi_thermal_driver = {
+ .name = "thermal",
+ .class = ACPI_THERMAL_CLASS,
+ .ids = thermal_device_ids,
+ .ops = {
+ .add = acpi_thermal_add,
+ .remove = acpi_thermal_remove,
+ .resume = acpi_thermal_resume,
+ },
+};
+
+struct acpi_thermal_state {
+ u8 critical:1;
+ u8 hot:1;
+ u8 passive:1;
+ u8 active:1;
+ u8 reserved:4;
+ int active_index;
+};
+
+struct acpi_thermal_state_flags {
+ u8 valid:1;
+ u8 enabled:1;
+ u8 reserved:6;
+};
+
+struct acpi_thermal_critical {
+ struct acpi_thermal_state_flags flags;
+ unsigned long temperature;
+};
+
+struct acpi_thermal_hot {
+ struct acpi_thermal_state_flags flags;
+ unsigned long temperature;
+};
+
+struct acpi_thermal_passive {
+ struct acpi_thermal_state_flags flags;
+ unsigned long temperature;
+ unsigned long tc1;
+ unsigned long tc2;
+ unsigned long tsp;
+ struct acpi_handle_list devices;
+};
+
+struct acpi_thermal_active {
+ struct acpi_thermal_state_flags flags;
+ unsigned long temperature;
+ struct acpi_handle_list devices;
+};
+
+struct acpi_thermal_trips {
+ struct acpi_thermal_critical critical;
+ struct acpi_thermal_hot hot;
+ struct acpi_thermal_passive passive;
+ struct acpi_thermal_active active[ACPI_THERMAL_MAX_ACTIVE];
+};
+
+struct acpi_thermal_flags {
+ u8 cooling_mode:1; /* _SCP */
+ u8 devices:1; /* _TZD */
+ u8 reserved:6;
+};
+
+struct acpi_thermal {
+ struct acpi_device * device;
+ acpi_bus_id name;
+ unsigned long temperature;
+ unsigned long last_temperature;
+ unsigned long polling_frequency;
+ volatile u8 zombie;
+ struct acpi_thermal_flags flags;
+ struct acpi_thermal_state state;
+ struct acpi_thermal_trips trips;
+ struct acpi_handle_list devices;
+ struct timer_list timer;
+ struct thermal_zone_device *thermal_zone;
+ int tz_enabled;
+ struct mutex lock;
+};
+
+static const struct file_operations acpi_thermal_state_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_thermal_state_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations acpi_thermal_temp_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_thermal_temp_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations acpi_thermal_trip_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_thermal_trip_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations acpi_thermal_cooling_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_thermal_cooling_open_fs,
+ .read = seq_read,
+ .write = acpi_thermal_write_cooling_mode,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations acpi_thermal_polling_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_thermal_polling_open_fs,
+ .read = seq_read,
+ .write = acpi_thermal_write_polling,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* --------------------------------------------------------------------------
+ Thermal Zone Management
+ -------------------------------------------------------------------------- */
+
+static int acpi_thermal_get_temperature(struct acpi_thermal *tz)
+{
+ acpi_status status = AE_OK;
+ unsigned long long tmp;
+
+ if (!tz)
+ return -EINVAL;
+
+ tz->last_temperature = tz->temperature;
+
+ status = acpi_evaluate_integer(tz->device->handle, "_TMP", NULL, &tmp);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ tz->temperature = tmp;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Temperature is %lu dK\n",
+ tz->temperature));
+
+ return 0;
+}
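+
+/*
+ * Editorial note (not part of the original patch): _TMP reports temperature
+ * in tenths of a degree Kelvin (the "dK" in the debug output above), so a
+ * reading of 3232 dK is 323.2 K, roughly 50 degrees Celsius.  A hedged
+ * helper, assuming the conventional 273.2 K (2732 dK) offset:
+ */
+static inline long example_deci_kelvin_to_celsius(unsigned long dk)
+{
+        /* 2732 dK corresponds to 0 degrees C; result truncates toward zero */
+        return ((long)dk - 2732) / 10;
+}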
+
+static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz)
+{
+ acpi_status status = AE_OK;
+ unsigned long long tmp;
+
+ if (!tz)
+ return -EINVAL;
+
+ status = acpi_evaluate_integer(tz->device->handle, "_TZP", NULL, &tmp);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ tz->polling_frequency = tmp;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Polling frequency is %lu dS\n",
+ tz->polling_frequency));
+
+ return 0;
+}
+
+static int acpi_thermal_set_polling(struct acpi_thermal *tz, int seconds)
+{
+
+ if (!tz)
+ return -EINVAL;
+
+ tz->polling_frequency = seconds * 10; /* Convert value to deci-seconds */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Polling frequency set to %lu seconds\n",
+ tz->polling_frequency/10));
+
+ return 0;
+}
+
+static int acpi_thermal_set_cooling_mode(struct acpi_thermal *tz, int mode)
+{
+ acpi_status status = AE_OK;
+ union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list arg_list = { 1, &arg0 };
+ acpi_handle handle = NULL;
+
+
+ if (!tz)
+ return -EINVAL;
+
+ status = acpi_get_handle(tz->device->handle, "_SCP", &handle);
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "_SCP not present\n"));
+ return -ENODEV;
+ }
+
+ arg0.integer.value = mode;
+
+ status = acpi_evaluate_object(handle, NULL, &arg_list, NULL);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return 0;
+}
+
+#define ACPI_TRIPS_CRITICAL 0x01
+#define ACPI_TRIPS_HOT 0x02
+#define ACPI_TRIPS_PASSIVE 0x04
+#define ACPI_TRIPS_ACTIVE 0x08
+#define ACPI_TRIPS_DEVICES 0x10
+
+#define ACPI_TRIPS_REFRESH_THRESHOLDS (ACPI_TRIPS_PASSIVE | ACPI_TRIPS_ACTIVE)
+#define ACPI_TRIPS_REFRESH_DEVICES ACPI_TRIPS_DEVICES
+
+#define ACPI_TRIPS_INIT (ACPI_TRIPS_CRITICAL | ACPI_TRIPS_HOT | \
+ ACPI_TRIPS_PASSIVE | ACPI_TRIPS_ACTIVE | \
+ ACPI_TRIPS_DEVICES)
+
+/*
+ * This exception is reported in two cases:
+ * 1. An invalid trip point becomes valid, or a valid trip point becomes
+ *    invalid, when the AML code is re-evaluated.
+ * 2. TODO: Devices listed in _PSL, _ALx, _TZD may change.
+ *    We need to re-bind the cooling devices of a thermal zone when this occurs.
+ */
+#define ACPI_THERMAL_TRIPS_EXCEPTION(flags, str) \
+do { \
+ if (flags != ACPI_TRIPS_INIT) \
+ ACPI_EXCEPTION((AE_INFO, AE_ERROR, \
+ "ACPI thermal trip point %s changed\n" \
+ "Please send acpidump to linux-acpi@vger.kernel.org\n", str)); \
+} while (0)
+
+static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
+{
+ acpi_status status = AE_OK;
+ unsigned long long tmp;
+ struct acpi_handle_list devices;
+ int valid = 0;
+ int i;
+
+ /* Critical Shutdown (required) */
+ if (flag & ACPI_TRIPS_CRITICAL) {
+ status = acpi_evaluate_integer(tz->device->handle,
+ "_CRT", NULL, &tmp);
+ tz->trips.critical.temperature = tmp;
+ /*
+ * Treat freezing temperatures as invalid as well; some BIOSes
+ * return really low values and cause reboots at startup.
+ * Values at or below 0 degrees Celsius clearly are not valid
+ * critical thresholds, so discard them as invalid.
+ */
+ if (ACPI_FAILURE(status) ||
+ tz->trips.critical.temperature <= 2732) {
+ tz->trips.critical.flags.valid = 0;
+ ACPI_EXCEPTION((AE_INFO, status,
+ "No or invalid critical threshold"));
+ return -ENODEV;
+ } else {
+ tz->trips.critical.flags.valid = 1;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Found critical threshold [%lu]\n",
+ tz->trips.critical.temperature));
+ }
+ if (tz->trips.critical.flags.valid == 1) {
+ if (crt == -1) {
+ tz->trips.critical.flags.valid = 0;
+ } else if (crt > 0) {
+ unsigned long crt_k = CELSIUS_TO_KELVIN(crt);
+ /*
+ * Allow override critical threshold
+ */
+ if (crt_k > tz->trips.critical.temperature)
+ printk(KERN_WARNING PREFIX
+ "Critical threshold %d C\n", crt);
+ tz->trips.critical.temperature = crt_k;
+ }
+ }
+ }
+
+ /* Critical Sleep (optional) */
+ if (flag & ACPI_TRIPS_HOT) {
+ status = acpi_evaluate_integer(tz->device->handle,
+ "_HOT", NULL, &tmp);
+ if (ACPI_FAILURE(status)) {
+ tz->trips.hot.flags.valid = 0;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "No hot threshold\n"));
+ } else {
+ tz->trips.hot.temperature = tmp;
+ tz->trips.hot.flags.valid = 1;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Found hot threshold [%lu]\n",
+ tz->trips.hot.temperature));
+ }
+ }
+
+ /* Passive (optional) */
+ if (flag & ACPI_TRIPS_PASSIVE) {
+ valid = tz->trips.passive.flags.valid;
+ if (psv == -1) {
+ status = AE_SUPPORT;
+ } else if (psv > 0) {
+ tmp = CELSIUS_TO_KELVIN(psv);
+ status = AE_OK;
+ } else {
+ status = acpi_evaluate_integer(tz->device->handle,
+ "_PSV", NULL, &tmp);
+ }
+
+ if (ACPI_FAILURE(status))
+ tz->trips.passive.flags.valid = 0;
+ else {
+ tz->trips.passive.temperature = tmp;
+ tz->trips.passive.flags.valid = 1;
+ if (flag == ACPI_TRIPS_INIT) {
+ status = acpi_evaluate_integer(
+ tz->device->handle, "_TC1",
+ NULL, &tmp);
+ if (ACPI_FAILURE(status))
+ tz->trips.passive.flags.valid = 0;
+ else
+ tz->trips.passive.tc1 = tmp;
+ status = acpi_evaluate_integer(
+ tz->device->handle, "_TC2",
+ NULL, &tmp);
+ if (ACPI_FAILURE(status))
+ tz->trips.passive.flags.valid = 0;
+ else
+ tz->trips.passive.tc2 = tmp;
+ status = acpi_evaluate_integer(
+ tz->device->handle, "_TSP",
+ NULL, &tmp);
+ if (ACPI_FAILURE(status))
+ tz->trips.passive.flags.valid = 0;
+ else
+ tz->trips.passive.tsp = tmp;
+ }
+ }
+ }
+ if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.passive.flags.valid) {
+ memset(&devices, 0, sizeof(struct acpi_handle_list));
+ status = acpi_evaluate_reference(tz->device->handle, "_PSL",
+ NULL, &devices);
+ if (ACPI_FAILURE(status))
+ tz->trips.passive.flags.valid = 0;
+ else
+ tz->trips.passive.flags.valid = 1;
+
+ if (memcmp(&tz->trips.passive.devices, &devices,
+ sizeof(struct acpi_handle_list))) {
+ memcpy(&tz->trips.passive.devices, &devices,
+ sizeof(struct acpi_handle_list));
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, "device");
+ }
+ }
+ if ((flag & ACPI_TRIPS_PASSIVE) || (flag & ACPI_TRIPS_DEVICES)) {
+ if (valid != tz->trips.passive.flags.valid)
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, "state");
+ }
+
+ /* Active (optional) */
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
+ char name[5] = { '_', 'A', 'C', ('0' + i), '\0' };
+ valid = tz->trips.active[i].flags.valid;
+
+ if (act == -1)
+ break; /* disable all active trip points */
+
+ if (flag & ACPI_TRIPS_ACTIVE) {
+ status = acpi_evaluate_integer(tz->device->handle,
+ name, NULL, &tmp);
+ if (ACPI_FAILURE(status)) {
+ tz->trips.active[i].flags.valid = 0;
+ if (i == 0)
+ break;
+ if (act <= 0)
+ break;
+ if (i == 1)
+ tz->trips.active[0].temperature =
+ CELSIUS_TO_KELVIN(act);
+ else
+ /*
+ * Don't allow override higher than
+ * the next higher trip point
+ */
+ tz->trips.active[i - 1].temperature =
+ (tz->trips.active[i - 2].temperature <
+ CELSIUS_TO_KELVIN(act) ?
+ tz->trips.active[i - 2].temperature :
+ CELSIUS_TO_KELVIN(act));
+ break;
+ } else {
+ tz->trips.active[i].temperature = tmp;
+ tz->trips.active[i].flags.valid = 1;
+ }
+ }
+
+ name[2] = 'L';
+ if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.active[i].flags.valid ) {
+ memset(&devices, 0, sizeof(struct acpi_handle_list));
+ status = acpi_evaluate_reference(tz->device->handle,
+ name, NULL, &devices);
+ if (ACPI_FAILURE(status))
+ tz->trips.active[i].flags.valid = 0;
+ else
+ tz->trips.active[i].flags.valid = 1;
+
+ if (memcmp(&tz->trips.active[i].devices, &devices,
+ sizeof(struct acpi_handle_list))) {
+ memcpy(&tz->trips.active[i].devices, &devices,
+ sizeof(struct acpi_handle_list));
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, "device");
+ }
+ }
+ if ((flag & ACPI_TRIPS_ACTIVE) || (flag & ACPI_TRIPS_DEVICES))
+ if (valid != tz->trips.active[i].flags.valid)
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, "state");
+
+ if (!tz->trips.active[i].flags.valid)
+ break;
+ }
+
+ if (flag & ACPI_TRIPS_DEVICES) {
+ memset(&devices, 0, sizeof(struct acpi_handle_list));
+ status = acpi_evaluate_reference(tz->device->handle, "_TZD",
+ NULL, &devices);
+ if (memcmp(&tz->devices, &devices,
+ sizeof(struct acpi_handle_list))) {
+ memcpy(&tz->devices, &devices,
+ sizeof(struct acpi_handle_list));
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, "device");
+ }
+ }
+
+ return 0;
+}
+
+static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
+{
+ return acpi_thermal_trips_update(tz, ACPI_TRIPS_INIT);
+}
+
+static int acpi_thermal_critical(struct acpi_thermal *tz)
+{
+ if (!tz || !tz->trips.critical.flags.valid)
+ return -EINVAL;
+
+ if (tz->temperature >= tz->trips.critical.temperature) {
+ printk(KERN_WARNING PREFIX "Critical trip point\n");
+ tz->trips.critical.flags.enabled = 1;
+ } else if (tz->trips.critical.flags.enabled)
+ tz->trips.critical.flags.enabled = 0;
+
+ acpi_bus_generate_proc_event(tz->device, ACPI_THERMAL_NOTIFY_CRITICAL,
+ tz->trips.critical.flags.enabled);
+ acpi_bus_generate_netlink_event(tz->device->pnp.device_class,
+ dev_name(&tz->device->dev),
+ ACPI_THERMAL_NOTIFY_CRITICAL,
+ tz->trips.critical.flags.enabled);
+
+ /* take no action if nocrt is set */
+ if (!nocrt) {
+ printk(KERN_EMERG
+ "Critical temperature reached (%ld C), shutting down.\n",
+ KELVIN_TO_CELSIUS(tz->temperature));
+ orderly_poweroff(true);
+ }
+
+ return 0;
+}
+
+static int acpi_thermal_hot(struct acpi_thermal *tz)
+{
+ if (!tz || !tz->trips.hot.flags.valid)
+ return -EINVAL;
+
+ if (tz->temperature >= tz->trips.hot.temperature) {
+ printk(KERN_WARNING PREFIX "Hot trip point\n");
+ tz->trips.hot.flags.enabled = 1;
+ } else if (tz->trips.hot.flags.enabled)
+ tz->trips.hot.flags.enabled = 0;
+
+ acpi_bus_generate_proc_event(tz->device, ACPI_THERMAL_NOTIFY_HOT,
+ tz->trips.hot.flags.enabled);
+ acpi_bus_generate_netlink_event(tz->device->pnp.device_class,
+ dev_name(&tz->device->dev),
+ ACPI_THERMAL_NOTIFY_HOT,
+ tz->trips.hot.flags.enabled);
+
+ /* TBD: Call user-mode "sleep(S4)" function if nocrt is cleared */
+
+ return 0;
+}
+
+static void acpi_thermal_passive(struct acpi_thermal *tz)
+{
+ int result = 1;
+ struct acpi_thermal_passive *passive = NULL;
+ int trend = 0;
+ int i = 0;
+
+
+ if (!tz || !tz->trips.passive.flags.valid)
+ return;
+
+ passive = &(tz->trips.passive);
+
+ /*
+ * Above Trip?
+ * -----------
+ * Calculate the thermal trend (using the passive cooling equation)
+ * and modify the performance limit for all passive cooling devices
+ * accordingly. Note that we assume symmetry.
+ */
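+ /*
+ * For example (illustrative values only): with tc1 = 2, tc2 = 1, a
+ * passive trip of 3002 (27 C), a last temperature of 3022 and a
+ * current temperature of 3032 (30 C), all in tenths of a kelvin,
+ * trend = 2 * (3032 - 3022) + 1 * (3032 - 3002) = 50 > 0, so the
+ * passive devices are throttled one more step.
+ */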
+ if (tz->temperature >= passive->temperature) {
+ trend =
+ (passive->tc1 * (tz->temperature - tz->last_temperature)) +
+ (passive->tc2 * (tz->temperature - passive->temperature));
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "trend[%d]=(tc1[%lu]*(tmp[%lu]-last[%lu]))+(tc2[%lu]*(tmp[%lu]-psv[%lu]))\n",
+ trend, passive->tc1, tz->temperature,
+ tz->last_temperature, passive->tc2,
+ tz->temperature, passive->temperature));
+ passive->flags.enabled = 1;
+ /* Heating up? */
+ if (trend > 0)
+ for (i = 0; i < passive->devices.count; i++)
+ acpi_processor_set_thermal_limit(passive->
+ devices.
+ handles[i],
+ ACPI_PROCESSOR_LIMIT_INCREMENT);
+ /* Cooling off? */
+ else if (trend < 0) {
+ for (i = 0; i < passive->devices.count; i++)
+ /*
+ * Assume that we are at the highest
+ * frequency / lowest throttling and can
+ * leave passive mode, even in the error case.
+ */
+ if (!acpi_processor_set_thermal_limit
+ (passive->devices.handles[i],
+ ACPI_PROCESSOR_LIMIT_DECREMENT))
+ result = 0;
+ /*
+ * Leave cooling mode, even if the temperature might
+ * still be higher than the trip point. This is because
+ * some machines define long thermal polling
+ * frequencies (tsp), and we will fall back into
+ * passive mode in the next cycle anyway (probably sooner).
+ */
+ if (result) {
+ passive->flags.enabled = 0;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Disabling passive cooling, still above threshold,"
+ " but we are cooling down\n"));
+ }
+ }
+ return;
+ }
+
+ /*
+ * Below Trip?
+ * -----------
+ * Implement passive cooling hysteresis to slowly increase performance
+ * and avoid thrashing around the passive trip point. Note that we
+ * assume symmetry.
+ */
+ if (!passive->flags.enabled)
+ return;
+ for (i = 0; i < passive->devices.count; i++)
+ if (!acpi_processor_set_thermal_limit
+ (passive->devices.handles[i],
+ ACPI_PROCESSOR_LIMIT_DECREMENT))
+ result = 0;
+ if (result) {
+ passive->flags.enabled = 0;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Disabling passive cooling (zone is cool)\n"));
+ }
+}
+
+static void acpi_thermal_active(struct acpi_thermal *tz)
+{
+ int result = 0;
+ struct acpi_thermal_active *active = NULL;
+ int i = 0;
+ int j = 0;
+ unsigned long maxtemp = 0;
+
+
+ if (!tz)
+ return;
+
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
+ active = &(tz->trips.active[i]);
+ if (!active || !active->flags.valid)
+ break;
+ if (tz->temperature >= active->temperature) {
+ /*
+ * Above Threshold?
+ * ----------------
+ * If not already enabled, turn ON all cooling devices
+ * associated with this active threshold.
+ */
+ if (active->temperature > maxtemp)
+ tz->state.active_index = i;
+ maxtemp = active->temperature;
+ if (active->flags.enabled)
+ continue;
+ for (j = 0; j < active->devices.count; j++) {
+ result =
+ acpi_bus_set_power(active->devices.
+ handles[j],
+ ACPI_STATE_D0);
+ if (result) {
+ printk(KERN_WARNING PREFIX
+ "Unable to turn cooling device [%p] 'on'\n",
+ active->devices.
+ handles[j]);
+ continue;
+ }
+ active->flags.enabled = 1;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Cooling device [%p] now 'on'\n",
+ active->devices.handles[j]));
+ }
+ continue;
+ }
+ if (!active->flags.enabled)
+ continue;
+ /*
+ * Below Threshold?
+ * ----------------
+ * Turn OFF all cooling devices associated with this
+ * threshold.
+ */
+ for (j = 0; j < active->devices.count; j++) {
+ result = acpi_bus_set_power(active->devices.handles[j],
+ ACPI_STATE_D3);
+ if (result) {
+ printk(KERN_WARNING PREFIX
+ "Unable to turn cooling device [%p] 'off'\n",
+ active->devices.handles[j]);
+ continue;
+ }
+ active->flags.enabled = 0;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Cooling device [%p] now 'off'\n",
+ active->devices.handles[j]));
+ }
+ }
+}
+
+static void acpi_thermal_check(void *context);
+
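+/*
+ * Timer callback: it runs in softirq context, so the actual zone
+ * evaluation is deferred to acpi_os_execute(). The zombie flag, set
+ * during removal, keeps a dying zone from queueing new work.
+ */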
+static void acpi_thermal_run(unsigned long data)
+{
+ struct acpi_thermal *tz = (struct acpi_thermal *)data;
+ if (!tz->zombie)
+ acpi_os_execute(OSL_GPE_HANDLER, acpi_thermal_check, (void *)data);
+}
+
+static void acpi_thermal_active_off(void *data)
+{
+ int result = 0;
+ struct acpi_thermal *tz = data;
+ int i = 0;
+ int j = 0;
+ struct acpi_thermal_active *active = NULL;
+
+ if (!tz) {
+ printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
+ return;
+ }
+
+ result = acpi_thermal_get_temperature(tz);
+ if (result)
+ return;
+
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
+ active = &(tz->trips.active[i]);
+ if (!active || !active->flags.valid)
+ break;
+ if (tz->temperature >= active->temperature) {
+ /*
+ * If the zone temperature is at or above the active
+ * threshold, there is no need to turn off the
+ * active cooling device.
+ */
+ continue;
+ }
+ /*
+ * Below Threshold?
+ * ----------------
+ * Turn OFF all cooling devices associated with this
+ * threshold.
+ */
+ for (j = 0; j < active->devices.count; j++)
+ result = acpi_bus_set_power(active->devices.handles[j],
+ ACPI_STATE_D3);
+ }
+}
+
+static void acpi_thermal_check(void *data)
+{
+ int result = 0;
+ struct acpi_thermal *tz = data;
+ unsigned long sleep_time = 0;
+ unsigned long timeout_jiffies = 0;
+ int i = 0;
+ struct acpi_thermal_state state;
+
+
+ if (!tz) {
+ printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
+ return;
+ }
+
+ /* Check if someone else is already running */
+ if (!mutex_trylock(&tz->lock))
+ return;
+
+ state = tz->state;
+
+ result = acpi_thermal_get_temperature(tz);
+ if (result)
+ goto unlock;
+
+ if (!tz->tz_enabled)
+ goto unlock;
+
+ memset(&tz->state, 0, sizeof(tz->state));
+
+ /*
+ * Check Trip Points
+ * -----------------
+ * Compare the current temperature to the trip point values to see
+ * if we've entered one of the thermal policy states. Note that
+ * this function determines when a state is entered, but the
+ * individual policy decides when it is exited (e.g. hysteresis).
+ */
+ if (tz->trips.critical.flags.valid)
+ state.critical |=
+ (tz->temperature >= tz->trips.critical.temperature);
+ if (tz->trips.hot.flags.valid)
+ state.hot |= (tz->temperature >= tz->trips.hot.temperature);
+ if (tz->trips.passive.flags.valid)
+ state.passive |=
+ (tz->temperature >= tz->trips.passive.temperature);
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++)
+ if (tz->trips.active[i].flags.valid)
+ state.active |=
+ (tz->temperature >=
+ tz->trips.active[i].temperature);
+
+ /*
+ * Invoke Policy
+ * -------------
+ * Separated from the above check to allow individual policy to
+ * determine when to exit a given state.
+ */
+ if (state.critical)
+ acpi_thermal_critical(tz);
+ if (state.hot)
+ acpi_thermal_hot(tz);
+ if (state.passive)
+ acpi_thermal_passive(tz);
+ if (state.active)
+ acpi_thermal_active(tz);
+
+ /*
+ * Calculate State
+ * ---------------
+ * Again, separated from the above two to allow independent policy
+ * decisions.
+ */
+ tz->state.critical = tz->trips.critical.flags.enabled;
+ tz->state.hot = tz->trips.hot.flags.enabled;
+ tz->state.passive = tz->trips.passive.flags.enabled;
+ tz->state.active = 0;
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++)
+ tz->state.active |= tz->trips.active[i].flags.enabled;
+
+ /*
+ * Calculate Sleep Time
+ * --------------------
+ * If we're in the passive state, use _TSP's value. Otherwise
+ * use the default polling frequency (e.g. _TZP). If no polling
+ * frequency is specified then we'll wait forever (at least until
+ * a thermal event occurs). Note that _TSP and _TZP values are
+ * given in 1/10th of a second (we must convert to milliseconds).
+ */
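+ /*
+ * For example, a _TSP of 300 (30.0 seconds) gives
+ * sleep_time = 300 * 100 = 30000 ms, i.e. a timer expiring
+ * roughly 30 seconds from now.
+ */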
+ if (tz->state.passive) {
+ sleep_time = tz->trips.passive.tsp * 100;
+ timeout_jiffies = jiffies + (HZ * sleep_time) / 1000;
+ } else if (tz->polling_frequency > 0) {
+ sleep_time = tz->polling_frequency * 100;
+ timeout_jiffies = round_jiffies(jiffies + (HZ * sleep_time) / 1000);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "%s: temperature[%lu] sleep[%lu]\n",
+ tz->name, tz->temperature, sleep_time));
+
+ /*
+ * Schedule Next Poll
+ * ------------------
+ */
+ if (!sleep_time) {
+ if (timer_pending(&(tz->timer)))
+ del_timer(&(tz->timer));
+ } else {
+ if (timer_pending(&(tz->timer)))
+ mod_timer(&(tz->timer), timeout_jiffies);
+ else {
+ tz->timer.data = (unsigned long)tz;
+ tz->timer.function = acpi_thermal_run;
+ tz->timer.expires = timeout_jiffies;
+ add_timer(&(tz->timer));
+ }
+ }
+ unlock:
+ mutex_unlock(&tz->lock);
+}
+
+/* sys I/F for generic thermal sysfs support */
+#define KELVIN_TO_MILLICELSIUS(t) ((t) * 100 - 273200)
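+/*
+ * Temperatures are kept in tenths of a kelvin, so e.g. a raw value of
+ * 2982 maps to 2982 * 100 - 273200 = 25000 millidegrees Celsius (25.0 C).
+ */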
+
+static int thermal_get_temp(struct thermal_zone_device *thermal, char *buf)
+{
+ struct acpi_thermal *tz = thermal->devdata;
+ int result;
+
+ if (!tz)
+ return -EINVAL;
+
+ result = acpi_thermal_get_temperature(tz);
+ if (result)
+ return result;
+
+ return sprintf(buf, "%ld\n", KELVIN_TO_MILLICELSIUS(tz->temperature));
+}
+
+static const char enabled[] = "kernel";
+static const char disabled[] = "user";
+static int thermal_get_mode(struct thermal_zone_device *thermal,
+ char *buf)
+{
+ struct acpi_thermal *tz = thermal->devdata;
+
+ if (!tz)
+ return -EINVAL;
+
+ return sprintf(buf, "%s\n", tz->tz_enabled ?
+ enabled : disabled);
+}
+
+static int thermal_set_mode(struct thermal_zone_device *thermal,
+ const char *buf)
+{
+ struct acpi_thermal *tz = thermal->devdata;
+ int enable;
+
+ if (!tz)
+ return -EINVAL;
+
+ /*
+ * enable/disable thermal management from ACPI thermal driver
+ */
+ if (!strncmp(buf, enabled, sizeof enabled - 1))
+ enable = 1;
+ else if (!strncmp(buf, disabled, sizeof disabled - 1))
+ enable = 0;
+ else
+ return -EINVAL;
+
+ if (enable != tz->tz_enabled) {
+ tz->tz_enabled = enable;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "%s ACPI thermal control\n",
+ tz->tz_enabled ? enabled : disabled));
+ acpi_thermal_check(tz);
+ }
+ return 0;
+}
+
+static int thermal_get_trip_type(struct thermal_zone_device *thermal,
+ int trip, char *buf)
+{
+ struct acpi_thermal *tz = thermal->devdata;
+ int i;
+
+ if (!tz || trip < 0)
+ return -EINVAL;
+
+ if (tz->trips.critical.flags.valid) {
+ if (!trip)
+ return sprintf(buf, "critical\n");
+ trip--;
+ }
+
+ if (tz->trips.hot.flags.valid) {
+ if (!trip)
+ return sprintf(buf, "hot\n");
+ trip--;
+ }
+
+ if (tz->trips.passive.flags.valid) {
+ if (!trip)
+ return sprintf(buf, "passive\n");
+ trip--;
+ }
+
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE &&
+ tz->trips.active[i].flags.valid; i++) {
+ if (!trip)
+ return sprintf(buf, "active%d\n", i);
+ trip--;
+ }
+
+ return -EINVAL;
+}
+
+static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
+ int trip, char *buf)
+{
+ struct acpi_thermal *tz = thermal->devdata;
+ int i;
+
+ if (!tz || trip < 0)
+ return -EINVAL;
+
+ if (tz->trips.critical.flags.valid) {
+ if (!trip)
+ return sprintf(buf, "%ld\n", KELVIN_TO_MILLICELSIUS(
+ tz->trips.critical.temperature));
+ trip--;
+ }
+
+ if (tz->trips.hot.flags.valid) {
+ if (!trip)
+ return sprintf(buf, "%ld\n", KELVIN_TO_MILLICELSIUS(
+ tz->trips.hot.temperature));
+ trip--;
+ }
+
+ if (tz->trips.passive.flags.valid) {
+ if (!trip)
+ return sprintf(buf, "%ld\n", KELVIN_TO_MILLICELSIUS(
+ tz->trips.passive.temperature));
+ trip--;
+ }
+
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE &&
+ tz->trips.active[i].flags.valid; i++) {
+ if (!trip)
+ return sprintf(buf, "%ld\n", KELVIN_TO_MILLICELSIUS(
+ tz->trips.active[i].temperature));
+ trip--;
+ }
+
+ return -EINVAL;
+}
+
+static int thermal_get_crit_temp(struct thermal_zone_device *thermal,
+ unsigned long *temperature) {
+ struct acpi_thermal *tz = thermal->devdata;
+
+ if (tz->trips.critical.flags.valid) {
+ *temperature = KELVIN_TO_MILLICELSIUS(
+ tz->trips.critical.temperature);
+ return 0;
+ } else
+ return -EINVAL;
+}
+
+typedef int (*cb)(struct thermal_zone_device *, int,
+ struct thermal_cooling_device *);
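+/*
+ * Walk the zone's trip points in the same order used by
+ * thermal_get_trip_type() (critical, hot, passive, active[0..n]) and
+ * invoke 'action' (bind or unbind) for every trip whose device list
+ * references cdev; devices listed only in _TZD get a trip index of -1.
+ */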
+static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
+ struct thermal_cooling_device *cdev,
+ cb action)
+{
+ struct acpi_device *device = cdev->devdata;
+ struct acpi_thermal *tz = thermal->devdata;
+ struct acpi_device *dev;
+ acpi_status status;
+ acpi_handle handle;
+ int i;
+ int j;
+ int trip = -1;
+ int result = 0;
+
+ if (tz->trips.critical.flags.valid)
+ trip++;
+
+ if (tz->trips.hot.flags.valid)
+ trip++;
+
+ if (tz->trips.passive.flags.valid) {
+ trip++;
+ for (i = 0; i < tz->trips.passive.devices.count;
+ i++) {
+ handle = tz->trips.passive.devices.handles[i];
+ status = acpi_bus_get_device(handle, &dev);
+ if (ACPI_SUCCESS(status) && (dev == device)) {
+ result = action(thermal, trip, cdev);
+ if (result)
+ goto failed;
+ }
+ }
+ }
+
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
+ if (!tz->trips.active[i].flags.valid)
+ break;
+ trip++;
+ for (j = 0;
+ j < tz->trips.active[i].devices.count;
+ j++) {
+ handle = tz->trips.active[i].devices.handles[j];
+ status = acpi_bus_get_device(handle, &dev);
+ if (ACPI_SUCCESS(status) && (dev == device)) {
+ result = action(thermal, trip, cdev);
+ if (result)
+ goto failed;
+ }
+ }
+ }
+
+ for (i = 0; i < tz->devices.count; i++) {
+ handle = tz->devices.handles[i];
+ status = acpi_bus_get_device(handle, &dev);
+ if (ACPI_SUCCESS(status) && (dev == device)) {
+ result = action(thermal, -1, cdev);
+ if (result)
+ goto failed;
+ }
+ }
+
+failed:
+ return result;
+}
+
+static int
+acpi_thermal_bind_cooling_device(struct thermal_zone_device *thermal,
+ struct thermal_cooling_device *cdev)
+{
+ return acpi_thermal_cooling_device_cb(thermal, cdev,
+ thermal_zone_bind_cooling_device);
+}
+
+static int
+acpi_thermal_unbind_cooling_device(struct thermal_zone_device *thermal,
+ struct thermal_cooling_device *cdev)
+{
+ return acpi_thermal_cooling_device_cb(thermal, cdev,
+ thermal_zone_unbind_cooling_device);
+}
+
+static struct thermal_zone_device_ops acpi_thermal_zone_ops = {
+ .bind = acpi_thermal_bind_cooling_device,
+ .unbind = acpi_thermal_unbind_cooling_device,
+ .get_temp = thermal_get_temp,
+ .get_mode = thermal_get_mode,
+ .set_mode = thermal_set_mode,
+ .get_trip_type = thermal_get_trip_type,
+ .get_trip_temp = thermal_get_trip_temp,
+ .get_crit_temp = thermal_get_crit_temp,
+};
+
+static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
+{
+ int trips = 0;
+ int result;
+ acpi_status status;
+ int i;
+
+ if (tz->trips.critical.flags.valid)
+ trips++;
+
+ if (tz->trips.hot.flags.valid)
+ trips++;
+
+ if (tz->trips.passive.flags.valid)
+ trips++;
+
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE &&
+ tz->trips.active[i].flags.valid; i++, trips++);
+ tz->thermal_zone = thermal_zone_device_register("acpitz",
+ trips, tz, &acpi_thermal_zone_ops);
+ if (IS_ERR(tz->thermal_zone))
+ return -ENODEV;
+
+ result = sysfs_create_link(&tz->device->dev.kobj,
+ &tz->thermal_zone->device.kobj, "thermal_zone");
+ if (result)
+ return result;
+
+ result = sysfs_create_link(&tz->thermal_zone->device.kobj,
+ &tz->device->dev.kobj, "device");
+ if (result)
+ return result;
+
+ status = acpi_attach_data(tz->device->handle,
+ acpi_bus_private_data_handler,
+ tz->thermal_zone);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX
+ "Error attaching device data\n");
+ return -ENODEV;
+ }
+
+ tz->tz_enabled = 1;
+
+ dev_info(&tz->device->dev, "registered as thermal_zone%d\n",
+ tz->thermal_zone->id);
+ return 0;
+}
+
+static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
+{
+ sysfs_remove_link(&tz->device->dev.kobj, "thermal_zone");
+ sysfs_remove_link(&tz->thermal_zone->device.kobj, "device");
+ thermal_zone_device_unregister(tz->thermal_zone);
+ tz->thermal_zone = NULL;
+ acpi_detach_data(tz->device->handle, acpi_bus_private_data_handler);
+}
+
+
+/* --------------------------------------------------------------------------
+ FS Interface (/proc)
+ -------------------------------------------------------------------------- */
+
+static struct proc_dir_entry *acpi_thermal_dir;
+
+static int acpi_thermal_state_seq_show(struct seq_file *seq, void *offset)
+{
+ struct acpi_thermal *tz = seq->private;
+
+
+ if (!tz)
+ goto end;
+
+ seq_puts(seq, "state: ");
+
+ if (!tz->state.critical && !tz->state.hot && !tz->state.passive
+ && !tz->state.active)
+ seq_puts(seq, "ok\n");
+ else {
+ if (tz->state.critical)
+ seq_puts(seq, "critical ");
+ if (tz->state.hot)
+ seq_puts(seq, "hot ");
+ if (tz->state.passive)
+ seq_puts(seq, "passive ");
+ if (tz->state.active)
+ seq_printf(seq, "active[%d]", tz->state.active_index);
+ seq_puts(seq, "\n");
+ }
+
+ end:
+ return 0;
+}
+
+static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_thermal_state_seq_show, PDE(inode)->data);
+}
+
+static int acpi_thermal_temp_seq_show(struct seq_file *seq, void *offset)
+{
+ int result = 0;
+ struct acpi_thermal *tz = seq->private;
+
+
+ if (!tz)
+ goto end;
+
+ result = acpi_thermal_get_temperature(tz);
+ if (result)
+ goto end;
+
+ seq_printf(seq, "temperature: %ld C\n",
+ KELVIN_TO_CELSIUS(tz->temperature));
+
+ end:
+ return 0;
+}
+
+static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_thermal_temp_seq_show, PDE(inode)->data);
+}
+
+static int acpi_thermal_trip_seq_show(struct seq_file *seq, void *offset)
+{
+ struct acpi_thermal *tz = seq->private;
+ struct acpi_device *device;
+ acpi_status status;
+
+ int i = 0;
+ int j = 0;
+
+
+ if (!tz)
+ goto end;
+
+ if (tz->trips.critical.flags.valid)
+ seq_printf(seq, "critical (S5): %ld C%s",
+ KELVIN_TO_CELSIUS(tz->trips.critical.temperature),
+ nocrt ? " <disabled>\n" : "\n");
+
+ if (tz->trips.hot.flags.valid)
+ seq_printf(seq, "hot (S4): %ld C%s",
+ KELVIN_TO_CELSIUS(tz->trips.hot.temperature),
+ nocrt ? " <disabled>\n" : "\n");
+
+ if (tz->trips.passive.flags.valid) {
+ seq_printf(seq,
+ "passive: %ld C: tc1=%lu tc2=%lu tsp=%lu devices=",
+ KELVIN_TO_CELSIUS(tz->trips.passive.temperature),
+ tz->trips.passive.tc1, tz->trips.passive.tc2,
+ tz->trips.passive.tsp);
+ for (j = 0; j < tz->trips.passive.devices.count; j++) {
+ status = acpi_bus_get_device(tz->trips.passive.devices.
+ handles[j], &device);
+ seq_printf(seq, "%4.4s ", status ? "" :
+ acpi_device_bid(device));
+ }
+ seq_puts(seq, "\n");
+ }
+
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
+ if (!(tz->trips.active[i].flags.valid))
+ break;
+ seq_printf(seq, "active[%d]: %ld C: devices=",
+ i,
+ KELVIN_TO_CELSIUS(tz->trips.active[i].temperature));
+ for (j = 0; j < tz->trips.active[i].devices.count; j++){
+ status = acpi_bus_get_device(tz->trips.active[i].
+ devices.handles[j],
+ &device);
+ seq_printf(seq, "%4.4s ", status ? "" :
+ acpi_device_bid(device));
+ }
+ seq_puts(seq, "\n");
+ }
+
+ end:
+ return 0;
+}
+
+static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_thermal_trip_seq_show, PDE(inode)->data);
+}
+
+static int acpi_thermal_cooling_seq_show(struct seq_file *seq, void *offset)
+{
+ struct acpi_thermal *tz = seq->private;
+
+
+ if (!tz)
+ goto end;
+
+ if (!tz->flags.cooling_mode)
+ seq_puts(seq, "<setting not supported>\n");
+ else
+ seq_puts(seq, "0 - Active; 1 - Passive\n");
+
+ end:
+ return 0;
+}
+
+static int acpi_thermal_cooling_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_thermal_cooling_seq_show,
+ PDE(inode)->data);
+}
+
+static ssize_t
+acpi_thermal_write_cooling_mode(struct file *file,
+ const char __user * buffer,
+ size_t count, loff_t * ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct acpi_thermal *tz = m->private;
+ int result = 0;
+ char mode_string[12] = { '\0' };
+
+
+ if (!tz || (count > sizeof(mode_string) - 1))
+ return -EINVAL;
+
+ if (!tz->flags.cooling_mode)
+ return -ENODEV;
+
+ if (copy_from_user(mode_string, buffer, count))
+ return -EFAULT;
+
+ mode_string[count] = '\0';
+
+ result = acpi_thermal_set_cooling_mode(tz,
+ simple_strtoul(mode_string, NULL,
+ 0));
+ if (result)
+ return result;
+
+ acpi_thermal_check(tz);
+
+ return count;
+}
+
+static int acpi_thermal_polling_seq_show(struct seq_file *seq, void *offset)
+{
+ struct acpi_thermal *tz = seq->private;
+
+
+ if (!tz)
+ goto end;
+
+ if (!tz->polling_frequency) {
+ seq_puts(seq, "<polling disabled>\n");
+ goto end;
+ }
+
+ seq_printf(seq, "polling frequency: %lu seconds\n",
+ (tz->polling_frequency / 10));
+
+ end:
+ return 0;
+}
+
+static int acpi_thermal_polling_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_thermal_polling_seq_show,
+ PDE(inode)->data);
+}
+
+static ssize_t
+acpi_thermal_write_polling(struct file *file,
+ const char __user * buffer,
+ size_t count, loff_t * ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct acpi_thermal *tz = m->private;
+ int result = 0;
+ char polling_string[12] = { '\0' };
+ int seconds = 0;
+
+
+ if (!tz || (count > sizeof(polling_string) - 1))
+ return -EINVAL;
+
+ if (copy_from_user(polling_string, buffer, count))
+ return -EFAULT;
+
+ polling_string[count] = '\0';
+
+ seconds = simple_strtoul(polling_string, NULL, 0);
+
+ result = acpi_thermal_set_polling(tz, seconds);
+ if (result)
+ return result;
+
+ acpi_thermal_check(tz);
+
+ return count;
+}
+
+static int acpi_thermal_add_fs(struct acpi_device *device)
+{
+ struct proc_dir_entry *entry = NULL;
+
+
+ if (!acpi_device_dir(device)) {
+ acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
+ acpi_thermal_dir);
+ if (!acpi_device_dir(device))
+ return -ENODEV;
+ acpi_device_dir(device)->owner = THIS_MODULE;
+ }
+
+ /* 'state' [R] */
+ entry = proc_create_data(ACPI_THERMAL_FILE_STATE,
+ S_IRUGO, acpi_device_dir(device),
+ &acpi_thermal_state_fops,
+ acpi_driver_data(device));
+ if (!entry)
+ return -ENODEV;
+
+ /* 'temperature' [R] */
+ entry = proc_create_data(ACPI_THERMAL_FILE_TEMPERATURE,
+ S_IRUGO, acpi_device_dir(device),
+ &acpi_thermal_temp_fops,
+ acpi_driver_data(device));
+ if (!entry)
+ return -ENODEV;
+
+ /* 'trip_points' [R] */
+ entry = proc_create_data(ACPI_THERMAL_FILE_TRIP_POINTS,
+ S_IRUGO,
+ acpi_device_dir(device),
+ &acpi_thermal_trip_fops,
+ acpi_driver_data(device));
+ if (!entry)
+ return -ENODEV;
+
+ /* 'cooling_mode' [R/W] */
+ entry = proc_create_data(ACPI_THERMAL_FILE_COOLING_MODE,
+ S_IFREG | S_IRUGO | S_IWUSR,
+ acpi_device_dir(device),
+ &acpi_thermal_cooling_fops,
+ acpi_driver_data(device));
+ if (!entry)
+ return -ENODEV;
+
+ /* 'polling_frequency' [R/W] */
+ entry = proc_create_data(ACPI_THERMAL_FILE_POLLING_FREQ,
+ S_IFREG | S_IRUGO | S_IWUSR,
+ acpi_device_dir(device),
+ &acpi_thermal_polling_fops,
+ acpi_driver_data(device));
+ if (!entry)
+ return -ENODEV;
+ return 0;
+}
+
+static int acpi_thermal_remove_fs(struct acpi_device *device)
+{
+
+ if (acpi_device_dir(device)) {
+ remove_proc_entry(ACPI_THERMAL_FILE_POLLING_FREQ,
+ acpi_device_dir(device));
+ remove_proc_entry(ACPI_THERMAL_FILE_COOLING_MODE,
+ acpi_device_dir(device));
+ remove_proc_entry(ACPI_THERMAL_FILE_TRIP_POINTS,
+ acpi_device_dir(device));
+ remove_proc_entry(ACPI_THERMAL_FILE_TEMPERATURE,
+ acpi_device_dir(device));
+ remove_proc_entry(ACPI_THERMAL_FILE_STATE,
+ acpi_device_dir(device));
+ remove_proc_entry(acpi_device_bid(device), acpi_thermal_dir);
+ acpi_device_dir(device) = NULL;
+ }
+
+ return 0;
+}
+
+/* --------------------------------------------------------------------------
+ Driver Interface
+ -------------------------------------------------------------------------- */
+
+static void acpi_thermal_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct acpi_thermal *tz = data;
+ struct acpi_device *device = NULL;
+
+
+ if (!tz)
+ return;
+
+ device = tz->device;
+
+ switch (event) {
+ case ACPI_THERMAL_NOTIFY_TEMPERATURE:
+ acpi_thermal_check(tz);
+ break;
+ case ACPI_THERMAL_NOTIFY_THRESHOLDS:
+ acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS);
+ acpi_thermal_check(tz);
+ acpi_bus_generate_proc_event(device, event, 0);
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+ dev_name(&device->dev), event, 0);
+ break;
+ case ACPI_THERMAL_NOTIFY_DEVICES:
+ acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES);
+ acpi_thermal_check(tz);
+ acpi_bus_generate_proc_event(device, event, 0);
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+ dev_name(&device->dev), event, 0);
+ break;
+ default:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Unsupported event [0x%x]\n", event));
+ break;
+ }
+
+ return;
+}
+
+static int acpi_thermal_get_info(struct acpi_thermal *tz)
+{
+ int result = 0;
+
+
+ if (!tz)
+ return -EINVAL;
+
+ /* Get temperature [_TMP] (required) */
+ result = acpi_thermal_get_temperature(tz);
+ if (result)
+ return result;
+
+ /* Get trip points [_CRT, _PSV, etc.] (required) */
+ result = acpi_thermal_get_trip_points(tz);
+ if (result)
+ return result;
+
+ /* Set the cooling mode [_SCP] to active cooling (default) */
+ result = acpi_thermal_set_cooling_mode(tz, ACPI_THERMAL_MODE_ACTIVE);
+ if (!result)
+ tz->flags.cooling_mode = 1;
+
+ /* Get default polling frequency [_TZP] (optional) */
+ if (tzp)
+ tz->polling_frequency = tzp;
+ else
+ acpi_thermal_get_polling_frequency(tz);
+
+ return 0;
+}
+
+static int acpi_thermal_add(struct acpi_device *device)
+{
+ int result = 0;
+ acpi_status status = AE_OK;
+ struct acpi_thermal *tz = NULL;
+
+
+ if (!device)
+ return -EINVAL;
+
+ tz = kzalloc(sizeof(struct acpi_thermal), GFP_KERNEL);
+ if (!tz)
+ return -ENOMEM;
+
+ tz->device = device;
+ strcpy(tz->name, device->pnp.bus_id);
+ strcpy(acpi_device_name(device), ACPI_THERMAL_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_THERMAL_CLASS);
+ device->driver_data = tz;
+ mutex_init(&tz->lock);
+
+
+ result = acpi_thermal_get_info(tz);
+ if (result)
+ goto free_memory;
+
+ result = acpi_thermal_register_thermal_zone(tz);
+ if (result)
+ goto free_memory;
+
+ result = acpi_thermal_add_fs(device);
+ if (result)
+ goto unregister_thermal_zone;
+
+ init_timer(&tz->timer);
+
+ acpi_thermal_active_off(tz);
+
+ acpi_thermal_check(tz);
+
+ status = acpi_install_notify_handler(device->handle,
+ ACPI_DEVICE_NOTIFY,
+ acpi_thermal_notify, tz);
+ if (ACPI_FAILURE(status)) {
+ result = -ENODEV;
+ goto remove_fs;
+ }
+
+ printk(KERN_INFO PREFIX "%s [%s] (%ld C)\n",
+ acpi_device_name(device), acpi_device_bid(device),
+ KELVIN_TO_CELSIUS(tz->temperature));
+ goto end;
+
+remove_fs:
+ acpi_thermal_remove_fs(device);
+unregister_thermal_zone:
+ thermal_zone_device_unregister(tz->thermal_zone);
+free_memory:
+ kfree(tz);
+end:
+ return result;
+}
+
+static int acpi_thermal_remove(struct acpi_device *device, int type)
+{
+ acpi_status status = AE_OK;
+ struct acpi_thermal *tz = NULL;
+
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ tz = acpi_driver_data(device);
+
+ /* Prevent the timer from queueing new deferred work */
+ tz->zombie = 1;
+ /* Wait for a timer handler running on another CPU to finish */
+ del_timer_sync(&(tz->timer));
+ /* Flush deferred work that is already queued */
+ acpi_os_wait_events_complete(NULL);
+ /* The deferred work may have re-armed the timer */
+ del_timer_sync(&(tz->timer));
+
+ status = acpi_remove_notify_handler(device->handle,
+ ACPI_DEVICE_NOTIFY,
+ acpi_thermal_notify);
+
+ /* Terminate policy */
+ if (tz->trips.passive.flags.valid && tz->trips.passive.flags.enabled) {
+ tz->trips.passive.flags.enabled = 0;
+ acpi_thermal_passive(tz);
+ }
+ if (tz->trips.active[0].flags.valid
+ && tz->trips.active[0].flags.enabled) {
+ tz->trips.active[0].flags.enabled = 0;
+ acpi_thermal_active(tz);
+ }
+
+ acpi_thermal_remove_fs(device);
+ acpi_thermal_unregister_thermal_zone(tz);
+ mutex_destroy(&tz->lock);
+ kfree(tz);
+ return 0;
+}
+
+static int acpi_thermal_resume(struct acpi_device *device)
+{
+ struct acpi_thermal *tz = NULL;
+ int i, j, power_state, result;
+
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ tz = acpi_driver_data(device);
+
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
+ if (!tz->trips.active[i].flags.valid)
+ break;
+ tz->trips.active[i].flags.enabled = 1;
+ for (j = 0; j < tz->trips.active[i].devices.count; j++) {
+ result = acpi_bus_get_power(tz->trips.active[i].devices.
+ handles[j], &power_state);
+ if (result || (power_state != ACPI_STATE_D0)) {
+ tz->trips.active[i].flags.enabled = 0;
+ break;
+ }
+ }
+ tz->state.active |= tz->trips.active[i].flags.enabled;
+ }
+
+ acpi_thermal_check(tz);
+
+ return 0;
+}
+
+static int thermal_act(const struct dmi_system_id *d) {
+
+ if (act == 0) {
+ printk(KERN_NOTICE "ACPI: %s detected: "
+ "disabling all active thermal trip points\n", d->ident);
+ act = -1;
+ }
+ return 0;
+}
+static int thermal_nocrt(const struct dmi_system_id *d) {
+
+ printk(KERN_NOTICE "ACPI: %s detected: "
+ "disabling all critical thermal trip point actions.\n", d->ident);
+ nocrt = 1;
+ return 0;
+}
+static int thermal_tzp(const struct dmi_system_id *d) {
+
+ if (tzp == 0) {
+ printk(KERN_NOTICE "ACPI: %s detected: "
+ "enabling thermal zone polling\n", d->ident);
+ tzp = 300; /* 300 dS = 30 Seconds */
+ }
+ return 0;
+}
+static int thermal_psv(const struct dmi_system_id *d) {
+
+ if (psv == 0) {
+ printk(KERN_NOTICE "ACPI: %s detected: "
+ "disabling all passive thermal trip points\n", d->ident);
+ psv = -1;
+ }
+ return 0;
+}
+
+static struct dmi_system_id thermal_dmi_table[] __initdata = {
+ /*
+ * Award BIOS on this AOpen makes thermal control almost worthless.
+ * http://bugzilla.kernel.org/show_bug.cgi?id=8842
+ */
+ {
+ .callback = thermal_act,
+ .ident = "AOpen i915GMm-HFS",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
+ },
+ },
+ {
+ .callback = thermal_psv,
+ .ident = "AOpen i915GMm-HFS",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
+ },
+ },
+ {
+ .callback = thermal_tzp,
+ .ident = "AOpen i915GMm-HFS",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
+ },
+ },
+ {
+ .callback = thermal_nocrt,
+ .ident = "Gigabyte GA-7ZX",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+ DMI_MATCH(DMI_BOARD_NAME, "7ZX"),
+ },
+ },
+ {}
+};
+
+static int __init acpi_thermal_init(void)
+{
+ int result = 0;
+
+ dmi_check_system(thermal_dmi_table);
+
+ if (off) {
+ printk(KERN_NOTICE "ACPI: thermal control disabled\n");
+ return -ENODEV;
+ }
+ acpi_thermal_dir = proc_mkdir(ACPI_THERMAL_CLASS, acpi_root_dir);
+ if (!acpi_thermal_dir)
+ return -ENODEV;
+ acpi_thermal_dir->owner = THIS_MODULE;
+
+ result = acpi_bus_register_driver(&acpi_thermal_driver);
+ if (result < 0) {
+ remove_proc_entry(ACPI_THERMAL_CLASS, acpi_root_dir);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void __exit acpi_thermal_exit(void)
+{
+
+ acpi_bus_unregister_driver(&acpi_thermal_driver);
+
+ remove_proc_entry(ACPI_THERMAL_CLASS, acpi_root_dir);
+
+ return;
+}
+
+module_init(acpi_thermal_init);
+module_exit(acpi_thermal_exit);
diff --git a/drivers/acpi/toshiba_acpi.c b/drivers/acpi/toshiba_acpi.c
new file mode 100644
index 0000000..40e60fc
--- /dev/null
+++ b/drivers/acpi/toshiba_acpi.c
@@ -0,0 +1,863 @@
+/*
+ * toshiba_acpi.c - Toshiba Laptop ACPI Extras
+ *
+ *
+ * Copyright (C) 2002-2004 John Belmonte
+ * Copyright (C) 2008 Philip Langdale
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *
+ * The development page for this driver is located at
+ * http://memebeam.org/toys/ToshibaAcpiDriver.
+ *
+ * Credits:
+ * Jonathan A. Buzzard - Toshiba HCI info, and critical tips on reverse
+ * engineering the Windows drivers
+ * Yasushi Nagato - changes for linux kernel 2.4 -> 2.5
+ * Rob Miller - TV out and hotkeys help
+ *
+ *
+ * TODO
+ *
+ */
+
+#define TOSHIBA_ACPI_VERSION "0.19"
+#define PROC_INTERFACE_VERSION 1
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/backlight.h>
+#include <linux/platform_device.h>
+#include <linux/rfkill.h>
+#include <linux/input-polldev.h>
+
+#include <asm/uaccess.h>
+
+#include <acpi/acpi_drivers.h>
+
+MODULE_AUTHOR("John Belmonte");
+MODULE_DESCRIPTION("Toshiba Laptop ACPI Extras Driver");
+MODULE_LICENSE("GPL");
+
+#define MY_LOGPREFIX "toshiba_acpi: "
+#define MY_ERR KERN_ERR MY_LOGPREFIX
+#define MY_NOTICE KERN_NOTICE MY_LOGPREFIX
+#define MY_INFO KERN_INFO MY_LOGPREFIX
+
+/* Toshiba ACPI method paths */
+#define METHOD_LCD_BRIGHTNESS "\\_SB_.PCI0.VGA_.LCD_._BCM"
+#define METHOD_HCI_1 "\\_SB_.VALD.GHCI"
+#define METHOD_HCI_2 "\\_SB_.VALZ.GHCI"
+#define METHOD_VIDEO_OUT "\\_SB_.VALX.DSSX"
+
+/* Toshiba HCI interface definitions
+ *
+ * HCI is Toshiba's "Hardware Control Interface" which is supposed to
+ * be uniform across all their models. Ideally we would just call
+ * dedicated ACPI methods instead of using this primitive interface.
+ * However the ACPI methods seem to be incomplete in some areas (for
+ * example they allow setting, but not reading, the LCD brightness value),
+ * so this is still useful.
+ */
+
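+/* As used by the wrappers below, an HCI call is a fixed package of
+ * HCI_WORDS integers: word 0 holds the operation (HCI_SET or HCI_GET),
+ * word 1 the register, and words 2-3 the arguments; on return, word 0
+ * holds the HCI result code and words 2-3 any returned values.
+ */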
+#define HCI_WORDS 6
+
+/* operations */
+#define HCI_SET 0xff00
+#define HCI_GET 0xfe00
+
+/* return codes */
+#define HCI_SUCCESS 0x0000
+#define HCI_FAILURE 0x1000
+#define HCI_NOT_SUPPORTED 0x8000
+#define HCI_EMPTY 0x8c00
+
+/* registers */
+#define HCI_FAN 0x0004
+#define HCI_SYSTEM_EVENT 0x0016
+#define HCI_VIDEO_OUT 0x001c
+#define HCI_HOTKEY_EVENT 0x001e
+#define HCI_LCD_BRIGHTNESS 0x002a
+#define HCI_WIRELESS 0x0056
+
+/* field definitions */
+#define HCI_LCD_BRIGHTNESS_BITS 3
+#define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS)
+#define HCI_LCD_BRIGHTNESS_LEVELS (1 << HCI_LCD_BRIGHTNESS_BITS)
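+/* The level is stored shifted into the upper bits of a 16-bit field:
+ * e.g. level 5 is written as 5 << HCI_LCD_BRIGHTNESS_SHIFT (0xa000);
+ * see set_lcd() and get_lcd() below.
+ */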
+#define HCI_VIDEO_OUT_LCD 0x1
+#define HCI_VIDEO_OUT_CRT 0x2
+#define HCI_VIDEO_OUT_TV 0x4
+#define HCI_WIRELESS_KILL_SWITCH 0x01
+#define HCI_WIRELESS_BT_PRESENT 0x0f
+#define HCI_WIRELESS_BT_ATTACH 0x40
+#define HCI_WIRELESS_BT_POWER 0x80
+
+static const struct acpi_device_id toshiba_device_ids[] = {
+ {"TOS6200", 0},
+ {"TOS6208", 0},
+ {"TOS1900", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, toshiba_device_ids);
+
+/* utility
+ */
+
+static __inline__ void _set_bit(u32 * word, u32 mask, int value)
+{
+ *word = (*word & ~mask) | (mask * value);
+}
+
+/* acpi interface wrappers
+ */
+
+static int is_valid_acpi_path(const char *methodName)
+{
+ acpi_handle handle;
+ acpi_status status;
+
+ status = acpi_get_handle(NULL, (char *)methodName, &handle);
+ return !ACPI_FAILURE(status);
+}
+
+static int write_acpi_int(const char *methodName, int val)
+{
+ struct acpi_object_list params;
+ union acpi_object in_objs[1];
+ acpi_status status;
+
+ params.count = ARRAY_SIZE(in_objs);
+ params.pointer = in_objs;
+ in_objs[0].type = ACPI_TYPE_INTEGER;
+ in_objs[0].integer.value = val;
+
+ status = acpi_evaluate_object(NULL, (char *)methodName, &params, NULL);
+ return (status == AE_OK);
+}
+
+#if 0
+static int read_acpi_int(const char *methodName, int *pVal)
+{
+ struct acpi_buffer results;
+ union acpi_object out_objs[1];
+ acpi_status status;
+
+ results.length = sizeof(out_objs);
+ results.pointer = out_objs;
+
+ status = acpi_evaluate_object(0, (char *)methodName, 0, &results);
+ *pVal = out_objs[0].integer.value;
+
+ return (status == AE_OK) && (out_objs[0].type == ACPI_TYPE_INTEGER);
+}
+#endif
+
+static const char *method_hci /*= 0*/ ;
+
+/* Perform a raw HCI call. Here we don't care about input or output buffer
+ * format.
+ */
+static acpi_status hci_raw(const u32 in[HCI_WORDS], u32 out[HCI_WORDS])
+{
+ struct acpi_object_list params;
+ union acpi_object in_objs[HCI_WORDS];
+ struct acpi_buffer results;
+ union acpi_object out_objs[HCI_WORDS + 1];
+ acpi_status status;
+ int i;
+
+ params.count = HCI_WORDS;
+ params.pointer = in_objs;
+ for (i = 0; i < HCI_WORDS; ++i) {
+ in_objs[i].type = ACPI_TYPE_INTEGER;
+ in_objs[i].integer.value = in[i];
+ }
+
+ results.length = sizeof(out_objs);
+ results.pointer = out_objs;
+
+ status = acpi_evaluate_object(NULL, (char *)method_hci, &params,
+ &results);
+ if ((status == AE_OK) && (out_objs->package.count <= HCI_WORDS)) {
+ for (i = 0; i < out_objs->package.count; ++i) {
+ out[i] = out_objs->package.elements[i].integer.value;
+ }
+ }
+
+ return status;
+}
+
+/* common HCI tasks (get or set one or two values)
+ *
+ * In addition to the ACPI status, the HCI system returns a result which
+ * may be useful (such as "not supported").
+ */
+
+static acpi_status hci_write1(u32 reg, u32 in1, u32 * result)
+{
+ u32 in[HCI_WORDS] = { HCI_SET, reg, in1, 0, 0, 0 };
+ u32 out[HCI_WORDS];
+ acpi_status status = hci_raw(in, out);
+ *result = (status == AE_OK) ? out[0] : HCI_FAILURE;
+ return status;
+}
+
+static acpi_status hci_read1(u32 reg, u32 * out1, u32 * result)
+{
+ u32 in[HCI_WORDS] = { HCI_GET, reg, 0, 0, 0, 0 };
+ u32 out[HCI_WORDS];
+ acpi_status status = hci_raw(in, out);
+ *out1 = out[2];
+ *result = (status == AE_OK) ? out[0] : HCI_FAILURE;
+ return status;
+}
+
+static acpi_status hci_write2(u32 reg, u32 in1, u32 in2, u32 *result)
+{
+ u32 in[HCI_WORDS] = { HCI_SET, reg, in1, in2, 0, 0 };
+ u32 out[HCI_WORDS];
+ acpi_status status = hci_raw(in, out);
+ *result = (status == AE_OK) ? out[0] : HCI_FAILURE;
+ return status;
+}
+
+static acpi_status hci_read2(u32 reg, u32 *out1, u32 *out2, u32 *result)
+{
+ u32 in[HCI_WORDS] = { HCI_GET, reg, *out1, *out2, 0, 0 };
+ u32 out[HCI_WORDS];
+ acpi_status status = hci_raw(in, out);
+ *out1 = out[2];
+ *out2 = out[3];
+ *result = (status == AE_OK) ? out[0] : HCI_FAILURE;
+ return status;
+}
+
+struct toshiba_acpi_dev {
+ struct platform_device *p_dev;
+ struct rfkill *rfk_dev;
+ struct input_polled_dev *poll_dev;
+
+ const char *bt_name;
+ const char *rfk_name;
+
+ bool last_rfk_state;
+
+ struct mutex mutex;
+};
+
+static struct toshiba_acpi_dev toshiba_acpi = {
+ .bt_name = "Toshiba Bluetooth",
+ .rfk_name = "Toshiba RFKill Switch",
+ .last_rfk_state = false,
+};
+
+/* Bluetooth rfkill handlers */
+
+static u32 hci_get_bt_present(bool *present)
+{
+ u32 hci_result;
+ u32 value, value2;
+
+ value = 0;
+ value2 = 0;
+ hci_read2(HCI_WIRELESS, &value, &value2, &hci_result);
+ if (hci_result == HCI_SUCCESS)
+ *present = (value & HCI_WIRELESS_BT_PRESENT) ? true : false;
+
+ return hci_result;
+}
+
+static u32 hci_get_bt_on(bool *on)
+{
+ u32 hci_result;
+ u32 value, value2;
+
+ value = 0;
+ value2 = 0x0001;
+ hci_read2(HCI_WIRELESS, &value, &value2, &hci_result);
+ if (hci_result == HCI_SUCCESS)
+ *on = (value & HCI_WIRELESS_BT_POWER) &&
+ (value & HCI_WIRELESS_BT_ATTACH);
+
+ return hci_result;
+}
+
+static u32 hci_get_radio_state(bool *radio_state)
+{
+ u32 hci_result;
+ u32 value, value2;
+
+ value = 0;
+ value2 = 0x0001;
+ hci_read2(HCI_WIRELESS, &value, &value2, &hci_result);
+
+ *radio_state = value & HCI_WIRELESS_KILL_SWITCH;
+ return hci_result;
+}
+
+static int bt_rfkill_toggle_radio(void *data, enum rfkill_state state)
+{
+ u32 result1, result2;
+ u32 value;
+ bool radio_state;
+ struct toshiba_acpi_dev *dev = data;
+
+ value = (state == RFKILL_STATE_UNBLOCKED);
+
+ if (hci_get_radio_state(&radio_state) != HCI_SUCCESS)
+ return -EFAULT;
+
+ switch (state) {
+ case RFKILL_STATE_UNBLOCKED:
+ if (!radio_state)
+ return -EPERM;
+ break;
+ case RFKILL_STATE_SOFT_BLOCKED:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev->mutex);
+ hci_write2(HCI_WIRELESS, value, HCI_WIRELESS_BT_POWER, &result1);
+ hci_write2(HCI_WIRELESS, value, HCI_WIRELESS_BT_ATTACH, &result2);
+ mutex_unlock(&dev->mutex);
+
+ if (result1 != HCI_SUCCESS || result2 != HCI_SUCCESS)
+ return -EFAULT;
+
+ return 0;
+}
+
+static void bt_poll_rfkill(struct input_polled_dev *poll_dev)
+{
+ bool state_changed;
+ bool new_rfk_state;
+ bool value;
+ u32 hci_result;
+ struct toshiba_acpi_dev *dev = poll_dev->private;
+
+ hci_result = hci_get_radio_state(&value);
+ if (hci_result != HCI_SUCCESS)
+ return; /* Can't do anything useful */
+
+ new_rfk_state = value;
+
+ mutex_lock(&dev->mutex);
+ state_changed = new_rfk_state != dev->last_rfk_state;
+ dev->last_rfk_state = new_rfk_state;
+ mutex_unlock(&dev->mutex);
+
+ if (unlikely(state_changed)) {
+ rfkill_force_state(dev->rfk_dev,
+ new_rfk_state ?
+ RFKILL_STATE_SOFT_BLOCKED :
+ RFKILL_STATE_HARD_BLOCKED);
+ input_report_switch(poll_dev->input, SW_RFKILL_ALL,
+ new_rfk_state);
+ input_sync(poll_dev->input);
+ }
+}
+
+static struct proc_dir_entry *toshiba_proc_dir /*= 0*/ ;
+static struct backlight_device *toshiba_backlight_device;
+static int force_fan;
+static int last_key_event;
+static int key_event_valid;
+
+typedef struct _ProcItem {
+ const char *name;
+ char *(*read_func) (char *);
+ unsigned long (*write_func) (const char *, unsigned long);
+} ProcItem;
+
+/* proc file handlers
+ */
+
+static int
+dispatch_read(char *page, char **start, off_t off, int count, int *eof,
+ ProcItem * item)
+{
+ char *p = page;
+ int len;
+
+ if (off == 0)
+ p = item->read_func(p);
+
+ /*
+ * Standard read_proc windowing: the whole output is generated
+ * on the first call (off == 0) and the [off, off + count)
+ * slice of it is returned; *eof is set once the remaining
+ * data fits in the request. This assumes the full output
+ * fits in a single page.
+ */
+ len = (p - page);
+ if (len <= off + count)
+ *eof = 1;
+ *start = page + off;
+ len -= off;
+ if (len > count)
+ len = count;
+ if (len < 0)
+ len = 0;
+ return len;
+}
+
+static int
+dispatch_write(struct file *file, const char __user * buffer,
+ unsigned long count, ProcItem * item)
+{
+ int result;
+ char *tmp_buffer;
+
+ /* Arg buffer points to userspace memory, which can't be accessed
+ * directly. Since we're making a copy, zero-terminate the
+ * destination so that sscanf can be used on it safely.
+ */
+ tmp_buffer = kmalloc(count + 1, GFP_KERNEL);
+ if (!tmp_buffer)
+ return -ENOMEM;
+
+ if (copy_from_user(tmp_buffer, buffer, count)) {
+ result = -EFAULT;
+ } else {
+ tmp_buffer[count] = 0;
+ result = item->write_func(tmp_buffer, count);
+ }
+ kfree(tmp_buffer);
+ return result;
+}
+
+static int get_lcd(struct backlight_device *bd)
+{
+ u32 hci_result;
+ u32 value;
+
+ hci_read1(HCI_LCD_BRIGHTNESS, &value, &hci_result);
+ if (hci_result == HCI_SUCCESS) {
+ return (value >> HCI_LCD_BRIGHTNESS_SHIFT);
+ } else
+ return -EFAULT;
+}
+
+static char *read_lcd(char *p)
+{
+ int value = get_lcd(NULL);
+
+ if (value >= 0) {
+ p += sprintf(p, "brightness: %d\n", value);
+ p += sprintf(p, "brightness_levels: %d\n",
+ HCI_LCD_BRIGHTNESS_LEVELS);
+ } else {
+ printk(MY_ERR "Error reading LCD brightness\n");
+ }
+
+ return p;
+}
+
+static int set_lcd(int value)
+{
+ u32 hci_result;
+
+ value = value << HCI_LCD_BRIGHTNESS_SHIFT;
+ hci_write1(HCI_LCD_BRIGHTNESS, value, &hci_result);
+ if (hci_result != HCI_SUCCESS)
+ return -EFAULT;
+
+ return 0;
+}
+
+static int set_lcd_status(struct backlight_device *bd)
+{
+ return set_lcd(bd->props.brightness);
+}
+
+static unsigned long write_lcd(const char *buffer, unsigned long count)
+{
+ int value;
+ int ret;
+
+ if (sscanf(buffer, " brightness : %i", &value) == 1 &&
+ value >= 0 && value < HCI_LCD_BRIGHTNESS_LEVELS) {
+ ret = set_lcd(value);
+ if (ret == 0)
+ ret = count;
+ } else {
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static char *read_video(char *p)
+{
+ u32 hci_result;
+ u32 value;
+
+ hci_read1(HCI_VIDEO_OUT, &value, &hci_result);
+ if (hci_result == HCI_SUCCESS) {
+ int is_lcd = (value & HCI_VIDEO_OUT_LCD) ? 1 : 0;
+ int is_crt = (value & HCI_VIDEO_OUT_CRT) ? 1 : 0;
+ int is_tv = (value & HCI_VIDEO_OUT_TV) ? 1 : 0;
+ p += sprintf(p, "lcd_out: %d\n", is_lcd);
+ p += sprintf(p, "crt_out: %d\n", is_crt);
+ p += sprintf(p, "tv_out: %d\n", is_tv);
+ } else {
+ printk(MY_ERR "Error reading video out status\n");
+ }
+
+ return p;
+}
+
+static unsigned long write_video(const char *buffer, unsigned long count)
+{
+ int value;
+ int remain = count;
+ int lcd_out = -1;
+ int crt_out = -1;
+ int tv_out = -1;
+ u32 hci_result;
+ u32 video_out;
+
+ /* scan expression. Multiple expressions may be delimited with ;
+ *
+ * NOTE: to keep scanning simple, invalid fields are ignored
+ */
+ while (remain) {
+ if (sscanf(buffer, " lcd_out : %i", &value) == 1)
+ lcd_out = value & 1;
+ else if (sscanf(buffer, " crt_out : %i", &value) == 1)
+ crt_out = value & 1;
+ else if (sscanf(buffer, " tv_out : %i", &value) == 1)
+ tv_out = value & 1;
+ /* advance to one character past the next ; */
+ do {
+ ++buffer;
+ --remain;
+ }
+ while (remain && *(buffer - 1) != ';');
+ }
+
+ hci_read1(HCI_VIDEO_OUT, &video_out, &hci_result);
+ if (hci_result == HCI_SUCCESS) {
+ unsigned int new_video_out = video_out;
+ if (lcd_out != -1)
+ _set_bit(&new_video_out, HCI_VIDEO_OUT_LCD, lcd_out);
+ if (crt_out != -1)
+ _set_bit(&new_video_out, HCI_VIDEO_OUT_CRT, crt_out);
+ if (tv_out != -1)
+ _set_bit(&new_video_out, HCI_VIDEO_OUT_TV, tv_out);
+ /* To avoid unnecessary video disruption, only write the new
+ * video setting if something changed. */
+ if (new_video_out != video_out)
+ write_acpi_int(METHOD_VIDEO_OUT, new_video_out);
+ } else {
+ return -EFAULT;
+ }
+
+ return count;
+}
+
+static char *read_fan(char *p)
+{
+ u32 hci_result;
+ u32 value;
+
+ hci_read1(HCI_FAN, &value, &hci_result);
+ if (hci_result == HCI_SUCCESS) {
+ p += sprintf(p, "running: %d\n", (value > 0));
+ p += sprintf(p, "force_on: %d\n", force_fan);
+ } else {
+ printk(MY_ERR "Error reading fan status\n");
+ }
+
+ return p;
+}
+
+static unsigned long write_fan(const char *buffer, unsigned long count)
+{
+ int value;
+ u32 hci_result;
+
+ if (sscanf(buffer, " force_on : %i", &value) == 1 &&
+ value >= 0 && value <= 1) {
+ hci_write1(HCI_FAN, value, &hci_result);
+ if (hci_result != HCI_SUCCESS)
+ return -EFAULT;
+ else
+ force_fan = value;
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static char *read_keys(char *p)
+{
+ u32 hci_result;
+ u32 value;
+
+ if (!key_event_valid) {
+ hci_read1(HCI_SYSTEM_EVENT, &value, &hci_result);
+ if (hci_result == HCI_SUCCESS) {
+ key_event_valid = 1;
+ last_key_event = value;
+ } else if (hci_result == HCI_EMPTY) {
+ /* better luck next time */
+ } else if (hci_result == HCI_NOT_SUPPORTED) {
+ /* This is a workaround for an unresolved issue on
+ * some machines where system events sporadically
+ * become disabled. */
+ hci_write1(HCI_SYSTEM_EVENT, 1, &hci_result);
+ printk(MY_NOTICE "Re-enabled hotkeys\n");
+ } else {
+ printk(MY_ERR "Error reading hotkey status\n");
+ goto end;
+ }
+ }
+
+ p += sprintf(p, "hotkey_ready: %d\n", key_event_valid);
+ p += sprintf(p, "hotkey: 0x%04x\n", last_key_event);
+
+ end:
+ return p;
+}
+
+static unsigned long write_keys(const char *buffer, unsigned long count)
+{
+ int value;
+
+ if (sscanf(buffer, " hotkey_ready : %i", &value) == 1 && value == 0) {
+ key_event_valid = 0;
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static char *read_version(char *p)
+{
+ p += sprintf(p, "driver: %s\n", TOSHIBA_ACPI_VERSION);
+ p += sprintf(p, "proc_interface: %d\n",
+ PROC_INTERFACE_VERSION);
+ return p;
+}
+
+/* proc and module init
+ */
+
+#define PROC_TOSHIBA "toshiba"
+
+static ProcItem proc_items[] = {
+ {"lcd", read_lcd, write_lcd},
+ {"video", read_video, write_video},
+ {"fan", read_fan, write_fan},
+ {"keys", read_keys, write_keys},
+ {"version", read_version, NULL},
+ {NULL}
+};
+
+static acpi_status __init add_device(void)
+{
+ struct proc_dir_entry *proc;
+ ProcItem *item;
+
+ for (item = proc_items; item->name; ++item) {
+ proc = create_proc_read_entry(item->name,
+ S_IFREG | S_IRUGO | S_IWUSR,
+ toshiba_proc_dir,
+ (read_proc_t *) dispatch_read,
+ item);
+ if (proc)
+ proc->owner = THIS_MODULE;
+ if (proc && item->write_func)
+ proc->write_proc = (write_proc_t *) dispatch_write;
+ }
+
+ return AE_OK;
+}
+
+static acpi_status remove_device(void)
+{
+ ProcItem *item;
+
+ for (item = proc_items; item->name; ++item)
+ remove_proc_entry(item->name, toshiba_proc_dir);
+ return AE_OK;
+}
+
+static struct backlight_ops toshiba_backlight_data = {
+ .get_brightness = get_lcd,
+ .update_status = set_lcd_status,
+};
+
+static void toshiba_acpi_exit(void)
+{
+ if (toshiba_acpi.poll_dev) {
+ input_unregister_polled_device(toshiba_acpi.poll_dev);
+ input_free_polled_device(toshiba_acpi.poll_dev);
+ }
+
+ if (toshiba_acpi.rfk_dev)
+ rfkill_unregister(toshiba_acpi.rfk_dev);
+
+ if (toshiba_backlight_device)
+ backlight_device_unregister(toshiba_backlight_device);
+
+ remove_device();
+
+ if (toshiba_proc_dir)
+ remove_proc_entry(PROC_TOSHIBA, acpi_root_dir);
+
+ platform_device_unregister(toshiba_acpi.p_dev);
+
+ return;
+}
+
+static int __init toshiba_acpi_init(void)
+{
+ acpi_status status = AE_OK;
+ u32 hci_result;
+ bool bt_present;
+ bool bt_on;
+ bool radio_on;
+ int ret = 0;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ /* simple device detection: look for HCI method */
+ if (is_valid_acpi_path(METHOD_HCI_1))
+ method_hci = METHOD_HCI_1;
+ else if (is_valid_acpi_path(METHOD_HCI_2))
+ method_hci = METHOD_HCI_2;
+ else
+ return -ENODEV;
+
+ printk(MY_INFO "Toshiba Laptop ACPI Extras version %s\n",
+ TOSHIBA_ACPI_VERSION);
+ printk(MY_INFO " HCI method: %s\n", method_hci);
+
+ mutex_init(&toshiba_acpi.mutex);
+
+ toshiba_acpi.p_dev = platform_device_register_simple("toshiba_acpi",
+ -1, NULL, 0);
+ if (IS_ERR(toshiba_acpi.p_dev)) {
+ ret = PTR_ERR(toshiba_acpi.p_dev);
+ printk(MY_ERR "unable to register platform device\n");
+ toshiba_acpi.p_dev = NULL;
+ toshiba_acpi_exit();
+ return ret;
+ }
+
+ force_fan = 0;
+ key_event_valid = 0;
+
+ /* enable event fifo */
+ hci_write1(HCI_SYSTEM_EVENT, 1, &hci_result);
+
+ toshiba_proc_dir = proc_mkdir(PROC_TOSHIBA, acpi_root_dir);
+ if (!toshiba_proc_dir) {
+ toshiba_acpi_exit();
+ return -ENODEV;
+ } else {
+ toshiba_proc_dir->owner = THIS_MODULE;
+ status = add_device();
+ if (ACPI_FAILURE(status)) {
+ toshiba_acpi_exit();
+ return -ENODEV;
+ }
+ }
+
+ toshiba_backlight_device = backlight_device_register("toshiba",
+ &toshiba_acpi.p_dev->dev,
+ NULL,
+ &toshiba_backlight_data);
+ if (IS_ERR(toshiba_backlight_device)) {
+ ret = PTR_ERR(toshiba_backlight_device);
+
+ printk(KERN_ERR "Could not register toshiba backlight device\n");
+ toshiba_backlight_device = NULL;
+ toshiba_acpi_exit();
+ return ret;
+ }
+ toshiba_backlight_device->props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
+
+ /* Register rfkill switch for Bluetooth */
+ if (hci_get_bt_present(&bt_present) == HCI_SUCCESS && bt_present) {
+ toshiba_acpi.rfk_dev = rfkill_allocate(&toshiba_acpi.p_dev->dev,
+ RFKILL_TYPE_BLUETOOTH);
+ if (!toshiba_acpi.rfk_dev) {
+ printk(MY_ERR "unable to allocate rfkill device\n");
+ toshiba_acpi_exit();
+ return -ENOMEM;
+ }
+
+ toshiba_acpi.rfk_dev->name = toshiba_acpi.bt_name;
+ toshiba_acpi.rfk_dev->toggle_radio = bt_rfkill_toggle_radio;
+ toshiba_acpi.rfk_dev->user_claim_unsupported = 1;
+ toshiba_acpi.rfk_dev->data = &toshiba_acpi;
+
+ if (hci_get_bt_on(&bt_on) == HCI_SUCCESS && bt_on) {
+ toshiba_acpi.rfk_dev->state = RFKILL_STATE_UNBLOCKED;
+ } else if (hci_get_radio_state(&radio_on) == HCI_SUCCESS &&
+ radio_on) {
+ toshiba_acpi.rfk_dev->state = RFKILL_STATE_SOFT_BLOCKED;
+ } else {
+ toshiba_acpi.rfk_dev->state = RFKILL_STATE_HARD_BLOCKED;
+ }
+
+ ret = rfkill_register(toshiba_acpi.rfk_dev);
+ if (ret) {
+ printk(MY_ERR "unable to register rfkill device\n");
+ toshiba_acpi_exit();
+			return ret;
+ }
+
+ /* Register input device for kill switch */
+ toshiba_acpi.poll_dev = input_allocate_polled_device();
+ if (!toshiba_acpi.poll_dev) {
+ printk(MY_ERR
+ "unable to allocate kill-switch input device\n");
+ toshiba_acpi_exit();
+ return -ENOMEM;
+ }
+ toshiba_acpi.poll_dev->private = &toshiba_acpi;
+ toshiba_acpi.poll_dev->poll = bt_poll_rfkill;
+ toshiba_acpi.poll_dev->poll_interval = 1000; /* msecs */
+
+ toshiba_acpi.poll_dev->input->name = toshiba_acpi.rfk_name;
+ toshiba_acpi.poll_dev->input->id.bustype = BUS_HOST;
+ /* Toshiba USB ID */
+ toshiba_acpi.poll_dev->input->id.vendor = 0x0930;
+ set_bit(EV_SW, toshiba_acpi.poll_dev->input->evbit);
+ set_bit(SW_RFKILL_ALL, toshiba_acpi.poll_dev->input->swbit);
+ input_report_switch(toshiba_acpi.poll_dev->input,
+ SW_RFKILL_ALL, TRUE);
+ input_sync(toshiba_acpi.poll_dev->input);
+
+ ret = input_register_polled_device(toshiba_acpi.poll_dev);
+ if (ret) {
+ printk(MY_ERR
+ "unable to register kill-switch input device\n");
+ toshiba_acpi_exit();
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+module_init(toshiba_acpi_init);
+module_exit(toshiba_acpi_exit);
diff --git a/drivers/acpi/utilities/Makefile b/drivers/acpi/utilities/Makefile
new file mode 100644
index 0000000..88eff14
--- /dev/null
+++ b/drivers/acpi/utilities/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for all Linux ACPI interpreter subdirectories
+#
+
+obj-y := utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \
+ utcopy.o utdelete.o utglobal.o utmath.o utobject.o \
+	utstate.o utmutex.o utcache.o utresrc.o
+
+EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/utilities/utalloc.c b/drivers/acpi/utilities/utalloc.c
new file mode 100644
index 0000000..241c535
--- /dev/null
+++ b/drivers/acpi/utilities/utalloc.c
@@ -0,0 +1,382 @@
+/******************************************************************************
+ *
+ * Module Name: utalloc - local memory allocation routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acdebug.h>
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utalloc")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_create_caches
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create all local caches
+ *
+ ******************************************************************************/
+acpi_status acpi_ut_create_caches(void)
+{
+ acpi_status status;
+
+ /* Object Caches, for frequently used objects */
+
+ status =
+ acpi_os_create_cache("Acpi-Namespace",
+ sizeof(struct acpi_namespace_node),
+ ACPI_MAX_NAMESPACE_CACHE_DEPTH,
+ &acpi_gbl_namespace_cache);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ status =
+ acpi_os_create_cache("Acpi-State", sizeof(union acpi_generic_state),
+ ACPI_MAX_STATE_CACHE_DEPTH,
+ &acpi_gbl_state_cache);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ status =
+ acpi_os_create_cache("Acpi-Parse",
+ sizeof(struct acpi_parse_obj_common),
+ ACPI_MAX_PARSE_CACHE_DEPTH,
+ &acpi_gbl_ps_node_cache);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ status =
+ acpi_os_create_cache("Acpi-ParseExt",
+ sizeof(struct acpi_parse_obj_named),
+ ACPI_MAX_EXTPARSE_CACHE_DEPTH,
+ &acpi_gbl_ps_node_ext_cache);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ status =
+ acpi_os_create_cache("Acpi-Operand",
+ sizeof(union acpi_operand_object),
+ ACPI_MAX_OBJECT_CACHE_DEPTH,
+ &acpi_gbl_operand_cache);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+
+ /* Memory allocation lists */
+
+ status = acpi_ut_create_list("Acpi-Global", 0, &acpi_gbl_global_list);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ status =
+ acpi_ut_create_list("Acpi-Namespace",
+ sizeof(struct acpi_namespace_node),
+ &acpi_gbl_ns_node_list);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+#endif
+
+ return (AE_OK);
+}
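+
+/*
+ * Usage sketch (illustrative; the exact init/shutdown call sites are
+ * assumed): the caches created above are global singletons, so they are
+ * built once when the subsystem starts and paired with
+ * acpi_ut_delete_caches() on the shutdown path:
+ *
+ *	acpi_status status;
+ *
+ *	status = acpi_ut_create_caches();
+ *	if (ACPI_FAILURE(status)) {
+ *		return_ACPI_STATUS(status);
+ *	}
+ *
+ *	...interpreter runs, objects cycle through the caches...
+ *
+ *	(void)acpi_ut_delete_caches();
+ */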
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_delete_caches
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Purge and delete all local caches
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_delete_caches(void)
+{
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+ char buffer[7];
+
+ if (acpi_gbl_display_final_mem_stats) {
+ ACPI_STRCPY(buffer, "MEMORY");
+ (void)acpi_db_display_statistics(buffer);
+ }
+#endif
+
+ (void)acpi_os_delete_cache(acpi_gbl_namespace_cache);
+ acpi_gbl_namespace_cache = NULL;
+
+ (void)acpi_os_delete_cache(acpi_gbl_state_cache);
+ acpi_gbl_state_cache = NULL;
+
+ (void)acpi_os_delete_cache(acpi_gbl_operand_cache);
+ acpi_gbl_operand_cache = NULL;
+
+ (void)acpi_os_delete_cache(acpi_gbl_ps_node_cache);
+ acpi_gbl_ps_node_cache = NULL;
+
+ (void)acpi_os_delete_cache(acpi_gbl_ps_node_ext_cache);
+ acpi_gbl_ps_node_ext_cache = NULL;
+
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+
+ /* Debug only - display leftover memory allocation, if any */
+
+ acpi_ut_dump_allocations(ACPI_UINT32_MAX, NULL);
+
+ /* Free memory lists */
+
+ ACPI_FREE(acpi_gbl_global_list);
+ acpi_gbl_global_list = NULL;
+
+ ACPI_FREE(acpi_gbl_ns_node_list);
+ acpi_gbl_ns_node_list = NULL;
+#endif
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_validate_buffer
+ *
+ * PARAMETERS: Buffer - Buffer descriptor to be validated
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Perform parameter validation checks on a struct acpi_buffer
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_validate_buffer(struct acpi_buffer * buffer)
+{
+
+ /* Obviously, the structure pointer must be valid */
+
+ if (!buffer) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /* Special semantics for the length */
+
+ if ((buffer->length == ACPI_NO_BUFFER) ||
+ (buffer->length == ACPI_ALLOCATE_BUFFER) ||
+ (buffer->length == ACPI_ALLOCATE_LOCAL_BUFFER)) {
+ return (AE_OK);
+ }
+
+	/* The length is valid, so the buffer pointer must be valid as well */
+
+ if (!buffer->pointer) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_initialize_buffer
+ *
+ * PARAMETERS: Buffer - Buffer to be validated
+ * required_length - Length needed
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Validate that the buffer is of the required length or
+ * allocate a new buffer. Returned buffer is always zeroed.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
+ acpi_size required_length)
+{
+ acpi_size input_buffer_length;
+
+ /* Parameter validation */
+
+ if (!buffer || !required_length) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /*
+ * Buffer->Length is used as both an input and output parameter. Get the
+ * input actual length and set the output required buffer length.
+ */
+ input_buffer_length = buffer->length;
+ buffer->length = required_length;
+
+ /*
+ * The input buffer length contains the actual buffer length, or the type
+ * of buffer to be allocated by this routine.
+ */
+ switch (input_buffer_length) {
+ case ACPI_NO_BUFFER:
+
+ /* Return the exception (and the required buffer length) */
+
+ return (AE_BUFFER_OVERFLOW);
+
+ case ACPI_ALLOCATE_BUFFER:
+
+ /* Allocate a new buffer */
+
+ buffer->pointer = acpi_os_allocate(required_length);
+ break;
+
+ case ACPI_ALLOCATE_LOCAL_BUFFER:
+
+ /* Allocate a new buffer with local interface to allow tracking */
+
+ buffer->pointer = ACPI_ALLOCATE(required_length);
+ break;
+
+ default:
+
+ /* Existing buffer: Validate the size of the buffer */
+
+ if (input_buffer_length < required_length) {
+ return (AE_BUFFER_OVERFLOW);
+ }
+ break;
+ }
+
+ /* Validate allocation from above or input buffer pointer */
+
+ if (!buffer->pointer) {
+ return (AE_NO_MEMORY);
+ }
+
+ /* Have a valid buffer, clear it */
+
+ ACPI_MEMSET(buffer->pointer, 0, required_length);
+ return (AE_OK);
+}
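+
+/*
+ * Caller-side sketch (illustrative; "handle" is an assumed acpi_handle and
+ * _BIF is just an example control method): the three special length values
+ * map onto three calling conventions. The common one in Linux drivers is
+ * ACPI_ALLOCATE_BUFFER, where the caller passes an empty descriptor, lets
+ * this routine allocate the result via acpi_os_allocate() (kmalloc), and
+ * frees it afterwards:
+ *
+ *	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ *	acpi_status status;
+ *
+ *	status = acpi_evaluate_object(handle, "_BIF", NULL, &buffer);
+ *	if (ACPI_SUCCESS(status)) {
+ *		union acpi_object *obj = buffer.pointer;
+ *
+ *		...use obj...
+ *		kfree(buffer.pointer);
+ *	}
+ *
+ * With ACPI_NO_BUFFER the first call returns AE_BUFFER_OVERFLOW together
+ * with the required length, and the caller retries with a buffer at least
+ * that large.
+ */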
+
+#ifdef NOT_USED_BY_LINUX
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_allocate
+ *
+ * PARAMETERS: Size - Size of the allocation
+ * Component - Component type of caller
+ * Module - Source file name of caller
+ * Line - Line number of caller
+ *
+ * RETURN: Address of the allocated memory on success, NULL on failure.
+ *
+ * DESCRIPTION: Subsystem equivalent of malloc.
+ *
+ ******************************************************************************/
+
+void *acpi_ut_allocate(acpi_size size,
+ u32 component, const char *module, u32 line)
+{
+ void *allocation;
+
+ ACPI_FUNCTION_TRACE_U32(ut_allocate, size);
+
+ /* Check for an inadvertent size of zero bytes */
+
+ if (!size) {
+ ACPI_WARNING((module, line,
+ "Attempt to allocate zero bytes, allocating 1 byte"));
+ size = 1;
+ }
+
+ allocation = acpi_os_allocate(size);
+ if (!allocation) {
+
+ /* Report allocation error */
+
+ ACPI_WARNING((module, line,
+ "Could not allocate size %X", (u32) size));
+
+ return_PTR(NULL);
+ }
+
+ return_PTR(allocation);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_allocate_zeroed
+ *
+ * PARAMETERS: Size - Size of the allocation
+ * Component - Component type of caller
+ * Module - Source file name of caller
+ * Line - Line number of caller
+ *
+ * RETURN: Address of the allocated memory on success, NULL on failure.
+ *
+ * DESCRIPTION: Subsystem equivalent of calloc. Allocate and zero memory.
+ *
+ ******************************************************************************/
+
+void *acpi_ut_allocate_zeroed(acpi_size size,
+ u32 component, const char *module, u32 line)
+{
+ void *allocation;
+
+ ACPI_FUNCTION_ENTRY();
+
+ allocation = acpi_ut_allocate(size, component, module, line);
+ if (allocation) {
+
+ /* Clear the memory block */
+
+ ACPI_MEMSET(allocation, 0, size);
+ }
+
+ return (allocation);
+}
+#endif
diff --git a/drivers/acpi/utilities/utcache.c b/drivers/acpi/utilities/utcache.c
new file mode 100644
index 0000000..245fa80
--- /dev/null
+++ b/drivers/acpi/utilities/utcache.c
@@ -0,0 +1,314 @@
+/******************************************************************************
+ *
+ * Module Name: utcache - local cache allocation routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utcache")
+#ifdef ACPI_USE_LOCAL_CACHE
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_create_cache
+ *
+ * PARAMETERS: cache_name - Ascii name for the cache
+ * object_size - Size of each cached object
+ * max_depth - Maximum depth of the cache (in objects)
+ * return_cache - Where the new cache object is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create a cache object
+ *
+ ******************************************************************************/
+acpi_status
+acpi_os_create_cache(char *cache_name,
+ u16 object_size,
+ u16 max_depth, struct acpi_memory_list ** return_cache)
+{
+ struct acpi_memory_list *cache;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (!cache_name || !return_cache || (object_size < 16)) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /* Create the cache object */
+
+ cache = acpi_os_allocate(sizeof(struct acpi_memory_list));
+ if (!cache) {
+ return (AE_NO_MEMORY);
+ }
+
+ /* Populate the cache object and return it */
+
+ ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list));
+ cache->link_offset = 8;
+ cache->list_name = cache_name;
+ cache->object_size = object_size;
+ cache->max_depth = max_depth;
+
+ *return_cache = cache;
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_purge_cache
+ *
+ * PARAMETERS: Cache - Handle to cache object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Free all objects within the requested cache.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_os_purge_cache(struct acpi_memory_list * cache)
+{
+ char *next;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (!cache) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /* Walk the list of objects in this cache */
+
+ while (cache->list_head) {
+
+ /* Delete and unlink one cached state object */
+
+ next = *(ACPI_CAST_INDIRECT_PTR(char,
+ &(((char *)cache->
+ list_head)[cache->
+ link_offset])));
+ ACPI_FREE(cache->list_head);
+
+ cache->list_head = next;
+ cache->current_depth--;
+ }
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_delete_cache
+ *
+ * PARAMETERS: Cache - Handle to cache object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Free all objects within the requested cache and delete the
+ * cache object.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_os_delete_cache(struct acpi_memory_list * cache)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* Purge all objects in the cache */
+
+ status = acpi_os_purge_cache(cache);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Now we can delete the cache object */
+
+ ACPI_FREE(cache);
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_release_object
+ *
+ * PARAMETERS: Cache - Handle to cache object
+ * Object - The object to be released
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Release an object to the specified cache. If cache is full,
+ * the object is deleted.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_os_release_object(struct acpi_memory_list * cache, void *object)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (!cache || !object) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /* If cache is full, just free this object */
+
+ if (cache->current_depth >= cache->max_depth) {
+ ACPI_FREE(object);
+ ACPI_MEM_TRACKING(cache->total_freed++);
+ }
+
+ /* Otherwise put this object back into the cache */
+
+ else {
+ status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Mark the object as cached */
+
+ ACPI_MEMSET(object, 0xCA, cache->object_size);
+ ACPI_SET_DESCRIPTOR_TYPE(object, ACPI_DESC_TYPE_CACHED);
+
+ /* Put the object at the head of the cache list */
+
+ *(ACPI_CAST_INDIRECT_PTR(char,
+ &(((char *)object)[cache->
+ link_offset]))) =
+ cache->list_head;
+ cache->list_head = object;
+ cache->current_depth++;
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
+ }
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_acquire_object
+ *
+ * PARAMETERS: Cache - Handle to cache object
+ *
+ * RETURN: The acquired object. NULL on error
+ *
+ * DESCRIPTION: Get an object from the specified cache. If cache is empty,
+ * the object is allocated.
+ *
+ ******************************************************************************/
+
+void *acpi_os_acquire_object(struct acpi_memory_list *cache)
+{
+ acpi_status status;
+ void *object;
+
+ ACPI_FUNCTION_NAME(os_acquire_object);
+
+ if (!cache) {
+ return (NULL);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
+ if (ACPI_FAILURE(status)) {
+ return (NULL);
+ }
+
+ ACPI_MEM_TRACKING(cache->requests++);
+
+ /* Check the cache first */
+
+ if (cache->list_head) {
+
+ /* There is an object available, use it */
+
+ object = cache->list_head;
+ cache->list_head = *(ACPI_CAST_INDIRECT_PTR(char,
+ &(((char *)
+ object)[cache->
+ link_offset])));
+
+ cache->current_depth--;
+
+ ACPI_MEM_TRACKING(cache->hits++);
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Object %p from %s cache\n", object,
+ cache->list_name));
+
+ status = acpi_ut_release_mutex(ACPI_MTX_CACHES);
+ if (ACPI_FAILURE(status)) {
+ return (NULL);
+ }
+
+ /* Clear (zero) the previously used Object */
+
+ ACPI_MEMSET(object, 0, cache->object_size);
+ } else {
+ /* The cache is empty, create a new object */
+
+ ACPI_MEM_TRACKING(cache->total_allocated++);
+
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+ if ((cache->total_allocated - cache->total_freed) >
+ cache->max_occupied) {
+ cache->max_occupied =
+ cache->total_allocated - cache->total_freed;
+ }
+#endif
+
+ /* Avoid deadlock with ACPI_ALLOCATE_ZEROED */
+
+ status = acpi_ut_release_mutex(ACPI_MTX_CACHES);
+ if (ACPI_FAILURE(status)) {
+ return (NULL);
+ }
+
+ object = ACPI_ALLOCATE_ZEROED(cache->object_size);
+ if (!object) {
+ return (NULL);
+ }
+ }
+
+ return (object);
+}
+#endif /* ACPI_USE_LOCAL_CACHE */
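+
+/*
+ * Round-trip sketch (illustrative; the state cache is used only as a
+ * familiar client of this interface): acquire hands back a zeroed object,
+ * release either re-queues it (poisoned with 0xCA) or frees it outright
+ * once the cache is already at max_depth:
+ *
+ *	union acpi_generic_state *state;
+ *
+ *	state = acpi_os_acquire_object(acpi_gbl_state_cache);
+ *	if (!state) {
+ *		return_ACPI_STATUS(AE_NO_MEMORY);
+ *	}
+ *
+ *	...use the state object...
+ *
+ *	(void)acpi_os_release_object(acpi_gbl_state_cache, state);
+ */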
diff --git a/drivers/acpi/utilities/utcopy.c b/drivers/acpi/utilities/utcopy.c
new file mode 100644
index 0000000..5b2f7c2
--- /dev/null
+++ b/drivers/acpi/utilities/utcopy.c
@@ -0,0 +1,969 @@
+/******************************************************************************
+ *
+ * Module Name: utcopy - Internal to external object translation utilities
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utcopy")
+
+/* Local prototypes */
+static acpi_status
+acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
+ union acpi_object *external_object,
+ u8 * data_space, acpi_size * buffer_space_used);
+
+static acpi_status
+acpi_ut_copy_ielement_to_ielement(u8 object_type,
+ union acpi_operand_object *source_object,
+ union acpi_generic_state *state,
+ void *context);
+
+static acpi_status
+acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object,
+ u8 * buffer, acpi_size * space_used);
+
+static acpi_status
+acpi_ut_copy_esimple_to_isimple(union acpi_object *user_obj,
+ union acpi_operand_object **return_obj);
+
+static acpi_status
+acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object,
+ union acpi_operand_object **internal_object);
+
+static acpi_status
+acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
+ union acpi_operand_object *dest_desc);
+
+static acpi_status
+acpi_ut_copy_ielement_to_eelement(u8 object_type,
+ union acpi_operand_object *source_object,
+ union acpi_generic_state *state,
+ void *context);
+
+static acpi_status
+acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
+ union acpi_operand_object *dest_obj,
+ struct acpi_walk_state *walk_state);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_copy_isimple_to_esimple
+ *
+ * PARAMETERS: internal_object - Source object to be copied
+ * external_object - Where to return the copied object
+ * data_space - Where object data is returned (such as
+ * buffer and string data)
+ * buffer_space_used - Length of data_space that was used
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function is called to copy a simple internal object to
+ * an external object.
+ *
+ * The data_space buffer is assumed to have sufficient space for
+ * the object.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
+ union acpi_object *external_object,
+ u8 * data_space, acpi_size * buffer_space_used)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(ut_copy_isimple_to_esimple);
+
+ *buffer_space_used = 0;
+
+ /*
+ * Check for NULL object case (could be an uninitialized
+ * package element)
+ */
+ if (!internal_object) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Always clear the external object */
+
+ ACPI_MEMSET(external_object, 0, sizeof(union acpi_object));
+
+ /*
+ * In general, the external object will be the same type as
+ * the internal object
+ */
+ external_object->type = ACPI_GET_OBJECT_TYPE(internal_object);
+
+ /* However, only a limited number of external types are supported */
+
+ switch (ACPI_GET_OBJECT_TYPE(internal_object)) {
+ case ACPI_TYPE_STRING:
+
+ external_object->string.pointer = (char *)data_space;
+ external_object->string.length = internal_object->string.length;
+ *buffer_space_used = ACPI_ROUND_UP_TO_NATIVE_WORD((acpi_size)
+ internal_object->
+ string.
+ length + 1);
+
+ ACPI_MEMCPY((void *)data_space,
+ (void *)internal_object->string.pointer,
+ (acpi_size) internal_object->string.length + 1);
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ external_object->buffer.pointer = data_space;
+ external_object->buffer.length = internal_object->buffer.length;
+ *buffer_space_used =
+ ACPI_ROUND_UP_TO_NATIVE_WORD(internal_object->string.
+ length);
+
+ ACPI_MEMCPY((void *)data_space,
+ (void *)internal_object->buffer.pointer,
+ internal_object->buffer.length);
+ break;
+
+ case ACPI_TYPE_INTEGER:
+
+ external_object->integer.value = internal_object->integer.value;
+ break;
+
+ case ACPI_TYPE_LOCAL_REFERENCE:
+
+ /* This is an object reference. */
+
+ switch (internal_object->reference.class) {
+ case ACPI_REFCLASS_NAME:
+
+ /*
+ * For namepath, return the object handle ("reference")
+ * We are referring to the namespace node
+ */
+ external_object->reference.handle =
+ internal_object->reference.node;
+ external_object->reference.actual_type =
+ acpi_ns_get_type(internal_object->reference.node);
+ break;
+
+ default:
+
+ /* All other reference types are unsupported */
+
+ return_ACPI_STATUS(AE_TYPE);
+ }
+ break;
+
+ case ACPI_TYPE_PROCESSOR:
+
+ external_object->processor.proc_id =
+ internal_object->processor.proc_id;
+ external_object->processor.pblk_address =
+ internal_object->processor.address;
+ external_object->processor.pblk_length =
+ internal_object->processor.length;
+ break;
+
+ case ACPI_TYPE_POWER:
+
+ external_object->power_resource.system_level =
+ internal_object->power_resource.system_level;
+
+ external_object->power_resource.resource_order =
+ internal_object->power_resource.resource_order;
+ break;
+
+ default:
+ /*
+ * There is no corresponding external object type
+ */
+ ACPI_ERROR((AE_INFO,
+ "Unsupported object type, cannot convert to external object: %s",
+ acpi_ut_get_type_name(ACPI_GET_OBJECT_TYPE
+ (internal_object))));
+
+ return_ACPI_STATUS(AE_SUPPORT);
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_copy_ielement_to_eelement
+ *
+ * PARAMETERS: acpi_pkg_callback
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Copy one package element to another package element
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_copy_ielement_to_eelement(u8 object_type,
+ union acpi_operand_object *source_object,
+ union acpi_generic_state *state,
+ void *context)
+{
+ acpi_status status = AE_OK;
+ struct acpi_pkg_info *info = (struct acpi_pkg_info *)context;
+ acpi_size object_space;
+ u32 this_index;
+ union acpi_object *target_object;
+
+ ACPI_FUNCTION_ENTRY();
+
+ this_index = state->pkg.index;
+ target_object = (union acpi_object *)
+ &((union acpi_object *)(state->pkg.dest_object))->package.
+ elements[this_index];
+
+ switch (object_type) {
+ case ACPI_COPY_TYPE_SIMPLE:
+
+ /*
+ * This is a simple or null object
+ */
+ status = acpi_ut_copy_isimple_to_esimple(source_object,
+ target_object,
+ info->free_space,
+ &object_space);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
+
+ case ACPI_COPY_TYPE_PACKAGE:
+
+ /*
+ * Build the package object
+ */
+ target_object->type = ACPI_TYPE_PACKAGE;
+ target_object->package.count = source_object->package.count;
+ target_object->package.elements =
+ ACPI_CAST_PTR(union acpi_object, info->free_space);
+
+ /*
+ * Pass the new package object back to the package walk routine
+ */
+ state->pkg.this_target_obj = target_object;
+
+ /*
+ * Save space for the array of objects (Package elements)
+ * update the buffer length counter
+ */
+ object_space = ACPI_ROUND_UP_TO_NATIVE_WORD((acpi_size)
+ target_object->
+ package.count *
+ sizeof(union
+ acpi_object));
+ break;
+
+ default:
+ return (AE_BAD_PARAMETER);
+ }
+
+ info->free_space += object_space;
+ info->length += object_space;
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_copy_ipackage_to_epackage
+ *
+ * PARAMETERS: internal_object - Pointer to the object we are returning
+ * Buffer - Where the object is returned
+ * space_used - Where the object length is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function is called to place a package object in a user
+ * buffer. A package object by definition contains other objects.
+ *
+ * The buffer is assumed to have sufficient space for the object.
+ * The caller must have verified the buffer length needed using the
+ * acpi_ut_get_object_size function before calling this function.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object,
+ u8 * buffer, acpi_size * space_used)
+{
+ union acpi_object *external_object;
+ acpi_status status;
+ struct acpi_pkg_info info;
+
+ ACPI_FUNCTION_TRACE(ut_copy_ipackage_to_epackage);
+
+ /*
+ * First package at head of the buffer
+ */
+ external_object = ACPI_CAST_PTR(union acpi_object, buffer);
+
+ /*
+ * Free space begins right after the first package
+ */
+ info.length = ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
+ info.free_space =
+ buffer + ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
+ info.object_space = 0;
+ info.num_packages = 1;
+
+ external_object->type = ACPI_GET_OBJECT_TYPE(internal_object);
+ external_object->package.count = internal_object->package.count;
+ external_object->package.elements = ACPI_CAST_PTR(union acpi_object,
+ info.free_space);
+
+ /*
+ * Leave room for an array of ACPI_OBJECTS in the buffer
+ * and move the free space past it
+ */
+ info.length += (acpi_size) external_object->package.count *
+ ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
+ info.free_space += external_object->package.count *
+ ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
+
+ status = acpi_ut_walk_package_tree(internal_object, external_object,
+ acpi_ut_copy_ielement_to_eelement,
+ &info);
+
+ *space_used = info.length;
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_copy_iobject_to_eobject
+ *
+ * PARAMETERS: internal_object - The internal object to be converted
+ * buffer_ptr - Where the object is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function is called to build an API object to be returned to
+ * the caller.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_copy_iobject_to_eobject(union acpi_operand_object *internal_object,
+ struct acpi_buffer *ret_buffer)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ut_copy_iobject_to_eobject);
+
+ if (ACPI_GET_OBJECT_TYPE(internal_object) == ACPI_TYPE_PACKAGE) {
+ /*
+ * Package object: Copy all subobjects (including
+ * nested packages)
+ */
+ status = acpi_ut_copy_ipackage_to_epackage(internal_object,
+ ret_buffer->pointer,
+ &ret_buffer->length);
+ } else {
+ /*
+ * Build a simple object (no nested objects)
+ */
+ status = acpi_ut_copy_isimple_to_esimple(internal_object,
+ ACPI_CAST_PTR(union
+ acpi_object,
+ ret_buffer->
+ pointer),
+ ACPI_ADD_PTR(u8,
+ ret_buffer->
+ pointer,
+ ACPI_ROUND_UP_TO_NATIVE_WORD
+ (sizeof
+ (union
+ acpi_object))),
+ &ret_buffer->length);
+		/*
+		 * The simple-object copy above does not include the size of
+		 * the union acpi_object itself in the returned length, so
+		 * add it in here
+		 */
+ ret_buffer->length += sizeof(union acpi_object);
+ }
+
+ return_ACPI_STATUS(status);
+}
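+
+/*
+ * Caller sketch (illustrative; roughly the two-pass pattern used when
+ * returning evaluation results, with acpi_ut_get_object_size() assumed to
+ * supply the space calculation): size the result first, prepare the user
+ * buffer, then convert:
+ *
+ *	status = acpi_ut_get_object_size(internal_object, &buffer_space_needed);
+ *	if (ACPI_FAILURE(status)) {
+ *		return_ACPI_STATUS(status);
+ *	}
+ *
+ *	status = acpi_ut_initialize_buffer(ret_buffer, buffer_space_needed);
+ *	if (ACPI_FAILURE(status)) {
+ *		return_ACPI_STATUS(status);
+ *	}
+ *
+ *	status = acpi_ut_copy_iobject_to_eobject(internal_object, ret_buffer);
+ */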
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_copy_esimple_to_isimple
+ *
+ * PARAMETERS: external_object - The external object to be converted
+ * ret_internal_object - Where the internal object is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function copies an external object to an internal one.
+ * NOTE: Pointers can be copied, we don't need to copy data.
+ * (The pointers have to be valid in our address space no matter
+ * what we do with them!)
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
+ union acpi_operand_object **ret_internal_object)
+{
+ union acpi_operand_object *internal_object;
+
+ ACPI_FUNCTION_TRACE(ut_copy_esimple_to_isimple);
+
+ /*
+ * Simple types supported are: String, Buffer, Integer
+ */
+ switch (external_object->type) {
+ case ACPI_TYPE_STRING:
+ case ACPI_TYPE_BUFFER:
+ case ACPI_TYPE_INTEGER:
+ case ACPI_TYPE_LOCAL_REFERENCE:
+
+ internal_object = acpi_ut_create_internal_object((u8)
+ external_object->
+ type);
+ if (!internal_object) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+ break;
+
+ case ACPI_TYPE_ANY: /* This is the case for a NULL object */
+
+ *ret_internal_object = NULL;
+ return_ACPI_STATUS(AE_OK);
+
+ default:
+ /* All other types are not supported */
+
+ ACPI_ERROR((AE_INFO,
+ "Unsupported object type, cannot convert to internal object: %s",
+ acpi_ut_get_type_name(external_object->type)));
+
+ return_ACPI_STATUS(AE_SUPPORT);
+ }
+
+ /* Must COPY string and buffer contents */
+
+ switch (external_object->type) {
+ case ACPI_TYPE_STRING:
+
+ internal_object->string.pointer =
+ ACPI_ALLOCATE_ZEROED((acpi_size) external_object->string.
+ length + 1);
+ if (!internal_object->string.pointer) {
+ goto error_exit;
+ }
+
+ ACPI_MEMCPY(internal_object->string.pointer,
+ external_object->string.pointer,
+ external_object->string.length);
+
+ internal_object->string.length = external_object->string.length;
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ internal_object->buffer.pointer =
+ ACPI_ALLOCATE_ZEROED(external_object->buffer.length);
+ if (!internal_object->buffer.pointer) {
+ goto error_exit;
+ }
+
+ ACPI_MEMCPY(internal_object->buffer.pointer,
+ external_object->buffer.pointer,
+ external_object->buffer.length);
+
+ internal_object->buffer.length = external_object->buffer.length;
+
+ /* Mark buffer data valid */
+
+ internal_object->buffer.flags |= AOPOBJ_DATA_VALID;
+ break;
+
+ case ACPI_TYPE_INTEGER:
+
+ internal_object->integer.value = external_object->integer.value;
+ break;
+
+ case ACPI_TYPE_LOCAL_REFERENCE:
+
+ /* TBD: should validate incoming handle */
+
+ internal_object->reference.class = ACPI_REFCLASS_NAME;
+ internal_object->reference.node =
+ external_object->reference.handle;
+ break;
+
+ default:
+ /* Other types can't get here */
+ break;
+ }
+
+ *ret_internal_object = internal_object;
+ return_ACPI_STATUS(AE_OK);
+
+ error_exit:
+ acpi_ut_remove_reference(internal_object);
+ return_ACPI_STATUS(AE_NO_MEMORY);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_copy_epackage_to_ipackage
+ *
+ * PARAMETERS: external_object - The external object to be converted
+ * internal_object - Where the internal object is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Copy an external package object to an internal package.
+ * Handles nested packages.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object,
+ union acpi_operand_object **internal_object)
+{
+ acpi_status status = AE_OK;
+ union acpi_operand_object *package_object;
+ union acpi_operand_object **package_elements;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(ut_copy_epackage_to_ipackage);
+
+ /* Create the package object */
+
+ package_object =
+ acpi_ut_create_package_object(external_object->package.count);
+ if (!package_object) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ package_elements = package_object->package.elements;
+
+ /*
+ * Recursive implementation. Probably ok, since nested external packages
+ * as parameters should be very rare.
+ */
+ for (i = 0; i < external_object->package.count; i++) {
+ status =
+ acpi_ut_copy_eobject_to_iobject(&external_object->package.
+ elements[i],
+ &package_elements[i]);
+ if (ACPI_FAILURE(status)) {
+
+ /* Truncate package and delete it */
+
+ package_object->package.count = i;
+ package_elements[i] = NULL;
+ acpi_ut_remove_reference(package_object);
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /* Mark package data valid */
+
+ package_object->package.flags |= AOPOBJ_DATA_VALID;
+
+ *internal_object = package_object;
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_copy_eobject_to_iobject
+ *
+ * PARAMETERS: external_object - The external object to be converted
+ * internal_object - Where the internal object is returned
+ *
+ * RETURN: Status - the status of the call
+ *
+ * DESCRIPTION: Converts an external object to an internal object.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_copy_eobject_to_iobject(union acpi_object *external_object,
+ union acpi_operand_object **internal_object)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ut_copy_eobject_to_iobject);
+
+ if (external_object->type == ACPI_TYPE_PACKAGE) {
+ status =
+ acpi_ut_copy_epackage_to_ipackage(external_object,
+ internal_object);
+ } else {
+ /*
+ * Build a simple object (no nested objects)
+ */
+ status =
+ acpi_ut_copy_esimple_to_isimple(external_object,
+ internal_object);
+ }
+
+ return_ACPI_STATUS(status);
+}
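+
+/*
+ * Caller sketch (illustrative; external_args, internal_args and the error
+ * handling are assumed): incoming method arguments arrive as an
+ * acpi_object_list of external union acpi_object structures and are
+ * converted one by one into internal operand objects before execution:
+ *
+ *	for (i = 0; i < external_args->count; i++) {
+ *		status =
+ *		    acpi_ut_copy_eobject_to_iobject(&external_args->pointer[i],
+ *						    &internal_args[i]);
+ *		if (ACPI_FAILURE(status)) {
+ *			break;
+ *		}
+ *	}
+ */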
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_copy_simple_object
+ *
+ * PARAMETERS: source_desc - The internal object to be copied
+ * dest_desc - New target object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Simple copy of one internal object to another. Reference count
+ * of the destination object is preserved.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
+ union acpi_operand_object *dest_desc)
+{
+ u16 reference_count;
+ union acpi_operand_object *next_object;
+
+ /* Save fields from destination that we don't want to overwrite */
+
+ reference_count = dest_desc->common.reference_count;
+ next_object = dest_desc->common.next_object;
+
+ /* Copy the entire source object over the destination object */
+
+ ACPI_MEMCPY((char *)dest_desc, (char *)source_desc,
+ sizeof(union acpi_operand_object));
+
+ /* Restore the saved fields */
+
+ dest_desc->common.reference_count = reference_count;
+ dest_desc->common.next_object = next_object;
+
+ /* New object is not static, regardless of source */
+
+ dest_desc->common.flags &= ~AOPOBJ_STATIC_POINTER;
+
+ /* Handle the objects with extra data */
+
+ switch (ACPI_GET_OBJECT_TYPE(dest_desc)) {
+ case ACPI_TYPE_BUFFER:
+ /*
+ * Allocate and copy the actual buffer if and only if:
+ * 1) There is a valid buffer pointer
+ * 2) The buffer has a length > 0
+ */
+ if ((source_desc->buffer.pointer) &&
+ (source_desc->buffer.length)) {
+ dest_desc->buffer.pointer =
+ ACPI_ALLOCATE(source_desc->buffer.length);
+ if (!dest_desc->buffer.pointer) {
+ return (AE_NO_MEMORY);
+ }
+
+ /* Copy the actual buffer data */
+
+ ACPI_MEMCPY(dest_desc->buffer.pointer,
+ source_desc->buffer.pointer,
+ source_desc->buffer.length);
+ }
+ break;
+
+ case ACPI_TYPE_STRING:
+ /*
+ * Allocate and copy the actual string if and only if:
+ * 1) There is a valid string pointer
+ * (Pointer to a NULL string is allowed)
+ */
+ if (source_desc->string.pointer) {
+ dest_desc->string.pointer =
+ ACPI_ALLOCATE((acpi_size) source_desc->string.
+ length + 1);
+ if (!dest_desc->string.pointer) {
+ return (AE_NO_MEMORY);
+ }
+
+ /* Copy the actual string data */
+
+ ACPI_MEMCPY(dest_desc->string.pointer,
+ source_desc->string.pointer,
+ (acpi_size) source_desc->string.length + 1);
+ }
+ break;
+
+ case ACPI_TYPE_LOCAL_REFERENCE:
+ /*
+ * We copied the reference object, so we now must add a reference
+ * to the object pointed to by the reference
+ *
+ * DDBHandle reference (from Load/load_table) is a special reference,
+ * it does not have a Reference.Object, so does not need to
+ * increase the reference count
+ */
+ if (source_desc->reference.class == ACPI_REFCLASS_TABLE) {
+ break;
+ }
+
+ acpi_ut_add_reference(source_desc->reference.object);
+ break;
+
+ case ACPI_TYPE_REGION:
+ /*
+ * We copied the Region Handler, so we now must add a reference
+ */
+ if (dest_desc->region.handler) {
+ acpi_ut_add_reference(dest_desc->region.handler);
+ }
+ break;
+
+ default:
+ /* Nothing to do for other simple objects */
+ break;
+ }
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_copy_ielement_to_ielement
+ *
+ * PARAMETERS: acpi_pkg_callback
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Copy one package element to another package element
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_copy_ielement_to_ielement(u8 object_type,
+ union acpi_operand_object *source_object,
+ union acpi_generic_state *state,
+ void *context)
+{
+ acpi_status status = AE_OK;
+ u32 this_index;
+ union acpi_operand_object **this_target_ptr;
+ union acpi_operand_object *target_object;
+
+ ACPI_FUNCTION_ENTRY();
+
+ this_index = state->pkg.index;
+ this_target_ptr = (union acpi_operand_object **)
+ &state->pkg.dest_object->package.elements[this_index];
+
+ switch (object_type) {
+ case ACPI_COPY_TYPE_SIMPLE:
+
+ /* A null source object indicates a (legal) null package element */
+
+ if (source_object) {
+ /*
+ * This is a simple object, just copy it
+ */
+ target_object =
+ acpi_ut_create_internal_object(ACPI_GET_OBJECT_TYPE
+ (source_object));
+ if (!target_object) {
+ return (AE_NO_MEMORY);
+ }
+
+ status =
+ acpi_ut_copy_simple_object(source_object,
+ target_object);
+ if (ACPI_FAILURE(status)) {
+ goto error_exit;
+ }
+
+ *this_target_ptr = target_object;
+ } else {
+ /* Pass through a null element */
+
+ *this_target_ptr = NULL;
+ }
+ break;
+
+ case ACPI_COPY_TYPE_PACKAGE:
+
+ /*
+ * This object is a package - go down another nesting level
+ * Create and build the package object
+ */
+ target_object =
+ acpi_ut_create_package_object(source_object->package.count);
+ if (!target_object) {
+ return (AE_NO_MEMORY);
+ }
+
+ target_object->common.flags = source_object->common.flags;
+
+ /* Pass the new package object back to the package walk routine */
+
+ state->pkg.this_target_obj = target_object;
+
+ /* Store the object pointer in the parent package object */
+
+ *this_target_ptr = target_object;
+ break;
+
+ default:
+ return (AE_BAD_PARAMETER);
+ }
+
+ return (status);
+
+ error_exit:
+ acpi_ut_remove_reference(target_object);
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_copy_ipackage_to_ipackage
+ *
+ * PARAMETERS: *source_obj - Pointer to the source package object
+ * *dest_obj - Where the internal object is returned
+ *
+ * RETURN: Status - the status of the call
+ *
+ * DESCRIPTION: This function is called to copy an internal package object
+ * into another internal package object.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
+ union acpi_operand_object *dest_obj,
+ struct acpi_walk_state *walk_state)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(ut_copy_ipackage_to_ipackage);
+
+ dest_obj->common.type = ACPI_GET_OBJECT_TYPE(source_obj);
+ dest_obj->common.flags = source_obj->common.flags;
+ dest_obj->package.count = source_obj->package.count;
+
+ /*
+ * Create the object array and walk the source package tree
+ */
+ dest_obj->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
+ source_obj->package.
+ count +
+ 1) * sizeof(void *));
+ if (!dest_obj->package.elements) {
+ ACPI_ERROR((AE_INFO, "Package allocation failure"));
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /*
+ * Copy the package element-by-element by walking the package "tree".
+ * This handles nested packages of arbitrary depth.
+ */
+ status = acpi_ut_walk_package_tree(source_obj, dest_obj,
+ acpi_ut_copy_ielement_to_ielement,
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+
+ /* On failure, delete the destination package object */
+
+ acpi_ut_remove_reference(dest_obj);
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_copy_iobject_to_iobject
+ *
+ * PARAMETERS: walk_state - Current walk state
+ * source_desc - The internal object to be copied
+ * dest_desc - Where the copied object is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Copy an internal object to a new internal object
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_copy_iobject_to_iobject(union acpi_operand_object *source_desc,
+ union acpi_operand_object **dest_desc,
+ struct acpi_walk_state *walk_state)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(ut_copy_iobject_to_iobject);
+
+ /* Create the top level object */
+
+ *dest_desc =
+ acpi_ut_create_internal_object(ACPI_GET_OBJECT_TYPE(source_desc));
+ if (!*dest_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Copy the object and possible subobjects */
+
+ if (ACPI_GET_OBJECT_TYPE(source_desc) == ACPI_TYPE_PACKAGE) {
+ status =
+ acpi_ut_copy_ipackage_to_ipackage(source_desc, *dest_desc,
+ walk_state);
+ } else {
+ status = acpi_ut_copy_simple_object(source_desc, *dest_desc);
+ }
+
+ return_ACPI_STATUS(status);
+}
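+
+/*
+ * Caller sketch (illustrative; source_desc and walk_state are assumed to
+ * come from the interpreter): a deep copy of an arbitrary internal object,
+ * nested packages included, is obtained with a single call. Simple objects
+ * go through acpi_ut_copy_simple_object(), packages through the recursive
+ * package walk above:
+ *
+ *	union acpi_operand_object *new_desc;
+ *
+ *	status = acpi_ut_copy_iobject_to_iobject(source_desc, &new_desc,
+ *						 walk_state);
+ *	if (ACPI_FAILURE(status)) {
+ *		return_ACPI_STATUS(status);
+ *	}
+ */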
diff --git a/drivers/acpi/utilities/utdebug.c b/drivers/acpi/utilities/utdebug.c
new file mode 100644
index 0000000..fd66ecb
--- /dev/null
+++ b/drivers/acpi/utilities/utdebug.c
@@ -0,0 +1,654 @@
+/******************************************************************************
+ *
+ * Module Name: utdebug - Debug print routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utdebug")
+#ifdef ACPI_DEBUG_OUTPUT
+static acpi_thread_id acpi_gbl_prev_thread_id;
+static char *acpi_gbl_fn_entry_str = "----Entry";
+static char *acpi_gbl_fn_exit_str = "----Exit-";
+
+/* Local prototypes */
+
+static const char *acpi_ut_trim_function_name(const char *function_name);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_init_stack_ptr_trace
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Save the current CPU stack pointer at subsystem startup
+ *
+ ******************************************************************************/
+
+void acpi_ut_init_stack_ptr_trace(void)
+{
+ acpi_size current_sp;
+
+ acpi_gbl_entry_stack_pointer = &current_sp;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_track_stack_ptr
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Save the current CPU stack pointer
+ *
+ ******************************************************************************/
+
+void acpi_ut_track_stack_ptr(void)
+{
+ acpi_size current_sp;
+
+ if (&current_sp < acpi_gbl_lowest_stack_pointer) {
+ acpi_gbl_lowest_stack_pointer = &current_sp;
+ }
+
+ if (acpi_gbl_nesting_level > acpi_gbl_deepest_nesting) {
+ acpi_gbl_deepest_nesting = acpi_gbl_nesting_level;
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_trim_function_name
+ *
+ * PARAMETERS: function_name - Ascii string containing a procedure name
+ *
+ * RETURN: Updated pointer to the function name
+ *
+ * DESCRIPTION: Remove the "Acpi" prefix from the function name, if present.
+ * This allows compiler macros such as __func__ to be used
+ * with no change to the debug output.
+ *
+ ******************************************************************************/
+
+static const char *acpi_ut_trim_function_name(const char *function_name)
+{
+
+ /* All Function names are longer than 4 chars, check is safe */
+
+ if (*(ACPI_CAST_PTR(u32, function_name)) == ACPI_PREFIX_MIXED) {
+
+ /* This is the case where the original source has not been modified */
+
+ return (function_name + 4);
+ }
+
+ if (*(ACPI_CAST_PTR(u32, function_name)) == ACPI_PREFIX_LOWER) {
+
+ /* This is the case where the source has been 'linuxized' */
+
+ return (function_name + 5);
+ }
+
+ return (function_name);
+}
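+
+/*
+ * Example (illustrative): with the "linuxized" naming convention the
+ * 5-character "acpi_" prefix is dropped, with the original mixed-case
+ * convention the 4-character "Acpi" prefix is dropped:
+ *
+ *	acpi_ut_trim_function_name("acpi_ut_trace")  ->  "ut_trace"
+ *	acpi_ut_trim_function_name("AcpiUtTrace")    ->  "UtTrace"
+ */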
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_debug_print
+ *
+ * PARAMETERS: requested_debug_level - Requested debug print level
+ * line_number - Caller's line number (for error output)
+ * function_name - Caller's procedure name
+ * module_name - Caller's module name
+ * component_id - Caller's component ID
+ * Format - Printf format field
+ * ... - Optional printf arguments
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print error message with prefix consisting of the module name,
+ * line number, and component ID.
+ *
+ ******************************************************************************/
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_debug_print(u32 requested_debug_level,
+ u32 line_number,
+ const char *function_name,
+ const char *module_name,
+ u32 component_id, const char *format, ...)
+{
+ acpi_thread_id thread_id;
+ va_list args;
+
+ /*
+ * Stay silent if the debug level or component ID is disabled
+ */
+ if (!(requested_debug_level & acpi_dbg_level) ||
+ !(component_id & acpi_dbg_layer)) {
+ return;
+ }
+
+ /*
+ * Thread tracking and context switch notification
+ */
+ thread_id = acpi_os_get_thread_id();
+ if (thread_id != acpi_gbl_prev_thread_id) {
+ if (ACPI_LV_THREADS & acpi_dbg_level) {
+ acpi_os_printf
+ ("\n**** Context Switch from TID %lX to TID %lX ****\n\n",
+ (unsigned long)acpi_gbl_prev_thread_id,
+ (unsigned long)thread_id);
+ }
+
+ acpi_gbl_prev_thread_id = thread_id;
+ }
+
+ /*
+ * Display the module name, current line number, thread ID (if requested),
+ * current procedure nesting level, and the current procedure name
+ */
+ acpi_os_printf("%8s-%04ld ", module_name, line_number);
+
+ if (ACPI_LV_THREADS & acpi_dbg_level) {
+ acpi_os_printf("[%04lX] ", (unsigned long)thread_id);
+ }
+
+ acpi_os_printf("[%02ld] %-22.22s: ",
+ acpi_gbl_nesting_level,
+ acpi_ut_trim_function_name(function_name));
+
+ va_start(args, format);
+ acpi_os_vprintf(format, args);
+ va_end(args);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_ut_debug_print)
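+
+/*
+ * Usage sketch (illustrative; "count" is a placeholder variable): callers
+ * normally reach this function through the ACPI_DEBUG_PRINT() macro. The
+ * ACPI_DB_* level argument expands to also carry the line number, function
+ * name, module name and component ID documented above, which is why the
+ * call site uses double parentheses:
+ *
+ *	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ *			  "Table load complete, %u methods\n", count));
+ */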
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_debug_print_raw
+ *
+ * PARAMETERS: requested_debug_level - Requested debug print level
+ * line_number - Caller's line number
+ * function_name - Caller's procedure name
+ * module_name - Caller's module name
+ * component_id - Caller's component ID
+ * Format - Printf format field
+ * ... - Optional printf arguments
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print message with no headers. Has same interface as
+ * debug_print so that the same macros can be used.
+ *
+ ******************************************************************************/
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_debug_print_raw(u32 requested_debug_level,
+ u32 line_number,
+ const char *function_name,
+ const char *module_name,
+ u32 component_id, const char *format, ...)
+{
+ va_list args;
+
+ if (!(requested_debug_level & acpi_dbg_level) ||
+ !(component_id & acpi_dbg_layer)) {
+ return;
+ }
+
+ va_start(args, format);
+ acpi_os_vprintf(format, args);
+ va_end(args);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_ut_debug_print_raw)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_trace
+ *
+ * PARAMETERS: line_number - Caller's line number
+ * function_name - Caller's procedure name
+ * module_name - Caller's module name
+ * component_id - Caller's component ID
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is
+ * set in debug_level
+ *
+ ******************************************************************************/
+void
+acpi_ut_trace(u32 line_number,
+ const char *function_name,
+ const char *module_name, u32 component_id)
+{
+
+ acpi_gbl_nesting_level++;
+ acpi_ut_track_stack_ptr();
+
+ acpi_ut_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name, module_name,
+ component_id, "%s\n", acpi_gbl_fn_entry_str);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_ut_trace)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_trace_ptr
+ *
+ * PARAMETERS: line_number - Caller's line number
+ * function_name - Caller's procedure name
+ * module_name - Caller's module name
+ * component_id - Caller's component ID
+ * Pointer - Pointer to display
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is
+ * set in debug_level
+ *
+ ******************************************************************************/
+void
+acpi_ut_trace_ptr(u32 line_number,
+ const char *function_name,
+ const char *module_name, u32 component_id, void *pointer)
+{
+ acpi_gbl_nesting_level++;
+ acpi_ut_track_stack_ptr();
+
+ acpi_ut_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name, module_name,
+ component_id, "%s %p\n", acpi_gbl_fn_entry_str,
+ pointer);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_trace_str
+ *
+ * PARAMETERS: line_number - Caller's line number
+ * function_name - Caller's procedure name
+ * module_name - Caller's module name
+ * component_id - Caller's component ID
+ * String - Additional string to display
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is
+ * set in debug_level
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_trace_str(u32 line_number,
+ const char *function_name,
+ const char *module_name, u32 component_id, char *string)
+{
+
+ acpi_gbl_nesting_level++;
+ acpi_ut_track_stack_ptr();
+
+ acpi_ut_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name, module_name,
+ component_id, "%s %s\n", acpi_gbl_fn_entry_str,
+ string);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_trace_u32
+ *
+ * PARAMETERS: line_number - Caller's line number
+ * function_name - Caller's procedure name
+ * module_name - Caller's module name
+ * component_id - Caller's component ID
+ * Integer - Integer to display
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is
+ * set in debug_level
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_trace_u32(u32 line_number,
+ const char *function_name,
+ const char *module_name, u32 component_id, u32 integer)
+{
+
+ acpi_gbl_nesting_level++;
+ acpi_ut_track_stack_ptr();
+
+ acpi_ut_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name, module_name,
+ component_id, "%s %08X\n", acpi_gbl_fn_entry_str,
+ integer);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_exit
+ *
+ * PARAMETERS: line_number - Caller's line number
+ * function_name - Caller's procedure name
+ * module_name - Caller's module name
+ * component_id - Caller's component ID
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is
+ * set in debug_level
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_exit(u32 line_number,
+ const char *function_name,
+ const char *module_name, u32 component_id)
+{
+
+ acpi_ut_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name, module_name,
+ component_id, "%s\n", acpi_gbl_fn_exit_str);
+
+ acpi_gbl_nesting_level--;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_ut_exit)
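+
+/*
+ * Editorial sketch (not part of the original source): entry/exit tracing is
+ * normally generated by the ACPI_FUNCTION_TRACE and return_ACPI_STATUS /
+ * return_VOID macros (see utdelete.c and uteval.c below), which keep the
+ * nesting level balanced. A hand-written equivalent would look roughly like
+ * the hypothetical function below.
+ */
+#if 0
+static acpi_status example_traced_function(void)
+{
+	ACPI_FUNCTION_TRACE(example_traced_function);
+
+	/* ... function body ... */
+
+	return_ACPI_STATUS(AE_OK);
+}
+#endif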
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_status_exit
+ *
+ * PARAMETERS: line_number - Caller's line number
+ * function_name - Caller's procedure name
+ * module_name - Caller's module name
+ * component_id - Caller's component ID
+ * Status - Exit status code
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is
+ * set in debug_level. Prints exit status also.
+ *
+ ******************************************************************************/
+void
+acpi_ut_status_exit(u32 line_number,
+ const char *function_name,
+ const char *module_name,
+ u32 component_id, acpi_status status)
+{
+
+ if (ACPI_SUCCESS(status)) {
+ acpi_ut_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name, module_name,
+ component_id, "%s %s\n",
+ acpi_gbl_fn_exit_str,
+ acpi_format_exception(status));
+ } else {
+ acpi_ut_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name, module_name,
+ component_id, "%s ****Exception****: %s\n",
+ acpi_gbl_fn_exit_str,
+ acpi_format_exception(status));
+ }
+
+ acpi_gbl_nesting_level--;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_ut_status_exit)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_value_exit
+ *
+ * PARAMETERS: line_number - Caller's line number
+ * function_name - Caller's procedure name
+ * module_name - Caller's module name
+ * component_id - Caller's component ID
+ * Value - Value to be printed with exit msg
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is
+ * set in debug_level. Prints exit value also.
+ *
+ ******************************************************************************/
+void
+acpi_ut_value_exit(u32 line_number,
+ const char *function_name,
+ const char *module_name,
+ u32 component_id, acpi_integer value)
+{
+
+ acpi_ut_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name, module_name,
+ component_id, "%s %8.8X%8.8X\n",
+ acpi_gbl_fn_exit_str, ACPI_FORMAT_UINT64(value));
+
+ acpi_gbl_nesting_level--;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_ut_value_exit)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_ptr_exit
+ *
+ * PARAMETERS: line_number - Caller's line number
+ * function_name - Caller's procedure name
+ * module_name - Caller's module name
+ * component_id - Caller's component ID
+ * Ptr - Pointer to display
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is
+ * set in debug_level. Prints exit value also.
+ *
+ ******************************************************************************/
+void
+acpi_ut_ptr_exit(u32 line_number,
+ const char *function_name,
+ const char *module_name, u32 component_id, u8 *ptr)
+{
+
+ acpi_ut_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name, module_name,
+ component_id, "%s %p\n", acpi_gbl_fn_exit_str, ptr);
+
+ acpi_gbl_nesting_level--;
+}
+
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_dump_buffer2
+ *
+ * PARAMETERS:  Buffer              - Buffer to dump
+ *              Count               - Amount to dump, in bytes
+ *              Display             - BYTE, WORD, DWORD, or QWORD display
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Generic dump buffer in both hex and ASCII. Performs no
+ *              debug-level filtering (see acpi_ut_dump_buffer below).
+ *
+ ******************************************************************************/
+
+void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display)
+{
+ u32 i = 0;
+ u32 j;
+ u32 temp32;
+ u8 buf_char;
+
+ if (!buffer) {
+ acpi_os_printf("Null Buffer Pointer in DumpBuffer!\n");
+ return;
+ }
+
+ if ((count < 4) || (count & 0x01)) {
+ display = DB_BYTE_DISPLAY;
+ }
+
+ /* Nasty little dump buffer routine! */
+
+ while (i < count) {
+
+ /* Print current offset */
+
+ acpi_os_printf("%6.4X: ", i);
+
+ /* Print 16 hex chars */
+
+ for (j = 0; j < 16;) {
+ if (i + j >= count) {
+
+ /* Dump fill spaces */
+
+ acpi_os_printf("%*s", ((display * 2) + 1), " ");
+ j += display;
+ continue;
+ }
+
+ switch (display) {
+ case DB_BYTE_DISPLAY:
+ default: /* Default is BYTE display */
+
+ acpi_os_printf("%02X ",
+ buffer[(acpi_size) i + j]);
+ break;
+
+ case DB_WORD_DISPLAY:
+
+ ACPI_MOVE_16_TO_32(&temp32,
+ &buffer[(acpi_size) i + j]);
+ acpi_os_printf("%04X ", temp32);
+ break;
+
+ case DB_DWORD_DISPLAY:
+
+ ACPI_MOVE_32_TO_32(&temp32,
+ &buffer[(acpi_size) i + j]);
+ acpi_os_printf("%08X ", temp32);
+ break;
+
+ case DB_QWORD_DISPLAY:
+
+ ACPI_MOVE_32_TO_32(&temp32,
+ &buffer[(acpi_size) i + j]);
+ acpi_os_printf("%08X", temp32);
+
+ ACPI_MOVE_32_TO_32(&temp32,
+ &buffer[(acpi_size) i + j +
+ 4]);
+ acpi_os_printf("%08X ", temp32);
+ break;
+ }
+
+ j += display;
+ }
+
+ /*
+ * Print the ASCII equivalent characters but watch out for the bad
+ * unprintable ones (printable chars are 0x20 through 0x7E)
+ */
+ acpi_os_printf(" ");
+ for (j = 0; j < 16; j++) {
+ if (i + j >= count) {
+ acpi_os_printf("\n");
+ return;
+ }
+
+ buf_char = buffer[(acpi_size) i + j];
+ if (ACPI_IS_PRINT(buf_char)) {
+ acpi_os_printf("%c", buf_char);
+ } else {
+ acpi_os_printf(".");
+ }
+ }
+
+ /* Done with that line. */
+
+ acpi_os_printf("\n");
+ i += 16;
+ }
+
+ return;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_dump_buffer
+ *
+ * PARAMETERS: Buffer - Buffer to dump
+ * Count - Amount to dump, in bytes
+ * Display - BYTE, WORD, DWORD, or QWORD display
+ *              component_id        - Caller's component ID
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Generic dump buffer in both hex and ASCII. Dumps only if both
+ *              ACPI_LV_TABLES and the caller's component ID are enabled.
+ *
+ ******************************************************************************/
+
+void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id)
+{
+
+ /* Only dump the buffer if tracing is enabled */
+
+ if (!((ACPI_LV_TABLES & acpi_dbg_level) &&
+ (component_id & acpi_dbg_layer))) {
+ return;
+ }
+
+ acpi_ut_dump_buffer2(buffer, count, display);
+}
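+
+/*
+ * Editorial sketch (not part of the original source): typical use of the
+ * filtered dump routine. "buffer" and "length" stand for any caller-owned
+ * data; _COMPONENT is the component ID defined at the top of the calling
+ * module.
+ */
+#if 0
+	acpi_ut_dump_buffer(buffer, length, DB_BYTE_DISPLAY, _COMPONENT);
+#endif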
diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c
new file mode 100644
index 0000000..d197c6b
--- /dev/null
+++ b/drivers/acpi/utilities/utdelete.c
@@ -0,0 +1,676 @@
+/*******************************************************************************
+ *
+ * Module Name: utdelete - object deletion and reference count utilities
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acinterp.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acevents.h>
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utdelete")
+
+/* Local prototypes */
+static void acpi_ut_delete_internal_obj(union acpi_operand_object *object);
+
+static void
+acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_delete_internal_obj
+ *
+ * PARAMETERS: Object - Object to be deleted
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Low level object deletion, after reference counts have been
+ * updated (All reference counts, including sub-objects!)
+ *
+ ******************************************************************************/
+
+static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
+{
+ void *obj_pointer = NULL;
+ union acpi_operand_object *handler_desc;
+ union acpi_operand_object *second_desc;
+ union acpi_operand_object *next_desc;
+
+ ACPI_FUNCTION_TRACE_PTR(ut_delete_internal_obj, object);
+
+ if (!object) {
+ return_VOID;
+ }
+
+ /*
+ * Must delete or free any pointers within the object that are not
+ * actual ACPI objects (for example, a raw buffer pointer).
+ */
+ switch (ACPI_GET_OBJECT_TYPE(object)) {
+ case ACPI_TYPE_STRING:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+ "**** String %p, ptr %p\n", object,
+ object->string.pointer));
+
+ /* Free the actual string buffer */
+
+ if (!(object->common.flags & AOPOBJ_STATIC_POINTER)) {
+
+ /* But only if it is NOT a pointer into an ACPI table */
+
+ obj_pointer = object->string.pointer;
+ }
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+ "**** Buffer %p, ptr %p\n", object,
+ object->buffer.pointer));
+
+ /* Free the actual buffer */
+
+ if (!(object->common.flags & AOPOBJ_STATIC_POINTER)) {
+
+ /* But only if it is NOT a pointer into an ACPI table */
+
+ obj_pointer = object->buffer.pointer;
+ }
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+ " **** Package of count %X\n",
+ object->package.count));
+
+ /*
+ * Elements of the package are not handled here, they are deleted
+ * separately
+ */
+
+ /* Free the (variable length) element pointer array */
+
+ obj_pointer = object->package.elements;
+ break;
+
+ /*
+	 * These objects may have a list of notify handlers.
+	 * A Device object may also have a GPE block.
+ */
+ case ACPI_TYPE_DEVICE:
+
+ if (object->device.gpe_block) {
+ (void)acpi_ev_delete_gpe_block(object->device.
+ gpe_block);
+ }
+
+ /*lint -fallthrough */
+
+ case ACPI_TYPE_PROCESSOR:
+ case ACPI_TYPE_THERMAL:
+
+ /* Walk the notify handler list for this object */
+
+ handler_desc = object->common_notify.handler;
+ while (handler_desc) {
+ next_desc = handler_desc->address_space.next;
+ acpi_ut_remove_reference(handler_desc);
+ handler_desc = next_desc;
+ }
+ break;
+
+ case ACPI_TYPE_MUTEX:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+ "***** Mutex %p, OS Mutex %p\n",
+ object, object->mutex.os_mutex));
+
+ if (object == acpi_gbl_global_lock_mutex) {
+
+ /* Global Lock has extra semaphore */
+
+ (void)
+ acpi_os_delete_semaphore
+ (acpi_gbl_global_lock_semaphore);
+ acpi_gbl_global_lock_semaphore = NULL;
+
+ acpi_os_delete_mutex(object->mutex.os_mutex);
+ acpi_gbl_global_lock_mutex = NULL;
+ } else {
+ acpi_ex_unlink_mutex(object);
+ acpi_os_delete_mutex(object->mutex.os_mutex);
+ }
+ break;
+
+ case ACPI_TYPE_EVENT:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+ "***** Event %p, OS Semaphore %p\n",
+ object, object->event.os_semaphore));
+
+ (void)acpi_os_delete_semaphore(object->event.os_semaphore);
+ object->event.os_semaphore = NULL;
+ break;
+
+ case ACPI_TYPE_METHOD:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+ "***** Method %p\n", object));
+
+ /* Delete the method mutex if it exists */
+
+ if (object->method.mutex) {
+ acpi_os_delete_mutex(object->method.mutex->mutex.
+ os_mutex);
+ acpi_ut_delete_object_desc(object->method.mutex);
+ object->method.mutex = NULL;
+ }
+ break;
+
+ case ACPI_TYPE_REGION:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+ "***** Region %p\n", object));
+
+ second_desc = acpi_ns_get_secondary_object(object);
+ if (second_desc) {
+ /*
+			 * Free the region_context if and only if the handler is one of the
+			 * default handlers -- and therefore the context object was created
+			 * locally, not by an external caller.
+ */
+ handler_desc = object->region.handler;
+ if (handler_desc) {
+ if (handler_desc->address_space.handler_flags &
+ ACPI_ADDR_HANDLER_DEFAULT_INSTALLED) {
+
+ /* Deactivate region and free region context */
+
+ if (handler_desc->address_space.setup) {
+ (void)handler_desc->
+ address_space.setup(object,
+ ACPI_REGION_DEACTIVATE,
+ handler_desc->
+ address_space.
+ context,
+ &second_desc->
+ extra.
+ region_context);
+ }
+ }
+
+ acpi_ut_remove_reference(handler_desc);
+ }
+
+ /* Now we can free the Extra object */
+
+ acpi_ut_delete_object_desc(second_desc);
+ }
+ break;
+
+ case ACPI_TYPE_BUFFER_FIELD:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+ "***** Buffer Field %p\n", object));
+
+ second_desc = acpi_ns_get_secondary_object(object);
+ if (second_desc) {
+ acpi_ut_delete_object_desc(second_desc);
+ }
+ break;
+
+ case ACPI_TYPE_LOCAL_BANK_FIELD:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+ "***** Bank Field %p\n", object));
+
+ second_desc = acpi_ns_get_secondary_object(object);
+ if (second_desc) {
+ acpi_ut_delete_object_desc(second_desc);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ /* Free any allocated memory (pointer within the object) found above */
+
+ if (obj_pointer) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+ "Deleting Object Subptr %p\n", obj_pointer));
+ ACPI_FREE(obj_pointer);
+ }
+
+ /* Now the object can be safely deleted */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Deleting Object %p [%s]\n",
+ object, acpi_ut_get_object_type_name(object)));
+
+ acpi_ut_delete_object_desc(object);
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_delete_internal_object_list
+ *
+ * PARAMETERS: obj_list - Pointer to the list to be deleted
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: This function deletes an internal object list, including both
+ * simple objects and package objects
+ *
+ ******************************************************************************/
+
+void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list)
+{
+ union acpi_operand_object **internal_obj;
+
+ ACPI_FUNCTION_TRACE(ut_delete_internal_object_list);
+
+ /* Walk the null-terminated internal list */
+
+ for (internal_obj = obj_list; *internal_obj; internal_obj++) {
+ acpi_ut_remove_reference(*internal_obj);
+ }
+
+ /* Free the combined parameter pointer list and object array */
+
+ ACPI_FREE(obj_list);
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_update_ref_count
+ *
+ * PARAMETERS: Object - Object whose ref count is to be updated
+ * Action - What to do
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Modify the reference count for an internal ACPI object. The
+ *              object is deleted when the count is decremented to zero.
+ *
+ ******************************************************************************/
+
+static void
+acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
+{
+ u16 count;
+ u16 new_count;
+
+ ACPI_FUNCTION_NAME(ut_update_ref_count);
+
+ if (!object) {
+ return;
+ }
+
+ count = object->common.reference_count;
+ new_count = count;
+
+ /*
+ * Perform the reference count action (increment, decrement, force delete)
+ */
+ switch (action) {
+ case REF_INCREMENT:
+
+ new_count++;
+ object->common.reference_count = new_count;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+ "Obj %p Refs=%X, [Incremented]\n",
+ object, new_count));
+ break;
+
+ case REF_DECREMENT:
+
+ if (count < 1) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+ "Obj %p Refs=%X, can't decrement! (Set to 0)\n",
+ object, new_count));
+
+ new_count = 0;
+ } else {
+ new_count--;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+ "Obj %p Refs=%X, [Decremented]\n",
+ object, new_count));
+ }
+
+ if (ACPI_GET_OBJECT_TYPE(object) == ACPI_TYPE_METHOD) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+ "Method Obj %p Refs=%X, [Decremented]\n",
+ object, new_count));
+ }
+
+ object->common.reference_count = new_count;
+ if (new_count == 0) {
+ acpi_ut_delete_internal_obj(object);
+ }
+ break;
+
+ case REF_FORCE_DELETE:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+ "Obj %p Refs=%X, Force delete! (Set to 0)\n",
+ object, count));
+
+ new_count = 0;
+ object->common.reference_count = new_count;
+ acpi_ut_delete_internal_obj(object);
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO, "Unknown action (%X)", action));
+ break;
+ }
+
+ /*
+ * Sanity check the reference count, for debug purposes only.
+ * (A deleted object will have a huge reference count)
+ */
+ if (count > ACPI_MAX_REFERENCE_COUNT) {
+ ACPI_WARNING((AE_INFO,
+ "Large Reference Count (%X) in object %p", count,
+ object));
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_update_object_reference
+ *
+ * PARAMETERS: Object - Increment ref count for this object
+ * and all sub-objects
+ * Action - Either REF_INCREMENT or REF_DECREMENT or
+ * REF_FORCE_DELETE
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Increment or decrement the object reference count (and that of
+ *              all sub-objects), according to Action
+ *
+ * Object references are incremented when:
+ * 1) An object is attached to a Node (namespace object)
+ * 2) An object is copied (all subobjects must be incremented)
+ *
+ * Object references are decremented when:
+ *              1) An object is detached from a Node
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
+{
+ acpi_status status = AE_OK;
+ union acpi_generic_state *state_list = NULL;
+ union acpi_operand_object *next_object = NULL;
+ union acpi_generic_state *state;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE_PTR(ut_update_object_reference, object);
+
+ while (object) {
+
+ /* Make sure that this isn't a namespace handle */
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+ "Object %p is NS handle\n", object));
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * All sub-objects must have their reference count incremented also.
+ * Different object types have different subobjects.
+ */
+ switch (ACPI_GET_OBJECT_TYPE(object)) {
+ case ACPI_TYPE_DEVICE:
+ case ACPI_TYPE_PROCESSOR:
+ case ACPI_TYPE_POWER:
+ case ACPI_TYPE_THERMAL:
+
+ /* Update the notify objects for these types (if present) */
+
+ acpi_ut_update_ref_count(object->common_notify.
+ system_notify, action);
+ acpi_ut_update_ref_count(object->common_notify.
+ device_notify, action);
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+ /*
+ * We must update all the sub-objects of the package,
+		 * each of which may have its own sub-objects.
+ */
+ for (i = 0; i < object->package.count; i++) {
+ /*
+ * Push each element onto the stack for later processing.
+ * Note: There can be null elements within the package,
+ * these are simply ignored
+ */
+ status =
+ acpi_ut_create_update_state_and_push
+ (object->package.elements[i], action,
+ &state_list);
+ if (ACPI_FAILURE(status)) {
+ goto error_exit;
+ }
+ }
+ break;
+
+ case ACPI_TYPE_BUFFER_FIELD:
+
+ next_object = object->buffer_field.buffer_obj;
+ break;
+
+ case ACPI_TYPE_LOCAL_REGION_FIELD:
+
+ next_object = object->field.region_obj;
+ break;
+
+ case ACPI_TYPE_LOCAL_BANK_FIELD:
+
+ next_object = object->bank_field.bank_obj;
+ status =
+ acpi_ut_create_update_state_and_push(object->
+ bank_field.
+ region_obj,
+ action,
+ &state_list);
+ if (ACPI_FAILURE(status)) {
+ goto error_exit;
+ }
+ break;
+
+ case ACPI_TYPE_LOCAL_INDEX_FIELD:
+
+ next_object = object->index_field.index_obj;
+ status =
+ acpi_ut_create_update_state_and_push(object->
+ index_field.
+ data_obj,
+ action,
+ &state_list);
+ if (ACPI_FAILURE(status)) {
+ goto error_exit;
+ }
+ break;
+
+ case ACPI_TYPE_LOCAL_REFERENCE:
+ /*
+ * The target of an Index (a package, string, or buffer) or a named
+ * reference must track changes to the ref count of the index or
+ * target object.
+ */
+ if ((object->reference.class == ACPI_REFCLASS_INDEX) ||
+ (object->reference.class == ACPI_REFCLASS_NAME)) {
+ next_object = object->reference.object;
+ }
+ break;
+
+ case ACPI_TYPE_REGION:
+ default:
+ break; /* No subobjects for all other types */
+ }
+
+ /*
+ * Now we can update the count in the main object. This can only
+ * happen after we update the sub-objects in case this causes the
+ * main object to be deleted.
+ */
+ acpi_ut_update_ref_count(object, action);
+ object = NULL;
+
+ /* Move on to the next object to be updated */
+
+ if (next_object) {
+ object = next_object;
+ next_object = NULL;
+ } else if (state_list) {
+ state = acpi_ut_pop_generic_state(&state_list);
+ object = state->update.object;
+ acpi_ut_delete_generic_state(state);
+ }
+ }
+
+ return_ACPI_STATUS(AE_OK);
+
+ error_exit:
+
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not update object reference count"));
+
+ /* Free any stacked Update State objects */
+
+ while (state_list) {
+ state = acpi_ut_pop_generic_state(&state_list);
+ acpi_ut_delete_generic_state(state);
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_add_reference
+ *
+ * PARAMETERS: Object - Object whose reference count is to be
+ * incremented
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Add one reference to an ACPI object
+ *
+ ******************************************************************************/
+
+void acpi_ut_add_reference(union acpi_operand_object *object)
+{
+
+ ACPI_FUNCTION_TRACE_PTR(ut_add_reference, object);
+
+ /* Ensure that we have a valid object */
+
+ if (!acpi_ut_valid_internal_object(object)) {
+ return_VOID;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+ "Obj %p Current Refs=%X [To Be Incremented]\n",
+ object, object->common.reference_count));
+
+ /* Increment the reference count */
+
+ (void)acpi_ut_update_object_reference(object, REF_INCREMENT);
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_remove_reference
+ *
+ * PARAMETERS: Object - Object whose ref count will be decremented
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Decrement the reference count of an ACPI internal object
+ *
+ ******************************************************************************/
+
+void acpi_ut_remove_reference(union acpi_operand_object *object)
+{
+
+ ACPI_FUNCTION_TRACE_PTR(ut_remove_reference, object);
+
+ /*
+	 * Allow a NULL pointer to be passed in; just ignore it. This saves
+	 * each caller from having to check. Also ignore NS nodes.
+	 */
+ if (!object ||
+ (ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED)) {
+ return_VOID;
+ }
+
+ /* Ensure that we have a valid object */
+
+ if (!acpi_ut_valid_internal_object(object)) {
+ return_VOID;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+ "Obj %p Current Refs=%X [To Be Decremented]\n",
+ object, object->common.reference_count));
+
+ /*
+ * Decrement the reference count, and only actually delete the object
+ * if the reference count becomes 0. (Must also decrement the ref count
+ * of all subobjects!)
+ */
+ (void)acpi_ut_update_object_reference(object, REF_DECREMENT);
+ return_VOID;
+}
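+
+/*
+ * Editorial sketch (not part of the original source): the usual ownership
+ * pattern. "obj_desc" stands for any union acpi_operand_object pointer. A
+ * caller that stores an extra pointer to an object takes a reference; the
+ * matching remove drops it, and the object (with its sub-objects) is deleted
+ * once the count reaches zero.
+ */
+#if 0
+	acpi_ut_add_reference(obj_desc);	/* Keep a private pointer */
+
+	/* ... use obj_desc ... */
+
+	acpi_ut_remove_reference(obj_desc);	/* Done with it */
+#endif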
diff --git a/drivers/acpi/utilities/uteval.c b/drivers/acpi/utilities/uteval.c
new file mode 100644
index 0000000..352747e
--- /dev/null
+++ b/drivers/acpi/utilities/uteval.c
@@ -0,0 +1,751 @@
+/******************************************************************************
+ *
+ * Module Name: uteval - Object evaluation
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acinterp.h>
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("uteval")
+
+/* Local prototypes */
+static void
+acpi_ut_copy_id_string(char *destination, char *source, acpi_size max_length);
+
+static acpi_status
+acpi_ut_translate_one_cid(union acpi_operand_object *obj_desc,
+ struct acpi_compatible_id *one_cid);
+
+/*
+ * Strings supported by the _OSI predefined (internal) method.
+ */
+static char *acpi_interfaces_supported[] = {
+ /* Operating System Vendor Strings */
+
+ "Windows 2000", /* Windows 2000 */
+ "Windows 2001", /* Windows XP */
+ "Windows 2001 SP1", /* Windows XP SP1 */
+ "Windows 2001 SP2", /* Windows XP SP2 */
+ "Windows 2001.1", /* Windows Server 2003 */
+ "Windows 2001.1 SP1", /* Windows Server 2003 SP1 - Added 03/2006 */
+ "Windows 2006", /* Windows Vista - Added 03/2006 */
+
+ /* Feature Group Strings */
+
+ "Extended Address Space Descriptor"
+ /*
+ * All "optional" feature group strings (features that are implemented
+ * by the host) should be implemented in the host version of
+ * acpi_os_validate_interface and should not be added here.
+ */
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_osi_implementation
+ *
+ * PARAMETERS: walk_state - Current walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Implementation of the _OSI predefined control method
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state)
+{
+ acpi_status status;
+ union acpi_operand_object *string_desc;
+ union acpi_operand_object *return_desc;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(ut_osi_implementation);
+
+ /* Validate the string input argument */
+
+ string_desc = walk_state->arguments[0].object;
+ if (!string_desc || (string_desc->common.type != ACPI_TYPE_STRING)) {
+ return_ACPI_STATUS(AE_TYPE);
+ }
+
+ /* Create a return object */
+
+ return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!return_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Default return value is SUPPORTED */
+
+ return_desc->integer.value = ACPI_UINT32_MAX;
+ walk_state->return_desc = return_desc;
+
+ /* Compare input string to static table of supported interfaces */
+
+ for (i = 0; i < ACPI_ARRAY_LENGTH(acpi_interfaces_supported); i++) {
+ if (!ACPI_STRCMP
+ (string_desc->string.pointer,
+ acpi_interfaces_supported[i])) {
+
+ /* The interface is supported */
+
+ return_ACPI_STATUS(AE_CTRL_TERMINATE);
+ }
+ }
+
+ /*
+ * Did not match the string in the static table, call the host OSL to
+ * check for a match with one of the optional strings (such as
+ * "Module Device", "3.0 Thermal Model", etc.)
+ */
+ status = acpi_os_validate_interface(string_desc->string.pointer);
+ if (ACPI_SUCCESS(status)) {
+
+ /* The interface is supported */
+
+ return_ACPI_STATUS(AE_CTRL_TERMINATE);
+ }
+
+ /* The interface is not supported */
+
+ return_desc->integer.value = 0;
+ return_ACPI_STATUS(AE_CTRL_TERMINATE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_osi_invalidate
+ *
+ * PARAMETERS:  interface           - The interface string to invalidate
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Invalidate a string in the pre-defined _OSI string list
+ *
+ ******************************************************************************/
+
+acpi_status acpi_osi_invalidate(char *interface)
+{
+ int i;
+
+ for (i = 0; i < ACPI_ARRAY_LENGTH(acpi_interfaces_supported); i++) {
+ if (!ACPI_STRCMP(interface, acpi_interfaces_supported[i])) {
+ *acpi_interfaces_supported[i] = '\0';
+ return AE_OK;
+ }
+ }
+ return AE_NOT_FOUND;
+}
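+
+/*
+ * Editorial sketch (not part of the original source): disabling one of the
+ * _OSI strings listed above so that firmware queries for it report
+ * "unsupported". Any entry from acpi_interfaces_supported[] may be passed;
+ * AE_NOT_FOUND is returned for unknown strings.
+ */
+#if 0
+	status = acpi_osi_invalidate("Windows 2006");
+#endif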
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_evaluate_object
+ *
+ * PARAMETERS: prefix_node - Starting node
+ * Path - Path to object from starting node
+ * expected_return_types - Bitmap of allowed return types
+ * return_desc - Where a return value is stored
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Evaluates a namespace object and verifies the type of the
+ * return object. Common code that simplifies accessing objects
+ * that have required return objects of fixed types.
+ *
+ * NOTE: Internal function, no parameter validation
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
+ char *path,
+ u32 expected_return_btypes,
+ union acpi_operand_object **return_desc)
+{
+ struct acpi_evaluate_info *info;
+ acpi_status status;
+ u32 return_btype;
+
+ ACPI_FUNCTION_TRACE(ut_evaluate_object);
+
+ /* Allocate the evaluation information block */
+
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ info->prefix_node = prefix_node;
+ info->pathname = path;
+
+ /* Evaluate the object/method */
+
+ status = acpi_ns_evaluate(info);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_NOT_FOUND) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[%4.4s.%s] was not found\n",
+ acpi_ut_get_node_name(prefix_node),
+ path));
+ } else {
+ ACPI_ERROR_METHOD("Method execution failed",
+ prefix_node, path, status);
+ }
+
+ goto cleanup;
+ }
+
+ /* Did we get a return object? */
+
+ if (!info->return_object) {
+ if (expected_return_btypes) {
+ ACPI_ERROR_METHOD("No object was returned from",
+ prefix_node, path, AE_NOT_EXIST);
+
+ status = AE_NOT_EXIST;
+ }
+
+ goto cleanup;
+ }
+
+ /* Map the return object type to the bitmapped type */
+
+ switch (ACPI_GET_OBJECT_TYPE(info->return_object)) {
+ case ACPI_TYPE_INTEGER:
+ return_btype = ACPI_BTYPE_INTEGER;
+ break;
+
+ case ACPI_TYPE_BUFFER:
+ return_btype = ACPI_BTYPE_BUFFER;
+ break;
+
+ case ACPI_TYPE_STRING:
+ return_btype = ACPI_BTYPE_STRING;
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+ return_btype = ACPI_BTYPE_PACKAGE;
+ break;
+
+ default:
+ return_btype = 0;
+ break;
+ }
+
+ if ((acpi_gbl_enable_interpreter_slack) && (!expected_return_btypes)) {
+ /*
+ * We received a return object, but one was not expected. This can
+ * happen frequently if the "implicit return" feature is enabled.
+ * Just delete the return object and return AE_OK.
+ */
+ acpi_ut_remove_reference(info->return_object);
+ goto cleanup;
+ }
+
+ /* Is the return object one of the expected types? */
+
+ if (!(expected_return_btypes & return_btype)) {
+ ACPI_ERROR_METHOD("Return object type is incorrect",
+ prefix_node, path, AE_TYPE);
+
+ ACPI_ERROR((AE_INFO,
+ "Type returned from %s was incorrect: %s, expected Btypes: %X",
+ path,
+ acpi_ut_get_object_type_name(info->return_object),
+ expected_return_btypes));
+
+ /* On error exit, we must delete the return object */
+
+ acpi_ut_remove_reference(info->return_object);
+ status = AE_TYPE;
+ goto cleanup;
+ }
+
+ /* Object type is OK, return it */
+
+ *return_desc = info->return_object;
+
+ cleanup:
+ ACPI_FREE(info);
+ return_ACPI_STATUS(status);
+}
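+
+/*
+ * Editorial sketch (not part of the original source): the typical calling
+ * pattern, as used by the helpers later in this file. The caller names the
+ * expected return types, uses the returned object, and must drop the
+ * reference it was handed. "_BIF" is only a hypothetical method name here.
+ */
+#if 0
+	status = acpi_ut_evaluate_object(node, "_BIF",
+					 ACPI_BTYPE_PACKAGE, &obj_desc);
+	if (ACPI_SUCCESS(status)) {
+		/* ... use obj_desc->package ... */
+
+		acpi_ut_remove_reference(obj_desc);
+	}
+#endif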
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_evaluate_numeric_object
+ *
+ * PARAMETERS: object_name - Object name to be evaluated
+ * device_node - Node for the device
+ * Address - Where the value is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Evaluates a numeric namespace object for a selected device
+ * and stores result in *Address.
+ *
+ * NOTE: Internal function, no parameter validation
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_evaluate_numeric_object(char *object_name,
+ struct acpi_namespace_node *device_node,
+ acpi_integer * address)
+{
+ union acpi_operand_object *obj_desc;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ut_evaluate_numeric_object);
+
+ status = acpi_ut_evaluate_object(device_node, object_name,
+ ACPI_BTYPE_INTEGER, &obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Get the returned Integer */
+
+ *address = obj_desc->integer.value;
+
+ /* On exit, we must delete the return object */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_copy_id_string
+ *
+ * PARAMETERS: Destination - Where to copy the string
+ * Source - Source string
+ * max_length - Length of the destination buffer
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Copies an ID string for the _HID, _CID, and _UID methods.
+ *              Removes a leading asterisk if present -- a workaround for the
+ *              non-conforming ID strings found on many existing machines.
+ *
+ ******************************************************************************/
+
+static void
+acpi_ut_copy_id_string(char *destination, char *source, acpi_size max_length)
+{
+
+ /*
+ * Workaround for ID strings that have a leading asterisk. This construct
+ * is not allowed by the ACPI specification (ID strings must be
+ * alphanumeric), but enough existing machines have this embedded in their
+ * ID strings that the following code is useful.
+ */
+ if (*source == '*') {
+ source++;
+ }
+
+ /* Do the actual copy */
+
+ ACPI_STRNCPY(destination, source, max_length);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_execute_HID
+ *
+ * PARAMETERS: device_node - Node for the device
+ * Hid - Where the HID is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Executes the _HID control method that returns the hardware
+ * ID of the device.
+ *
+ * NOTE: Internal function, no parameter validation
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
+ struct acpica_device_id *hid)
+{
+ union acpi_operand_object *obj_desc;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ut_execute_HID);
+
+ status = acpi_ut_evaluate_object(device_node, METHOD_NAME__HID,
+ ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING,
+ &obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
+
+ /* Convert the Numeric HID to string */
+
+ acpi_ex_eisa_id_to_string((u32) obj_desc->integer.value,
+ hid->value);
+ } else {
+ /* Copy the String HID from the returned object */
+
+ acpi_ut_copy_id_string(hid->value, obj_desc->string.pointer,
+ sizeof(hid->value));
+ }
+
+ /* On exit, we must delete the return object */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_translate_one_cid
+ *
+ * PARAMETERS: obj_desc - _CID object, must be integer or string
+ * one_cid - Where the CID string is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Return a numeric or string _CID value as a string.
+ * (Compatible ID)
+ *
+ * NOTE: Assumes a maximum _CID string length of
+ * ACPI_MAX_CID_LENGTH.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_translate_one_cid(union acpi_operand_object *obj_desc,
+ struct acpi_compatible_id *one_cid)
+{
+
+ switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+ case ACPI_TYPE_INTEGER:
+
+ /* Convert the Numeric CID to string */
+
+ acpi_ex_eisa_id_to_string((u32) obj_desc->integer.value,
+ one_cid->value);
+ return (AE_OK);
+
+ case ACPI_TYPE_STRING:
+
+ if (obj_desc->string.length > ACPI_MAX_CID_LENGTH) {
+ return (AE_AML_STRING_LIMIT);
+ }
+
+ /* Copy the String CID from the returned object */
+
+ acpi_ut_copy_id_string(one_cid->value, obj_desc->string.pointer,
+ ACPI_MAX_CID_LENGTH);
+ return (AE_OK);
+
+ default:
+
+ return (AE_TYPE);
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_execute_CID
+ *
+ * PARAMETERS: device_node - Node for the device
+ * return_cid_list - Where the CID list is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Executes the _CID control method that returns one or more
+ * compatible hardware IDs for the device.
+ *
+ * NOTE: Internal function, no parameter validation
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_execute_CID(struct acpi_namespace_node * device_node,
+ struct acpi_compatible_id_list ** return_cid_list)
+{
+ union acpi_operand_object *obj_desc;
+ acpi_status status;
+ u32 count;
+ u32 size;
+ struct acpi_compatible_id_list *cid_list;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(ut_execute_CID);
+
+ /* Evaluate the _CID method for this device */
+
+ status = acpi_ut_evaluate_object(device_node, METHOD_NAME__CID,
+ ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING
+ | ACPI_BTYPE_PACKAGE, &obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Get the number of _CIDs returned */
+
+ count = 1;
+ if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_PACKAGE) {
+ count = obj_desc->package.count;
+ }
+
+ /* Allocate a worst-case buffer for the _CIDs */
+
+ size = (((count - 1) * sizeof(struct acpi_compatible_id)) +
+ sizeof(struct acpi_compatible_id_list));
+
+ cid_list = ACPI_ALLOCATE_ZEROED((acpi_size) size);
+ if (!cid_list) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Init CID list */
+
+ cid_list->count = count;
+ cid_list->size = size;
+
+ /*
+ * A _CID can return either a single compatible ID or a package of
+ * compatible IDs. Each compatible ID can be one of the following:
+ * 1) Integer (32 bit compressed EISA ID) or
+ * 2) String (PCI ID format, e.g. "PCI\VEN_vvvv&DEV_dddd&SUBSYS_ssssssss")
+ */
+
+ /* The _CID object can be either a single CID or a package (list) of CIDs */
+
+ if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_PACKAGE) {
+
+ /* Translate each package element */
+
+ for (i = 0; i < count; i++) {
+ status =
+ acpi_ut_translate_one_cid(obj_desc->package.
+ elements[i],
+ &cid_list->id[i]);
+ if (ACPI_FAILURE(status)) {
+ break;
+ }
+ }
+ } else {
+ /* Only one CID, translate to a string */
+
+ status = acpi_ut_translate_one_cid(obj_desc, cid_list->id);
+ }
+
+ /* Cleanup on error */
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_FREE(cid_list);
+ } else {
+ *return_cid_list = cid_list;
+ }
+
+ /* On exit, we must delete the _CID return object */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
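+
+/*
+ * Editorial sketch (not part of the original source): consuming the list
+ * returned by acpi_ut_execute_CID. The caller owns the allocation and must
+ * release it with ACPI_FREE when finished.
+ */
+#if 0
+	status = acpi_ut_execute_CID(device_node, &cid_list);
+	if (ACPI_SUCCESS(status)) {
+		for (i = 0; i < cid_list->count; i++) {
+			/* ... match against cid_list->id[i].value ... */
+		}
+
+		ACPI_FREE(cid_list);
+	}
+#endif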
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_execute_UID
+ *
+ * PARAMETERS: device_node - Node for the device
+ * Uid - Where the UID is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Executes the _UID control method that returns the unique
+ *              ID of the device.
+ *
+ * NOTE: Internal function, no parameter validation
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
+ struct acpica_device_id *uid)
+{
+ union acpi_operand_object *obj_desc;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ut_execute_UID);
+
+ status = acpi_ut_evaluate_object(device_node, METHOD_NAME__UID,
+ ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING,
+ &obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
+
+ /* Convert the Numeric UID to string */
+
+ acpi_ex_unsigned_integer_to_string(obj_desc->integer.value,
+ uid->value);
+ } else {
+ /* Copy the String UID from the returned object */
+
+ acpi_ut_copy_id_string(uid->value, obj_desc->string.pointer,
+ sizeof(uid->value));
+ }
+
+ /* On exit, we must delete the return object */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_execute_STA
+ *
+ * PARAMETERS: device_node - Node for the device
+ * Flags - Where the status flags are returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Executes _STA for selected device and stores results in
+ * *Flags.
+ *
+ * NOTE: Internal function, no parameter validation
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_execute_STA(struct acpi_namespace_node *device_node, u32 * flags)
+{
+ union acpi_operand_object *obj_desc;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ut_execute_STA);
+
+ status = acpi_ut_evaluate_object(device_node, METHOD_NAME__STA,
+ ACPI_BTYPE_INTEGER, &obj_desc);
+ if (ACPI_FAILURE(status)) {
+ if (AE_NOT_FOUND == status) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "_STA on %4.4s was not found, assuming device is present\n",
+ acpi_ut_get_node_name(device_node)));
+
+ *flags = ACPI_UINT32_MAX;
+ status = AE_OK;
+ }
+
+ return_ACPI_STATUS(status);
+ }
+
+ /* Extract the status flags */
+
+ *flags = (u32) obj_desc->integer.value;
+
+ /* On exit, we must delete the return object */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
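+
+/*
+ * Editorial sketch (not part of the original source): a typical _STA check.
+ * Per the ACPI specification, bit 0 of the returned flags indicates that the
+ * device is present; the mask is written out literally here since the usual
+ * named constant is not part of this patch.
+ */
+#if 0
+	status = acpi_ut_execute_STA(device_node, &flags);
+	if (ACPI_SUCCESS(status) && (flags & 0x01)) {
+		/* Device is present */
+	}
+#endif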
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_execute_Sxds
+ *
+ * PARAMETERS:  device_node         - Node for the device
+ *              Highest             - Where the highest D-state values
+ *                                    (_S1D through _S4D) are returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Executes the _SxD methods for the selected device and stores
+ *              the highest supported D-state for each sleep state in
+ *              *Highest.
+ *
+ * NOTE: Internal function, no parameter validation
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_execute_sxds(struct acpi_namespace_node *device_node, u8 * highest)
+{
+ union acpi_operand_object *obj_desc;
+ acpi_status status;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(ut_execute_sxds);
+
+ for (i = 0; i < 4; i++) {
+ highest[i] = 0xFF;
+ status = acpi_ut_evaluate_object(device_node,
+ ACPI_CAST_PTR(char,
+ acpi_gbl_highest_dstate_names
+ [i]),
+ ACPI_BTYPE_INTEGER, &obj_desc);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "%s on Device %4.4s, %s\n",
+ ACPI_CAST_PTR(char,
+ acpi_gbl_highest_dstate_names
+ [i]),
+ acpi_ut_get_node_name
+ (device_node),
+ acpi_format_exception
+ (status)));
+
+ return_ACPI_STATUS(status);
+ }
+ } else {
+ /* Extract the Dstate value */
+
+ highest[i] = (u8) obj_desc->integer.value;
+
+ /* Delete the return object */
+
+ acpi_ut_remove_reference(obj_desc);
+ }
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
diff --git a/drivers/acpi/utilities/utglobal.c b/drivers/acpi/utilities/utglobal.c
new file mode 100644
index 0000000..17ed5ac
--- /dev/null
+++ b/drivers/acpi/utilities/utglobal.c
@@ -0,0 +1,819 @@
+/******************************************************************************
+ *
+ * Module Name: utglobal - Global variables for the ACPI subsystem
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#define DEFINE_ACPI_GLOBALS
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+
+ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
+#define _COMPONENT ACPI_UTILITIES
+ ACPI_MODULE_NAME("utglobal")
+
+/*******************************************************************************
+ *
+ * Static global variable initialization.
+ *
+ ******************************************************************************/
+/*
+ * We want the debug switches statically initialized so they
+ * are already set when the debugger is entered.
+ */
+/* Debug switch - level and trace mask */
+u32 acpi_dbg_level = ACPI_DEBUG_DEFAULT;
+
+/* Debug switch - layer (component) mask */
+
+u32 acpi_dbg_layer = 0;
+u32 acpi_gbl_nesting_level = 0;
+
+/* Debugger globals */
+
+u8 acpi_gbl_db_terminate_threads = FALSE;
+u8 acpi_gbl_abort_method = FALSE;
+u8 acpi_gbl_method_executing = FALSE;
+
+/* System flags */
+
+u32 acpi_gbl_startup_flags = 0;
+
+/* System starts uninitialized */
+
+u8 acpi_gbl_shutdown = TRUE;
+
+const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT] = {
+ "\\_S0_",
+ "\\_S1_",
+ "\\_S2_",
+ "\\_S3_",
+ "\\_S4_",
+ "\\_S5_"
+};
+
+const char *acpi_gbl_highest_dstate_names[4] = {
+ "_S1D",
+ "_S2D",
+ "_S3D",
+ "_S4D"
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_format_exception
+ *
+ * PARAMETERS: Status - The acpi_status code to be formatted
+ *
+ * RETURN: A string containing the exception text. A valid pointer is
+ * always returned.
+ *
+ * DESCRIPTION: This function translates an ACPI exception into an ASCII
+ *              string. It is here instead of utxface.c so that it is always
+ *              present.
+ *
+ ******************************************************************************/
+
+const char *acpi_format_exception(acpi_status status)
+{
+ const char *exception = NULL;
+
+ ACPI_FUNCTION_ENTRY();
+
+ exception = acpi_ut_validate_exception(status);
+ if (!exception) {
+
+ /* Exception code was not recognized */
+
+ ACPI_ERROR((AE_INFO,
+ "Unknown exception code: 0x%8.8X", status));
+
+ exception = "UNKNOWN_STATUS_CODE";
+ dump_stack();
+ }
+
+ return (ACPI_CAST_PTR(const char, exception));
+}
+
+ACPI_EXPORT_SYMBOL(acpi_format_exception)
+
+/*******************************************************************************
+ *
+ * Namespace globals
+ *
+ ******************************************************************************/
+/*
+ * Predefined ACPI Names (Built-in to the Interpreter)
+ *
+ * NOTES:
+ * 1) _SB_ is defined to be a device to allow \_SB_._INI to be run
+ * during the initialization sequence.
+ * 2) _TZ_ is defined to be a thermal zone in order to allow ASL code to
+ * perform a Notify() operation on it.
+ */
+const struct acpi_predefined_names acpi_gbl_pre_defined_names[] = {
+ {"_GPE", ACPI_TYPE_LOCAL_SCOPE, NULL},
+ {"_PR_", ACPI_TYPE_LOCAL_SCOPE, NULL},
+ {"_SB_", ACPI_TYPE_DEVICE, NULL},
+ {"_SI_", ACPI_TYPE_LOCAL_SCOPE, NULL},
+ {"_TZ_", ACPI_TYPE_THERMAL, NULL},
+ {"_REV", ACPI_TYPE_INTEGER, (char *)ACPI_CA_SUPPORT_LEVEL},
+ {"_OS_", ACPI_TYPE_STRING, ACPI_OS_NAME},
+ {"_GL_", ACPI_TYPE_MUTEX, (char *)1},
+
+#if !defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY)
+ {"_OSI", ACPI_TYPE_METHOD, (char *)1},
+#endif
+
+ /* Table terminator */
+
+ {NULL, ACPI_TYPE_ANY, NULL}
+};
+
+/*
+ * Properties of the ACPI Object Types, both internal and external.
+ * The table is indexed by values of acpi_object_type
+ */
+const u8 acpi_gbl_ns_properties[] = {
+ ACPI_NS_NORMAL, /* 00 Any */
+ ACPI_NS_NORMAL, /* 01 Number */
+ ACPI_NS_NORMAL, /* 02 String */
+ ACPI_NS_NORMAL, /* 03 Buffer */
+ ACPI_NS_NORMAL, /* 04 Package */
+ ACPI_NS_NORMAL, /* 05 field_unit */
+ ACPI_NS_NEWSCOPE, /* 06 Device */
+ ACPI_NS_NORMAL, /* 07 Event */
+ ACPI_NS_NEWSCOPE, /* 08 Method */
+ ACPI_NS_NORMAL, /* 09 Mutex */
+ ACPI_NS_NORMAL, /* 10 Region */
+ ACPI_NS_NEWSCOPE, /* 11 Power */
+ ACPI_NS_NEWSCOPE, /* 12 Processor */
+ ACPI_NS_NEWSCOPE, /* 13 Thermal */
+ ACPI_NS_NORMAL, /* 14 buffer_field */
+ ACPI_NS_NORMAL, /* 15 ddb_handle */
+ ACPI_NS_NORMAL, /* 16 Debug Object */
+ ACPI_NS_NORMAL, /* 17 def_field */
+ ACPI_NS_NORMAL, /* 18 bank_field */
+ ACPI_NS_NORMAL, /* 19 index_field */
+ ACPI_NS_NORMAL, /* 20 Reference */
+ ACPI_NS_NORMAL, /* 21 Alias */
+ ACPI_NS_NORMAL, /* 22 method_alias */
+ ACPI_NS_NORMAL, /* 23 Notify */
+ ACPI_NS_NORMAL, /* 24 Address Handler */
+ ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL, /* 25 Resource Desc */
+ ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL, /* 26 Resource Field */
+ ACPI_NS_NEWSCOPE, /* 27 Scope */
+ ACPI_NS_NORMAL, /* 28 Extra */
+ ACPI_NS_NORMAL, /* 29 Data */
+ ACPI_NS_NORMAL /* 30 Invalid */
+};
+
+/* Hex to ASCII conversion table */
+
+static const char acpi_gbl_hex_to_ascii[] = {
+ '0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_hex_to_ascii_char
+ *
+ * PARAMETERS: Integer - Contains the hex digit
+ * Position - bit position of the digit within the
+ * integer (multiple of 4)
+ *
+ * RETURN: The converted Ascii character
+ *
+ * DESCRIPTION: Convert a hex digit to an Ascii character
+ *
+ ******************************************************************************/
+
+char acpi_ut_hex_to_ascii_char(acpi_integer integer, u32 position)
+{
+
+ return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]);
+}
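+
+/*
+ * Editorial sketch (not part of the original source): emitting a 32-bit value
+ * as eight hex characters, most significant nibble first, using the helper
+ * above. "buffer", "value", and "i" are caller-provided placeholders.
+ */
+#if 0
+	for (i = 0; i < 8; i++) {
+		buffer[i] = acpi_ut_hex_to_ascii_char(value, 28 - (i * 4));
+	}
+#endif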
+
+/******************************************************************************
+ *
+ * Event and Hardware globals
+ *
+ ******************************************************************************/
+
+struct acpi_bit_register_info acpi_gbl_bit_register_info[ACPI_NUM_BITREG] = {
+ /* Name Parent Register Register Bit Position Register Bit Mask */
+
+ /* ACPI_BITREG_TIMER_STATUS */ {ACPI_REGISTER_PM1_STATUS,
+ ACPI_BITPOSITION_TIMER_STATUS,
+ ACPI_BITMASK_TIMER_STATUS},
+ /* ACPI_BITREG_BUS_MASTER_STATUS */ {ACPI_REGISTER_PM1_STATUS,
+ ACPI_BITPOSITION_BUS_MASTER_STATUS,
+ ACPI_BITMASK_BUS_MASTER_STATUS},
+ /* ACPI_BITREG_GLOBAL_LOCK_STATUS */ {ACPI_REGISTER_PM1_STATUS,
+ ACPI_BITPOSITION_GLOBAL_LOCK_STATUS,
+ ACPI_BITMASK_GLOBAL_LOCK_STATUS},
+ /* ACPI_BITREG_POWER_BUTTON_STATUS */ {ACPI_REGISTER_PM1_STATUS,
+ ACPI_BITPOSITION_POWER_BUTTON_STATUS,
+ ACPI_BITMASK_POWER_BUTTON_STATUS},
+ /* ACPI_BITREG_SLEEP_BUTTON_STATUS */ {ACPI_REGISTER_PM1_STATUS,
+ ACPI_BITPOSITION_SLEEP_BUTTON_STATUS,
+ ACPI_BITMASK_SLEEP_BUTTON_STATUS},
+ /* ACPI_BITREG_RT_CLOCK_STATUS */ {ACPI_REGISTER_PM1_STATUS,
+ ACPI_BITPOSITION_RT_CLOCK_STATUS,
+ ACPI_BITMASK_RT_CLOCK_STATUS},
+ /* ACPI_BITREG_WAKE_STATUS */ {ACPI_REGISTER_PM1_STATUS,
+ ACPI_BITPOSITION_WAKE_STATUS,
+ ACPI_BITMASK_WAKE_STATUS},
+ /* ACPI_BITREG_PCIEXP_WAKE_STATUS */ {ACPI_REGISTER_PM1_STATUS,
+ ACPI_BITPOSITION_PCIEXP_WAKE_STATUS,
+ ACPI_BITMASK_PCIEXP_WAKE_STATUS},
+
+ /* ACPI_BITREG_TIMER_ENABLE */ {ACPI_REGISTER_PM1_ENABLE,
+ ACPI_BITPOSITION_TIMER_ENABLE,
+ ACPI_BITMASK_TIMER_ENABLE},
+ /* ACPI_BITREG_GLOBAL_LOCK_ENABLE */ {ACPI_REGISTER_PM1_ENABLE,
+ ACPI_BITPOSITION_GLOBAL_LOCK_ENABLE,
+ ACPI_BITMASK_GLOBAL_LOCK_ENABLE},
+ /* ACPI_BITREG_POWER_BUTTON_ENABLE */ {ACPI_REGISTER_PM1_ENABLE,
+ ACPI_BITPOSITION_POWER_BUTTON_ENABLE,
+ ACPI_BITMASK_POWER_BUTTON_ENABLE},
+ /* ACPI_BITREG_SLEEP_BUTTON_ENABLE */ {ACPI_REGISTER_PM1_ENABLE,
+ ACPI_BITPOSITION_SLEEP_BUTTON_ENABLE,
+ ACPI_BITMASK_SLEEP_BUTTON_ENABLE},
+ /* ACPI_BITREG_RT_CLOCK_ENABLE */ {ACPI_REGISTER_PM1_ENABLE,
+ ACPI_BITPOSITION_RT_CLOCK_ENABLE,
+ ACPI_BITMASK_RT_CLOCK_ENABLE},
+ /* ACPI_BITREG_PCIEXP_WAKE_DISABLE */ {ACPI_REGISTER_PM1_ENABLE,
+ ACPI_BITPOSITION_PCIEXP_WAKE_DISABLE,
+ ACPI_BITMASK_PCIEXP_WAKE_DISABLE},
+
+ /* ACPI_BITREG_SCI_ENABLE */ {ACPI_REGISTER_PM1_CONTROL,
+ ACPI_BITPOSITION_SCI_ENABLE,
+ ACPI_BITMASK_SCI_ENABLE},
+ /* ACPI_BITREG_BUS_MASTER_RLD */ {ACPI_REGISTER_PM1_CONTROL,
+ ACPI_BITPOSITION_BUS_MASTER_RLD,
+ ACPI_BITMASK_BUS_MASTER_RLD},
+ /* ACPI_BITREG_GLOBAL_LOCK_RELEASE */ {ACPI_REGISTER_PM1_CONTROL,
+ ACPI_BITPOSITION_GLOBAL_LOCK_RELEASE,
+ ACPI_BITMASK_GLOBAL_LOCK_RELEASE},
+ /* ACPI_BITREG_SLEEP_TYPE_A */ {ACPI_REGISTER_PM1_CONTROL,
+ ACPI_BITPOSITION_SLEEP_TYPE_X,
+ ACPI_BITMASK_SLEEP_TYPE_X},
+ /* ACPI_BITREG_SLEEP_TYPE_B */ {ACPI_REGISTER_PM1_CONTROL,
+ ACPI_BITPOSITION_SLEEP_TYPE_X,
+ ACPI_BITMASK_SLEEP_TYPE_X},
+ /* ACPI_BITREG_SLEEP_ENABLE */ {ACPI_REGISTER_PM1_CONTROL,
+ ACPI_BITPOSITION_SLEEP_ENABLE,
+ ACPI_BITMASK_SLEEP_ENABLE},
+
+ /* ACPI_BITREG_ARB_DIS */ {ACPI_REGISTER_PM2_CONTROL,
+ ACPI_BITPOSITION_ARB_DISABLE,
+ ACPI_BITMASK_ARB_DISABLE}
+};
+
+struct acpi_fixed_event_info acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS] = {
+ /* ACPI_EVENT_PMTIMER */ {ACPI_BITREG_TIMER_STATUS,
+ ACPI_BITREG_TIMER_ENABLE,
+ ACPI_BITMASK_TIMER_STATUS,
+ ACPI_BITMASK_TIMER_ENABLE},
+ /* ACPI_EVENT_GLOBAL */ {ACPI_BITREG_GLOBAL_LOCK_STATUS,
+ ACPI_BITREG_GLOBAL_LOCK_ENABLE,
+ ACPI_BITMASK_GLOBAL_LOCK_STATUS,
+ ACPI_BITMASK_GLOBAL_LOCK_ENABLE},
+ /* ACPI_EVENT_POWER_BUTTON */ {ACPI_BITREG_POWER_BUTTON_STATUS,
+ ACPI_BITREG_POWER_BUTTON_ENABLE,
+ ACPI_BITMASK_POWER_BUTTON_STATUS,
+ ACPI_BITMASK_POWER_BUTTON_ENABLE},
+ /* ACPI_EVENT_SLEEP_BUTTON */ {ACPI_BITREG_SLEEP_BUTTON_STATUS,
+ ACPI_BITREG_SLEEP_BUTTON_ENABLE,
+ ACPI_BITMASK_SLEEP_BUTTON_STATUS,
+ ACPI_BITMASK_SLEEP_BUTTON_ENABLE},
+ /* ACPI_EVENT_RTC */ {ACPI_BITREG_RT_CLOCK_STATUS,
+ ACPI_BITREG_RT_CLOCK_ENABLE,
+ ACPI_BITMASK_RT_CLOCK_STATUS,
+ ACPI_BITMASK_RT_CLOCK_ENABLE},
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_region_name
+ *
+ * PARAMETERS: space_id - ID for the region
+ *
+ * RETURN: Pointer to a string containing the region name
+ *
+ * DESCRIPTION: Translate a Space ID into a name string (Debug only)
+ *
+ ******************************************************************************/
+
+/* Region type decoding */
+
+const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
+ "SystemMemory",
+ "SystemIO",
+ "PCI_Config",
+ "EmbeddedControl",
+ "SMBus",
+ "CMOS",
+ "PCIBARTarget",
+ "DataTable"
+};
+
+char *acpi_ut_get_region_name(u8 space_id)
+{
+
+ if (space_id >= ACPI_USER_REGION_BEGIN) {
+ return ("UserDefinedRegion");
+ } else if (space_id >= ACPI_NUM_PREDEFINED_REGIONS) {
+ return ("InvalidSpaceId");
+ }
+
+ return (ACPI_CAST_PTR(char, acpi_gbl_region_types[space_id]));
+}
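+
+/*
+ * Decoding sketch (illustrative): with the table above, space_id 4 decodes
+ * to "SMBus", IDs at or above ACPI_USER_REGION_BEGIN decode to
+ * "UserDefinedRegion", and anything in between is "InvalidSpaceId".
+ *
+ *     ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Region type: %s\n",
+ *                       acpi_ut_get_region_name(region_obj->region.space_id)));
+ *
+ * The region_obj operand object used here is an assumption for the example.
+ */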
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_event_name
+ *
+ * PARAMETERS: event_id - Fixed event ID
+ *
+ * RETURN: Pointer to a string containing the event name
+ *
+ * DESCRIPTION: Translate an Event ID into a name string (Debug only)
+ *
+ ******************************************************************************/
+
+/* Event type decoding */
+
+static const char *acpi_gbl_event_types[ACPI_NUM_FIXED_EVENTS] = {
+ "PM_Timer",
+ "GlobalLock",
+ "PowerButton",
+ "SleepButton",
+ "RealTimeClock",
+};
+
+char *acpi_ut_get_event_name(u32 event_id)
+{
+
+ if (event_id > ACPI_EVENT_MAX) {
+ return ("InvalidEventID");
+ }
+
+ return (ACPI_CAST_PTR(char, acpi_gbl_event_types[event_id]));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_type_name
+ *
+ * PARAMETERS: Type - An ACPI object type
+ *
+ * RETURN: Pointer to a string containing the type name
+ *
+ * DESCRIPTION: Translate a Type ID into a name string (Debug only)
+ *
+ ******************************************************************************/
+
+/*
+ * Elements of acpi_gbl_ns_type_names below must match
+ * one-to-one with values of acpi_object_type
+ *
+ * The type ACPI_TYPE_ANY (Untyped) is used as a "don't care" when searching;
+ * when stored in a table it really means that we have thus far seen no
+ * evidence to indicate what type is actually going to be stored for this entry.
+ */
+static const char acpi_gbl_bad_type[] = "UNDEFINED";
+
+/* Printable names of the ACPI object types */
+
+static const char *acpi_gbl_ns_type_names[] = {
+ /* 00 */ "Untyped",
+ /* 01 */ "Integer",
+ /* 02 */ "String",
+ /* 03 */ "Buffer",
+ /* 04 */ "Package",
+ /* 05 */ "FieldUnit",
+ /* 06 */ "Device",
+ /* 07 */ "Event",
+ /* 08 */ "Method",
+ /* 09 */ "Mutex",
+ /* 10 */ "Region",
+ /* 11 */ "Power",
+ /* 12 */ "Processor",
+ /* 13 */ "Thermal",
+ /* 14 */ "BufferField",
+ /* 15 */ "DdbHandle",
+ /* 16 */ "DebugObject",
+ /* 17 */ "RegionField",
+ /* 18 */ "BankField",
+ /* 19 */ "IndexField",
+ /* 20 */ "Reference",
+ /* 21 */ "Alias",
+ /* 22 */ "MethodAlias",
+ /* 23 */ "Notify",
+ /* 24 */ "AddrHandler",
+ /* 25 */ "ResourceDesc",
+ /* 26 */ "ResourceFld",
+ /* 27 */ "Scope",
+ /* 28 */ "Extra",
+ /* 29 */ "Data",
+ /* 30 */ "Invalid"
+};
+
+char *acpi_ut_get_type_name(acpi_object_type type)
+{
+
+ if (type > ACPI_TYPE_INVALID) {
+ return (ACPI_CAST_PTR(char, acpi_gbl_bad_type));
+ }
+
+ return (ACPI_CAST_PTR(char, acpi_gbl_ns_type_names[type]));
+}
+
+char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc)
+{
+
+ if (!obj_desc) {
+ return ("[NULL Object Descriptor]");
+ }
+
+ return (acpi_ut_get_type_name(ACPI_GET_OBJECT_TYPE(obj_desc)));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_node_name
+ *
+ * PARAMETERS: Object - A namespace node
+ *
+ * RETURN: Pointer to a string
+ *
+ * DESCRIPTION: Validate the node and return the node's ACPI name.
+ *
+ ******************************************************************************/
+
+char *acpi_ut_get_node_name(void *object)
+{
+ struct acpi_namespace_node *node = (struct acpi_namespace_node *)object;
+
+ /* Must return a string of exactly 4 characters == ACPI_NAME_SIZE */
+
+ if (!object) {
+ return ("NULL");
+ }
+
+ /* Check for Root node */
+
+ if ((object == ACPI_ROOT_OBJECT) || (object == acpi_gbl_root_node)) {
+ return ("\"\\\" ");
+ }
+
+ /* Descriptor must be a namespace node */
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
+ return ("####");
+ }
+
+ /* Name must be a valid ACPI name */
+
+ if (!acpi_ut_valid_acpi_name(node->name.integer)) {
+ node->name.integer = acpi_ut_repair_name(node->name.ascii);
+ }
+
+ /* Return the name */
+
+ return (node->name.ascii);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_descriptor_name
+ *
+ * PARAMETERS: Object - An ACPI object
+ *
+ * RETURN: Pointer to a string
+ *
+ * DESCRIPTION: Validate object and return the descriptor type
+ *
+ ******************************************************************************/
+
+/* Printable names of object descriptor types */
+
+static const char *acpi_gbl_desc_type_names[] = {
+ /* 00 */ "Invalid",
+ /* 01 */ "Cached",
+ /* 02 */ "State-Generic",
+ /* 03 */ "State-Update",
+ /* 04 */ "State-Package",
+ /* 05 */ "State-Control",
+ /* 06 */ "State-RootParseScope",
+ /* 07 */ "State-ParseScope",
+ /* 08 */ "State-WalkScope",
+ /* 09 */ "State-Result",
+ /* 10 */ "State-Notify",
+ /* 11 */ "State-Thread",
+ /* 12 */ "Walk",
+ /* 13 */ "Parser",
+ /* 14 */ "Operand",
+ /* 15 */ "Node"
+};
+
+char *acpi_ut_get_descriptor_name(void *object)
+{
+
+ if (!object) {
+ return ("NULL OBJECT");
+ }
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(object) > ACPI_DESC_TYPE_MAX) {
+ return (ACPI_CAST_PTR(char, acpi_gbl_bad_type));
+ }
+
+ return (ACPI_CAST_PTR(char,
+ acpi_gbl_desc_type_names[ACPI_GET_DESCRIPTOR_TYPE
+ (object)]));
+
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_reference_name
+ *
+ * PARAMETERS: Object - An ACPI reference object
+ *
+ * RETURN: Pointer to a string
+ *
+ * DESCRIPTION: Decode a reference object sub-type to a string.
+ *
+ ******************************************************************************/
+
+/* Printable names of reference object sub-types */
+
+static const char *acpi_gbl_ref_class_names[] = {
+ /* 00 */ "Local",
+ /* 01 */ "Argument",
+ /* 02 */ "RefOf",
+ /* 03 */ "Index",
+ /* 04 */ "DdbHandle",
+ /* 05 */ "Named Object",
+ /* 06 */ "Debug"
+};
+
+const char *acpi_ut_get_reference_name(union acpi_operand_object *object)
+{
+ if (!object)
+ return "NULL Object";
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND)
+ return "Not an Operand object";
+
+ if (object->common.type != ACPI_TYPE_LOCAL_REFERENCE)
+ return "Not a Reference object";
+
+ if (object->reference.class > ACPI_REFCLASS_MAX)
+ return "Unknown Reference class";
+
+ return acpi_gbl_ref_class_names[object->reference.class];
+}
+
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+/*
+ * Strings and procedures used for debug only
+ */
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_mutex_name
+ *
+ * PARAMETERS: mutex_id - The predefined ID for this mutex.
+ *
+ * RETURN: String containing the name of the mutex. Always returns a valid
+ * pointer.
+ *
+ * DESCRIPTION: Translate a mutex ID into a name string (Debug only)
+ *
+ ******************************************************************************/
+
+char *acpi_ut_get_mutex_name(u32 mutex_id)
+{
+
+ if (mutex_id > ACPI_MAX_MUTEX) {
+ return ("Invalid Mutex ID");
+ }
+
+ return (acpi_gbl_mutex_names[mutex_id]);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_notify_name
+ *
+ * PARAMETERS: notify_value - Value from the Notify() request
+ *
+ * RETURN: String corresponding to the Notify Value.
+ *
+ * DESCRIPTION: Translate a Notify Value to a notify namestring.
+ *
+ ******************************************************************************/
+
+/* Names for Notify() values, used for debug output */
+
+static const char *acpi_gbl_notify_value_names[] = {
+ "Bus Check",
+ "Device Check",
+ "Device Wake",
+ "Eject Request",
+ "Device Check Light",
+ "Frequency Mismatch",
+ "Bus Mode Mismatch",
+ "Power Fault",
+ "Capabilities Check",
+ "Device PLD Check",
+ "Reserved",
+ "System Locality Update"
+};
+
+const char *acpi_ut_get_notify_name(u32 notify_value)
+{
+
+ if (notify_value <= ACPI_NOTIFY_MAX) {
+ return (acpi_gbl_notify_value_names[notify_value]);
+ } else if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
+ return ("Reserved");
+ } else { /* Greater or equal to 0x80 */
+
+ return ("**Device Specific**");
+ }
+}
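+
+/*
+ * Decoding sketch (illustrative): with the table above, Notify value 0x00
+ * maps to "Bus Check" and 0x01 to "Device Check". Values above
+ * ACPI_NOTIFY_MAX but below 0x80 report "Reserved", and 0x80 and above are
+ * reported as device specific.
+ *
+ *     ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Notify: %s\n",
+ *                       acpi_ut_get_notify_name(notify_value)));
+ */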
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_valid_object_type
+ *
+ * PARAMETERS: Type - Object type to be validated
+ *
+ * RETURN: TRUE if valid object type, FALSE otherwise
+ *
+ * DESCRIPTION: Validate an object type
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_valid_object_type(acpi_object_type type)
+{
+
+ if (type > ACPI_TYPE_LOCAL_MAX) {
+
+ /* Note: Assumes all TYPEs are contiguous (external/local) */
+
+ return (FALSE);
+ }
+
+ return (TRUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_init_globals
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Init library globals. All globals that require specific
+ * initialization should be initialized here!
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_init_globals(void)
+{
+ acpi_status status;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(ut_init_globals);
+
+ /* Create all memory caches */
+
+ status = acpi_ut_create_caches();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Mutex locked flags */
+
+ for (i = 0; i < ACPI_NUM_MUTEX; i++) {
+ acpi_gbl_mutex_info[i].mutex = NULL;
+ acpi_gbl_mutex_info[i].thread_id = ACPI_MUTEX_NOT_ACQUIRED;
+ acpi_gbl_mutex_info[i].use_count = 0;
+ }
+
+ for (i = 0; i < ACPI_NUM_OWNERID_MASKS; i++) {
+ acpi_gbl_owner_id_mask[i] = 0;
+ }
+ acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000; /* Last ID is never valid */
+
+ /* GPE support */
+
+ acpi_gbl_gpe_xrupt_list_head = NULL;
+ acpi_gbl_gpe_fadt_blocks[0] = NULL;
+ acpi_gbl_gpe_fadt_blocks[1] = NULL;
+
+ /* Global handlers */
+
+ acpi_gbl_system_notify.handler = NULL;
+ acpi_gbl_device_notify.handler = NULL;
+ acpi_gbl_exception_handler = NULL;
+ acpi_gbl_init_handler = NULL;
+ acpi_gbl_table_handler = NULL;
+
+ /* Global Lock support */
+
+ acpi_gbl_global_lock_semaphore = NULL;
+ acpi_gbl_global_lock_mutex = NULL;
+ acpi_gbl_global_lock_acquired = FALSE;
+ acpi_gbl_global_lock_handle = 0;
+
+ /* Miscellaneous variables */
+
+ acpi_gbl_cm_single_step = FALSE;
+ acpi_gbl_db_terminate_threads = FALSE;
+ acpi_gbl_shutdown = FALSE;
+ acpi_gbl_ns_lookup_count = 0;
+ acpi_gbl_ps_find_count = 0;
+ acpi_gbl_acpi_hardware_present = TRUE;
+ acpi_gbl_last_owner_id_index = 0;
+ acpi_gbl_next_owner_id_offset = 0;
+ acpi_gbl_trace_method_name = 0;
+ acpi_gbl_trace_dbg_level = 0;
+ acpi_gbl_trace_dbg_layer = 0;
+ acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
+ acpi_gbl_db_output_flags = ACPI_DB_CONSOLE_OUTPUT;
+
+ /* Hardware oriented */
+
+ acpi_gbl_events_initialized = FALSE;
+ acpi_gbl_system_awake_and_running = TRUE;
+
+ /* Namespace */
+
+ acpi_gbl_root_node = NULL;
+ acpi_gbl_root_node_struct.name.integer = ACPI_ROOT_NAME;
+ acpi_gbl_root_node_struct.descriptor_type = ACPI_DESC_TYPE_NAMED;
+ acpi_gbl_root_node_struct.type = ACPI_TYPE_DEVICE;
+ acpi_gbl_root_node_struct.child = NULL;
+ acpi_gbl_root_node_struct.peer = NULL;
+ acpi_gbl_root_node_struct.object = NULL;
+ acpi_gbl_root_node_struct.flags = ANOBJ_END_OF_PEER_LIST;
+
+#ifdef ACPI_DEBUG_OUTPUT
+ acpi_gbl_lowest_stack_pointer = ACPI_CAST_PTR(acpi_size, ACPI_SIZE_MAX);
+#endif
+
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+ acpi_gbl_display_final_mem_stats = FALSE;
+#endif
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_dbg_level)
+ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
diff --git a/drivers/acpi/utilities/utinit.c b/drivers/acpi/utilities/utinit.c
new file mode 100644
index 0000000..cae515f
--- /dev/null
+++ b/drivers/acpi/utilities/utinit.c
@@ -0,0 +1,151 @@
+/******************************************************************************
+ *
+ * Module Name: utinit - Common ACPI subsystem initialization
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acevents.h>
+#include <acpi/actables.h>
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utinit")
+
+/* Local prototypes */
+static void acpi_ut_terminate(void);
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_ut_terminate
+ *
+ * PARAMETERS: none
+ *
+ * RETURN: none
+ *
+ * DESCRIPTION: Free global memory
+ *
+ ******************************************************************************/
+
+static void acpi_ut_terminate(void)
+{
+ struct acpi_gpe_block_info *gpe_block;
+ struct acpi_gpe_block_info *next_gpe_block;
+ struct acpi_gpe_xrupt_info *gpe_xrupt_info;
+ struct acpi_gpe_xrupt_info *next_gpe_xrupt_info;
+
+ ACPI_FUNCTION_TRACE(ut_terminate);
+
+ /* Free global GPE blocks and related info structures */
+
+ gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
+ while (gpe_xrupt_info) {
+ gpe_block = gpe_xrupt_info->gpe_block_list_head;
+ while (gpe_block) {
+ next_gpe_block = gpe_block->next;
+ ACPI_FREE(gpe_block->event_info);
+ ACPI_FREE(gpe_block->register_info);
+ ACPI_FREE(gpe_block);
+
+ gpe_block = next_gpe_block;
+ }
+ next_gpe_xrupt_info = gpe_xrupt_info->next;
+ ACPI_FREE(gpe_xrupt_info);
+ gpe_xrupt_info = next_gpe_xrupt_info;
+ }
+
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_subsystem_shutdown
+ *
+ * PARAMETERS: none
+ *
+ * RETURN: none
+ *
+ * DESCRIPTION: Shut down the various subsystems. Don't delete the mutex
+ * objects here -- because the AML debugger may still be running.
+ *
+ ******************************************************************************/
+
+void acpi_ut_subsystem_shutdown(void)
+{
+
+ ACPI_FUNCTION_TRACE(ut_subsystem_shutdown);
+
+ /* Just exit if subsystem is already shutdown */
+
+ if (acpi_gbl_shutdown) {
+ ACPI_ERROR((AE_INFO, "ACPI Subsystem is already terminated"));
+ return_VOID;
+ }
+
+ /* Subsystem appears active, go ahead and shut it down */
+
+ acpi_gbl_shutdown = TRUE;
+ acpi_gbl_startup_flags = 0;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Shutting down ACPI Subsystem\n"));
+
+#ifndef ACPI_ASL_COMPILER
+
+ /* Close the acpi_event Handling */
+
+ acpi_ev_terminate();
+#endif
+
+ /* Close the Namespace */
+
+ acpi_ns_terminate();
+
+ /* Delete the ACPI tables */
+
+ acpi_tb_terminate();
+
+ /* Close the globals */
+
+ acpi_ut_terminate();
+
+ /* Purge the local caches */
+
+ (void)acpi_ut_delete_caches();
+ return_VOID;
+}
diff --git a/drivers/acpi/utilities/utmath.c b/drivers/acpi/utilities/utmath.c
new file mode 100644
index 0000000..c927324
--- /dev/null
+++ b/drivers/acpi/utilities/utmath.c
@@ -0,0 +1,311 @@
+/*******************************************************************************
+ *
+ * Module Name: utmath - Integer math support routines
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utmath")
+
+/*
+ * Support for double-precision integer divide. This code is included here
+ * in order to support kernel environments where the double-precision math
+ * library is not available.
+ */
+#ifndef ACPI_USE_NATIVE_DIVIDE
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_divide
+ *
+ * PARAMETERS: Dividend - 64-bit dividend
+ * Divisor - 32-bit divisor
+ * out_quotient - Pointer to where the quotient is returned
+ * out_remainder - Pointer to where the remainder is returned
+ *
+ * RETURN: Status (Checks for divide-by-zero)
+ *
+ * DESCRIPTION: Perform a short (maximum 64 bits divided by 32 bits)
+ * divide and modulo. The result is a 64-bit quotient and a
+ * 32-bit remainder.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ut_short_divide(acpi_integer dividend,
+ u32 divisor,
+ acpi_integer * out_quotient, u32 * out_remainder)
+{
+ union uint64_overlay dividend_ovl;
+ union uint64_overlay quotient;
+ u32 remainder32;
+
+ ACPI_FUNCTION_TRACE(ut_short_divide);
+
+ /* Always check for a zero divisor */
+
+ if (divisor == 0) {
+ ACPI_ERROR((AE_INFO, "Divide by zero"));
+ return_ACPI_STATUS(AE_AML_DIVIDE_BY_ZERO);
+ }
+
+ dividend_ovl.full = dividend;
+
+ /*
+ * The quotient is 64 bits, the remainder is always 32 bits,
+ * and is generated by the second divide.
+ */
+ ACPI_DIV_64_BY_32(0, dividend_ovl.part.hi, divisor,
+ quotient.part.hi, remainder32);
+ ACPI_DIV_64_BY_32(remainder32, dividend_ovl.part.lo, divisor,
+ quotient.part.lo, remainder32);
+
+ /* Return only what was requested */
+
+ if (out_quotient) {
+ *out_quotient = quotient.full;
+ }
+ if (out_remainder) {
+ *out_remainder = remainder32;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
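+
+/*
+ * Worked example (illustrative): 0x0000000200000001 divided by 3. The first
+ * ACPI_DIV_64_BY_32 divides the high half (0x2 / 3 = 0, remainder 2); the
+ * second divides 0x200000001 by 3, so the final quotient is
+ * 0x00000000AAAAAAAB with a remainder of 0.
+ *
+ *     acpi_integer quotient;
+ *     u32 remainder;
+ *
+ *     (void)acpi_ut_short_divide(0x0000000200000001, 3, &quotient, &remainder);
+ */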
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_divide
+ *
+ * PARAMETERS: in_dividend - Dividend
+ * in_divisor - Divisor
+ * out_quotient - Pointer to where the quotient is returned
+ * out_remainder - Pointer to where the remainder is returned
+ *
+ * RETURN: Status (Checks for divide-by-zero)
+ *
+ * DESCRIPTION: Perform a divide and modulo.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_divide(acpi_integer in_dividend,
+ acpi_integer in_divisor,
+ acpi_integer * out_quotient, acpi_integer * out_remainder)
+{
+ union uint64_overlay dividend;
+ union uint64_overlay divisor;
+ union uint64_overlay quotient;
+ union uint64_overlay remainder;
+ union uint64_overlay normalized_dividend;
+ union uint64_overlay normalized_divisor;
+ u32 partial1;
+ union uint64_overlay partial2;
+ union uint64_overlay partial3;
+
+ ACPI_FUNCTION_TRACE(ut_divide);
+
+ /* Always check for a zero divisor */
+
+ if (in_divisor == 0) {
+ ACPI_ERROR((AE_INFO, "Divide by zero"));
+ return_ACPI_STATUS(AE_AML_DIVIDE_BY_ZERO);
+ }
+
+ divisor.full = in_divisor;
+ dividend.full = in_dividend;
+ if (divisor.part.hi == 0) {
+ /*
+ * 1) Simplest case is where the divisor is 32 bits, we can
+ * just do two divides
+ */
+ remainder.part.hi = 0;
+
+ /*
+ * The quotient is 64 bits, the remainder is always 32 bits,
+ * and is generated by the second divide.
+ */
+ ACPI_DIV_64_BY_32(0, dividend.part.hi, divisor.part.lo,
+ quotient.part.hi, partial1);
+ ACPI_DIV_64_BY_32(partial1, dividend.part.lo, divisor.part.lo,
+ quotient.part.lo, remainder.part.lo);
+ }
+
+ else {
+ /*
+ * 2) The general case where the divisor is a full 64 bits
+ * is more difficult
+ */
+ quotient.part.hi = 0;
+ normalized_dividend = dividend;
+ normalized_divisor = divisor;
+
+ /* Normalize the operands (shift right until the divisor fits in 32 bits) */
+
+ do {
+ ACPI_SHIFT_RIGHT_64(normalized_divisor.part.hi,
+ normalized_divisor.part.lo);
+ ACPI_SHIFT_RIGHT_64(normalized_dividend.part.hi,
+ normalized_dividend.part.lo);
+
+ } while (normalized_divisor.part.hi != 0);
+
+ /* Partial divide */
+
+ ACPI_DIV_64_BY_32(normalized_dividend.part.hi,
+ normalized_dividend.part.lo,
+ normalized_divisor.part.lo,
+ quotient.part.lo, partial1);
+
+ /*
+ * The quotient is always 32 bits, and simply requires adjustment.
+ * The 64-bit remainder must be generated.
+ */
+ partial1 = quotient.part.lo * divisor.part.hi;
+ partial2.full =
+ (acpi_integer) quotient.part.lo * divisor.part.lo;
+ partial3.full = (acpi_integer) partial2.part.hi + partial1;
+
+ remainder.part.hi = partial3.part.lo;
+ remainder.part.lo = partial2.part.lo;
+
+ if (partial3.part.hi == 0) {
+ if (partial3.part.lo >= dividend.part.hi) {
+ if (partial3.part.lo == dividend.part.hi) {
+ if (partial2.part.lo > dividend.part.lo) {
+ quotient.part.lo--;
+ remainder.full -= divisor.full;
+ }
+ } else {
+ quotient.part.lo--;
+ remainder.full -= divisor.full;
+ }
+ }
+
+ remainder.full = remainder.full - dividend.full;
+ remainder.part.hi = (u32) - ((s32) remainder.part.hi);
+ remainder.part.lo = (u32) - ((s32) remainder.part.lo);
+
+ if (remainder.part.lo) {
+ remainder.part.hi--;
+ }
+ }
+ }
+
+ /* Return only what was requested */
+
+ if (out_quotient) {
+ *out_quotient = quotient.full;
+ }
+ if (out_remainder) {
+ *out_remainder = remainder.full;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
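+
+/*
+ * Worked example for the full 64-bit path (illustrative): dividing
+ * 0x0000000500000000 by 0x0000000200000000 yields quotient 2 and remainder
+ * 0x0000000100000000. Both operands are shifted right until the divisor
+ * fits in 32 bits, then the quotient and 64-bit remainder are adjusted.
+ *
+ *     acpi_integer quotient;
+ *     acpi_integer remainder;
+ *
+ *     (void)acpi_ut_divide(0x0000000500000000, 0x0000000200000000,
+ *                          &quotient, &remainder);
+ */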
+
+#else
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_divide, acpi_ut_divide
+ *
+ * PARAMETERS: See function headers above
+ *
+ * DESCRIPTION: Native versions of the ut_divide functions. Use these if either
+ * 1) The target is a 64-bit platform and therefore 64-bit
+ * integer math is supported directly by the machine.
+ * 2) The target is a 32-bit or 16-bit platform, and the
+ * double-precision integer math library is available to
+ * perform the divide.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ut_short_divide(acpi_integer in_dividend,
+ u32 divisor,
+ acpi_integer * out_quotient, u32 * out_remainder)
+{
+
+ ACPI_FUNCTION_TRACE(ut_short_divide);
+
+ /* Always check for a zero divisor */
+
+ if (divisor == 0) {
+ ACPI_ERROR((AE_INFO, "Divide by zero"));
+ return_ACPI_STATUS(AE_AML_DIVIDE_BY_ZERO);
+ }
+
+ /* Return only what was requested */
+
+ if (out_quotient) {
+ *out_quotient = in_dividend / divisor;
+ }
+ if (out_remainder) {
+ *out_remainder = (u32) (in_dividend % divisor);
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+acpi_status
+acpi_ut_divide(acpi_integer in_dividend,
+ acpi_integer in_divisor,
+ acpi_integer * out_quotient, acpi_integer * out_remainder)
+{
+ ACPI_FUNCTION_TRACE(ut_divide);
+
+ /* Always check for a zero divisor */
+
+ if (in_divisor == 0) {
+ ACPI_ERROR((AE_INFO, "Divide by zero"));
+ return_ACPI_STATUS(AE_AML_DIVIDE_BY_ZERO);
+ }
+
+ /* Return only what was requested */
+
+ if (out_quotient) {
+ *out_quotient = in_dividend / in_divisor;
+ }
+ if (out_remainder) {
+ *out_remainder = in_dividend % in_divisor;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+#endif
diff --git a/drivers/acpi/utilities/utmisc.c b/drivers/acpi/utilities/utmisc.c
new file mode 100644
index 0000000..9089a15
--- /dev/null
+++ b/drivers/acpi/utilities/utmisc.c
@@ -0,0 +1,1090 @@
+/*******************************************************************************
+ *
+ * Module Name: utmisc - common utility procedures
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <linux/module.h>
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utmisc")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_validate_exception
+ *
+ * PARAMETERS: Status - The acpi_status code to be formatted
+ *
+ * RETURN: A string containing the exception text. NULL if exception is
+ * not valid.
+ *
+ * DESCRIPTION: This function validates and translates an ACPI exception into
+ * an ASCII string.
+ *
+ ******************************************************************************/
+const char *acpi_ut_validate_exception(acpi_status status)
+{
+ u32 sub_status;
+ const char *exception = NULL;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /*
+ * Status is composed of two parts, a "type" and an actual code
+ */
+ sub_status = (status & ~AE_CODE_MASK);
+
+ switch (status & AE_CODE_MASK) {
+ case AE_CODE_ENVIRONMENTAL:
+
+ if (sub_status <= AE_CODE_ENV_MAX) {
+ exception = acpi_gbl_exception_names_env[sub_status];
+ }
+ break;
+
+ case AE_CODE_PROGRAMMER:
+
+ if (sub_status <= AE_CODE_PGM_MAX) {
+ exception = acpi_gbl_exception_names_pgm[sub_status];
+ }
+ break;
+
+ case AE_CODE_ACPI_TABLES:
+
+ if (sub_status <= AE_CODE_TBL_MAX) {
+ exception = acpi_gbl_exception_names_tbl[sub_status];
+ }
+ break;
+
+ case AE_CODE_AML:
+
+ if (sub_status <= AE_CODE_AML_MAX) {
+ exception = acpi_gbl_exception_names_aml[sub_status];
+ }
+ break;
+
+ case AE_CODE_CONTROL:
+
+ if (sub_status <= AE_CODE_CTRL_MAX) {
+ exception = acpi_gbl_exception_names_ctrl[sub_status];
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return (ACPI_CAST_PTR(const char, exception));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_is_aml_table
+ *
+ * PARAMETERS: Table - An ACPI table
+ *
+ * RETURN: TRUE if table contains executable AML; FALSE otherwise
+ *
+ * DESCRIPTION: Check ACPI Signature for a table that contains AML code.
+ * Currently, these are DSDT, SSDT, and PSDT. All other table types are
+ * data tables that do not contain AML code.
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_is_aml_table(struct acpi_table_header *table)
+{
+
+ /* These are the only tables that contain executable AML */
+
+ if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT) ||
+ ACPI_COMPARE_NAME(table->signature, ACPI_SIG_PSDT) ||
+ ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT)) {
+ return (TRUE);
+ }
+
+ return (FALSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_allocate_owner_id
+ *
+ * PARAMETERS: owner_id - Where the new owner ID is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Allocate a table or method owner ID. The owner ID is used to
+ * track objects created by the table or method, to be deleted
+ * when the method exits or the table is unloaded.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
+{
+ u32 i;
+ u32 j;
+ u32 k;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ut_allocate_owner_id);
+
+ /* Guard against multiple allocations of ID to the same location */
+
+ if (*owner_id) {
+ ACPI_ERROR((AE_INFO, "Owner ID [%2.2X] already exists",
+ *owner_id));
+ return_ACPI_STATUS(AE_ALREADY_EXISTS);
+ }
+
+ /* Mutex for the global ID mask */
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Find a free owner ID, cycle through all possible IDs on repeated
+ * allocations. (ACPI_NUM_OWNERID_MASKS + 1) because first index may have
+ * to be scanned twice.
+ */
+ for (i = 0, j = acpi_gbl_last_owner_id_index;
+ i < (ACPI_NUM_OWNERID_MASKS + 1); i++, j++) {
+ if (j >= ACPI_NUM_OWNERID_MASKS) {
+ j = 0; /* Wraparound to start of mask array */
+ }
+
+ for (k = acpi_gbl_next_owner_id_offset; k < 32; k++) {
+ if (acpi_gbl_owner_id_mask[j] == ACPI_UINT32_MAX) {
+
+ /* There are no free IDs in this mask */
+
+ break;
+ }
+
+ if (!(acpi_gbl_owner_id_mask[j] & (1 << k))) {
+ /*
+ * Found a free ID. The actual ID is the bit index plus one,
+ * making zero an invalid Owner ID. Save this as the last ID
+ * allocated and update the global ID mask.
+ */
+ acpi_gbl_owner_id_mask[j] |= (1 << k);
+
+ acpi_gbl_last_owner_id_index = (u8) j;
+ acpi_gbl_next_owner_id_offset = (u8) (k + 1);
+
+ /*
+ * Construct encoded ID from the index and bit position
+ *
+ * Note: Last [j].k (bit 255) is never used and is marked
+ * permanently allocated (prevents +1 overflow)
+ */
+ *owner_id =
+ (acpi_owner_id) ((k + 1) + ACPI_MUL_32(j));
+
+ ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
+ "Allocated OwnerId: %2.2X\n",
+ (unsigned int)*owner_id));
+ goto exit;
+ }
+ }
+
+ acpi_gbl_next_owner_id_offset = 0;
+ }
+
+ /*
+ * All owner_ids have been allocated. This typically should
+ * not happen since the IDs are reused after deallocation. The IDs are
+ * allocated upon table load (one per table) and method execution, and
+ * they are released when a table is unloaded or a method completes
+ * execution.
+ *
+ * If this error happens, there may be very deep nesting of invoked control
+ * methods, or there may be a bug where the IDs are not released.
+ */
+ status = AE_OWNER_ID_LIMIT;
+ ACPI_ERROR((AE_INFO,
+ "Could not allocate new OwnerId (255 max), AE_OWNER_ID_LIMIT"));
+
+ exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
+ return_ACPI_STATUS(status);
+}
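+
+/*
+ * Encoding sketch (illustrative): an owner ID is (bit index + 1) plus
+ * 32 * (mask index), so the first free bit in mask 0 yields ID 1 and bit 5
+ * in mask 2 yields ID 70. acpi_ut_release_owner_id() below reverses this
+ * with ACPI_DIV_32 and ACPI_MOD_32 after subtracting 1.
+ *
+ *     acpi_owner_id owner_id = 0;    (must start at zero)
+ *
+ *     status = acpi_ut_allocate_owner_id(&owner_id);
+ *     ...
+ *     acpi_ut_release_owner_id(&owner_id);
+ */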
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_release_owner_id
+ *
+ * PARAMETERS: owner_id_ptr - Pointer to a previously allocated owner_id
+ *
+ * RETURN: None. No error is returned because we are either exiting a
+ * control method or unloading a table. Either way, we would
+ * ignore any error anyway.
+ *
+ * DESCRIPTION: Release a table or method owner ID. Valid IDs are 1 - 255
+ *
+ ******************************************************************************/
+
+void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
+{
+ acpi_owner_id owner_id = *owner_id_ptr;
+ acpi_status status;
+ u32 index;
+ u32 bit;
+
+ ACPI_FUNCTION_TRACE_U32(ut_release_owner_id, owner_id);
+
+ /* Always clear the input owner_id (zero is an invalid ID) */
+
+ *owner_id_ptr = 0;
+
+ /* Zero is not a valid owner_id */
+
+ if (owner_id == 0) {
+ ACPI_ERROR((AE_INFO, "Invalid OwnerId: %2.2X", owner_id));
+ return_VOID;
+ }
+
+ /* Mutex for the global ID mask */
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
+
+ /* Normalize the ID to zero */
+
+ owner_id--;
+
+ /* Decode ID to index/offset pair */
+
+ index = ACPI_DIV_32(owner_id);
+ bit = 1 << ACPI_MOD_32(owner_id);
+
+ /* Free the owner ID only if it is valid */
+
+ if (acpi_gbl_owner_id_mask[index] & bit) {
+ acpi_gbl_owner_id_mask[index] ^= bit;
+ } else {
+ ACPI_ERROR((AE_INFO,
+ "Release of non-allocated OwnerId: %2.2X",
+ owner_id + 1));
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strupr (strupr)
+ *
+ * PARAMETERS: src_string - The source string to convert
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Convert string to uppercase
+ *
+ * NOTE: This is not a POSIX function, so it appears here, not in utclib.c
+ *
+ ******************************************************************************/
+
+void acpi_ut_strupr(char *src_string)
+{
+ char *string;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (!src_string) {
+ return;
+ }
+
+ /* Walk entire string, uppercasing the letters */
+
+ for (string = src_string; *string; string++) {
+ *string = (char)ACPI_TOUPPER(*string);
+ }
+
+ return;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_print_string
+ *
+ * PARAMETERS: String - Null terminated ASCII string
+ * max_length - Maximum output length
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Dump an ASCII string with support for ACPI-defined escape
+ * sequences.
+ *
+ ******************************************************************************/
+
+void acpi_ut_print_string(char *string, u8 max_length)
+{
+ u32 i;
+
+ if (!string) {
+ acpi_os_printf("<\"NULL STRING PTR\">");
+ return;
+ }
+
+ acpi_os_printf("\"");
+ for (i = 0; string[i] && (i < max_length); i++) {
+
+ /* Escape sequences */
+
+ switch (string[i]) {
+ case 0x07:
+ acpi_os_printf("\\a"); /* BELL */
+ break;
+
+ case 0x08:
+ acpi_os_printf("\\b"); /* BACKSPACE */
+ break;
+
+ case 0x0C:
+ acpi_os_printf("\\f"); /* FORMFEED */
+ break;
+
+ case 0x0A:
+ acpi_os_printf("\\n"); /* LINEFEED */
+ break;
+
+ case 0x0D:
+ acpi_os_printf("\\r"); /* CARRIAGE RETURN */
+ break;
+
+ case 0x09:
+ acpi_os_printf("\\t"); /* HORIZONTAL TAB */
+ break;
+
+ case 0x0B:
+ acpi_os_printf("\\v"); /* VERTICAL TAB */
+ break;
+
+ case '\'': /* Single Quote */
+ case '\"': /* Double Quote */
+ case '\\': /* Backslash */
+ acpi_os_printf("\\%c", (int)string[i]);
+ break;
+
+ default:
+
+ /* Check for printable character or hex escape */
+
+ if (ACPI_IS_PRINT(string[i])) {
+ /* This is a normal character */
+
+ acpi_os_printf("%c", (int)string[i]);
+ } else {
+ /* All others will be Hex escapes */
+
+ acpi_os_printf("\\x%2.2X", (s32) string[i]);
+ }
+ break;
+ }
+ }
+ acpi_os_printf("\"");
+
+ if (i == max_length && string[i]) {
+ acpi_os_printf("...");
+ }
+}
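+
+/*
+ * Output sketch (illustrative): a source string of the four characters
+ * A, TAB, double quote, B is printed as "A\t\"B". Non-printable bytes fall
+ * through to the \x hex escape, and a trailing "..." marks truncation at
+ * max_length.
+ */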
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_dword_byte_swap
+ *
+ * PARAMETERS: Value - Value to be converted
+ *
+ * RETURN: u32 integer with bytes swapped
+ *
+ * DESCRIPTION: Convert a 32-bit value to big-endian (swap the bytes)
+ *
+ ******************************************************************************/
+
+u32 acpi_ut_dword_byte_swap(u32 value)
+{
+ union {
+ u32 value;
+ u8 bytes[4];
+ } out;
+ union {
+ u32 value;
+ u8 bytes[4];
+ } in;
+
+ ACPI_FUNCTION_ENTRY();
+
+ in.value = value;
+
+ out.bytes[0] = in.bytes[3];
+ out.bytes[1] = in.bytes[2];
+ out.bytes[2] = in.bytes[1];
+ out.bytes[3] = in.bytes[0];
+
+ return (out.value);
+}
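+
+/*
+ * Example (illustrative): acpi_ut_dword_byte_swap(0x12345678) returns
+ * 0x78563412. The union-based byte indexing reverses the byte order of the
+ * value independent of host endianness.
+ */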
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_set_integer_width
+ *
+ * PARAMETERS: Revision From DSDT header
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Set the global integer bit width based upon the revision
+ * of the DSDT. For Revision 1 and 0, Integers are 32 bits.
+ * For Revision 2 and above, Integers are 64 bits. Yes, this
+ * makes a difference.
+ *
+ ******************************************************************************/
+
+void acpi_ut_set_integer_width(u8 revision)
+{
+
+ if (revision < 2) {
+
+ /* 32-bit case */
+
+ acpi_gbl_integer_bit_width = 32;
+ acpi_gbl_integer_nybble_width = 8;
+ acpi_gbl_integer_byte_width = 4;
+ } else {
+ /* 64-bit case (ACPI 2.0+) */
+
+ acpi_gbl_integer_bit_width = 64;
+ acpi_gbl_integer_nybble_width = 16;
+ acpi_gbl_integer_byte_width = 8;
+ }
+}
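+
+/*
+ * Example (illustrative): a DSDT with Revision 1 selects 32-bit integers
+ * (bit width 32, 8 nybbles, 4 bytes); Revision 2 or later selects 64-bit
+ * integers (bit width 64, 16 nybbles, 8 bytes).
+ *
+ *     acpi_ut_set_integer_width(dsdt_header->revision);
+ *
+ * The dsdt_header pointer is hypothetical; the real caller passes the
+ * revision field of the loaded DSDT.
+ */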
+
+#ifdef ACPI_DEBUG_OUTPUT
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_display_init_pathname
+ *
+ * PARAMETERS: Type - Object type of the node
+ * obj_handle - Handle whose pathname will be displayed
+ * Path - Additional path string to be appended.
+ * (NULL if no extra path)
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Display full pathname of an object, DEBUG ONLY
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_display_init_pathname(u8 type,
+ struct acpi_namespace_node *obj_handle,
+ char *path)
+{
+ acpi_status status;
+ struct acpi_buffer buffer;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* Only print the path if the appropriate debug level is enabled */
+
+ if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
+ return;
+ }
+
+ /* Get the full pathname to the node */
+
+ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+ status = acpi_ns_handle_to_pathname(obj_handle, &buffer);
+ if (ACPI_FAILURE(status)) {
+ return;
+ }
+
+ /* Print what we're doing */
+
+ switch (type) {
+ case ACPI_TYPE_METHOD:
+ acpi_os_printf("Executing ");
+ break;
+
+ default:
+ acpi_os_printf("Initializing ");
+ break;
+ }
+
+ /* Print the object type and pathname */
+
+ acpi_os_printf("%-12s %s",
+ acpi_ut_get_type_name(type), (char *)buffer.pointer);
+
+ /* Extra path is used to append names like _STA, _INI, etc. */
+
+ if (path) {
+ acpi_os_printf(".%s", path);
+ }
+ acpi_os_printf("\n");
+
+ ACPI_FREE(buffer.pointer);
+}
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_valid_acpi_char
+ *
+ * PARAMETERS: Char - The character to be examined
+ * Position - Byte position (0-3)
+ *
+ * RETURN: TRUE if the character is valid, FALSE otherwise
+ *
+ * DESCRIPTION: Check for a valid ACPI character. Must be one of:
+ * 1) Upper case alpha
+ * 2) numeric
+ * 3) underscore
+ *
+ * We allow a '!' as the last character because of the ASF! table
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_valid_acpi_char(char character, u32 position)
+{
+
+ if (!((character >= 'A' && character <= 'Z') ||
+ (character >= '0' && character <= '9') || (character == '_'))) {
+
+ /* Allow a '!' in the last position */
+
+ if (character == '!' && position == 3) {
+ return (TRUE);
+ }
+
+ return (FALSE);
+ }
+
+ return (TRUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_valid_acpi_name
+ *
+ * PARAMETERS: Name - The name to be examined
+ *
+ * RETURN: TRUE if the name is valid, FALSE otherwise
+ *
+ * DESCRIPTION: Check for a valid ACPI name. Each character must be one of:
+ * 1) Upper case alpha
+ * 2) numeric
+ * 3) underscore
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_valid_acpi_name(u32 name)
+{
+ u32 i;
+
+ ACPI_FUNCTION_ENTRY();
+
+ for (i = 0; i < ACPI_NAME_SIZE; i++) {
+ if (!acpi_ut_valid_acpi_char
+ ((ACPI_CAST_PTR(char, &name))[i], i)) {
+ return (FALSE);
+ }
+ }
+
+ return (TRUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_repair_name
+ *
+ * PARAMETERS: Name - The ACPI name to be repaired
+ *
+ * RETURN: Repaired version of the name
+ *
+ * DESCRIPTION: Repair an ACPI name: Change invalid characters to '*' and
+ * return the new name.
+ *
+ ******************************************************************************/
+
+acpi_name acpi_ut_repair_name(char *name)
+{
+ u32 i;
+ char new_name[ACPI_NAME_SIZE];
+
+ for (i = 0; i < ACPI_NAME_SIZE; i++) {
+ new_name[i] = name[i];
+
+ /*
+ * Replace a bad character with something printable, yet technically
+ * still invalid. This prevents any collisions with existing "good"
+ * names in the namespace.
+ */
+ if (!acpi_ut_valid_acpi_char(name[i], i)) {
+ new_name[i] = '*';
+ }
+ }
+
+ return (*(u32 *) new_name);
+}
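+
+/*
+ * Examples (illustrative): "_SB_" and "CPU0" are valid per
+ * acpi_ut_valid_acpi_name(), and "ASF!" is accepted because '!' is allowed
+ * in the last position. A name with lowercase letters such as "abc_" is
+ * invalid, and acpi_ut_repair_name("abc_") returns the name "***_" (each
+ * invalid character replaced by '*').
+ */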
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strtoul64
+ *
+ * PARAMETERS: String - Null terminated string
+ * Base - Radix of the string: 16 or ACPI_ANY_BASE;
+ * ACPI_ANY_BASE means the conversion is done on behalf of a to_integer operation
+ * ret_integer - Where the converted integer is returned
+ *
+ * RETURN: Status and Converted value
+ *
+ * DESCRIPTION: Convert a string into an unsigned value. Performs either a
+ * 32-bit or 64-bit conversion, depending on the current mode
+ * of the interpreter.
+ * NOTE: Octal strings are not supported; they are not needed.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_strtoul64(char *string, u32 base, acpi_integer * ret_integer)
+{
+ u32 this_digit = 0;
+ acpi_integer return_value = 0;
+ acpi_integer quotient;
+ acpi_integer dividend;
+ u32 to_integer_op = (base == ACPI_ANY_BASE);
+ u32 mode32 = (acpi_gbl_integer_byte_width == 4);
+ u8 valid_digits = 0;
+ u8 sign_of0x = 0;
+ u8 term = 0;
+
+ ACPI_FUNCTION_TRACE_STR(ut_strtoul64, string);
+
+ switch (base) {
+ case ACPI_ANY_BASE:
+ case 16:
+ break;
+
+ default:
+ /* Invalid Base */
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ if (!string) {
+ goto error_exit;
+ }
+
+ /* Skip over any white space in the buffer */
+
+ while ((*string) && (ACPI_IS_SPACE(*string) || *string == '\t')) {
+ string++;
+ }
+
+ if (to_integer_op) {
+ /*
+ * Base equal to ACPI_ANY_BASE means 'to_integer operation case'.
+ * We need to determine if it is decimal or hexadecimal.
+ */
+ if ((*string == '0') && (ACPI_TOLOWER(*(string + 1)) == 'x')) {
+ sign_of0x = 1;
+ base = 16;
+
+ /* Skip over the leading '0x' */
+ string += 2;
+ } else {
+ base = 10;
+ }
+ }
+
+ /* Any string left? Check that '0x' is not followed by white space. */
+
+ if (!(*string) || ACPI_IS_SPACE(*string) || *string == '\t') {
+ if (to_integer_op) {
+ goto error_exit;
+ } else {
+ goto all_done;
+ }
+ }
+
+ /*
+ * Perform a 32-bit or 64-bit conversion, depending upon the current
+ * execution mode of the interpreter
+ */
+ dividend = (mode32) ? ACPI_UINT32_MAX : ACPI_UINT64_MAX;
+
+ /* Main loop: convert the string to a 32- or 64-bit integer */
+
+ while (*string) {
+ if (ACPI_IS_DIGIT(*string)) {
+
+ /* Convert ASCII 0-9 to Decimal value */
+
+ this_digit = ((u8) * string) - '0';
+ } else if (base == 10) {
+
+ /* Digit is out of range; possible in to_integer case only */
+
+ term = 1;
+ } else {
+ this_digit = (u8) ACPI_TOUPPER(*string);
+ if (ACPI_IS_XDIGIT((char)this_digit)) {
+
+ /* Convert ASCII Hex char to value */
+
+ this_digit = this_digit - 'A' + 10;
+ } else {
+ term = 1;
+ }
+ }
+
+ if (term) {
+ if (to_integer_op) {
+ goto error_exit;
+ } else {
+ break;
+ }
+ } else if ((valid_digits == 0) && (this_digit == 0)
+ && !sign_of0x) {
+
+ /* Skip zeros */
+ string++;
+ continue;
+ }
+
+ valid_digits++;
+
+ if (sign_of0x && ((valid_digits > 16)
+ || ((valid_digits > 8) && mode32))) {
+ /*
+ * This is the to_integer operation case: the '0x' value has
+ * more hex digits than the current integer width can hold,
+ * so the conversion fails.
+ */
+ goto error_exit;
+ }
+
+ /* Divide the digit into the correct position */
+
+ (void)
+ acpi_ut_short_divide((dividend - (acpi_integer) this_digit),
+ base, &quotient, NULL);
+
+ if (return_value > quotient) {
+ if (to_integer_op) {
+ goto error_exit;
+ } else {
+ break;
+ }
+ }
+
+ return_value *= base;
+ return_value += this_digit;
+ string++;
+ }
+
+ /* All done, normal exit */
+
+ all_done:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
+ ACPI_FORMAT_UINT64(return_value)));
+
+ *ret_integer = return_value;
+ return_ACPI_STATUS(AE_OK);
+
+ error_exit:
+ /* Base was set/validated above */
+
+ if (base == 10) {
+ return_ACPI_STATUS(AE_BAD_DECIMAL_CONSTANT);
+ } else {
+ return_ACPI_STATUS(AE_BAD_HEX_CONSTANT);
+ }
+}
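+
+/*
+ * Conversion examples (illustrative), assuming 64-bit integer mode:
+ *
+ *     acpi_ut_strtoul64("0x1234", ACPI_ANY_BASE, &value)   value = 0x1234
+ *     acpi_ut_strtoul64("1234", ACPI_ANY_BASE, &value)     value = 1234 (decimal)
+ *     acpi_ut_strtoul64("1234", 16, &value)                value = 0x1234
+ *
+ * In the ACPI_ANY_BASE (to_integer) case, a bare "0x" with no digits fails
+ * with AE_BAD_HEX_CONSTANT.
+ */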
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_create_update_state_and_push
+ *
+ * PARAMETERS: Object - Object to be added to the new state
+ * Action - Increment/Decrement
+ * state_list - List the state will be added to
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create a new state and push it
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_create_update_state_and_push(union acpi_operand_object *object,
+ u16 action,
+ union acpi_generic_state **state_list)
+{
+ union acpi_generic_state *state;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* Ignore null objects; these are expected */
+
+ if (!object) {
+ return (AE_OK);
+ }
+
+ state = acpi_ut_create_update_state(object, action);
+ if (!state) {
+ return (AE_NO_MEMORY);
+ }
+
+ acpi_ut_push_generic_state(state_list, state);
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_walk_package_tree
+ *
+ * PARAMETERS: source_object - The package to walk
+ * target_object - Target object (if package is being copied)
+ * walk_callback - Called once for each package element
+ * Context - Passed to the callback function
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Walk through a package
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_walk_package_tree(union acpi_operand_object * source_object,
+ void *target_object,
+ acpi_pkg_callback walk_callback, void *context)
+{
+ acpi_status status = AE_OK;
+ union acpi_generic_state *state_list = NULL;
+ union acpi_generic_state *state;
+ u32 this_index;
+ union acpi_operand_object *this_source_obj;
+
+ ACPI_FUNCTION_TRACE(ut_walk_package_tree);
+
+ state = acpi_ut_create_pkg_state(source_object, target_object, 0);
+ if (!state) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ while (state) {
+
+ /* Get one element of the package */
+
+ this_index = state->pkg.index;
+ this_source_obj = (union acpi_operand_object *)
+ state->pkg.source_object->package.elements[this_index];
+
+ /*
+ * Check for:
+ * 1) An uninitialized package element. It is completely
+ * legal to declare a package and leave it uninitialized
+ * 2) Not an internal object - can be a namespace node instead
+ * 3) Any type other than a package. Packages are handled in else
+ * case below.
+ */
+ if ((!this_source_obj) ||
+ (ACPI_GET_DESCRIPTOR_TYPE(this_source_obj) !=
+ ACPI_DESC_TYPE_OPERAND)
+ || (ACPI_GET_OBJECT_TYPE(this_source_obj) !=
+ ACPI_TYPE_PACKAGE)) {
+ status =
+ walk_callback(ACPI_COPY_TYPE_SIMPLE,
+ this_source_obj, state, context);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ state->pkg.index++;
+ while (state->pkg.index >=
+ state->pkg.source_object->package.count) {
+ /*
+ * We've handled all of the objects at this level. This means
+ * that we have just completed a package; that package may
+ * itself have contained one or more packages.
+ *
+ * Delete this state and pop the previous state (package).
+ */
+ acpi_ut_delete_generic_state(state);
+ state = acpi_ut_pop_generic_state(&state_list);
+
+ /* Finished when there are no more states */
+
+ if (!state) {
+ /*
+ * We have handled all of the objects in the top-level
+ * package, so the walk is complete; exit.
+ */
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Go back up a level and move the index past the just
+ * completed package object.
+ */
+ state->pkg.index++;
+ }
+ } else {
+ /* This is a subobject of type package */
+
+ status =
+ walk_callback(ACPI_COPY_TYPE_PACKAGE,
+ this_source_obj, state, context);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Push the current state and create a new one.
+ * The callback above returned a new target package object.
+ */
+ acpi_ut_push_generic_state(&state_list, state);
+ state = acpi_ut_create_pkg_state(this_source_obj,
+ state->pkg.
+ this_target_obj, 0);
+ if (!state) {
+
+ /* Free any stacked Update State objects */
+
+ while (state_list) {
+ state =
+ acpi_ut_pop_generic_state
+ (&state_list);
+ acpi_ut_delete_generic_state(state);
+ }
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+ }
+ }
+
+ /* We should never get here */
+
+ return_ACPI_STATUS(AE_AML_INTERNAL);
+}
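+
+/*
+ * Callback sketch (hedged; the function name, pkg_obj, and body are
+ * assumptions, not part of this file): walk_callback is invoked with
+ * ACPI_COPY_TYPE_SIMPLE for each leaf element and ACPI_COPY_TYPE_PACKAGE
+ * for each nested package.
+ *
+ *     static acpi_status
+ *     count_elements(u8 object_type, union acpi_operand_object *source,
+ *                    union acpi_generic_state *state, void *context)
+ *     {
+ *         if (object_type == ACPI_COPY_TYPE_SIMPLE) {
+ *             (*(u32 *)context)++;
+ *         }
+ *         return (AE_OK);
+ *     }
+ *
+ *     u32 count = 0;
+ *     status = acpi_ut_walk_package_tree(pkg_obj, NULL, count_elements, &count);
+ */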
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_error, acpi_ut_exception, acpi_ut_warning, acpi_ut_info
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * Format - Printf format string + additional args
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print message with module/line/version info
+ *
+ ******************************************************************************/
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_error(const char *module_name, u32 line_number, const char *format, ...)
+{
+ va_list args;
+
+ acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
+
+ va_start(args, format);
+ acpi_os_vprintf(format, args);
+ acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
+ va_end(args);
+}
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_exception(const char *module_name,
+ u32 line_number, acpi_status status, const char *format, ...)
+{
+ va_list args;
+
+ acpi_os_printf("ACPI Exception (%s-%04d): %s, ", module_name,
+ line_number, acpi_format_exception(status));
+
+ va_start(args, format);
+ acpi_os_vprintf(format, args);
+ acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
+ va_end(args);
+}
+
+EXPORT_SYMBOL(acpi_ut_exception);
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_warning(const char *module_name,
+ u32 line_number, const char *format, ...)
+{
+ va_list args;
+
+ acpi_os_printf("ACPI Warning (%s-%04d): ", module_name, line_number);
+
+ va_start(args, format);
+ acpi_os_vprintf(format, args);
+ acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
+ va_end(args);
+}
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_info(const char *module_name, u32 line_number, const char *format, ...)
+{
+ va_list args;
+
+ /*
+	 * Removed module_name, line_number, and the ACPICA version; they are
+	 * not needed for info output
+ */
+ acpi_os_printf("ACPI: ");
+
+ va_start(args, format);
+ acpi_os_vprintf(format, args);
+ acpi_os_printf("\n");
+ va_end(args);
+}
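+
+/*
+ * Usage sketch: elsewhere in this patch these helpers are reached through the
+ * ACPI_ERROR/ACPI_EXCEPTION/ACPI_WARNING/ACPI_INFO macros, which supply the
+ * module name and line number automatically, for example (from utmutex.c
+ * below):
+ *
+ *     ACPI_ERROR((AE_INFO,
+ *                 "Mutex [%X] is not acquired, cannot release", mutex_id));
+ *
+ * The doubled parentheses let the macro forward a variable argument list to
+ * acpi_ut_error() and friends. The exact macro definitions live in the ACPICA
+ * headers and are assumed here; they are not part of this patch.
+ */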
diff --git a/drivers/acpi/utilities/utmutex.c b/drivers/acpi/utilities/utmutex.c
new file mode 100644
index 0000000..7331dde
--- /dev/null
+++ b/drivers/acpi/utilities/utmutex.c
@@ -0,0 +1,341 @@
+/*******************************************************************************
+ *
+ * Module Name: utmutex - local mutex support
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utmutex")
+
+/* Local prototypes */
+static acpi_status acpi_ut_create_mutex(acpi_mutex_handle mutex_id);
+
+static acpi_status acpi_ut_delete_mutex(acpi_mutex_handle mutex_id);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_mutex_initialize
+ *
+ * PARAMETERS: None.
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create the system mutex objects.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_mutex_initialize(void)
+{
+ u32 i;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ut_mutex_initialize);
+
+ /*
+ * Create each of the predefined mutex objects
+ */
+ for (i = 0; i < ACPI_NUM_MUTEX; i++) {
+ status = acpi_ut_create_mutex(i);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /* Create the spinlocks for use at interrupt level */
+
+ spin_lock_init(acpi_gbl_gpe_lock);
+ spin_lock_init(acpi_gbl_hardware_lock);
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_mutex_terminate
+ *
+ * PARAMETERS: None.
+ *
+ * RETURN: None.
+ *
+ * DESCRIPTION: Delete all of the system mutex objects.
+ *
+ ******************************************************************************/
+
+void acpi_ut_mutex_terminate(void)
+{
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(ut_mutex_terminate);
+
+ /*
+ * Delete each predefined mutex object
+ */
+ for (i = 0; i < ACPI_NUM_MUTEX; i++) {
+ (void)acpi_ut_delete_mutex(i);
+ }
+
+ /* Delete the spinlocks */
+
+ acpi_os_delete_lock(acpi_gbl_gpe_lock);
+ acpi_os_delete_lock(acpi_gbl_hardware_lock);
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_create_mutex
+ *
+ * PARAMETERS:  mutex_id            - ID of the mutex to be created
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create a mutex object.
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_ut_create_mutex(acpi_mutex_handle mutex_id)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE_U32(ut_create_mutex, mutex_id);
+
+ if (mutex_id > ACPI_MAX_MUTEX) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ if (!acpi_gbl_mutex_info[mutex_id].mutex) {
+ status =
+ acpi_os_create_mutex(&acpi_gbl_mutex_info[mutex_id].mutex);
+ acpi_gbl_mutex_info[mutex_id].thread_id =
+ ACPI_MUTEX_NOT_ACQUIRED;
+ acpi_gbl_mutex_info[mutex_id].use_count = 0;
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_delete_mutex
+ *
+ * PARAMETERS:  mutex_id            - ID of the mutex to be deleted
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Delete a mutex object.
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_ut_delete_mutex(acpi_mutex_handle mutex_id)
+{
+
+ ACPI_FUNCTION_TRACE_U32(ut_delete_mutex, mutex_id);
+
+ if (mutex_id > ACPI_MAX_MUTEX) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ acpi_os_delete_mutex(acpi_gbl_mutex_info[mutex_id].mutex);
+
+ acpi_gbl_mutex_info[mutex_id].mutex = NULL;
+ acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED;
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_acquire_mutex
+ *
+ * PARAMETERS:  mutex_id            - ID of the mutex to be acquired
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Acquire a mutex object.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
+{
+ acpi_status status;
+ acpi_thread_id this_thread_id;
+
+ ACPI_FUNCTION_NAME(ut_acquire_mutex);
+
+ if (mutex_id > ACPI_MAX_MUTEX) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ this_thread_id = acpi_os_get_thread_id();
+
+#ifdef ACPI_MUTEX_DEBUG
+ {
+ u32 i;
+ /*
+ * Mutex debug code, for internal debugging only.
+ *
+ * Deadlock prevention. Check if this thread owns any mutexes of value
+ * greater than or equal to this one. If so, the thread has violated
+ * the mutex ordering rule. This indicates a coding error somewhere in
+ * the ACPI subsystem code.
+ */
+ for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) {
+ if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
+ if (i == mutex_id) {
+ ACPI_ERROR((AE_INFO,
+ "Mutex [%s] already acquired by this thread [%X]",
+ acpi_ut_get_mutex_name
+ (mutex_id),
+ this_thread_id));
+
+ return (AE_ALREADY_ACQUIRED);
+ }
+
+ ACPI_ERROR((AE_INFO,
+ "Invalid acquire order: Thread %X owns [%s], wants [%s]",
+ this_thread_id,
+ acpi_ut_get_mutex_name(i),
+ acpi_ut_get_mutex_name(mutex_id)));
+
+ return (AE_ACQUIRE_DEADLOCK);
+ }
+ }
+ }
+#endif
+
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "Thread %lX attempting to acquire Mutex [%s]\n",
+ (unsigned long)this_thread_id,
+ acpi_ut_get_mutex_name(mutex_id)));
+
+ status = acpi_os_acquire_mutex(acpi_gbl_mutex_info[mutex_id].mutex,
+ ACPI_WAIT_FOREVER);
+ if (ACPI_SUCCESS(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "Thread %lX acquired Mutex [%s]\n",
+ (unsigned long)this_thread_id,
+ acpi_ut_get_mutex_name(mutex_id)));
+
+ acpi_gbl_mutex_info[mutex_id].use_count++;
+ acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id;
+ } else {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Thread %lX could not acquire Mutex [%X]",
+ (unsigned long)this_thread_id, mutex_id));
+ }
+
+ return (status);
+}
+
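+/*
+ * Ordering sketch (follows from the ACPI_MUTEX_DEBUG checks above and in
+ * acpi_ut_release_mutex below): a thread that needs more than one of these
+ * mutexes must acquire them in ascending ID order and release them in the
+ * reverse order, for example:
+ *
+ *     acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ *     acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+ *     ...
+ *     acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ *     acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ *
+ * The specific mutex IDs and their relative order are assumptions made for
+ * illustration; the ascending-order invariant is what the debug code checks.
+ */
+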
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_release_mutex
+ *
+ * PARAMETERS:  mutex_id            - ID of the mutex to be released
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Release a mutex object.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
+{
+ acpi_thread_id this_thread_id;
+
+ ACPI_FUNCTION_NAME(ut_release_mutex);
+
+ this_thread_id = acpi_os_get_thread_id();
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "Thread %lX releasing Mutex [%s]\n",
+ (unsigned long)this_thread_id,
+ acpi_ut_get_mutex_name(mutex_id)));
+
+ if (mutex_id > ACPI_MAX_MUTEX) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /*
+ * Mutex must be acquired in order to release it!
+ */
+ if (acpi_gbl_mutex_info[mutex_id].thread_id == ACPI_MUTEX_NOT_ACQUIRED) {
+ ACPI_ERROR((AE_INFO,
+ "Mutex [%X] is not acquired, cannot release",
+ mutex_id));
+
+ return (AE_NOT_ACQUIRED);
+ }
+#ifdef ACPI_MUTEX_DEBUG
+ {
+ u32 i;
+ /*
+ * Mutex debug code, for internal debugging only.
+ *
+ * Deadlock prevention. Check if this thread owns any mutexes of value
+ * greater than this one. If so, the thread has violated the mutex
+ * ordering rule. This indicates a coding error somewhere in
+ * the ACPI subsystem code.
+ */
+ for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) {
+ if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
+ if (i == mutex_id) {
+ continue;
+ }
+
+ ACPI_ERROR((AE_INFO,
+ "Invalid release order: owns [%s], releasing [%s]",
+ acpi_ut_get_mutex_name(i),
+ acpi_ut_get_mutex_name(mutex_id)));
+
+ return (AE_RELEASE_DEADLOCK);
+ }
+ }
+ }
+#endif
+
+ /* Mark unlocked FIRST */
+
+ acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED;
+
+ acpi_os_release_mutex(acpi_gbl_mutex_info[mutex_id].mutex);
+ return (AE_OK);
+}
diff --git a/drivers/acpi/utilities/utobject.c b/drivers/acpi/utilities/utobject.c
new file mode 100644
index 0000000..c354e7a
--- /dev/null
+++ b/drivers/acpi/utilities/utobject.c
@@ -0,0 +1,676 @@
+/******************************************************************************
+ *
+ * Module Name: utobject - ACPI object create/delete/size/cache routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utobject")
+
+/* Local prototypes */
+static acpi_status
+acpi_ut_get_simple_object_size(union acpi_operand_object *obj,
+ acpi_size * obj_length);
+
+static acpi_status
+acpi_ut_get_package_object_size(union acpi_operand_object *obj,
+ acpi_size * obj_length);
+
+static acpi_status
+acpi_ut_get_element_length(u8 object_type,
+ union acpi_operand_object *source_object,
+ union acpi_generic_state *state, void *context);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_create_internal_object_dbg
+ *
+ * PARAMETERS: module_name - Source file name of caller
+ * line_number - Line number of caller
+ * component_id - Component type of caller
+ * Type - ACPI Type of the new object
+ *
+ * RETURN: A new internal object, null on failure
+ *
+ * DESCRIPTION: Create and initialize a new internal object.
+ *
+ * NOTE: We always allocate the worst-case object descriptor because
+ * these objects are cached, and we want them to be
+ *              one-size-satisfies-any-request. This in itself may not be
+ * the most memory efficient, but the efficiency of the object
+ * cache should more than make up for this!
+ *
+ ******************************************************************************/
+
+union acpi_operand_object *acpi_ut_create_internal_object_dbg(const char
+ *module_name,
+ u32 line_number,
+ u32 component_id,
+ acpi_object_type
+ type)
+{
+ union acpi_operand_object *object;
+ union acpi_operand_object *second_object;
+
+ ACPI_FUNCTION_TRACE_STR(ut_create_internal_object_dbg,
+ acpi_ut_get_type_name(type));
+
+ /* Allocate the raw object descriptor */
+
+ object =
+ acpi_ut_allocate_object_desc_dbg(module_name, line_number,
+ component_id);
+ if (!object) {
+ return_PTR(NULL);
+ }
+
+ switch (type) {
+ case ACPI_TYPE_REGION:
+ case ACPI_TYPE_BUFFER_FIELD:
+ case ACPI_TYPE_LOCAL_BANK_FIELD:
+
+ /* These types require a secondary object */
+
+ second_object = acpi_ut_allocate_object_desc_dbg(module_name,
+ line_number,
+ component_id);
+ if (!second_object) {
+ acpi_ut_delete_object_desc(object);
+ return_PTR(NULL);
+ }
+
+ second_object->common.type = ACPI_TYPE_LOCAL_EXTRA;
+ second_object->common.reference_count = 1;
+
+ /* Link the second object to the first */
+
+ object->common.next_object = second_object;
+ break;
+
+ default:
+ /* All others have no secondary object */
+ break;
+ }
+
+ /* Save the object type in the object descriptor */
+
+ object->common.type = (u8) type;
+
+ /* Init the reference count */
+
+ object->common.reference_count = 1;
+
+ /* Any per-type initialization should go here */
+
+ return_PTR(object);
+}
+
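+/*
+ * Usage sketch: callers normally reach this through the
+ * acpi_ut_create_internal_object() macro (used later in this file), which is
+ * assumed to supply the caller's module name, line number, and component ID.
+ * A minimal caller might look like:
+ *
+ *     union acpi_operand_object *obj;
+ *
+ *     obj = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ *     if (!obj) {
+ *         return_ACPI_STATUS(AE_NO_MEMORY);
+ *     }
+ *     obj->integer.value = 0;
+ *     ...
+ *     acpi_ut_remove_reference(obj);
+ */
+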
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_create_package_object
+ *
+ * PARAMETERS: Count - Number of package elements
+ *
+ * RETURN: Pointer to a new Package object, null on failure
+ *
+ * DESCRIPTION: Create a fully initialized package object
+ *
+ ******************************************************************************/
+
+union acpi_operand_object *acpi_ut_create_package_object(u32 count)
+{
+ union acpi_operand_object *package_desc;
+ union acpi_operand_object **package_elements;
+
+ ACPI_FUNCTION_TRACE_U32(ut_create_package_object, count);
+
+ /* Create a new Package object */
+
+ package_desc = acpi_ut_create_internal_object(ACPI_TYPE_PACKAGE);
+ if (!package_desc) {
+ return_PTR(NULL);
+ }
+
+ /*
+ * Create the element array. Count+1 allows the array to be null
+ * terminated.
+ */
+ package_elements = ACPI_ALLOCATE_ZEROED(((acpi_size) count +
+ 1) * sizeof(void *));
+ if (!package_elements) {
+ acpi_ut_remove_reference(package_desc);
+ return_PTR(NULL);
+ }
+
+ package_desc->package.count = count;
+ package_desc->package.elements = package_elements;
+ return_PTR(package_desc);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_create_buffer_object
+ *
+ * PARAMETERS: buffer_size - Size of buffer to be created
+ *
+ * RETURN: Pointer to a new Buffer object, null on failure
+ *
+ * DESCRIPTION: Create a fully initialized buffer object
+ *
+ ******************************************************************************/
+
+union acpi_operand_object *acpi_ut_create_buffer_object(acpi_size buffer_size)
+{
+ union acpi_operand_object *buffer_desc;
+ u8 *buffer = NULL;
+
+ ACPI_FUNCTION_TRACE_U32(ut_create_buffer_object, buffer_size);
+
+ /* Create a new Buffer object */
+
+ buffer_desc = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER);
+ if (!buffer_desc) {
+ return_PTR(NULL);
+ }
+
+ /* Create an actual buffer only if size > 0 */
+
+ if (buffer_size > 0) {
+
+ /* Allocate the actual buffer */
+
+ buffer = ACPI_ALLOCATE_ZEROED(buffer_size);
+ if (!buffer) {
+ ACPI_ERROR((AE_INFO, "Could not allocate size %X",
+ (u32) buffer_size));
+ acpi_ut_remove_reference(buffer_desc);
+ return_PTR(NULL);
+ }
+ }
+
+ /* Complete buffer object initialization */
+
+ buffer_desc->buffer.flags |= AOPOBJ_DATA_VALID;
+ buffer_desc->buffer.pointer = buffer;
+ buffer_desc->buffer.length = (u32) buffer_size;
+
+ /* Return the new buffer descriptor */
+
+ return_PTR(buffer_desc);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_create_string_object
+ *
+ * PARAMETERS:  string_size         - Size of string to be created. Does not
+ *                                    include the NULL terminator; this is
+ *                                    added automatically.
+ *
+ * RETURN: Pointer to a new String object
+ *
+ * DESCRIPTION: Create a fully initialized string object
+ *
+ ******************************************************************************/
+
+union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size)
+{
+ union acpi_operand_object *string_desc;
+ char *string;
+
+ ACPI_FUNCTION_TRACE_U32(ut_create_string_object, string_size);
+
+ /* Create a new String object */
+
+ string_desc = acpi_ut_create_internal_object(ACPI_TYPE_STRING);
+ if (!string_desc) {
+ return_PTR(NULL);
+ }
+
+ /*
+ * Allocate the actual string buffer -- (Size + 1) for NULL terminator.
+ * NOTE: Zero-length strings are NULL terminated
+ */
+ string = ACPI_ALLOCATE_ZEROED(string_size + 1);
+ if (!string) {
+ ACPI_ERROR((AE_INFO, "Could not allocate size %X",
+ (u32) string_size));
+ acpi_ut_remove_reference(string_desc);
+ return_PTR(NULL);
+ }
+
+ /* Complete string object initialization */
+
+ string_desc->string.pointer = string;
+ string_desc->string.length = (u32) string_size;
+
+ /* Return the new string descriptor */
+
+ return_PTR(string_desc);
+}
+
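+/*
+ * Usage sketch: both constructors above hand back an object holding a single
+ * reference; the caller releases it with acpi_ut_remove_reference(), just as
+ * the error paths above do. Sizes and contents below are illustrative only:
+ *
+ *     union acpi_operand_object *str_obj;
+ *
+ *     str_obj = acpi_ut_create_string_object((acpi_size) 4);
+ *     if (!str_obj) {
+ *         return_ACPI_STATUS(AE_NO_MEMORY);
+ *     }
+ *     memcpy(str_obj->string.pointer, "PCI0", 4);
+ *     ...
+ *     acpi_ut_remove_reference(str_obj);
+ */
+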
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_valid_internal_object
+ *
+ * PARAMETERS: Object - Object to be validated
+ *
+ * RETURN: TRUE if object is valid, FALSE otherwise
+ *
+ * DESCRIPTION: Validate a pointer to be an union acpi_operand_object
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_valid_internal_object(void *object)
+{
+
+ ACPI_FUNCTION_NAME(ut_valid_internal_object);
+
+ /* Check for a null pointer */
+
+ if (!object) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "**** Null Object Ptr\n"));
+ return (FALSE);
+ }
+
+ /* Check the descriptor type field */
+
+ switch (ACPI_GET_DESCRIPTOR_TYPE(object)) {
+ case ACPI_DESC_TYPE_OPERAND:
+
+ /* The object appears to be a valid union acpi_operand_object */
+
+ return (TRUE);
+
+ default:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				  "%p is not an ACPI operand obj [%s]\n",
+ object, acpi_ut_get_descriptor_name(object)));
+ break;
+ }
+
+ return (FALSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_allocate_object_desc_dbg
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * component_id - Caller's component ID (for error output)
+ *
+ * RETURN: Pointer to newly allocated object descriptor. Null on error
+ *
+ * DESCRIPTION: Allocate a new object descriptor. Gracefully handle
+ * error conditions.
+ *
+ ******************************************************************************/
+
+void *acpi_ut_allocate_object_desc_dbg(const char *module_name,
+ u32 line_number, u32 component_id)
+{
+ union acpi_operand_object *object;
+
+ ACPI_FUNCTION_TRACE(ut_allocate_object_desc_dbg);
+
+ object = acpi_os_acquire_object(acpi_gbl_operand_cache);
+ if (!object) {
+ ACPI_ERROR((module_name, line_number,
+ "Could not allocate an object descriptor"));
+
+ return_PTR(NULL);
+ }
+
+ /* Mark the descriptor type */
+
+ memset(object, 0, sizeof(union acpi_operand_object));
+ ACPI_SET_DESCRIPTOR_TYPE(object, ACPI_DESC_TYPE_OPERAND);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p Size %X\n",
+ object, (u32) sizeof(union acpi_operand_object)));
+
+ return_PTR(object);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_delete_object_desc
+ *
+ * PARAMETERS: Object - An Acpi internal object to be deleted
+ *
+ * RETURN: None.
+ *
+ * DESCRIPTION: Free an ACPI object descriptor or add it to the object cache
+ *
+ ******************************************************************************/
+
+void acpi_ut_delete_object_desc(union acpi_operand_object *object)
+{
+ ACPI_FUNCTION_TRACE_PTR(ut_delete_object_desc, object);
+
+ /* Object must be an union acpi_operand_object */
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND) {
+ ACPI_ERROR((AE_INFO,
+ "%p is not an ACPI Operand object [%s]", object,
+ acpi_ut_get_descriptor_name(object)));
+ return_VOID;
+ }
+
+ (void)acpi_os_release_object(acpi_gbl_operand_cache, object);
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_simple_object_size
+ *
+ * PARAMETERS: internal_object - An ACPI operand object
+ * obj_length - Where the length is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function is called to determine the space required to
+ * contain a simple object for return to an external user.
+ *
+ * The length includes the object structure plus any additional
+ * needed space.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
+ acpi_size * obj_length)
+{
+ acpi_size length;
+ acpi_size size;
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE_PTR(ut_get_simple_object_size, internal_object);
+
+ /*
+	 * Handle a null object (Could be an uninitialized package
+ * element -- which is legal)
+ */
+ if (!internal_object) {
+ *obj_length = sizeof(union acpi_object);
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Start with the length of the Acpi object */
+
+ length = sizeof(union acpi_object);
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(internal_object) == ACPI_DESC_TYPE_NAMED) {
+
+ /* Object is a named object (reference), just return the length */
+
+ *obj_length = ACPI_ROUND_UP_TO_NATIVE_WORD(length);
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * The final length depends on the object type
+ * Strings and Buffers are packed right up against the parent object and
+ * must be accessed bytewise or there may be alignment problems on
+ * certain processors
+ */
+ switch (ACPI_GET_OBJECT_TYPE(internal_object)) {
+ case ACPI_TYPE_STRING:
+
+ length += (acpi_size) internal_object->string.length + 1;
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ length += (acpi_size) internal_object->buffer.length;
+ break;
+
+ case ACPI_TYPE_INTEGER:
+ case ACPI_TYPE_PROCESSOR:
+ case ACPI_TYPE_POWER:
+
+ /* No extra data for these types */
+
+ break;
+
+ case ACPI_TYPE_LOCAL_REFERENCE:
+
+ switch (internal_object->reference.class) {
+ case ACPI_REFCLASS_NAME:
+
+ /*
+ * Get the actual length of the full pathname to this object.
+ * The reference will be converted to the pathname to the object
+ */
+ size =
+ acpi_ns_get_pathname_length(internal_object->
+ reference.node);
+ if (!size) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ length += ACPI_ROUND_UP_TO_NATIVE_WORD(size);
+ break;
+
+ default:
+
+ /*
+ * No other reference opcodes are supported.
+ * Notably, Locals and Args are not supported, but this may be
+ * required eventually.
+ */
+ ACPI_ERROR((AE_INFO,
+ "Cannot convert to external object - "
+ "unsupported Reference Class [%s] %X in object %p",
+ acpi_ut_get_reference_name(internal_object),
+ internal_object->reference.class,
+ internal_object));
+ status = AE_TYPE;
+ break;
+ }
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO, "Cannot convert to external object - "
+ "unsupported type [%s] %X in object %p",
+ acpi_ut_get_object_type_name(internal_object),
+ ACPI_GET_OBJECT_TYPE(internal_object),
+ internal_object));
+ status = AE_TYPE;
+ break;
+ }
+
+ /*
+ * Account for the space required by the object rounded up to the next
+ * multiple of the machine word size. This keeps each object aligned
+	 * on a machine word boundary (preventing alignment faults on some
+	 * machines).
+ */
+ *obj_length = ACPI_ROUND_UP_TO_NATIVE_WORD(length);
+ return_ACPI_STATUS(status);
+}
+
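+/*
+ * Worked example (follows directly from the code above): for a 12-character
+ * string, the external size is
+ *
+ *     sizeof(union acpi_object) + 12 + 1
+ *
+ * rounded up to the next native word by ACPI_ROUND_UP_TO_NATIVE_WORD(), so on
+ * a 64-bit kernel the result is a multiple of 8 bytes. Buffers are handled the
+ * same way, minus the extra byte for the terminator.
+ */
+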
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_element_length
+ *
+ * PARAMETERS: acpi_pkg_callback
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Get the length of one package element.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_get_element_length(u8 object_type,
+ union acpi_operand_object *source_object,
+ union acpi_generic_state *state, void *context)
+{
+ acpi_status status = AE_OK;
+ struct acpi_pkg_info *info = (struct acpi_pkg_info *)context;
+ acpi_size object_space;
+
+ switch (object_type) {
+ case ACPI_COPY_TYPE_SIMPLE:
+
+ /*
+ * Simple object - just get the size (Null object/entry is handled
+ * here also) and sum it into the running package length
+ */
+ status =
+ acpi_ut_get_simple_object_size(source_object,
+ &object_space);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ info->length += object_space;
+ break;
+
+ case ACPI_COPY_TYPE_PACKAGE:
+
+ /* Package object - nothing much to do here, let the walk handle it */
+
+ info->num_packages++;
+ state->pkg.this_target_obj = NULL;
+ break;
+
+ default:
+
+ /* No other types allowed */
+
+ return (AE_BAD_PARAMETER);
+ }
+
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_package_object_size
+ *
+ * PARAMETERS: internal_object - An ACPI internal object
+ * obj_length - Where the length is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function is called to determine the space required to
+ * contain a package object for return to an external user.
+ *
+ * This is moderately complex since a package contains other
+ * objects including packages.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_get_package_object_size(union acpi_operand_object *internal_object,
+ acpi_size * obj_length)
+{
+ acpi_status status;
+ struct acpi_pkg_info info;
+
+ ACPI_FUNCTION_TRACE_PTR(ut_get_package_object_size, internal_object);
+
+ info.length = 0;
+ info.object_space = 0;
+ info.num_packages = 1;
+
+ status = acpi_ut_walk_package_tree(internal_object, NULL,
+ acpi_ut_get_element_length, &info);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+	 * We have handled all of the objects in all levels of the package;
+	 * just add the length of the package objects themselves.
+ * Round up to the next machine word.
+ */
+ info.length += ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object)) *
+ (acpi_size) info.num_packages;
+
+ /* Return the total package length */
+
+ *obj_length = info.length;
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_object_size
+ *
+ * PARAMETERS: internal_object - An ACPI internal object
+ * obj_length - Where the length will be returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function is called to determine the space required to
+ * contain an object for return to an API user.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_get_object_size(union acpi_operand_object *internal_object,
+ acpi_size * obj_length)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if ((ACPI_GET_DESCRIPTOR_TYPE(internal_object) ==
+ ACPI_DESC_TYPE_OPERAND)
+ && (ACPI_GET_OBJECT_TYPE(internal_object) == ACPI_TYPE_PACKAGE)) {
+ status =
+ acpi_ut_get_package_object_size(internal_object,
+ obj_length);
+ } else {
+ status =
+ acpi_ut_get_simple_object_size(internal_object, obj_length);
+ }
+
+ return (status);
+}
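+
+/*
+ * Usage sketch: a typical caller sizes the external buffer first and then
+ * performs the conversion into it (the conversion routine itself is outside
+ * this file and is only assumed here):
+ *
+ *     acpi_size size;
+ *     void *buffer;
+ *
+ *     status = acpi_ut_get_object_size(internal_object, &size);
+ *     if (ACPI_FAILURE(status)) {
+ *         return_ACPI_STATUS(status);
+ *     }
+ *
+ *     buffer = ACPI_ALLOCATE_ZEROED(size);
+ *     if (!buffer) {
+ *         return_ACPI_STATUS(AE_NO_MEMORY);
+ *     }
+ */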
diff --git a/drivers/acpi/utilities/utresrc.c b/drivers/acpi/utilities/utresrc.c
new file mode 100644
index 0000000..c3e3e13
--- /dev/null
+++ b/drivers/acpi/utilities/utresrc.c
@@ -0,0 +1,615 @@
+/*******************************************************************************
+ *
+ * Module Name: utresrc - Resource management utilities
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/amlresrc.h>
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utresrc")
+#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
+/*
+ * Strings used to decode resource descriptors.
+ * Used by both the disassembler and the debugger resource dump routines
+ */
+const char *acpi_gbl_bm_decode[] = {
+ "NotBusMaster",
+ "BusMaster"
+};
+
+const char *acpi_gbl_config_decode[] = {
+ "0 - Good Configuration",
+ "1 - Acceptable Configuration",
+ "2 - Suboptimal Configuration",
+ "3 - ***Invalid Configuration***",
+};
+
+const char *acpi_gbl_consume_decode[] = {
+ "ResourceProducer",
+ "ResourceConsumer"
+};
+
+const char *acpi_gbl_dec_decode[] = {
+ "PosDecode",
+ "SubDecode"
+};
+
+const char *acpi_gbl_he_decode[] = {
+ "Level",
+ "Edge"
+};
+
+const char *acpi_gbl_io_decode[] = {
+ "Decode10",
+ "Decode16"
+};
+
+const char *acpi_gbl_ll_decode[] = {
+ "ActiveHigh",
+ "ActiveLow"
+};
+
+const char *acpi_gbl_max_decode[] = {
+ "MaxNotFixed",
+ "MaxFixed"
+};
+
+const char *acpi_gbl_mem_decode[] = {
+ "NonCacheable",
+ "Cacheable",
+ "WriteCombining",
+ "Prefetchable"
+};
+
+const char *acpi_gbl_min_decode[] = {
+ "MinNotFixed",
+ "MinFixed"
+};
+
+const char *acpi_gbl_mtp_decode[] = {
+ "AddressRangeMemory",
+ "AddressRangeReserved",
+ "AddressRangeACPI",
+ "AddressRangeNVS"
+};
+
+const char *acpi_gbl_rng_decode[] = {
+ "InvalidRanges",
+ "NonISAOnlyRanges",
+ "ISAOnlyRanges",
+ "EntireRange"
+};
+
+const char *acpi_gbl_rw_decode[] = {
+ "ReadOnly",
+ "ReadWrite"
+};
+
+const char *acpi_gbl_shr_decode[] = {
+ "Exclusive",
+ "Shared"
+};
+
+const char *acpi_gbl_siz_decode[] = {
+ "Transfer8",
+ "Transfer8_16",
+ "Transfer16",
+ "InvalidSize"
+};
+
+const char *acpi_gbl_trs_decode[] = {
+ "DenseTranslation",
+ "SparseTranslation"
+};
+
+const char *acpi_gbl_ttp_decode[] = {
+ "TypeStatic",
+ "TypeTranslation"
+};
+
+const char *acpi_gbl_typ_decode[] = {
+ "Compatibility",
+ "TypeA",
+ "TypeB",
+ "TypeF"
+};
+
+#endif
+
+/*
+ * Base sizes of the raw AML resource descriptors, indexed by resource type.
+ * Zero indicates a reserved (and therefore invalid) resource type.
+ */
+const u8 acpi_gbl_resource_aml_sizes[] = {
+ /* Small descriptors */
+
+ 0,
+ 0,
+ 0,
+ 0,
+ ACPI_AML_SIZE_SMALL(struct aml_resource_irq),
+ ACPI_AML_SIZE_SMALL(struct aml_resource_dma),
+ ACPI_AML_SIZE_SMALL(struct aml_resource_start_dependent),
+ ACPI_AML_SIZE_SMALL(struct aml_resource_end_dependent),
+ ACPI_AML_SIZE_SMALL(struct aml_resource_io),
+ ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_io),
+ 0,
+ 0,
+ 0,
+ 0,
+ ACPI_AML_SIZE_SMALL(struct aml_resource_vendor_small),
+ ACPI_AML_SIZE_SMALL(struct aml_resource_end_tag),
+
+ /* Large descriptors */
+
+ 0,
+ ACPI_AML_SIZE_LARGE(struct aml_resource_memory24),
+ ACPI_AML_SIZE_LARGE(struct aml_resource_generic_register),
+ 0,
+ ACPI_AML_SIZE_LARGE(struct aml_resource_vendor_large),
+ ACPI_AML_SIZE_LARGE(struct aml_resource_memory32),
+ ACPI_AML_SIZE_LARGE(struct aml_resource_fixed_memory32),
+ ACPI_AML_SIZE_LARGE(struct aml_resource_address32),
+ ACPI_AML_SIZE_LARGE(struct aml_resource_address16),
+ ACPI_AML_SIZE_LARGE(struct aml_resource_extended_irq),
+ ACPI_AML_SIZE_LARGE(struct aml_resource_address64),
+ ACPI_AML_SIZE_LARGE(struct aml_resource_extended_address64)
+};
+
+/*
+ * Resource types, used to validate the resource length field.
+ * The length of fixed-length types must match exactly, variable
+ * lengths must meet the minimum required length, etc.
+ * Zero indicates a reserved (and therefore invalid) resource type.
+ */
+static const u8 acpi_gbl_resource_types[] = {
+ /* Small descriptors */
+
+ 0,
+ 0,
+ 0,
+ 0,
+ ACPI_SMALL_VARIABLE_LENGTH,
+ ACPI_FIXED_LENGTH,
+ ACPI_SMALL_VARIABLE_LENGTH,
+ ACPI_FIXED_LENGTH,
+ ACPI_FIXED_LENGTH,
+ ACPI_FIXED_LENGTH,
+ 0,
+ 0,
+ 0,
+ 0,
+ ACPI_VARIABLE_LENGTH,
+ ACPI_FIXED_LENGTH,
+
+ /* Large descriptors */
+
+ 0,
+ ACPI_FIXED_LENGTH,
+ ACPI_FIXED_LENGTH,
+ 0,
+ ACPI_VARIABLE_LENGTH,
+ ACPI_FIXED_LENGTH,
+ ACPI_FIXED_LENGTH,
+ ACPI_VARIABLE_LENGTH,
+ ACPI_VARIABLE_LENGTH,
+ ACPI_VARIABLE_LENGTH,
+ ACPI_VARIABLE_LENGTH,
+ ACPI_FIXED_LENGTH
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_walk_aml_resources
+ *
+ * PARAMETERS: Aml - Pointer to the raw AML resource template
+ * aml_length - Length of the entire template
+ * user_function - Called once for each descriptor found. If
+ * NULL, a pointer to the end_tag is returned
+ * Context - Passed to user_function
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Walk a raw AML resource list(buffer). User function called
+ * once for each resource found.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_walk_aml_resources(u8 * aml,
+ acpi_size aml_length,
+ acpi_walk_aml_callback user_function, void **context)
+{
+ acpi_status status;
+ u8 *end_aml;
+ u8 resource_index;
+ u32 length;
+ u32 offset = 0;
+
+ ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
+
+ /* The absolute minimum resource template is one end_tag descriptor */
+
+ if (aml_length < sizeof(struct aml_resource_end_tag)) {
+ return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
+ }
+
+ /* Point to the end of the resource template buffer */
+
+ end_aml = aml + aml_length;
+
+ /* Walk the byte list, abort on any invalid descriptor type or length */
+
+ while (aml < end_aml) {
+
+ /* Validate the Resource Type and Resource Length */
+
+ status = acpi_ut_validate_resource(aml, &resource_index);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Get the length of this descriptor */
+
+ length = acpi_ut_get_descriptor_length(aml);
+
+ /* Invoke the user function */
+
+ if (user_function) {
+ status =
+ user_function(aml, length, offset, resource_index,
+ context);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+
+ /* An end_tag descriptor terminates this resource template */
+
+ if (acpi_ut_get_resource_type(aml) ==
+ ACPI_RESOURCE_NAME_END_TAG) {
+ /*
+ * There must be at least one more byte in the buffer for
+ * the 2nd byte of the end_tag
+ */
+ if ((aml + 1) >= end_aml) {
+ return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
+ }
+
+ /* Return the pointer to the end_tag if requested */
+
+ if (!user_function) {
+ *context = aml;
+ }
+
+ /* Normal exit */
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ aml += length;
+ offset += length;
+ }
+
+ /* Did not find an end_tag descriptor */
+
+ return (AE_AML_NO_RESOURCE_END_TAG);
+}
+
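+/*
+ * Callback sketch (signature inferred from the user_function call above; the
+ * acpi_walk_aml_callback typedef itself lives in the ACPICA headers): a
+ * hypothetical callback that dumps each descriptor could look like:
+ *
+ *     static acpi_status
+ *     dump_one_resource(u8 *aml, u32 length, u32 offset,
+ *                       u8 resource_index, void **context)
+ *     {
+ *         acpi_os_printf("Type %2.2X, length %u at offset %u\n",
+ *                        acpi_ut_get_resource_type(aml), length, offset);
+ *         return (AE_OK);
+ *     }
+ *
+ * Returning a failure status from the callback aborts the walk.
+ */
+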
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_validate_resource
+ *
+ * PARAMETERS: Aml - Pointer to the raw AML resource descriptor
+ * return_index - Where the resource index is returned. NULL
+ * if the index is not required.
+ *
+ * RETURN: Status, and optionally the Index into the global resource tables
+ *
+ * DESCRIPTION: Validate an AML resource descriptor by checking the Resource
+ * Type and Resource Length. Returns an index into the global
+ * resource information/dispatch tables for later use.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
+{
+ u8 resource_type;
+ u8 resource_index;
+ acpi_rs_length resource_length;
+ acpi_rs_length minimum_resource_length;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /*
+ * 1) Validate the resource_type field (Byte 0)
+ */
+ resource_type = ACPI_GET8(aml);
+
+ /*
+ * Byte 0 contains the descriptor name (Resource Type)
+ * Examine the large/small bit in the resource header
+ */
+ if (resource_type & ACPI_RESOURCE_NAME_LARGE) {
+
+ /* Verify the large resource type (name) against the max */
+
+ if (resource_type > ACPI_RESOURCE_NAME_LARGE_MAX) {
+ return (AE_AML_INVALID_RESOURCE_TYPE);
+ }
+
+ /*
+ * Large Resource Type -- bits 6:0 contain the name
+ * Translate range 0x80-0x8B to index range 0x10-0x1B
+ */
+ resource_index = (u8) (resource_type - 0x70);
+ } else {
+ /*
+ * Small Resource Type -- bits 6:3 contain the name
+ * Shift range to index range 0x00-0x0F
+ */
+ resource_index = (u8)
+ ((resource_type & ACPI_RESOURCE_NAME_SMALL_MASK) >> 3);
+ }
+
+	/* Check validity of the resource type; zero indicates the name is invalid */
+
+ if (!acpi_gbl_resource_types[resource_index]) {
+ return (AE_AML_INVALID_RESOURCE_TYPE);
+ }
+
+ /*
+ * 2) Validate the resource_length field. This ensures that the length
+ * is at least reasonable, and guarantees that it is non-zero.
+ */
+ resource_length = acpi_ut_get_resource_length(aml);
+ minimum_resource_length = acpi_gbl_resource_aml_sizes[resource_index];
+
+ /* Validate based upon the type of resource - fixed length or variable */
+
+ switch (acpi_gbl_resource_types[resource_index]) {
+ case ACPI_FIXED_LENGTH:
+
+ /* Fixed length resource, length must match exactly */
+
+ if (resource_length != minimum_resource_length) {
+ return (AE_AML_BAD_RESOURCE_LENGTH);
+ }
+ break;
+
+ case ACPI_VARIABLE_LENGTH:
+
+ /* Variable length resource, length must be at least the minimum */
+
+ if (resource_length < minimum_resource_length) {
+ return (AE_AML_BAD_RESOURCE_LENGTH);
+ }
+ break;
+
+ case ACPI_SMALL_VARIABLE_LENGTH:
+
+ /* Small variable length resource, length can be (Min) or (Min-1) */
+
+ if ((resource_length > minimum_resource_length) ||
+ (resource_length < (minimum_resource_length - 1))) {
+ return (AE_AML_BAD_RESOURCE_LENGTH);
+ }
+ break;
+
+ default:
+
+ /* Shouldn't happen (because of validation earlier), but be sure */
+
+ return (AE_AML_INVALID_RESOURCE_TYPE);
+ }
+
+ /* Optionally return the resource table index */
+
+ if (return_index) {
+ *return_index = resource_index;
+ }
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_resource_type
+ *
+ * PARAMETERS: Aml - Pointer to the raw AML resource descriptor
+ *
+ * RETURN: The Resource Type with no extraneous bits (except the
+ * Large/Small descriptor bit -- this is left alone)
+ *
+ * DESCRIPTION: Extract the Resource Type/Name from the first byte of
+ * a resource descriptor.
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_get_resource_type(void *aml)
+{
+ ACPI_FUNCTION_ENTRY();
+
+ /*
+ * Byte 0 contains the descriptor name (Resource Type)
+ * Examine the large/small bit in the resource header
+ */
+ if (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_LARGE) {
+
+ /* Large Resource Type -- bits 6:0 contain the name */
+
+ return (ACPI_GET8(aml));
+ } else {
+ /* Small Resource Type -- bits 6:3 contain the name */
+
+ return ((u8) (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_SMALL_MASK));
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_resource_length
+ *
+ * PARAMETERS: Aml - Pointer to the raw AML resource descriptor
+ *
+ * RETURN: Byte Length
+ *
+ * DESCRIPTION: Get the "Resource Length" of a raw AML descriptor. By
+ * definition, this does not include the size of the descriptor
+ * header or the length field itself.
+ *
+ ******************************************************************************/
+
+u16 acpi_ut_get_resource_length(void *aml)
+{
+ acpi_rs_length resource_length;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /*
+ * Byte 0 contains the descriptor name (Resource Type)
+ * Examine the large/small bit in the resource header
+ */
+ if (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_LARGE) {
+
+ /* Large Resource type -- bytes 1-2 contain the 16-bit length */
+
+ ACPI_MOVE_16_TO_16(&resource_length, ACPI_ADD_PTR(u8, aml, 1));
+
+ } else {
+ /* Small Resource type -- bits 2:0 of byte 0 contain the length */
+
+ resource_length = (u16) (ACPI_GET8(aml) &
+ ACPI_RESOURCE_NAME_SMALL_LENGTH_MASK);
+ }
+
+ return (resource_length);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_resource_header_length
+ *
+ * PARAMETERS: Aml - Pointer to the raw AML resource descriptor
+ *
+ * RETURN: Length of the AML header (depends on large/small descriptor)
+ *
+ * DESCRIPTION: Get the length of the header for this resource.
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_get_resource_header_length(void *aml)
+{
+ ACPI_FUNCTION_ENTRY();
+
+ /* Examine the large/small bit in the resource header */
+
+ if (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_LARGE) {
+ return (sizeof(struct aml_resource_large_header));
+ } else {
+ return (sizeof(struct aml_resource_small_header));
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_descriptor_length
+ *
+ * PARAMETERS: Aml - Pointer to the raw AML resource descriptor
+ *
+ * RETURN: Byte length
+ *
+ * DESCRIPTION: Get the total byte length of a raw AML descriptor, including the
+ * length of the descriptor header and the length field itself.
+ * Used to walk descriptor lists.
+ *
+ ******************************************************************************/
+
+u32 acpi_ut_get_descriptor_length(void *aml)
+{
+ ACPI_FUNCTION_ENTRY();
+
+ /*
+ * Get the Resource Length (does not include header length) and add
+ * the header length (depends on if this is a small or large resource)
+ */
+ return (acpi_ut_get_resource_length(aml) +
+ acpi_ut_get_resource_header_length(aml));
+}
+
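+/*
+ * Worked example (standard small-descriptor encoding, matching the bit layout
+ * used above): an I/O port descriptor starts with tag byte 0x47 - bits 6:3
+ * hold the resource name (0x08) and bits 2:0 hold the resource length (7).
+ * For that byte:
+ *
+ *     acpi_ut_get_resource_type()          -> 0x40 (0x47 masked with the
+ *                                             small-name mask)
+ *     acpi_ut_get_resource_length()        -> 7
+ *     acpi_ut_get_resource_header_length() -> sizeof(struct aml_resource_small_header)
+ *                                             (the tag byte, assumed to be 1)
+ *     acpi_ut_get_descriptor_length()      -> 7 plus that header length
+ */
+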
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_resource_end_tag
+ *
+ * PARAMETERS: obj_desc - The resource template buffer object
+ * end_tag - Where the pointer to the end_tag is returned
+ *
+ * RETURN: Status, pointer to the end tag
+ *
+ * DESCRIPTION: Find the end_tag resource descriptor in an AML resource template
+ * Note: allows a buffer length of zero.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_get_resource_end_tag(union acpi_operand_object * obj_desc,
+ u8 ** end_tag)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ut_get_resource_end_tag);
+
+ /* Allow a buffer length of zero */
+
+ if (!obj_desc->buffer.length) {
+ *end_tag = obj_desc->buffer.pointer;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Validate the template and get a pointer to the end_tag */
+
+ status = acpi_ut_walk_aml_resources(obj_desc->buffer.pointer,
+ obj_desc->buffer.length, NULL,
+ (void **)end_tag);
+
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/utilities/utstate.c b/drivers/acpi/utilities/utstate.c
new file mode 100644
index 0000000..63a6d3d
--- /dev/null
+++ b/drivers/acpi/utilities/utstate.c
@@ -0,0 +1,346 @@
+/*******************************************************************************
+ *
+ * Module Name: utstate - state object support procedures
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utstate")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_create_pkg_state_and_push
+ *
+ * PARAMETERS:  internal_object     - Source object to be placed in the state
+ *              external_object     - Target/destination object for the state
+ *              Index               - Index of the current package element
+ *              state_list          - List the new state will be added to
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create a new state and push it
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ut_create_pkg_state_and_push(void *internal_object,
+ void *external_object,
+ u16 index,
+ union acpi_generic_state **state_list)
+{
+ union acpi_generic_state *state;
+
+ ACPI_FUNCTION_ENTRY();
+
+ state =
+ acpi_ut_create_pkg_state(internal_object, external_object, index);
+ if (!state) {
+ return (AE_NO_MEMORY);
+ }
+
+ acpi_ut_push_generic_state(state_list, state);
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_push_generic_state
+ *
+ * PARAMETERS: list_head - Head of the state stack
+ * State - State object to push
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Push a state object onto a state stack
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_push_generic_state(union acpi_generic_state **list_head,
+ union acpi_generic_state *state)
+{
+ ACPI_FUNCTION_TRACE(ut_push_generic_state);
+
+ /* Push the state object onto the front of the list (stack) */
+
+ state->common.next = *list_head;
+ *list_head = state;
+
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_pop_generic_state
+ *
+ * PARAMETERS: list_head - Head of the state stack
+ *
+ * RETURN: The popped state object
+ *
+ * DESCRIPTION: Pop a state object from a state stack
+ *
+ ******************************************************************************/
+
+union acpi_generic_state *acpi_ut_pop_generic_state(union acpi_generic_state
+ **list_head)
+{
+ union acpi_generic_state *state;
+
+ ACPI_FUNCTION_TRACE(ut_pop_generic_state);
+
+ /* Remove the state object at the head of the list (stack) */
+
+ state = *list_head;
+ if (state) {
+
+ /* Update the list head */
+
+ *list_head = state->common.next;
+ }
+
+ return_PTR(state);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_create_generic_state
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: The new state object. NULL on failure.
+ *
+ * DESCRIPTION: Create a generic state object. Attempt to obtain one from
+ *              the global state cache; if none is available, create a new one.
+ *
+ ******************************************************************************/
+
+union acpi_generic_state *acpi_ut_create_generic_state(void)
+{
+ union acpi_generic_state *state;
+
+ ACPI_FUNCTION_ENTRY();
+
+ state = acpi_os_acquire_object(acpi_gbl_state_cache);
+ if (state) {
+
+ /* Initialize */
+ memset(state, 0, sizeof(union acpi_generic_state));
+ state->common.descriptor_type = ACPI_DESC_TYPE_STATE;
+ }
+
+ return (state);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_create_thread_state
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: New Thread State. NULL on failure
+ *
+ * DESCRIPTION: Create a "Thread State" - a flavor of the generic state used
+ * to track per-thread info during method execution
+ *
+ ******************************************************************************/
+
+struct acpi_thread_state *acpi_ut_create_thread_state(void)
+{
+ union acpi_generic_state *state;
+
+ ACPI_FUNCTION_TRACE(ut_create_thread_state);
+
+ /* Create the generic state object */
+
+ state = acpi_ut_create_generic_state();
+ if (!state) {
+ return_PTR(NULL);
+ }
+
+ /* Init fields specific to the update struct */
+
+ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_THREAD;
+ state->thread.thread_id = acpi_os_get_thread_id();
+
+	/* Check for an invalid thread ID - zero is very bad; it will break things */
+
+ if (!state->thread.thread_id) {
+ ACPI_ERROR((AE_INFO, "Invalid zero ID from AcpiOsGetThreadId"));
+ state->thread.thread_id = (acpi_thread_id) 1;
+ }
+
+ return_PTR((struct acpi_thread_state *)state);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_create_update_state
+ *
+ * PARAMETERS: Object - Initial Object to be installed in the state
+ * Action - Update action to be performed
+ *
+ * RETURN: New state object, null on failure
+ *
+ * DESCRIPTION: Create an "Update State" - a flavor of the generic state used
+ * to update reference counts and delete complex objects such
+ * as packages.
+ *
+ ******************************************************************************/
+
+union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
+ *object, u16 action)
+{
+ union acpi_generic_state *state;
+
+ ACPI_FUNCTION_TRACE_PTR(ut_create_update_state, object);
+
+ /* Create the generic state object */
+
+ state = acpi_ut_create_generic_state();
+ if (!state) {
+ return_PTR(NULL);
+ }
+
+ /* Init fields specific to the update struct */
+
+ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_UPDATE;
+ state->update.object = object;
+ state->update.value = action;
+
+ return_PTR(state);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_create_pkg_state
+ *
+ * PARAMETERS:  internal_object     - Source object to be placed in the state
+ *              external_object     - Target/destination object for the state
+ *              Index               - Index of the current package element
+ *
+ * RETURN: New state object, null on failure
+ *
+ * DESCRIPTION: Create a "Package State"
+ *
+ ******************************************************************************/
+
+union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object,
+ void *external_object,
+ u16 index)
+{
+ union acpi_generic_state *state;
+
+ ACPI_FUNCTION_TRACE_PTR(ut_create_pkg_state, internal_object);
+
+ /* Create the generic state object */
+
+ state = acpi_ut_create_generic_state();
+ if (!state) {
+ return_PTR(NULL);
+ }
+
+ /* Init fields specific to the update struct */
+
+ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_PACKAGE;
+ state->pkg.source_object = (union acpi_operand_object *)internal_object;
+ state->pkg.dest_object = external_object;
+ state->pkg.index = index;
+ state->pkg.num_packages = 1;
+
+ return_PTR(state);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_create_control_state
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: New state object, null on failure
+ *
+ * DESCRIPTION: Create a "Control State" - a flavor of the generic state used
+ * to support nested IF/WHILE constructs in the AML.
+ *
+ ******************************************************************************/
+
+union acpi_generic_state *acpi_ut_create_control_state(void)
+{
+ union acpi_generic_state *state;
+
+ ACPI_FUNCTION_TRACE(ut_create_control_state);
+
+ /* Create the generic state object */
+
+ state = acpi_ut_create_generic_state();
+ if (!state) {
+ return_PTR(NULL);
+ }
+
+ /* Init fields specific to the control struct */
+
+ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_CONTROL;
+ state->common.state = ACPI_CONTROL_CONDITIONAL_EXECUTING;
+
+ return_PTR(state);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_delete_generic_state
+ *
+ * PARAMETERS: State - The state object to be deleted
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Release a state object to the state cache. NULL state objects
+ * are ignored.
+ *
+ ******************************************************************************/
+
+void acpi_ut_delete_generic_state(union acpi_generic_state *state)
+{
+ ACPI_FUNCTION_TRACE(ut_delete_generic_state);
+
+ /* Ignore null state */
+
+ if (state) {
+ (void)acpi_os_release_object(acpi_gbl_state_cache, state);
+ }
+ return_VOID;
+}
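+
+/*
+ * Illustrative usage sketch (editorial example, not part of the ACPICA
+ * sources): every state object created by the functions above is released
+ * back to the state cache with acpi_ut_delete_generic_state(). A typical
+ * caller pairs the two, for example when tracking a nested IF/WHILE scope:
+ *
+ *     union acpi_generic_state *state;
+ *
+ *     state = acpi_ut_create_control_state();
+ *     if (!state)
+ *             return_ACPI_STATUS(AE_NO_MEMORY);
+ *
+ *     (push the state on the walk state's control stack, execute the body)
+ *
+ *     acpi_ut_delete_generic_state(state);
+ */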
diff --git a/drivers/acpi/utilities/utxface.c b/drivers/acpi/utilities/utxface.c
new file mode 100644
index 0000000..c198a4d
--- /dev/null
+++ b/drivers/acpi/utilities/utxface.c
@@ -0,0 +1,500 @@
+/******************************************************************************
+ *
+ * Module Name: utxface - External interfaces for "global" ACPI functions
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acevents.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acdebug.h>
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utxface")
+
+#ifndef ACPI_ASL_COMPILER
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_initialize_subsystem
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initializes all global variables. This is the first function
+ * called, so any early initialization belongs here.
+ *
+ ******************************************************************************/
+acpi_status __init acpi_initialize_subsystem(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_initialize_subsystem);
+
+ acpi_gbl_startup_flags = ACPI_SUBSYSTEM_INITIALIZE;
+ ACPI_DEBUG_EXEC(acpi_ut_init_stack_ptr_trace());
+
+ /* Initialize the OS-Dependent layer */
+
+ status = acpi_os_initialize();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "During OSL initialization"));
+ return_ACPI_STATUS(status);
+ }
+
+ /* Initialize all globals used by the subsystem */
+
+ status = acpi_ut_init_globals();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During initialization of globals"));
+ return_ACPI_STATUS(status);
+ }
+
+ /* Create the default mutex objects */
+
+ status = acpi_ut_mutex_initialize();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During Global Mutex creation"));
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Initialize the namespace manager and
+ * the root of the namespace tree
+ */
+ status = acpi_ns_root_initialize();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During Namespace initialization"));
+ return_ACPI_STATUS(status);
+ }
+
+ /* If configured, initialize the AML debugger */
+
+ ACPI_DEBUGGER_EXEC(status = acpi_db_initialize());
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_enable_subsystem
+ *
+ * PARAMETERS: Flags - Init/enable Options
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Completes the subsystem initialization including hardware.
+ * Puts system into ACPI mode if it isn't already.
+ *
+ ******************************************************************************/
+acpi_status acpi_enable_subsystem(u32 flags)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(acpi_enable_subsystem);
+
+ /* Enable ACPI mode */
+
+ if (!(flags & ACPI_NO_ACPI_ENABLE)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Going into ACPI mode\n"));
+
+ acpi_gbl_original_mode = acpi_hw_get_mode();
+
+ status = acpi_enable();
+ if (ACPI_FAILURE(status)) {
+ ACPI_WARNING((AE_INFO, "AcpiEnable failed"));
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /*
+ * Install the default op_region handlers. These are installed unless
+ * other handlers have already been installed via the
+ * install_address_space_handler interface.
+ */
+ if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Installing default address space handlers\n"));
+
+ status = acpi_ev_install_region_handlers();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /*
+ * Initialize ACPI Event handling (Fixed and General Purpose)
+ *
+ * Note1: We must have the hardware and events initialized before we can
+ * execute any control methods safely. Any control method can require
+ * ACPI hardware support, so the hardware must be fully initialized before
+ * any method execution!
+ *
+ * Note2: Fixed events are initialized and enabled here. GPEs are
+ * initialized, but cannot be enabled until after the hardware is
+ * completely initialized (SCI and global_lock activated)
+ */
+ if (!(flags & ACPI_NO_EVENT_INIT)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Initializing ACPI events\n"));
+
+ status = acpi_ev_initialize_events();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /*
+ * Install the SCI handler and Global Lock handler. This completes the
+ * hardware initialization.
+ */
+ if (!(flags & ACPI_NO_HANDLER_INIT)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Installing SCI/GL handlers\n"));
+
+ status = acpi_ev_install_xrupt_handlers();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enable_subsystem)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_initialize_objects
+ *
+ * PARAMETERS: Flags - Init/enable Options
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Completes namespace initialization by initializing device
+ * objects and executing AML code for Regions, buffers, etc.
+ *
+ ******************************************************************************/
+acpi_status acpi_initialize_objects(u32 flags)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(acpi_initialize_objects);
+
+ /*
+ * Run all _REG methods
+ *
+ * Note: Any objects accessed by the _REG methods will be automatically
+ * initialized, even if they contain executable AML (see the call to
+ * acpi_ns_initialize_objects below).
+ */
+ if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Executing _REG OpRegion methods\n"));
+
+ status = acpi_ev_initialize_op_regions();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /*
+ * Initialize the objects that remain uninitialized. This runs the
+ * executable AML that may be part of the declaration of these objects:
+ * operation_regions, buffer_fields, Buffers, and Packages.
+ */
+ if (!(flags & ACPI_NO_OBJECT_INIT)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Completing Initialization of ACPI Objects\n"));
+
+ status = acpi_ns_initialize_objects();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /*
+ * Initialize all device objects in the namespace. This runs the device
+ * _STA and _INI methods.
+ */
+ if (!(flags & ACPI_NO_DEVICE_INIT)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Initializing ACPI Devices\n"));
+
+ status = acpi_ns_initialize_devices();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /*
+ * Complete the GPE initialization for the GPE blocks defined in the FADT
+ * (GPE block 0 and 1).
+ *
+ * Note1: This is where the _PRW methods are executed for the GPEs. These
+ * methods can only be executed after the SCI and Global Lock handlers are
+ * installed and initialized.
+ *
+ * Note2: Currently, there seems to be no need to run the _REG methods
+ * before execution of the _PRW methods and enabling of the GPEs.
+ */
+ if (!(flags & ACPI_NO_EVENT_INIT)) {
+ status = acpi_ev_install_fadt_gpes();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /*
+ * Empty the caches (delete the cached objects) on the assumption that
+ * the table load filled them up more than they will be at runtime --
+ * thus wasting non-paged memory.
+ */
+ status = acpi_purge_cached_objects();
+
+ acpi_gbl_startup_flags |= ACPI_INITIALIZED_OK;
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_initialize_objects)
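+
+/*
+ * Illustrative bring-up sketch (editorial example; the call sequence is an
+ * assumption about a typical host OS, not code from this file). The three
+ * public entry points above are normally invoked in order, with the ACPI
+ * tables loaded in between:
+ *
+ *     status = acpi_initialize_subsystem();
+ *     if (ACPI_FAILURE(status))
+ *             return status;
+ *
+ *     (locate and load the ACPI tables via the table manager interfaces)
+ *
+ *     status = acpi_enable_subsystem(ACPI_FULL_INITIALIZATION);
+ *     if (ACPI_FAILURE(status))
+ *             return status;
+ *
+ *     status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION);
+ */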
+
+#endif
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_terminate
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Shutdown the ACPI subsystem. Release all resources.
+ *
+ ******************************************************************************/
+acpi_status acpi_terminate(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_terminate);
+
+ /* Terminate the AML Debugger if present */
+
+ ACPI_DEBUGGER_EXEC(acpi_gbl_db_terminate_threads = TRUE);
+
+ /* Shutdown and free all resources */
+
+ acpi_ut_subsystem_shutdown();
+
+ /* Free the mutex objects */
+
+ acpi_ut_mutex_terminate();
+
+#ifdef ACPI_DEBUGGER
+
+ /* Shut down the debugger */
+
+ acpi_db_terminate();
+#endif
+
+ /* Now we can shutdown the OS-dependent layer */
+
+ status = acpi_os_terminate();
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_terminate)
+#ifndef ACPI_ASL_COMPILER
+#ifdef ACPI_FUTURE_USAGE
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_subsystem_status
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status of the ACPI subsystem
+ *
+ * DESCRIPTION: Other drivers that use the ACPI subsystem should call this
+ * before making any other calls, to ensure that the subsystem
+ * has initialized successfully.
+ *
+ ******************************************************************************/
+acpi_status acpi_subsystem_status(void)
+{
+
+ if (acpi_gbl_startup_flags & ACPI_INITIALIZED_OK) {
+ return (AE_OK);
+ } else {
+ return (AE_ERROR);
+ }
+}
+
+ACPI_EXPORT_SYMBOL(acpi_subsystem_status)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_system_info
+ *
+ * PARAMETERS: out_buffer - A buffer to receive the resources for the
+ * device
+ *
+ * RETURN: Status - the status of the call
+ *
+ * DESCRIPTION: This function is called to get information about the current
+ * state of the ACPI subsystem. It will return system information
+ * in the out_buffer.
+ *
+ * If the function fails an appropriate status will be returned
+ * and the value of out_buffer is undefined.
+ *
+ ******************************************************************************/
+acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer)
+{
+ struct acpi_system_info *info_ptr;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_get_system_info);
+
+ /* Parameter validation */
+
+ status = acpi_ut_validate_buffer(out_buffer);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Validate/Allocate/Clear caller buffer */
+
+ status =
+ acpi_ut_initialize_buffer(out_buffer,
+ sizeof(struct acpi_system_info));
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Populate the return buffer
+ */
+ info_ptr = (struct acpi_system_info *)out_buffer->pointer;
+
+ info_ptr->acpi_ca_version = ACPI_CA_VERSION;
+
+ /* System flags (ACPI capabilities) */
+
+ info_ptr->flags = ACPI_SYS_MODE_ACPI;
+
+ /* Timer resolution - 24 or 32 bits */
+
+ if (acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) {
+ info_ptr->timer_resolution = 32;
+ } else {
+ info_ptr->timer_resolution = 24;
+ }
+
+ /* Clear the reserved fields */
+
+ info_ptr->reserved1 = 0;
+ info_ptr->reserved2 = 0;
+
+ /* Current debug levels */
+
+ info_ptr->debug_layer = acpi_dbg_layer;
+ info_ptr->debug_level = acpi_dbg_level;
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_system_info)
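+
+/*
+ * Illustrative caller sketch (editorial example): because the returned
+ * structure has a fixed size, a caller may supply a stack buffer of exactly
+ * sizeof(struct acpi_system_info):
+ *
+ *     struct acpi_system_info info;
+ *     struct acpi_buffer buffer = { sizeof(info), &info };
+ *
+ *     if (ACPI_SUCCESS(acpi_get_system_info(&buffer)))
+ *             printk("ACPI CA version: %X\n", info.acpi_ca_version);
+ */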
+
+/*****************************************************************************
+ *
+ * FUNCTION: acpi_install_initialization_handler
+ *
+ * PARAMETERS: Handler - Callback procedure
+ * Function - Not (currently) used, see below
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install an initialization handler
+ *
+ * TBD: When a second function is added, must save the Function also.
+ *
+ ****************************************************************************/
+acpi_status
+acpi_install_initialization_handler(acpi_init_handler handler, u32 function)
+{
+
+ if (!handler) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ if (acpi_gbl_init_handler) {
+ return (AE_ALREADY_EXISTS);
+ }
+
+ acpi_gbl_init_handler = handler;
+ return (AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_initialization_handler)
+#endif /* ACPI_FUTURE_USAGE */
+/*****************************************************************************
+ *
+ * FUNCTION: acpi_purge_cached_objects
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Empty all caches (delete the cached objects)
+ *
+ ****************************************************************************/
+acpi_status acpi_purge_cached_objects(void)
+{
+ ACPI_FUNCTION_TRACE(acpi_purge_cached_objects);
+
+ (void)acpi_os_purge_cache(acpi_gbl_state_cache);
+ (void)acpi_os_purge_cache(acpi_gbl_operand_cache);
+ (void)acpi_os_purge_cache(acpi_gbl_ps_node_cache);
+ (void)acpi_os_purge_cache(acpi_gbl_ps_node_ext_cache);
+ return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_purge_cached_objects)
+#endif
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
new file mode 100644
index 0000000..f844941
--- /dev/null
+++ b/drivers/acpi/utils.c
@@ -0,0 +1,426 @@
+/*
+ * acpi_utils.c - ACPI Utility Functions ($Revision: 10 $)
+ *
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#define _COMPONENT ACPI_BUS_COMPONENT
+ACPI_MODULE_NAME("utils");
+
+/* --------------------------------------------------------------------------
+ Object Evaluation Helpers
+ -------------------------------------------------------------------------- */
+static void
+acpi_util_eval_error(acpi_handle h, acpi_string p, acpi_status s)
+{
+#ifdef ACPI_DEBUG_OUTPUT
+ char prefix[80] = {'\0'};
+ struct acpi_buffer buffer = {sizeof(prefix), prefix};
+ acpi_get_name(h, ACPI_FULL_PATHNAME, &buffer);
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Evaluate [%s.%s]: %s\n",
+ (char *) prefix, p, acpi_format_exception(s)));
+#else
+ return;
+#endif
+}
+
+acpi_status
+acpi_extract_package(union acpi_object *package,
+ struct acpi_buffer *format, struct acpi_buffer *buffer)
+{
+ u32 size_required = 0;
+ u32 tail_offset = 0;
+ char *format_string = NULL;
+ u32 format_count = 0;
+ u32 i = 0;
+ u8 *head = NULL;
+ u8 *tail = NULL;
+
+
+ if (!package || (package->type != ACPI_TYPE_PACKAGE)
+ || (package->package.count < 1)) {
+ printk(KERN_WARNING PREFIX "Invalid package argument\n");
+ return AE_BAD_PARAMETER;
+ }
+
+ if (!format || !format->pointer || (format->length < 1)) {
+ printk(KERN_WARNING PREFIX "Invalid format argument\n");
+ return AE_BAD_PARAMETER;
+ }
+
+ if (!buffer) {
+ printk(KERN_WARNING PREFIX "Invalid buffer argument\n");
+ return AE_BAD_PARAMETER;
+ }
+
+ format_count = (format->length / sizeof(char)) - 1;
+ if (format_count > package->package.count) {
+ printk(KERN_WARNING PREFIX "Format specifies more objects [%d]"
+ " than exist in package [%d].\n",
+ format_count, package->package.count);
+ return AE_BAD_DATA;
+ }
+
+ format_string = format->pointer;
+
+ /*
+ * Calculate size_required.
+ */
+ for (i = 0; i < format_count; i++) {
+
+ union acpi_object *element = &(package->package.elements[i]);
+
+ if (!element) {
+ return AE_BAD_DATA;
+ }
+
+ switch (element->type) {
+
+ case ACPI_TYPE_INTEGER:
+ switch (format_string[i]) {
+ case 'N':
+ size_required += sizeof(acpi_integer);
+ tail_offset += sizeof(acpi_integer);
+ break;
+ case 'S':
+ size_required +=
+ sizeof(char *) + sizeof(acpi_integer) +
+ sizeof(char);
+ tail_offset += sizeof(char *);
+ break;
+ default:
+ printk(KERN_WARNING PREFIX "Invalid package element"
+ " [%d]: got number, expecting"
+ " [%c]\n",
+ i, format_string[i]);
+ return AE_BAD_DATA;
+ break;
+ }
+ break;
+
+ case ACPI_TYPE_STRING:
+ case ACPI_TYPE_BUFFER:
+ switch (format_string[i]) {
+ case 'S':
+ size_required +=
+ sizeof(char *) +
+ (element->string.length * sizeof(char)) +
+ sizeof(char);
+ tail_offset += sizeof(char *);
+ break;
+ case 'B':
+ size_required +=
+ sizeof(u8 *) +
+ (element->buffer.length * sizeof(u8));
+ tail_offset += sizeof(u8 *);
+ break;
+ default:
+ printk(KERN_WARNING PREFIX "Invalid package element"
+ " [%d] got string/buffer,"
+ " expecting [%c]\n",
+ i, format_string[i]);
+ return AE_BAD_DATA;
+ break;
+ }
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+ default:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Found unsupported element at index=%d\n",
+ i));
+ /* TBD: handle nested packages... */
+ return AE_SUPPORT;
+ break;
+ }
+ }
+
+ /*
+ * Validate output buffer.
+ */
+ if (buffer->length < size_required) {
+ buffer->length = size_required;
+ return AE_BUFFER_OVERFLOW;
+ } else if (buffer->length != size_required || !buffer->pointer) {
+ return AE_BAD_PARAMETER;
+ }
+
+ head = buffer->pointer;
+ tail = buffer->pointer + tail_offset;
+
+ /*
+ * Extract package data.
+ */
+ for (i = 0; i < format_count; i++) {
+
+ u8 **pointer = NULL;
+ union acpi_object *element = &(package->package.elements[i]);
+
+ if (!element) {
+ return AE_BAD_DATA;
+ }
+
+ switch (element->type) {
+
+ case ACPI_TYPE_INTEGER:
+ switch (format_string[i]) {
+ case 'N':
+ *((acpi_integer *) head) =
+ element->integer.value;
+ head += sizeof(acpi_integer);
+ break;
+ case 'S':
+ pointer = (u8 **) head;
+ *pointer = tail;
+ *((acpi_integer *) tail) =
+ element->integer.value;
+ head += sizeof(acpi_integer *);
+ tail += sizeof(acpi_integer);
+ /* NULL terminate string */
+ *tail = (char)0;
+ tail += sizeof(char);
+ break;
+ default:
+ /* Should never get here */
+ break;
+ }
+ break;
+
+ case ACPI_TYPE_STRING:
+ case ACPI_TYPE_BUFFER:
+ switch (format_string[i]) {
+ case 'S':
+ pointer = (u8 **) head;
+ *pointer = tail;
+ memcpy(tail, element->string.pointer,
+ element->string.length);
+ head += sizeof(char *);
+ tail += element->string.length * sizeof(char);
+ /* NULL terminate string */
+ *tail = (char)0;
+ tail += sizeof(char);
+ break;
+ case 'B':
+ pointer = (u8 **) head;
+ *pointer = tail;
+ memcpy(tail, element->buffer.pointer,
+ element->buffer.length);
+ head += sizeof(u8 *);
+ tail += element->buffer.length * sizeof(u8);
+ break;
+ default:
+ /* Should never get here */
+ break;
+ }
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+ /* TBD: handle nested packages... */
+ default:
+ /* Should never get here */
+ break;
+ }
+ }
+
+ return AE_OK;
+}
+
+EXPORT_SYMBOL(acpi_extract_package);
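+
+/*
+ * Illustrative sketch of the format-string convention (editorial example;
+ * "package" stands for a union acpi_object obtained from a prior
+ * acpi_evaluate_object() call): 'N' extracts an integer, 'S' a NUL-terminated
+ * string pointer, 'B' a buffer pointer. The function is called twice, first
+ * to learn the required output size:
+ *
+ *     struct acpi_buffer format = { sizeof("NNS"), "NNS" };
+ *     struct acpi_buffer out = { 0, NULL };
+ *
+ *     if (acpi_extract_package(package, &format, &out) == AE_BUFFER_OVERFLOW) {
+ *             out.pointer = kmalloc(out.length, GFP_KERNEL);
+ *             if (out.pointer &&
+ *                 ACPI_SUCCESS(acpi_extract_package(package, &format, &out))) {
+ *                     (out now holds two acpi_integers followed by a char *
+ *                      that points at a NUL-terminated copy of the string)
+ *             }
+ *             kfree(out.pointer);
+ *     }
+ */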
+
+acpi_status
+acpi_evaluate_integer(acpi_handle handle,
+ acpi_string pathname,
+ struct acpi_object_list *arguments, unsigned long long *data)
+{
+ acpi_status status = AE_OK;
+ union acpi_object element;
+ struct acpi_buffer buffer = { 0, NULL };
+
+ if (!data)
+ return AE_BAD_PARAMETER;
+
+ buffer.length = sizeof(union acpi_object);
+ buffer.pointer = &element;
+ status = acpi_evaluate_object(handle, pathname, arguments, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_util_eval_error(handle, pathname, status);
+ return status;
+ }
+
+ if (element.type != ACPI_TYPE_INTEGER) {
+ acpi_util_eval_error(handle, pathname, AE_BAD_DATA);
+ return AE_BAD_DATA;
+ }
+
+ *data = element.integer.value;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Return value [%llu]\n", *data));
+
+ return AE_OK;
+}
+
+EXPORT_SYMBOL(acpi_evaluate_integer);
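+
+/*
+ * Illustrative caller sketch (editorial example; "handle" is a placeholder
+ * acpi_handle): evaluating a simple integer-valued object such as _STA:
+ *
+ *     unsigned long long sta;
+ *     acpi_status status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
+ *
+ *     if (ACPI_SUCCESS(status) && (sta & 0x01))
+ *             (bit 0 of _STA set: the device is present)
+ */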
+
+#if 0
+acpi_status
+acpi_evaluate_string(acpi_handle handle,
+ acpi_string pathname,
+ acpi_object_list * arguments, acpi_string * data)
+{
+ acpi_status status = AE_OK;
+ acpi_object *element = NULL;
+ acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+
+ if (!data)
+ return AE_BAD_PARAMETER;
+
+ status = acpi_evaluate_object(handle, pathname, arguments, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_util_eval_error(handle, pathname, status);
+ return status;
+ }
+
+ element = (acpi_object *) buffer.pointer;
+
+ if ((element->type != ACPI_TYPE_STRING)
+ && (element->type != ACPI_TYPE_BUFFER)
+ || !element->string.length) {
+ acpi_util_eval_error(handle, pathname, AE_BAD_DATA);
+ return AE_BAD_DATA;
+ }
+
+ *data = kzalloc(element->string.length + 1, GFP_KERNEL);
+ if (!*data) {
+ printk(KERN_ERR PREFIX "Memory allocation failed\n");
+ kfree(buffer.pointer);
+ return AE_NO_MEMORY;
+ }
+
+ memcpy(*data, element->string.pointer, element->string.length);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Return value [%s]\n", *data));
+
+ kfree(buffer.pointer);
+
+ return AE_OK;
+}
+#endif
+
+acpi_status
+acpi_evaluate_reference(acpi_handle handle,
+ acpi_string pathname,
+ struct acpi_object_list *arguments,
+ struct acpi_handle_list *list)
+{
+ acpi_status status = AE_OK;
+ union acpi_object *package = NULL;
+ union acpi_object *element = NULL;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ u32 i = 0;
+
+
+ if (!list) {
+ return AE_BAD_PARAMETER;
+ }
+
+ /* Evaluate object. */
+
+ status = acpi_evaluate_object(handle, pathname, arguments, &buffer);
+ if (ACPI_FAILURE(status))
+ goto end;
+
+ package = buffer.pointer;
+
+ if ((buffer.length == 0) || !package) {
+ printk(KERN_ERR PREFIX "No return object (len %X ptr %p)\n",
+ (unsigned)buffer.length, package);
+ status = AE_BAD_DATA;
+ acpi_util_eval_error(handle, pathname, status);
+ goto end;
+ }
+ if (package->type != ACPI_TYPE_PACKAGE) {
+ printk(KERN_ERR PREFIX "Expecting a [Package], found type %X\n",
+ package->type);
+ status = AE_BAD_DATA;
+ acpi_util_eval_error(handle, pathname, status);
+ goto end;
+ }
+ if (!package->package.count) {
+ printk(KERN_ERR PREFIX "[Package] has zero elements (%p)\n",
+ package);
+ status = AE_BAD_DATA;
+ acpi_util_eval_error(handle, pathname, status);
+ goto end;
+ }
+
+ if (package->package.count > ACPI_MAX_HANDLES) {
+ return AE_NO_MEMORY;
+ }
+ list->count = package->package.count;
+
+ /* Extract package data. */
+
+ for (i = 0; i < list->count; i++) {
+
+ element = &(package->package.elements[i]);
+
+ if (element->type != ACPI_TYPE_LOCAL_REFERENCE) {
+ status = AE_BAD_DATA;
+ printk(KERN_ERR PREFIX
+ "Expecting a [Reference] package element, found type %X\n",
+ element->type);
+ acpi_util_eval_error(handle, pathname, status);
+ break;
+ }
+
+ if (!element->reference.handle) {
+ printk(KERN_WARNING PREFIX "Invalid reference in"
+ " package %s\n", pathname);
+ status = AE_NULL_ENTRY;
+ break;
+ }
+ /* Get the acpi_handle. */
+
+ list->handles[i] = element->reference.handle;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found reference [%p]\n",
+ list->handles[i]));
+ }
+
+ end:
+ if (ACPI_FAILURE(status)) {
+ list->count = 0;
+ //kfree(list->handles);
+ }
+
+ kfree(buffer.pointer);
+
+ return status;
+}
+
+EXPORT_SYMBOL(acpi_evaluate_reference);
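+
+/*
+ * Illustrative caller sketch (editorial example; "handle" is a placeholder):
+ * methods such as _PSL or _TZD return a package of references, which this
+ * helper copies into a fixed-size acpi_handle_list:
+ *
+ *     struct acpi_handle_list devices;
+ *     int i;
+ *
+ *     if (ACPI_SUCCESS(acpi_evaluate_reference(handle, "_TZD", NULL,
+ *                                              &devices)))
+ *             for (i = 0; i < devices.count; i++)
+ *                     (use devices.handles[i])
+ */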
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
new file mode 100644
index 0000000..8bcca4b
--- /dev/null
+++ b/drivers/acpi/video.c
@@ -0,0 +1,2156 @@
+/*
+ * video.c - ACPI Video Driver ($Revision:$)
+ *
+ * Copyright (C) 2004 Luming Yu <luming.yu@intel.com>
+ * Copyright (C) 2004 Bruno Ducrot <ducrot@poupinou.org>
+ * Copyright (C) 2006 Thomas Tuttle <linux-kernel@ttuttle.net>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/input.h>
+#include <linux/backlight.h>
+#include <linux/thermal.h>
+#include <linux/video_output.h>
+#include <linux/sort.h>
+#include <asm/uaccess.h>
+
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#define ACPI_VIDEO_CLASS "video"
+#define ACPI_VIDEO_BUS_NAME "Video Bus"
+#define ACPI_VIDEO_DEVICE_NAME "Video Device"
+#define ACPI_VIDEO_NOTIFY_SWITCH 0x80
+#define ACPI_VIDEO_NOTIFY_PROBE 0x81
+#define ACPI_VIDEO_NOTIFY_CYCLE 0x82
+#define ACPI_VIDEO_NOTIFY_NEXT_OUTPUT 0x83
+#define ACPI_VIDEO_NOTIFY_PREV_OUTPUT 0x84
+
+#define ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS 0x85
+#define ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS 0x86
+#define ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS 0x87
+#define ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS 0x88
+#define ACPI_VIDEO_NOTIFY_DISPLAY_OFF 0x89
+
+#define MAX_NAME_LEN 20
+
+#define ACPI_VIDEO_DISPLAY_CRT 1
+#define ACPI_VIDEO_DISPLAY_TV 2
+#define ACPI_VIDEO_DISPLAY_DVI 3
+#define ACPI_VIDEO_DISPLAY_LCD 4
+
+#define _COMPONENT ACPI_VIDEO_COMPONENT
+ACPI_MODULE_NAME("video");
+
+MODULE_AUTHOR("Bruno Ducrot");
+MODULE_DESCRIPTION("ACPI Video Driver");
+MODULE_LICENSE("GPL");
+
+static int brightness_switch_enabled = 1;
+module_param(brightness_switch_enabled, bool, 0644);
+
+static int acpi_video_bus_add(struct acpi_device *device);
+static int acpi_video_bus_remove(struct acpi_device *device, int type);
+static int acpi_video_resume(struct acpi_device *device);
+
+static const struct acpi_device_id video_device_ids[] = {
+ {ACPI_VIDEO_HID, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, video_device_ids);
+
+static struct acpi_driver acpi_video_bus = {
+ .name = "video",
+ .class = ACPI_VIDEO_CLASS,
+ .ids = video_device_ids,
+ .ops = {
+ .add = acpi_video_bus_add,
+ .remove = acpi_video_bus_remove,
+ .resume = acpi_video_resume,
+ },
+};
+
+struct acpi_video_bus_flags {
+ u8 multihead:1; /* can switch video heads */
+ u8 rom:1; /* can retrieve a video rom */
+ u8 post:1; /* can configure the head to */
+ u8 reserved:5;
+};
+
+struct acpi_video_bus_cap {
+ u8 _DOS:1; /*Enable/Disable output switching */
+ u8 _DOD:1; /*Enumerate all devices attached to display adapter */
+ u8 _ROM:1; /*Get ROM Data */
+ u8 _GPD:1; /*Get POST Device */
+ u8 _SPD:1; /*Set POST Device */
+ u8 _VPO:1; /*Video POST Options */
+ u8 reserved:2;
+};
+
+struct acpi_video_device_attrib {
+ u32 display_index:4; /* A zero-based instance of the Display */
+ u32 display_port_attachment:4; /*This field differentiates the display type */
+ u32 display_type:4; /*Describe the specific type in use */
+ u32 vendor_specific:4; /*Chipset Vendor Specific */
+ u32 bios_can_detect:1; /*BIOS can detect the device */
+ u32 depend_on_vga:1; /*Non-VGA output device whose power is related to
+ the VGA device. */
+ u32 pipe_id:3; /*For VGA multiple-head devices. */
+ u32 reserved:10; /*Must be 0 */
+ u32 device_id_scheme:1; /*Device ID Scheme */
+};
+
+struct acpi_video_enumerated_device {
+ union {
+ u32 int_val;
+ struct acpi_video_device_attrib attrib;
+ } value;
+ struct acpi_video_device *bind_info;
+};
+
+struct acpi_video_bus {
+ struct acpi_device *device;
+ u8 dos_setting;
+ struct acpi_video_enumerated_device *attached_array;
+ u8 attached_count;
+ struct acpi_video_bus_cap cap;
+ struct acpi_video_bus_flags flags;
+ struct list_head video_device_list;
+ struct mutex device_list_lock; /* protects video_device_list */
+ struct proc_dir_entry *dir;
+ struct input_dev *input;
+ char phys[32]; /* for input device */
+};
+
+struct acpi_video_device_flags {
+ u8 crt:1;
+ u8 lcd:1;
+ u8 tvout:1;
+ u8 dvi:1;
+ u8 bios:1;
+ u8 unknown:1;
+ u8 reserved:2;
+};
+
+struct acpi_video_device_cap {
+ u8 _ADR:1; /*Return the unique ID */
+ u8 _BCL:1; /*Query list of brightness control levels supported */
+ u8 _BCM:1; /*Set the brightness level */
+ u8 _BQC:1; /* Get current brightness level */
+ u8 _DDC:1; /*Return the EDID for this device */
+ u8 _DCS:1; /*Return status of output device */
+ u8 _DGS:1; /*Query graphics state */
+ u8 _DSS:1; /*Device state set */
+};
+
+struct acpi_video_device_brightness {
+ int curr;
+ int count;
+ int *levels;
+};
+
+struct acpi_video_device {
+ unsigned long device_id;
+ struct acpi_video_device_flags flags;
+ struct acpi_video_device_cap cap;
+ struct list_head entry;
+ struct acpi_video_bus *video;
+ struct acpi_device *dev;
+ struct acpi_video_device_brightness *brightness;
+ struct backlight_device *backlight;
+ struct thermal_cooling_device *cdev;
+ struct output_device *output_dev;
+};
+
+/* bus */
+static int acpi_video_bus_info_open_fs(struct inode *inode, struct file *file);
+static struct file_operations acpi_video_bus_info_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_video_bus_info_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int acpi_video_bus_ROM_open_fs(struct inode *inode, struct file *file);
+static struct file_operations acpi_video_bus_ROM_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_video_bus_ROM_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int acpi_video_bus_POST_info_open_fs(struct inode *inode,
+ struct file *file);
+static struct file_operations acpi_video_bus_POST_info_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_video_bus_POST_info_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int acpi_video_bus_POST_open_fs(struct inode *inode, struct file *file);
+static struct file_operations acpi_video_bus_POST_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_video_bus_POST_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int acpi_video_bus_DOS_open_fs(struct inode *inode, struct file *file);
+static struct file_operations acpi_video_bus_DOS_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_video_bus_DOS_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* device */
+static int acpi_video_device_info_open_fs(struct inode *inode,
+ struct file *file);
+static struct file_operations acpi_video_device_info_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_video_device_info_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int acpi_video_device_state_open_fs(struct inode *inode,
+ struct file *file);
+static struct file_operations acpi_video_device_state_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_video_device_state_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int acpi_video_device_brightness_open_fs(struct inode *inode,
+ struct file *file);
+static struct file_operations acpi_video_device_brightness_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_video_device_brightness_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int acpi_video_device_EDID_open_fs(struct inode *inode,
+ struct file *file);
+static struct file_operations acpi_video_device_EDID_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_video_device_EDID_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static char device_decode[][30] = {
+ "motherboard VGA device",
+ "PCI VGA device",
+ "AGP VGA device",
+ "UNKNOWN",
+};
+
+static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data);
+static void acpi_video_device_rebind(struct acpi_video_bus *video);
+static void acpi_video_device_bind(struct acpi_video_bus *video,
+ struct acpi_video_device *device);
+static int acpi_video_device_enumerate(struct acpi_video_bus *video);
+static int acpi_video_device_lcd_set_level(struct acpi_video_device *device,
+ int level);
+static int acpi_video_device_lcd_get_level_current(
+ struct acpi_video_device *device,
+ unsigned long long *level);
+static int acpi_video_get_next_level(struct acpi_video_device *device,
+ u32 level_current, u32 event);
+static void acpi_video_switch_brightness(struct acpi_video_device *device,
+ int event);
+static int acpi_video_device_get_state(struct acpi_video_device *device,
+ unsigned long long *state);
+static int acpi_video_output_get(struct output_device *od);
+static int acpi_video_device_set_state(struct acpi_video_device *device, int state);
+
+/*backlight device sysfs support*/
+static int acpi_video_get_brightness(struct backlight_device *bd)
+{
+ unsigned long long cur_level;
+ int i;
+ struct acpi_video_device *vd =
+ (struct acpi_video_device *)bl_get_data(bd);
+ acpi_video_device_lcd_get_level_current(vd, &cur_level);
+ for (i = 2; i < vd->brightness->count; i++) {
+ if (vd->brightness->levels[i] == cur_level)
+ /* The first two entries are special - see page 575
+ of the ACPI spec 3.0 */
+ return i-2;
+ }
+ return 0;
+}
+
+static int acpi_video_set_brightness(struct backlight_device *bd)
+{
+ int request_level = bd->props.brightness+2;
+ struct acpi_video_device *vd =
+ (struct acpi_video_device *)bl_get_data(bd);
+ acpi_video_device_lcd_set_level(vd,
+ vd->brightness->levels[request_level]);
+ return 0;
+}
+
+static struct backlight_ops acpi_backlight_ops = {
+ .get_brightness = acpi_video_get_brightness,
+ .update_status = acpi_video_set_brightness,
+};
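+
+/*
+ * Illustrative note (editorial example with made-up _BCL values): with a
+ * _BCL package of { 80, 50, 20, 40, 60, 80, 100 }, levels[0] and levels[1]
+ * are the full-power and battery defaults, so the backlight class is
+ * registered with max_brightness = count - 3 = 4, and a sysfs brightness of
+ * N selects levels[N + 2] from the sorted list:
+ *
+ *     bd->props.brightness = 3;
+ *     backlight_update_status(bd);    (calls _BCM with levels[5] == 80)
+ */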
+
+/*video output device sysfs support*/
+static int acpi_video_output_get(struct output_device *od)
+{
+ unsigned long long state;
+ struct acpi_video_device *vd =
+ (struct acpi_video_device *)dev_get_drvdata(&od->dev);
+ acpi_video_device_get_state(vd, &state);
+ return (int)state;
+}
+
+static int acpi_video_output_set(struct output_device *od)
+{
+ unsigned long state = od->request_state;
+ struct acpi_video_device *vd =
+ (struct acpi_video_device *)dev_get_drvdata(&od->dev);
+ return acpi_video_device_set_state(vd, state);
+}
+
+static struct output_properties acpi_output_properties = {
+ .set_state = acpi_video_output_set,
+ .get_status = acpi_video_output_get,
+};
+
+
+/* thermal cooling device callbacks */
+static int video_get_max_state(struct thermal_cooling_device *cdev, char *buf)
+{
+ struct acpi_device *device = cdev->devdata;
+ struct acpi_video_device *video = acpi_driver_data(device);
+
+ return sprintf(buf, "%d\n", video->brightness->count - 3);
+}
+
+static int video_get_cur_state(struct thermal_cooling_device *cdev, char *buf)
+{
+ struct acpi_device *device = cdev->devdata;
+ struct acpi_video_device *video = acpi_driver_data(device);
+ unsigned long long level;
+ int state;
+
+ acpi_video_device_lcd_get_level_current(video, &level);
+ for (state = 2; state < video->brightness->count; state++)
+ if (level == video->brightness->levels[state])
+ return sprintf(buf, "%d\n",
+ video->brightness->count - state - 1);
+
+ return -EINVAL;
+}
+
+static int
+video_set_cur_state(struct thermal_cooling_device *cdev, unsigned int state)
+{
+ struct acpi_device *device = cdev->devdata;
+ struct acpi_video_device *video = acpi_driver_data(device);
+ int level;
+
+ if (state >= video->brightness->count - 2)
+ return -EINVAL;
+
+ state = video->brightness->count - state;
+ level = video->brightness->levels[state - 1];
+ return acpi_video_device_lcd_set_level(video, level);
+}
+
+static struct thermal_cooling_device_ops video_cooling_ops = {
+ .get_max_state = video_get_max_state,
+ .get_cur_state = video_get_cur_state,
+ .set_cur_state = video_set_cur_state,
+};
+
+/* --------------------------------------------------------------------------
+ Video Management
+ -------------------------------------------------------------------------- */
+
+/* device */
+
+static int
+acpi_video_device_query(struct acpi_video_device *device, unsigned long long *state)
+{
+ int status;
+
+ status = acpi_evaluate_integer(device->dev->handle, "_DGS", NULL, state);
+
+ return status;
+}
+
+static int
+acpi_video_device_get_state(struct acpi_video_device *device,
+ unsigned long long *state)
+{
+ int status;
+
+ status = acpi_evaluate_integer(device->dev->handle, "_DCS", NULL, state);
+
+ return status;
+}
+
+static int
+acpi_video_device_set_state(struct acpi_video_device *device, int state)
+{
+ int status;
+ union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list args = { 1, &arg0 };
+ unsigned long long ret;
+
+
+ arg0.integer.value = state;
+ status = acpi_evaluate_integer(device->dev->handle, "_DSS", &args, &ret);
+
+ return status;
+}
+
+static int
+acpi_video_device_lcd_query_levels(struct acpi_video_device *device,
+ union acpi_object **levels)
+{
+ int status;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+
+
+ *levels = NULL;
+
+ status = acpi_evaluate_object(device->dev->handle, "_BCL", NULL, &buffer);
+ if (!ACPI_SUCCESS(status))
+ return status;
+ obj = (union acpi_object *)buffer.pointer;
+ if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
+ printk(KERN_ERR PREFIX "Invalid _BCL data\n");
+ status = -EFAULT;
+ goto err;
+ }
+
+ *levels = obj;
+
+ return 0;
+
+ err:
+ kfree(buffer.pointer);
+
+ return status;
+}
+
+static int
+acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level)
+{
+ int status = AE_OK;
+ union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list args = { 1, &arg0 };
+ int state;
+
+
+ arg0.integer.value = level;
+
+ if (device->cap._BCM)
+ status = acpi_evaluate_object(device->dev->handle, "_BCM",
+ &args, NULL);
+ device->brightness->curr = level;
+ for (state = 2; state < device->brightness->count; state++)
+ if (level == device->brightness->levels[state])
+ device->backlight->props.brightness = state - 2;
+
+ return status;
+}
+
+static int
+acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
+ unsigned long long *level)
+{
+ if (device->cap._BQC)
+ return acpi_evaluate_integer(device->dev->handle, "_BQC", NULL,
+ level);
+ *level = device->brightness->curr;
+ return AE_OK;
+}
+
+static int
+acpi_video_device_EDID(struct acpi_video_device *device,
+ union acpi_object **edid, ssize_t length)
+{
+ int status;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list args = { 1, &arg0 };
+
+
+ *edid = NULL;
+
+ if (!device)
+ return -ENODEV;
+ if (length == 128)
+ arg0.integer.value = 1;
+ else if (length == 256)
+ arg0.integer.value = 2;
+ else
+ return -EINVAL;
+
+ status = acpi_evaluate_object(device->dev->handle, "_DDC", &args, &buffer);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ obj = buffer.pointer;
+
+ if (obj && obj->type == ACPI_TYPE_BUFFER)
+ *edid = obj;
+ else {
+ printk(KERN_ERR PREFIX "Invalid _DDC data\n");
+ status = -EFAULT;
+ kfree(obj);
+ }
+
+ return status;
+}
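+
+/*
+ * Illustrative note (editorial): the "length" argument selects how many
+ * 128-byte EDID blocks _DDC is asked for (1 for 128 bytes, 2 for 256), so
+ * callers such as the /proc EDID handler below try 128 first and fall back
+ * to 256:
+ *
+ *     status = acpi_video_device_EDID(dev, &edid, 128);
+ *     if (ACPI_FAILURE(status))
+ *             status = acpi_video_device_EDID(dev, &edid, 256);
+ */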
+
+/* bus */
+
+static int
+acpi_video_bus_set_POST(struct acpi_video_bus *video, unsigned long option)
+{
+ int status;
+ unsigned long long tmp;
+ union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list args = { 1, &arg0 };
+
+
+ arg0.integer.value = option;
+
+ status = acpi_evaluate_integer(video->device->handle, "_SPD", &args, &tmp);
+ if (ACPI_SUCCESS(status))
+ status = tmp ? (-EINVAL) : (AE_OK);
+
+ return status;
+}
+
+static int
+acpi_video_bus_get_POST(struct acpi_video_bus *video, unsigned long long *id)
+{
+ int status;
+
+ status = acpi_evaluate_integer(video->device->handle, "_GPD", NULL, id);
+
+ return status;
+}
+
+static int
+acpi_video_bus_POST_options(struct acpi_video_bus *video,
+ unsigned long long *options)
+{
+ int status;
+
+ status = acpi_evaluate_integer(video->device->handle, "_VPO", NULL, options);
+ *options &= 3;
+
+ return status;
+}
+
+/*
+ * Arg:
+ * video : video bus device pointer
+ * bios_flag :
+ * 0. The system BIOS should NOT automatically switch(toggle)
+ * the active display output.
+ * 1. The system BIOS should automatically switch (toggle) the
+ * active display output. No switch event.
+ * 2. The _DGS value should be locked.
+ * 3. The system BIOS should not automatically switch (toggle) the
+ * active display output, but instead generate the display switch
+ * event notify code.
+ * lcd_flag :
+ * 0. The system BIOS should automatically control the brightness level
+ * of the LCD when the power changes from AC to DC
+ * 1. The system BIOS should NOT automatically control the brightness
+ * level of the LCD when the power changes from AC to DC.
+ * Return Value:
+ * 0 on success, -1 if either flag is out of range.
+ */
+
+static int
+acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag)
+{
+ acpi_integer status = 0;
+ union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list args = { 1, &arg0 };
+
+
+ if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1) {
+ status = -1;
+ goto Failed;
+ }
+ arg0.integer.value = (lcd_flag << 2) | bios_flag;
+ video->dos_setting = arg0.integer.value;
+ acpi_evaluate_object(video->device->handle, "_DOS", &args, NULL);
+
+ Failed:
+ return status;
+}
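+
+/*
+ * Illustrative sketch of the _DOS argument encoding (editorial example):
+ * bios_flag occupies bits 0-1 and lcd_flag bit 2, so
+ *
+ *     acpi_video_bus_DOS(video, 0, 1);
+ *
+ * evaluates _DOS with (1 << 2) | 0 == 4, telling the BIOS neither to switch
+ * the display output nor to adjust LCD brightness on AC/DC transitions,
+ * leaving both to the OS.
+ */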
+
+/*
+ * Simple comparison function used to sort backlight levels.
+ */
+
+static int
+acpi_video_cmp_level(const void *a, const void *b)
+{
+ return *(int *)a - *(int *)b;
+}
+
+/*
+ * Arg:
+ * device : video output device (LCD, CRT, ..)
+ *
+ * Return Value:
+ * Maximum brightness level
+ *
+ * Allocate and initialize device->brightness.
+ */
+
+static int
+acpi_video_init_brightness(struct acpi_video_device *device)
+{
+ union acpi_object *obj = NULL;
+ int i, max_level = 0, count = 0;
+ union acpi_object *o;
+ struct acpi_video_device_brightness *br = NULL;
+
+ if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available "
+ "LCD brightness level\n"));
+ goto out;
+ }
+
+ if (obj->package.count < 2)
+ goto out;
+
+ br = kzalloc(sizeof(*br), GFP_KERNEL);
+ if (!br) {
+ printk(KERN_ERR PREFIX "can't allocate memory\n");
+ goto out;
+ }
+
+ br->levels = kmalloc(obj->package.count * sizeof *(br->levels),
+ GFP_KERNEL);
+ if (!br->levels)
+ goto out_free;
+
+ for (i = 0; i < obj->package.count; i++) {
+ o = (union acpi_object *)&obj->package.elements[i];
+ if (o->type != ACPI_TYPE_INTEGER) {
+ printk(KERN_ERR PREFIX "Invalid data\n");
+ continue;
+ }
+ br->levels[count] = (u32) o->integer.value;
+
+ if (br->levels[count] > max_level)
+ max_level = br->levels[count];
+ count++;
+ }
+
+ /* don't sort the first two brightness levels */
+ sort(&br->levels[2], count - 2, sizeof(br->levels[2]),
+ acpi_video_cmp_level, NULL);
+
+ if (count < 2)
+ goto out_free_levels;
+
+ br->count = count;
+ device->brightness = br;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "found %d brightness levels\n", count));
+ kfree(obj);
+ return max_level;
+
+out_free_levels:
+ kfree(br->levels);
+out_free:
+ kfree(br);
+out:
+ device->brightness = NULL;
+ kfree(obj);
+ return 0;
+}
+
+/*
+ * Arg:
+ * device : video output device (LCD, CRT, ..)
+ *
+ * Return Value:
+ * None
+ *
+ * Find out all required AML methods defined under the output
+ * device.
+ */
+
+static void acpi_video_device_find_cap(struct acpi_video_device *device)
+{
+ acpi_handle h_dummy1;
+ u32 max_level = 0;
+
+
+ memset(&device->cap, 0, sizeof(device->cap));
+
+ if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_ADR", &h_dummy1))) {
+ device->cap._ADR = 1;
+ }
+ if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_BCL", &h_dummy1))) {
+ device->cap._BCL = 1;
+ }
+ if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_BCM", &h_dummy1))) {
+ device->cap._BCM = 1;
+ }
+ if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_BQC", &h_dummy1))) {
+ device->cap._BQC = 1;
+ }
+ if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DDC", &h_dummy1))) {
+ device->cap._DDC = 1;
+ }
+ if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DCS", &h_dummy1))) {
+ device->cap._DCS = 1;
+ }
+ if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DGS", &h_dummy1))) {
+ device->cap._DGS = 1;
+ }
+ if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DSS", &h_dummy1))) {
+ device->cap._DSS = 1;
+ }
+
+ if (acpi_video_backlight_support())
+ max_level = acpi_video_init_brightness(device);
+
+ if (device->cap._BCL && device->cap._BCM && max_level > 0) {
+ int result;
+ static int count = 0;
+ char *name;
+ name = kzalloc(MAX_NAME_LEN, GFP_KERNEL);
+ if (!name)
+ return;
+
+ sprintf(name, "acpi_video%d", count++);
+ device->backlight = backlight_device_register(name,
+ NULL, device, &acpi_backlight_ops);
+ if (IS_ERR(device->backlight)) {
+ kfree(name);
+ return;
+ }
+ device->backlight->props.max_brightness = device->brightness->count - 3;
+ /*
+ * If there exists the _BQC object, the _BQC object will be
+ * called to get the current backlight brightness. Otherwise
+ * the brightness will be set to the maximum.
+ */
+ if (device->cap._BQC)
+ device->backlight->props.brightness =
+ acpi_video_get_brightness(device->backlight);
+ else
+ device->backlight->props.brightness =
+ device->backlight->props.max_brightness;
+ backlight_update_status(device->backlight);
+ kfree(name);
+
+ device->cdev = thermal_cooling_device_register("LCD",
+ device->dev, &video_cooling_ops);
+ if (IS_ERR(device->cdev))
+ return;
+
+ dev_info(&device->dev->dev, "registered as cooling_device%d\n",
+ device->cdev->id);
+ result = sysfs_create_link(&device->dev->dev.kobj,
+ &device->cdev->device.kobj,
+ "thermal_cooling");
+ if (result)
+ printk(KERN_ERR PREFIX
+ "Failed to create 'thermal_cooling' sysfs link\n");
+ result = sysfs_create_link(&device->cdev->device.kobj,
+ &device->dev->dev.kobj, "device");
+ if (result)
+ printk(KERN_ERR PREFIX
+ "Failed to create 'device' sysfs link\n");
+
+ }
+
+ if (acpi_video_display_switch_support()) {
+
+ if (device->cap._DCS && device->cap._DSS) {
+ static int count;
+ char *name;
+ name = kzalloc(MAX_NAME_LEN, GFP_KERNEL);
+ if (!name)
+ return;
+ sprintf(name, "acpi_video%d", count++);
+ device->output_dev = video_output_register(name,
+ NULL, device, &acpi_output_properties);
+ kfree(name);
+ }
+ }
+}
+
+/*
+ * Arg:
+ * device : video output device (VGA)
+ *
+ * Return Value:
+ * None
+ *
+ * Find out all required AML methods defined under the video bus device.
+ */
+
+static void acpi_video_bus_find_cap(struct acpi_video_bus *video)
+{
+ acpi_handle h_dummy1;
+
+ memset(&video->cap, 0, sizeof(video->cap));
+ if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_DOS", &h_dummy1))) {
+ video->cap._DOS = 1;
+ }
+ if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_DOD", &h_dummy1))) {
+ video->cap._DOD = 1;
+ }
+ if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_ROM", &h_dummy1))) {
+ video->cap._ROM = 1;
+ }
+ if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_GPD", &h_dummy1))) {
+ video->cap._GPD = 1;
+ }
+ if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_SPD", &h_dummy1))) {
+ video->cap._SPD = 1;
+ }
+ if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_VPO", &h_dummy1))) {
+ video->cap._VPO = 1;
+ }
+}
+
+/*
+ * Check whether the video bus device has required AML method to
+ * support the desired features
+ */
+
+static int acpi_video_bus_check(struct acpi_video_bus *video)
+{
+ acpi_status status = -ENOENT;
+ struct device *dev;
+
+ if (!video)
+ return -EINVAL;
+
+ dev = acpi_get_physical_pci_device(video->device->handle);
+ if (!dev)
+ return -ENODEV;
+ put_device(dev);
+
+ /* Since there is no HID, CID and so on for VGA driver, we have
+ * to check well known required nodes.
+ */
+
+ /* Does this device support video switching? */
+ if (video->cap._DOS) {
+ video->flags.multihead = 1;
+ status = 0;
+ }
+
+ /* Does this device support retrieving a video ROM? */
+ if (video->cap._ROM) {
+ video->flags.rom = 1;
+ status = 0;
+ }
+
+ /* Does this device support configuring which video device to POST? */
+ if (video->cap._GPD && video->cap._SPD && video->cap._VPO) {
+ video->flags.post = 1;
+ status = 0;
+ }
+
+ return status;
+}
+
+/* --------------------------------------------------------------------------
+ FS Interface (/proc)
+ -------------------------------------------------------------------------- */
+
+static struct proc_dir_entry *acpi_video_dir;
+
+/* video devices */
+
+static int acpi_video_device_info_seq_show(struct seq_file *seq, void *offset)
+{
+ struct acpi_video_device *dev = seq->private;
+
+
+ if (!dev)
+ goto end;
+
+ seq_printf(seq, "device_id: 0x%04x\n", (u32) dev->device_id);
+ seq_printf(seq, "type: ");
+ if (dev->flags.crt)
+ seq_printf(seq, "CRT\n");
+ else if (dev->flags.lcd)
+ seq_printf(seq, "LCD\n");
+ else if (dev->flags.tvout)
+ seq_printf(seq, "TVOUT\n");
+ else if (dev->flags.dvi)
+ seq_printf(seq, "DVI\n");
+ else
+ seq_printf(seq, "UNKNOWN\n");
+
+ seq_printf(seq, "known by bios: %s\n", dev->flags.bios ? "yes" : "no");
+
+ end:
+ return 0;
+}
+
+static int
+acpi_video_device_info_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_video_device_info_seq_show,
+ PDE(inode)->data);
+}
+
+static int acpi_video_device_state_seq_show(struct seq_file *seq, void *offset)
+{
+ int status;
+ struct acpi_video_device *dev = seq->private;
+ unsigned long long state;
+
+
+ if (!dev)
+ goto end;
+
+ status = acpi_video_device_get_state(dev, &state);
+ seq_printf(seq, "state: ");
+ if (ACPI_SUCCESS(status))
+ seq_printf(seq, "0x%02llx\n", state);
+ else
+ seq_printf(seq, "<not supported>\n");
+
+ status = acpi_video_device_query(dev, &state);
+ seq_printf(seq, "query: ");
+ if (ACPI_SUCCESS(status))
+ seq_printf(seq, "0x%02llx\n", state);
+ else
+ seq_printf(seq, "<not supported>\n");
+
+ end:
+ return 0;
+}
+
+static int
+acpi_video_device_state_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_video_device_state_seq_show,
+ PDE(inode)->data);
+}
+
+static ssize_t
+acpi_video_device_write_state(struct file *file,
+ const char __user * buffer,
+ size_t count, loff_t * data)
+{
+ int status;
+ struct seq_file *m = file->private_data;
+ struct acpi_video_device *dev = m->private;
+ char str[12] = { 0 };
+ u32 state = 0;
+
+
+ if (!dev || count + 1 > sizeof str)
+ return -EINVAL;
+
+ if (copy_from_user(str, buffer, count))
+ return -EFAULT;
+
+ str[count] = 0;
+ state = simple_strtoul(str, NULL, 0);
+ state &= ((1ul << 31) | (1ul << 30) | (1ul << 0));
+
+ status = acpi_video_device_set_state(dev, state);
+
+ if (status)
+ return -EFAULT;
+
+ return count;
+}
+
+static int
+acpi_video_device_brightness_seq_show(struct seq_file *seq, void *offset)
+{
+ struct acpi_video_device *dev = seq->private;
+ int i;
+
+
+ if (!dev || !dev->brightness) {
+ seq_printf(seq, "<not supported>\n");
+ return 0;
+ }
+
+ seq_printf(seq, "levels: ");
+ for (i = 2; i < dev->brightness->count; i++)
+ seq_printf(seq, " %d", dev->brightness->levels[i]);
+ seq_printf(seq, "\ncurrent: %d\n", dev->brightness->curr);
+
+ return 0;
+}
+
+static int
+acpi_video_device_brightness_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_video_device_brightness_seq_show,
+ PDE(inode)->data);
+}
+
+static ssize_t
+acpi_video_device_write_brightness(struct file *file,
+ const char __user * buffer,
+ size_t count, loff_t * data)
+{
+ struct seq_file *m = file->private_data;
+ struct acpi_video_device *dev = m->private;
+ char str[5] = { 0 };
+ unsigned int level = 0;
+ int i;
+
+
+ if (!dev || !dev->brightness || count + 1 > sizeof str)
+ return -EINVAL;
+
+ if (copy_from_user(str, buffer, count))
+ return -EFAULT;
+
+ str[count] = 0;
+ level = simple_strtoul(str, NULL, 0);
+
+ if (level > 100)
+ return -EFAULT;
+
+ /* validate through the list of available levels */
+ for (i = 2; i < dev->brightness->count; i++)
+ if (level == dev->brightness->levels[i]) {
+ if (ACPI_SUCCESS
+ (acpi_video_device_lcd_set_level(dev, level)))
+ dev->brightness->curr = level;
+ break;
+ }
+
+ return count;
+}
+
+static int acpi_video_device_EDID_seq_show(struct seq_file *seq, void *offset)
+{
+ struct acpi_video_device *dev = seq->private;
+ int status;
+ int i;
+ union acpi_object *edid = NULL;
+
+
+ if (!dev)
+ goto out;
+
+ status = acpi_video_device_EDID(dev, &edid, 128);
+ if (ACPI_FAILURE(status)) {
+ status = acpi_video_device_EDID(dev, &edid, 256);
+ }
+
+ if (ACPI_FAILURE(status)) {
+ goto out;
+ }
+
+ if (edid && edid->type == ACPI_TYPE_BUFFER) {
+ for (i = 0; i < edid->buffer.length; i++)
+ seq_putc(seq, edid->buffer.pointer[i]);
+ }
+
+ out:
+ if (!edid)
+ seq_printf(seq, "<not supported>\n");
+ else
+ kfree(edid);
+
+ return 0;
+}
+
+static int
+acpi_video_device_EDID_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_video_device_EDID_seq_show,
+ PDE(inode)->data);
+}
+
+static int acpi_video_device_add_fs(struct acpi_device *device)
+{
+ struct proc_dir_entry *entry, *device_dir;
+ struct acpi_video_device *vid_dev;
+
+ vid_dev = acpi_driver_data(device);
+ if (!vid_dev)
+ return -ENODEV;
+
+ device_dir = proc_mkdir(acpi_device_bid(device),
+ vid_dev->video->dir);
+ if (!device_dir)
+ return -ENOMEM;
+
+ device_dir->owner = THIS_MODULE;
+
+ /* 'info' [R] */
+ entry = proc_create_data("info", S_IRUGO, device_dir,
+ &acpi_video_device_info_fops, acpi_driver_data(device));
+ if (!entry)
+ goto err_remove_dir;
+
+ /* 'state' [R/W] */
+ acpi_video_device_state_fops.write = acpi_video_device_write_state;
+ entry = proc_create_data("state", S_IFREG | S_IRUGO | S_IWUSR,
+ device_dir,
+ &acpi_video_device_state_fops,
+ acpi_driver_data(device));
+ if (!entry)
+ goto err_remove_info;
+
+ /* 'brightness' [R/W] */
+ acpi_video_device_brightness_fops.write =
+ acpi_video_device_write_brightness;
+ entry = proc_create_data("brightness", S_IFREG | S_IRUGO | S_IWUSR,
+ device_dir,
+ &acpi_video_device_brightness_fops,
+ acpi_driver_data(device));
+ if (!entry)
+ goto err_remove_state;
+
+ /* 'EDID' [R] */
+ entry = proc_create_data("EDID", S_IRUGO, device_dir,
+ &acpi_video_device_EDID_fops,
+ acpi_driver_data(device));
+ if (!entry)
+ goto err_remove_brightness;
+
+ acpi_device_dir(device) = device_dir;
+
+ return 0;
+
+ err_remove_brightness:
+ remove_proc_entry("brightness", device_dir);
+ err_remove_state:
+ remove_proc_entry("state", device_dir);
+ err_remove_info:
+ remove_proc_entry("info", device_dir);
+ err_remove_dir:
+ remove_proc_entry(acpi_device_bid(device), vid_dev->video->dir);
+ return -ENOMEM;
+}
+
+static int acpi_video_device_remove_fs(struct acpi_device *device)
+{
+ struct acpi_video_device *vid_dev;
+ struct proc_dir_entry *device_dir;
+
+ vid_dev = acpi_driver_data(device);
+ if (!vid_dev || !vid_dev->video || !vid_dev->video->dir)
+ return -ENODEV;
+
+ device_dir = acpi_device_dir(device);
+ if (device_dir) {
+ remove_proc_entry("info", device_dir);
+ remove_proc_entry("state", device_dir);
+ remove_proc_entry("brightness", device_dir);
+ remove_proc_entry("EDID", device_dir);
+ remove_proc_entry(acpi_device_bid(device), vid_dev->video->dir);
+ acpi_device_dir(device) = NULL;
+ }
+
+ return 0;
+}
+
+/* video bus */
+static int acpi_video_bus_info_seq_show(struct seq_file *seq, void *offset)
+{
+ struct acpi_video_bus *video = seq->private;
+
+
+ if (!video)
+ goto end;
+
+ seq_printf(seq, "Switching heads: %s\n",
+ video->flags.multihead ? "yes" : "no");
+ seq_printf(seq, "Video ROM: %s\n",
+ video->flags.rom ? "yes" : "no");
+ seq_printf(seq, "Device to be POSTed on boot: %s\n",
+ video->flags.post ? "yes" : "no");
+
+ end:
+ return 0;
+}
+
+static int acpi_video_bus_info_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_video_bus_info_seq_show,
+ PDE(inode)->data);
+}
+
+static int acpi_video_bus_ROM_seq_show(struct seq_file *seq, void *offset)
+{
+ struct acpi_video_bus *video = seq->private;
+
+
+ if (!video)
+ goto end;
+
+ printk(KERN_INFO PREFIX "Please implement %s\n", __func__);
+ seq_printf(seq, "<TODO>\n");
+
+ end:
+ return 0;
+}
+
+static int acpi_video_bus_ROM_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_video_bus_ROM_seq_show, PDE(inode)->data);
+}
+
+static int acpi_video_bus_POST_info_seq_show(struct seq_file *seq, void *offset)
+{
+ struct acpi_video_bus *video = seq->private;
+ unsigned long long options;
+ int status;
+
+
+ if (!video)
+ goto end;
+
+ status = acpi_video_bus_POST_options(video, &options);
+ if (ACPI_SUCCESS(status)) {
+ if (!(options & 1)) {
+ printk(KERN_WARNING PREFIX
+ "The motherboard VGA device is not listed as a possible POST device.\n");
+ printk(KERN_WARNING PREFIX
+ "This indicates a BIOS bug. Please contact the manufacturer.\n");
+ }
+ printk("%llx\n", options);
+ seq_printf(seq, "can POST: <integrated video>");
+ if (options & 2)
+ seq_printf(seq, " <PCI video>");
+ if (options & 4)
+ seq_printf(seq, " <AGP video>");
+ seq_putc(seq, '\n');
+ } else
+ seq_printf(seq, "<not supported>\n");
+ end:
+ return 0;
+}
+
+static int
+acpi_video_bus_POST_info_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_video_bus_POST_info_seq_show,
+ PDE(inode)->data);
+}
+
+static int acpi_video_bus_POST_seq_show(struct seq_file *seq, void *offset)
+{
+ struct acpi_video_bus *video = seq->private;
+ int status;
+ unsigned long long id;
+
+
+ if (!video)
+ goto end;
+
+ status = acpi_video_bus_get_POST(video, &id);
+ if (!ACPI_SUCCESS(status)) {
+ seq_printf(seq, "<not supported>\n");
+ goto end;
+ }
+ seq_printf(seq, "device POSTed is <%s>\n", device_decode[id & 3]);
+
+ end:
+ return 0;
+}
+
+static int acpi_video_bus_DOS_seq_show(struct seq_file *seq, void *offset)
+{
+ struct acpi_video_bus *video = seq->private;
+
+
+ seq_printf(seq, "DOS setting: <%d>\n", video->dos_setting);
+
+ return 0;
+}
+
+static int acpi_video_bus_POST_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_video_bus_POST_seq_show,
+ PDE(inode)->data);
+}
+
+static int acpi_video_bus_DOS_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_video_bus_DOS_seq_show, PDE(inode)->data);
+}
+
+static ssize_t
+acpi_video_bus_write_POST(struct file *file,
+ const char __user * buffer,
+ size_t count, loff_t * data)
+{
+ int status;
+ struct seq_file *m = file->private_data;
+ struct acpi_video_bus *video = m->private;
+ char str[12] = { 0 };
+ unsigned long long opt, options;
+
+
+ if (!video || count + 1 > sizeof str)
+ return -EINVAL;
+
+ status = acpi_video_bus_POST_options(video, &options);
+ if (!ACPI_SUCCESS(status))
+ return -EINVAL;
+
+ if (copy_from_user(str, buffer, count))
+ return -EFAULT;
+
+ str[count] = 0;
+ opt = simple_strtoul(str, NULL, 0);
+ if (opt > 3)
+ return -EFAULT;
+
+ /* just in case an OEM 'forgot' the motherboard... */
+ options |= 1;
+
+ if (options & (1ul << opt)) {
+ status = acpi_video_bus_set_POST(video, opt);
+ if (!ACPI_SUCCESS(status))
+ return -EFAULT;
+
+ }
+
+ return count;
+}
+
+static ssize_t
+acpi_video_bus_write_DOS(struct file *file,
+ const char __user * buffer,
+ size_t count, loff_t * data)
+{
+ int status;
+ struct seq_file *m = file->private_data;
+ struct acpi_video_bus *video = m->private;
+ char str[12] = { 0 };
+ unsigned long opt;
+
+
+ if (!video || count + 1 > sizeof str)
+ return -EINVAL;
+
+ if (copy_from_user(str, buffer, count))
+ return -EFAULT;
+
+ str[count] = 0;
+ opt = simple_strtoul(str, NULL, 0);
+ if (opt > 7)
+ return -EFAULT;
+
+ status = acpi_video_bus_DOS(video, opt & 0x3, (opt & 0x4) >> 2);
+
+ if (!ACPI_SUCCESS(status))
+ return -EFAULT;
+
+ return count;
+}
+
+static int acpi_video_bus_add_fs(struct acpi_device *device)
+{
+ struct acpi_video_bus *video = acpi_driver_data(device);
+ struct proc_dir_entry *device_dir;
+ struct proc_dir_entry *entry;
+
+ device_dir = proc_mkdir(acpi_device_bid(device), acpi_video_dir);
+ if (!device_dir)
+ return -ENOMEM;
+
+ device_dir->owner = THIS_MODULE;
+
+ /* 'info' [R] */
+ entry = proc_create_data("info", S_IRUGO, device_dir,
+ &acpi_video_bus_info_fops,
+ acpi_driver_data(device));
+ if (!entry)
+ goto err_remove_dir;
+
+ /* 'ROM' [R] */
+ entry = proc_create_data("ROM", S_IRUGO, device_dir,
+ &acpi_video_bus_ROM_fops,
+ acpi_driver_data(device));
+ if (!entry)
+ goto err_remove_info;
+
+ /* 'POST_info' [R] */
+ entry = proc_create_data("POST_info", S_IRUGO, device_dir,
+ &acpi_video_bus_POST_info_fops,
+ acpi_driver_data(device));
+ if (!entry)
+ goto err_remove_rom;
+
+ /* 'POST' [R/W] */
+ acpi_video_bus_POST_fops.write = acpi_video_bus_write_POST;
+ entry = proc_create_data("POST", S_IFREG | S_IRUGO | S_IWUSR,
+ device_dir,
+ &acpi_video_bus_POST_fops,
+ acpi_driver_data(device));
+ if (!entry)
+ goto err_remove_post_info;
+
+ /* 'DOS' [R/W] */
+ acpi_video_bus_DOS_fops.write = acpi_video_bus_write_DOS;
+ entry = proc_create_data("DOS", S_IFREG | S_IRUGO | S_IWUSR,
+ device_dir,
+ &acpi_video_bus_DOS_fops,
+ acpi_driver_data(device));
+ if (!entry)
+ goto err_remove_post;
+
+ video->dir = acpi_device_dir(device) = device_dir;
+ return 0;
+
+ err_remove_post:
+ remove_proc_entry("POST", device_dir);
+ err_remove_post_info:
+ remove_proc_entry("POST_info", device_dir);
+ err_remove_rom:
+ remove_proc_entry("ROM", device_dir);
+ err_remove_info:
+ remove_proc_entry("info", device_dir);
+ err_remove_dir:
+ remove_proc_entry(acpi_device_bid(device), acpi_video_dir);
+ return -ENOMEM;
+}
+
+static int acpi_video_bus_remove_fs(struct acpi_device *device)
+{
+ struct proc_dir_entry *device_dir = acpi_device_dir(device);
+
+ if (device_dir) {
+ remove_proc_entry("info", device_dir);
+ remove_proc_entry("ROM", device_dir);
+ remove_proc_entry("POST_info", device_dir);
+ remove_proc_entry("POST", device_dir);
+ remove_proc_entry("DOS", device_dir);
+ remove_proc_entry(acpi_device_bid(device), acpi_video_dir);
+ acpi_device_dir(device) = NULL;
+ }
+
+ return 0;
+}
+
+/* --------------------------------------------------------------------------
+ Driver Interface
+ -------------------------------------------------------------------------- */
+
+/* device interface */
+static struct acpi_video_device_attrib*
+acpi_video_get_device_attr(struct acpi_video_bus *video, unsigned long device_id)
+{
+ struct acpi_video_enumerated_device *ids;
+ int i;
+
+ for (i = 0; i < video->attached_count; i++) {
+ ids = &video->attached_array[i];
+ if ((ids->value.int_val & 0xffff) == device_id)
+ return &ids->value.attrib;
+ }
+
+ return NULL;
+}
+
+static int
+acpi_video_bus_get_one_device(struct acpi_device *device,
+ struct acpi_video_bus *video)
+{
+ unsigned long long device_id;
+ int status;
+ struct acpi_video_device *data;
+ struct acpi_video_device_attrib* attribute;
+
+ if (!device || !video)
+ return -EINVAL;
+
+ status =
+ acpi_evaluate_integer(device->handle, "_ADR", NULL, &device_id);
+ if (ACPI_SUCCESS(status)) {
+
+ data = kzalloc(sizeof(struct acpi_video_device), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
+ device->driver_data = data;
+
+ data->device_id = device_id;
+ data->video = video;
+ data->dev = device;
+
+ attribute = acpi_video_get_device_attr(video, device_id);
+
+ if((attribute != NULL) && attribute->device_id_scheme) {
+ switch (attribute->display_type) {
+ case ACPI_VIDEO_DISPLAY_CRT:
+ data->flags.crt = 1;
+ break;
+ case ACPI_VIDEO_DISPLAY_TV:
+ data->flags.tvout = 1;
+ break;
+ case ACPI_VIDEO_DISPLAY_DVI:
+ data->flags.dvi = 1;
+ break;
+ case ACPI_VIDEO_DISPLAY_LCD:
+ data->flags.lcd = 1;
+ break;
+ default:
+ data->flags.unknown = 1;
+ break;
+ }
+ if(attribute->bios_can_detect)
+ data->flags.bios = 1;
+ } else
+ data->flags.unknown = 1;
+
+ acpi_video_device_bind(video, data);
+ acpi_video_device_find_cap(data);
+
+ status = acpi_install_notify_handler(device->handle,
+ ACPI_DEVICE_NOTIFY,
+ acpi_video_device_notify,
+ data);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX
+ "Error installing notify handler\n");
+ if(data->brightness)
+ kfree(data->brightness->levels);
+ kfree(data->brightness);
+ kfree(data);
+ return -ENODEV;
+ }
+
+ mutex_lock(&video->device_list_lock);
+ list_add_tail(&data->entry, &video->video_device_list);
+ mutex_unlock(&video->device_list_lock);
+
+ acpi_video_device_add_fs(device);
+
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+/*
+ * Arg:
+ * video : video bus device
+ *
+ * Return:
+ * none
+ *
+ * Enumerate the video device list of the video bus,
+ * bind the ids with the corresponding video devices
+ * under the video bus.
+ */
+
+static void acpi_video_device_rebind(struct acpi_video_bus *video)
+{
+ struct acpi_video_device *dev;
+
+ mutex_lock(&video->device_list_lock);
+
+ list_for_each_entry(dev, &video->video_device_list, entry)
+ acpi_video_device_bind(video, dev);
+
+ mutex_unlock(&video->device_list_lock);
+}
+
+/*
+ * Arg:
+ * video : video bus device
+ * device : video output device under the video
+ * bus
+ *
+ * Return:
+ * none
+ *
+ * Bind the ids with the corresponding video devices
+ * under the video bus.
+ */
+
+static void
+acpi_video_device_bind(struct acpi_video_bus *video,
+ struct acpi_video_device *device)
+{
+ struct acpi_video_enumerated_device *ids;
+ int i;
+
+ for (i = 0; i < video->attached_count; i++) {
+ ids = &video->attached_array[i];
+ if (device->device_id == (ids->value.int_val & 0xffff)) {
+ ids->bind_info = device;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "device_bind %d\n", i));
+ }
+ }
+}
+
+/*
+ * Arg:
+ * video : video bus device
+ *
+ * Return:
+ * < 0 : error
+ *
+ * Call _DOD to enumerate all devices attached to display adapter
+ *
+ */
+
+static int acpi_video_device_enumerate(struct acpi_video_bus *video)
+{
+ int status;
+ int count;
+ int i;
+ struct acpi_video_enumerated_device *active_list;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *dod = NULL;
+ union acpi_object *obj;
+
+ status = acpi_evaluate_object(video->device->handle, "_DOD", NULL, &buffer);
+ if (!ACPI_SUCCESS(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _DOD"));
+ return status;
+ }
+
+ dod = buffer.pointer;
+ if (!dod || (dod->type != ACPI_TYPE_PACKAGE)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Invalid _DOD data"));
+ status = -EFAULT;
+ goto out;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d video heads in _DOD\n",
+ dod->package.count));
+
+ active_list = kcalloc(1 + dod->package.count,
+ sizeof(struct acpi_video_enumerated_device),
+ GFP_KERNEL);
+ if (!active_list) {
+ status = -ENOMEM;
+ goto out;
+ }
+
+ count = 0;
+ for (i = 0; i < dod->package.count; i++) {
+ obj = &dod->package.elements[i];
+
+ if (obj->type != ACPI_TYPE_INTEGER) {
+ printk(KERN_ERR PREFIX
+ "Invalid _DOD data in element %d\n", i);
+ continue;
+ }
+
+ active_list[count].value.int_val = obj->integer.value;
+ active_list[count].bind_info = NULL;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "dod element[%d] = %d\n", i,
+ (int)obj->integer.value));
+ count++;
+ }
+
+ kfree(video->attached_array);
+
+ video->attached_array = active_list;
+ video->attached_count = count;
+
+ out:
+ kfree(buffer.pointer);
+ return status;
+}
+
+static int
+acpi_video_get_next_level(struct acpi_video_device *device,
+ u32 level_current, u32 event)
+{
+ int min, max, min_above, max_below, i, l, delta = 255;
+ max = max_below = 0;
+ min = min_above = 255;
+ /* Find closest level to level_current */
+ for (i = 2; i < device->brightness->count; i++) {
+ l = device->brightness->levels[i];
+ if (abs(l - level_current) < abs(delta)) {
+ delta = l - level_current;
+ if (!delta)
+ break;
+ }
+ }
+ /* Adjust level_current to closest available level */
+ level_current += delta;
+ for (i = 2; i < device->brightness->count; i++) {
+ l = device->brightness->levels[i];
+ if (l < min)
+ min = l;
+ if (l > max)
+ max = l;
+ if (l < min_above && l > level_current)
+ min_above = l;
+ if (l > max_below && l < level_current)
+ max_below = l;
+ }
+
+ switch (event) {
+ case ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS:
+ return (level_current < max) ? min_above : min;
+ case ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS:
+ return (level_current < max) ? min_above : max;
+ case ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS:
+ return (level_current > min) ? max_below : min;
+ case ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS:
+ case ACPI_VIDEO_NOTIFY_DISPLAY_OFF:
+ return 0;
+ default:
+ return level_current;
+ }
+}
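+
+/*
+ * Worked example (hypothetical _BCL levels, for illustration): with
+ * levels[] = { 100, 50, 20, 40, 60, 80, 100 } (the first two entries are
+ * the AC/battery defaults and are skipped) and level_current = 45, the
+ * first loop snaps level_current to the closest listed level, 40. The
+ * second loop then yields min = 20, max = 100, min_above = 60 and
+ * max_below = 20, so an INC_BRIGHTNESS event returns 60, DEC_BRIGHTNESS
+ * returns 20 and CYCLE_BRIGHTNESS returns 60.
+ */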
+
+static void
+acpi_video_switch_brightness(struct acpi_video_device *device, int event)
+{
+ unsigned long long level_current, level_next;
+ if (!device->brightness)
+ return;
+ acpi_video_device_lcd_get_level_current(device, &level_current);
+ level_next = acpi_video_get_next_level(device, level_current, event);
+ acpi_video_device_lcd_set_level(device, level_next);
+}
+
+static int
+acpi_video_bus_get_devices(struct acpi_video_bus *video,
+ struct acpi_device *device)
+{
+ int status = 0;
+ struct acpi_device *dev;
+
+ acpi_video_device_enumerate(video);
+
+ list_for_each_entry(dev, &device->children, node) {
+
+ status = acpi_video_bus_get_one_device(dev, video);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_WARNING PREFIX
+ "Cant attach device");
+ continue;
+ }
+ }
+ return status;
+}
+
+static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
+{
+ acpi_status status;
+ struct acpi_video_bus *video;
+
+
+ if (!device || !device->video)
+ return -ENOENT;
+
+ video = device->video;
+
+ acpi_video_device_remove_fs(device->dev);
+
+ status = acpi_remove_notify_handler(device->dev->handle,
+ ACPI_DEVICE_NOTIFY,
+ acpi_video_device_notify);
+ backlight_device_unregister(device->backlight);
+ if (device->cdev) {
+ sysfs_remove_link(&device->dev->dev.kobj,
+ "thermal_cooling");
+ sysfs_remove_link(&device->cdev->device.kobj,
+ "device");
+ thermal_cooling_device_unregister(device->cdev);
+ device->cdev = NULL;
+ }
+ video_output_unregister(device->output_dev);
+
+ return 0;
+}
+
+static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
+{
+ int status;
+ struct acpi_video_device *dev, *next;
+
+ mutex_lock(&video->device_list_lock);
+
+ list_for_each_entry_safe(dev, next, &video->video_device_list, entry) {
+
+ status = acpi_video_bus_put_one_device(dev);
+ if (ACPI_FAILURE(status))
+ printk(KERN_WARNING PREFIX
+ "hhuuhhuu bug in acpi video driver.\n");
+
+ if (dev->brightness) {
+ kfree(dev->brightness->levels);
+ kfree(dev->brightness);
+ }
+ list_del(&dev->entry);
+ kfree(dev);
+ }
+
+ mutex_unlock(&video->device_list_lock);
+
+ return 0;
+}
+
+/* acpi_video interface */
+
+static int acpi_video_bus_start_devices(struct acpi_video_bus *video)
+{
+ return acpi_video_bus_DOS(video, 0, 0);
+}
+
+static int acpi_video_bus_stop_devices(struct acpi_video_bus *video)
+{
+ return acpi_video_bus_DOS(video, 0, 1);
+}
+
+static void acpi_video_bus_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct acpi_video_bus *video = data;
+ struct acpi_device *device = NULL;
+ struct input_dev *input;
+ int keycode;
+
+ if (!video)
+ return;
+
+ device = video->device;
+ input = video->input;
+
+ switch (event) {
+ case ACPI_VIDEO_NOTIFY_SWITCH: /* User requested a switch,
+ * most likely via hotkey. */
+ acpi_bus_generate_proc_event(device, event, 0);
+ keycode = KEY_SWITCHVIDEOMODE;
+ break;
+
+ case ACPI_VIDEO_NOTIFY_PROBE: /* User plugged in or removed a video
+ * connector. */
+ acpi_video_device_enumerate(video);
+ acpi_video_device_rebind(video);
+ acpi_bus_generate_proc_event(device, event, 0);
+ keycode = KEY_SWITCHVIDEOMODE;
+ break;
+
+ case ACPI_VIDEO_NOTIFY_CYCLE: /* Cycle Display output hotkey pressed. */
+ acpi_bus_generate_proc_event(device, event, 0);
+ keycode = KEY_SWITCHVIDEOMODE;
+ break;
+ case ACPI_VIDEO_NOTIFY_NEXT_OUTPUT: /* Next Display output hotkey pressed. */
+ acpi_bus_generate_proc_event(device, event, 0);
+ keycode = KEY_VIDEO_NEXT;
+ break;
+ case ACPI_VIDEO_NOTIFY_PREV_OUTPUT: /* previous Display output hotkey pressed. */
+ acpi_bus_generate_proc_event(device, event, 0);
+ keycode = KEY_VIDEO_PREV;
+ break;
+
+ default:
+ keycode = KEY_UNKNOWN;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Unsupported event [0x%x]\n", event));
+ break;
+ }
+
+ acpi_notifier_call_chain(device, event, 0);
+ input_report_key(input, keycode, 1);
+ input_sync(input);
+ input_report_key(input, keycode, 0);
+ input_sync(input);
+
+ return;
+}
+
+static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct acpi_video_device *video_device = data;
+ struct acpi_device *device = NULL;
+ struct acpi_video_bus *bus;
+ struct input_dev *input;
+ int keycode;
+
+ if (!video_device)
+ return;
+
+ device = video_device->dev;
+ bus = video_device->video;
+ input = bus->input;
+
+ switch (event) {
+ case ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS: /* Cycle brightness */
+ if (brightness_switch_enabled)
+ acpi_video_switch_brightness(video_device, event);
+ acpi_bus_generate_proc_event(device, event, 0);
+ keycode = KEY_BRIGHTNESS_CYCLE;
+ break;
+ case ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS: /* Increase brightness */
+ if (brightness_switch_enabled)
+ acpi_video_switch_brightness(video_device, event);
+ acpi_bus_generate_proc_event(device, event, 0);
+ keycode = KEY_BRIGHTNESSUP;
+ break;
+ case ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS: /* Decrease brightness */
+ if (brightness_switch_enabled)
+ acpi_video_switch_brightness(video_device, event);
+ acpi_bus_generate_proc_event(device, event, 0);
+ keycode = KEY_BRIGHTNESSDOWN;
+ break;
+ case ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS: /* zero brightness */
+ if (brightness_switch_enabled)
+ acpi_video_switch_brightness(video_device, event);
+ acpi_bus_generate_proc_event(device, event, 0);
+ keycode = KEY_BRIGHTNESS_ZERO;
+ break;
+ case ACPI_VIDEO_NOTIFY_DISPLAY_OFF: /* display device off */
+ if (brightness_switch_enabled)
+ acpi_video_switch_brightness(video_device, event);
+ acpi_bus_generate_proc_event(device, event, 0);
+ keycode = KEY_DISPLAY_OFF;
+ break;
+ default:
+ keycode = KEY_UNKNOWN;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Unsupported event [0x%x]\n", event));
+ break;
+ }
+
+ acpi_notifier_call_chain(device, event, 0);
+ input_report_key(input, keycode, 1);
+ input_sync(input);
+ input_report_key(input, keycode, 0);
+ input_sync(input);
+
+ return;
+}
+
+static int instance;
+static int acpi_video_resume(struct acpi_device *device)
+{
+ struct acpi_video_bus *video;
+ struct acpi_video_device *video_device;
+ int i;
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ video = acpi_driver_data(device);
+
+ for (i = 0; i < video->attached_count; i++) {
+ video_device = video->attached_array[i].bind_info;
+ if (video_device && video_device->backlight)
+ acpi_video_set_brightness(video_device->backlight);
+ }
+ return AE_OK;
+}
+
+static int acpi_video_bus_add(struct acpi_device *device)
+{
+ acpi_status status;
+ struct acpi_video_bus *video;
+ struct input_dev *input;
+ int error;
+
+ video = kzalloc(sizeof(struct acpi_video_bus), GFP_KERNEL);
+ if (!video)
+ return -ENOMEM;
+
+ /* a hack to fix the duplicate name "VID" problem on T61 */
+ if (!strcmp(device->pnp.bus_id, "VID")) {
+ if (instance)
+ device->pnp.bus_id[3] = '0' + instance;
+ instance ++;
+ }
+ /* a hack to fix the duplicate name "VGA" problem on Pa 3553 */
+ if (!strcmp(device->pnp.bus_id, "VGA")) {
+ if (instance)
+ device->pnp.bus_id[3] = '0' + instance;
+ instance++;
+ }
+
+ video->device = device;
+ strcpy(acpi_device_name(device), ACPI_VIDEO_BUS_NAME);
+ strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
+ device->driver_data = video;
+
+ acpi_video_bus_find_cap(video);
+ error = acpi_video_bus_check(video);
+ if (error)
+ goto err_free_video;
+
+ error = acpi_video_bus_add_fs(device);
+ if (error)
+ goto err_free_video;
+
+ mutex_init(&video->device_list_lock);
+ INIT_LIST_HEAD(&video->video_device_list);
+
+ acpi_video_bus_get_devices(video, device);
+ acpi_video_bus_start_devices(video);
+
+ status = acpi_install_notify_handler(device->handle,
+ ACPI_DEVICE_NOTIFY,
+ acpi_video_bus_notify, video);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX
+ "Error installing notify handler\n");
+ error = -ENODEV;
+ goto err_stop_video;
+ }
+
+ video->input = input = input_allocate_device();
+ if (!input) {
+ error = -ENOMEM;
+ goto err_uninstall_notify;
+ }
+
+ snprintf(video->phys, sizeof(video->phys),
+ "%s/video/input0", acpi_device_hid(video->device));
+
+ input->name = acpi_device_name(video->device);
+ input->phys = video->phys;
+ input->id.bustype = BUS_HOST;
+ input->id.product = 0x06;
+ input->dev.parent = &device->dev;
+ input->evbit[0] = BIT(EV_KEY);
+ set_bit(KEY_SWITCHVIDEOMODE, input->keybit);
+ set_bit(KEY_VIDEO_NEXT, input->keybit);
+ set_bit(KEY_VIDEO_PREV, input->keybit);
+ set_bit(KEY_BRIGHTNESS_CYCLE, input->keybit);
+ set_bit(KEY_BRIGHTNESSUP, input->keybit);
+ set_bit(KEY_BRIGHTNESSDOWN, input->keybit);
+ set_bit(KEY_BRIGHTNESS_ZERO, input->keybit);
+ set_bit(KEY_DISPLAY_OFF, input->keybit);
+ set_bit(KEY_UNKNOWN, input->keybit);
+
+ error = input_register_device(input);
+ if (error)
+ goto err_free_input_dev;
+
+ printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n",
+ ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
+ video->flags.multihead ? "yes" : "no",
+ video->flags.rom ? "yes" : "no",
+ video->flags.post ? "yes" : "no");
+
+ return 0;
+
+ err_free_input_dev:
+ input_free_device(input);
+ err_uninstall_notify:
+ acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
+ acpi_video_bus_notify);
+ err_stop_video:
+ acpi_video_bus_stop_devices(video);
+ acpi_video_bus_put_devices(video);
+ kfree(video->attached_array);
+ acpi_video_bus_remove_fs(device);
+ err_free_video:
+ kfree(video);
+ device->driver_data = NULL;
+
+ return error;
+}
+
+static int acpi_video_bus_remove(struct acpi_device *device, int type)
+{
+ acpi_status status = 0;
+ struct acpi_video_bus *video = NULL;
+
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ video = acpi_driver_data(device);
+
+ acpi_video_bus_stop_devices(video);
+
+ status = acpi_remove_notify_handler(video->device->handle,
+ ACPI_DEVICE_NOTIFY,
+ acpi_video_bus_notify);
+
+ acpi_video_bus_put_devices(video);
+ acpi_video_bus_remove_fs(device);
+
+ input_unregister_device(video->input);
+ kfree(video->attached_array);
+ kfree(video);
+
+ return 0;
+}
+
+static int __init acpi_video_init(void)
+{
+ int result = 0;
+
+ acpi_video_dir = proc_mkdir(ACPI_VIDEO_CLASS, acpi_root_dir);
+ if (!acpi_video_dir)
+ return -ENODEV;
+ acpi_video_dir->owner = THIS_MODULE;
+
+ result = acpi_bus_register_driver(&acpi_video_bus);
+ if (result < 0) {
+ remove_proc_entry(ACPI_VIDEO_CLASS, acpi_root_dir);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void __exit acpi_video_exit(void)
+{
+
+ acpi_bus_unregister_driver(&acpi_video_bus);
+
+ remove_proc_entry(ACPI_VIDEO_CLASS, acpi_root_dir);
+
+ return;
+}
+
+module_init(acpi_video_init);
+module_exit(acpi_video_exit);
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
new file mode 100644
index 0000000..f022eb6
--- /dev/null
+++ b/drivers/acpi/video_detect.c
@@ -0,0 +1,267 @@
+/*
+ * Copyright (C) 2008 SuSE Linux Products GmbH
+ * Thomas Renninger <trenn@suse.de>
+ *
+ * May be copied or modified under the terms of the GNU General Public License
+ *
+ * video_detect.c:
+ * Provides acpi_is_video_device() for early scanning of ACPI devices in scan.c
+ * There, a Linux-specific HID is assigned (the ACPI spec does not provide a
+ * HID for video devices).
+ *
+ * After PCI devices are glued with ACPI devices
+ * acpi_get_physical_pci_device() can be called to identify ACPI graphics
+ * devices for which a real graphics card is plugged in
+ *
+ * Now acpi_video_get_capabilities() can be called to check which
+ * capabilities the graphics cards plugged in support. The check for general
+ * video capabilities will be triggered by the first caller of
+ * acpi_video_get_capabilities(NULL); which will happen when the first
+ * backlight (or display output) switching supporting driver calls:
+ * acpi_video_backlight_support();
+ *
+ * Depending on whether ACPI graphics extensions (cmp. ACPI spec Appendix B)
+ * are available, video.ko should be used to handle the device.
+ *
+ * Otherwise vendor specific drivers like thinkpad_acpi, asus_acpi,
+ * sony_acpi,... can take care about backlight brightness and display output
+ * switching.
+ *
+ * If CONFIG_ACPI_VIDEO is neither set as "compiled in" (y) nor as a module (m)
+ * this file will not be compiled, acpi_video_get_capabilities() and
+ * acpi_video_backlight_support() will always return 0 and vendor-specific
+ * drivers can always handle the backlight.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+
+ACPI_MODULE_NAME("video");
+#define _COMPONENT ACPI_VIDEO_COMPONENT
+
+static long acpi_video_support;
+static bool acpi_video_caps_checked;
+
+static acpi_status
+acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
+ void **return_value)
+{
+ long *cap = context;
+ acpi_handle h_dummy;
+
+ if (ACPI_SUCCESS(acpi_get_handle(handle, "_BCM", &h_dummy)) &&
+ ACPI_SUCCESS(acpi_get_handle(handle, "_BCL", &h_dummy))) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found generic backlight "
+ "support\n"));
+ *cap |= ACPI_VIDEO_BACKLIGHT;
+ /* We have backlight support, no need to scan further */
+ return AE_CTRL_TERMINATE;
+ }
+ return AE_OK;
+}
+
+/* Returns true if the device is a video device which can be handled by
+ * video.ko.
+ * The device will get a Linux specific CID added in scan.c to
+ * identify the device as an ACPI graphics device
+ * Be aware that the graphics device may not be physically present
+ * Use acpi_video_get_capabilities() to detect general ACPI video
+ * capabilities of present cards
+ */
+long acpi_is_video_device(struct acpi_device *device)
+{
+ acpi_handle h_dummy;
+ long video_caps = 0;
+
+ if (!device)
+ return 0;
+
+ /* Is this device able to support video switching? */
+ if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) &&
+ ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy)))
+ video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;
+
+ /* Is this device able to retrieve a video ROM? */
+ if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy)))
+ video_caps |= ACPI_VIDEO_ROM_AVAILABLE;
+
+ /* Is this device able to configure which video head is to be POSTed? */
+ if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_VPO", &h_dummy)) &&
+ ACPI_SUCCESS(acpi_get_handle(device->handle, "_GPD", &h_dummy)) &&
+ ACPI_SUCCESS(acpi_get_handle(device->handle, "_SPD", &h_dummy)))
+ video_caps |= ACPI_VIDEO_DEVICE_POSTING;
+
+ /* Only check for backlight functionality if one of the above hit. */
+ if (video_caps)
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, device->handle,
+ ACPI_UINT32_MAX, acpi_backlight_cap_match,
+ &video_caps, NULL);
+
+ return video_caps;
+}
+EXPORT_SYMBOL(acpi_is_video_device);
+
+static acpi_status
+find_video(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+ long *cap = context;
+ struct device *dev;
+ struct acpi_device *acpi_dev;
+
+ const struct acpi_device_id video_ids[] = {
+ {ACPI_VIDEO_HID, 0},
+ {"", 0},
+ };
+ if (acpi_bus_get_device(handle, &acpi_dev))
+ return AE_OK;
+
+ if (!acpi_match_device_ids(acpi_dev, video_ids)) {
+ dev = acpi_get_physical_pci_device(handle);
+ if (!dev)
+ return AE_OK;
+ put_device(dev);
+ *cap |= acpi_is_video_device(acpi_dev);
+ }
+ return AE_OK;
+}
+
+/*
+ * Returns the video capabilities of a specific ACPI graphics device
+ *
+ * If NULL is passed as the argument, all ACPI devices are enumerated and
+ * the graphics capabilities of all physically present devices are
+ * summarized and returned. This is cached and done only once.
+ */
+long acpi_video_get_capabilities(acpi_handle graphics_handle)
+{
+ long caps = 0;
+ struct acpi_device *tmp_dev;
+ acpi_status status;
+
+ if (acpi_video_caps_checked && graphics_handle == NULL)
+ return acpi_video_support;
+
+ if (!graphics_handle) {
+ /* Only do the global walk through all graphics devices once */
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, find_video,
+ &caps, NULL);
+ /* There might be boot param flags set already... */
+ acpi_video_support |= caps;
+ acpi_video_caps_checked = 1;
+ /* Add blacklists here. Be careful to use the right *DMI* bits
+ * to still be able to override logic via boot params, e.g.:
+ *
+ * if (dmi_name_in_vendors("XY")) {
+ * acpi_video_support |=
+ * ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR;
+ * acpi_video_support |=
+ * ACPI_VIDEO_BACKLIGHT_DMI_VENDOR;
+ *}
+ */
+ } else {
+ status = acpi_bus_get_device(graphics_handle, &tmp_dev);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Invalid device"));
+ return 0;
+ }
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, graphics_handle,
+ ACPI_UINT32_MAX, find_video,
+ &caps, NULL);
+ }
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "We have 0x%lX video support %s %s\n",
+ graphics_handle ? caps : acpi_video_support,
+ graphics_handle ? "on device " : "in general",
+ graphics_handle ? acpi_device_bid(tmp_dev) : ""));
+ return caps;
+}
+EXPORT_SYMBOL(acpi_video_get_capabilities);
+
+/* Returns true if video.ko can do backlight switching */
+int acpi_video_backlight_support(void)
+{
+ /*
+ * We must check whether the ACPI graphics device is physically plugged
+ * in. Therefore this must be called after binding PCI and ACPI devices
+ */
+ if (!acpi_video_caps_checked)
+ acpi_video_get_capabilities(NULL);
+
+ /* First check for boot param -> highest prio */
+ if (acpi_video_support & ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR)
+ return 0;
+ else if (acpi_video_support & ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO)
+ return 1;
+
+ /* Then check for DMI blacklist -> second highest prio */
+ if (acpi_video_support & ACPI_VIDEO_BACKLIGHT_DMI_VENDOR)
+ return 0;
+ else if (acpi_video_support & ACPI_VIDEO_BACKLIGHT_DMI_VIDEO)
+ return 1;
+
+ /* Then go the default way */
+ return acpi_video_support & ACPI_VIDEO_BACKLIGHT;
+}
+EXPORT_SYMBOL(acpi_video_backlight_support);
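+
+/*
+ * Sketch of a typical caller (hypothetical function name, for illustration
+ * only): a vendor specific platform driver would skip registering its own
+ * backlight interface when video.ko already provides one.
+ */
+#if 0
+static int example_vendor_backlight_init(void)
+{
+ if (acpi_video_backlight_support()) {
+ printk(KERN_INFO "example: backlight is handled by ACPI video\n");
+ return 0;
+ }
+ /* ... register the vendor specific backlight device here ... */
+ return 0;
+}
+#endif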
+
+/*
+ * Returns true if video.ko can do display output switching.
+ * This does not work well/at all with binary graphics drivers
+ * which disable system io ranges and do it on their own.
+ */
+int acpi_video_display_switch_support(void)
+{
+ if (!acpi_video_caps_checked)
+ acpi_video_get_capabilities(NULL);
+
+ if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR)
+ return 0;
+ else if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO)
+ return 1;
+
+ if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR)
+ return 0;
+ else if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO)
+ return 1;
+
+ return acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING;
+}
+EXPORT_SYMBOL(acpi_video_display_switch_support);
+
+/*
+ * Use acpi_display_output=vendor/video or acpi_backlight=vendor/video
+ * to force backlight or display output switching to be handled by a vendor
+ * specific ACPI driver or by the video.ko driver.
+ */
+int __init acpi_backlight(char *str)
+{
+ if (str == NULL || *str == '\0')
+ return 1;
+ else {
+ if (!strcmp("vendor", str))
+ acpi_video_support |=
+ ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR;
+ if (!strcmp("video", str))
+ acpi_video_support |=
+ ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO;
+ }
+ return 1;
+}
+__setup("acpi_backlight=", acpi_backlight);
+
+int __init acpi_display_output(char *str)
+{
+ if (str == NULL || *str == '\0')
+ return 1;
+ else {
+ if (!strcmp("vendor", str))
+ acpi_video_support |=
+ ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR;
+ if (!strcmp("video", str))
+ acpi_video_support |=
+ ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO;
+ }
+ return 1;
+}
+__setup("acpi_display_output=", acpi_display_output);
diff --git a/drivers/acpi/wmi.c b/drivers/acpi/wmi.c
new file mode 100644
index 0000000..8a8b377
--- /dev/null
+++ b/drivers/acpi/wmi.c
@@ -0,0 +1,747 @@
+/*
+ * ACPI-WMI mapping driver
+ *
+ * Copyright (C) 2007-2008 Carlos Corbacho <carlos@strangeworlds.co.uk>
+ *
+ * GUID parsing code from ldm.c is:
+ * Copyright (C) 2001,2002 Richard Russon <ldm@flatcap.org>
+ * Copyright (c) 2001-2007 Anton Altaparmakov
+ * Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+ACPI_MODULE_NAME("wmi");
+MODULE_AUTHOR("Carlos Corbacho");
+MODULE_DESCRIPTION("ACPI-WMI Mapping Driver");
+MODULE_LICENSE("GPL");
+
+#define ACPI_WMI_CLASS "wmi"
+
+#undef PREFIX
+#define PREFIX "ACPI: WMI: "
+
+static DEFINE_MUTEX(wmi_data_lock);
+
+struct guid_block {
+ char guid[16];
+ union {
+ char object_id[2];
+ struct {
+ unsigned char notify_id;
+ unsigned char reserved;
+ };
+ };
+ u8 instance_count;
+ u8 flags;
+};
+
+struct wmi_block {
+ struct list_head list;
+ struct guid_block gblock;
+ acpi_handle handle;
+ wmi_notify_handler handler;
+ void *handler_data;
+};
+
+static struct wmi_block wmi_blocks;
+
+/*
+ * If the GUID data block is marked as expensive, we must enable and
+ * explicitly disable data collection.
+ */
+#define ACPI_WMI_EXPENSIVE 0x1
+#define ACPI_WMI_METHOD 0x2 /* GUID is a method */
+#define ACPI_WMI_STRING 0x4 /* GUID takes & returns a string */
+#define ACPI_WMI_EVENT 0x8 /* GUID is an event */
+
+static int acpi_wmi_remove(struct acpi_device *device, int type);
+static int acpi_wmi_add(struct acpi_device *device);
+
+static const struct acpi_device_id wmi_device_ids[] = {
+ {"PNP0C14", 0},
+ {"pnp0c14", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, wmi_device_ids);
+
+static struct acpi_driver acpi_wmi_driver = {
+ .name = "wmi",
+ .class = ACPI_WMI_CLASS,
+ .ids = wmi_device_ids,
+ .ops = {
+ .add = acpi_wmi_add,
+ .remove = acpi_wmi_remove,
+ },
+};
+
+/*
+ * GUID parsing functions
+ */
+
+/**
+ * wmi_parse_hexbyte - Convert an ASCII hex number to a byte
+ * @src: Pointer to at least 2 characters to convert.
+ *
+ * Convert a two character ASCII hex string to a number.
+ *
+ * Return: 0-255 Success, the byte was parsed correctly
+ * -1 Error, an invalid character was supplied
+ */
+static int wmi_parse_hexbyte(const u8 *src)
+{
+ unsigned int x; /* For correct wrapping */
+ int h;
+
+ /* high part */
+ x = src[0];
+ if (x - '0' <= '9' - '0') {
+ h = x - '0';
+ } else if (x - 'a' <= 'f' - 'a') {
+ h = x - 'a' + 10;
+ } else if (x - 'A' <= 'F' - 'A') {
+ h = x - 'A' + 10;
+ } else {
+ return -1;
+ }
+ h <<= 4;
+
+ /* low part */
+ x = src[1];
+ if (x - '0' <= '9' - '0')
+ return h | (x - '0');
+ if (x - 'a' <= 'f' - 'a')
+ return h | (x - 'a' + 10);
+ if (x - 'A' <= 'F' - 'A')
+ return h | (x - 'A' + 10);
+ return -1;
+}
+
+/**
+ * wmi_swap_bytes - Rearrange GUID bytes to match GUID binary
+ * @src: Memory block holding binary GUID (16 bytes)
+ * @dest: Memory block to hold byte swapped binary GUID (16 bytes)
+ *
+ * Byte swap a binary GUID to match its real GUID value
+ */
+static void wmi_swap_bytes(u8 *src, u8 *dest)
+{
+ int i;
+
+ for (i = 0; i <= 3; i++)
+ memcpy(dest + i, src + (3 - i), 1);
+
+ for (i = 0; i <= 1; i++)
+ memcpy(dest + 4 + i, src + (5 - i), 1);
+
+ for (i = 0; i <= 1; i++)
+ memcpy(dest + 6 + i, src + (7 - i), 1);
+
+ memcpy(dest + 8, src + 8, 8);
+}
+
+/**
+ * wmi_parse_guid - Convert GUID from ASCII to binary
+ * @src: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @dest: Memory block to hold binary GUID (16 bytes)
+ *
+ * N.B. The GUID need not be NULL terminated.
+ *
+ * Return: 'true' @dest contains binary GUID
+ * 'false' @dest contents are undefined
+ */
+static bool wmi_parse_guid(const u8 *src, u8 *dest)
+{
+ static const int size[] = { 4, 2, 2, 2, 6 };
+ int i, j, v;
+
+ if (src[8] != '-' || src[13] != '-' ||
+ src[18] != '-' || src[23] != '-')
+ return false;
+
+ for (j = 0; j < 5; j++, src++) {
+ for (i = 0; i < size[j]; i++, src += 2, *dest++ = v) {
+ v = wmi_parse_hexbyte(src);
+ if (v < 0)
+ return false;
+ }
+ }
+
+ return true;
+}
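+
+/*
+ * Example (for illustration): parsing the string
+ * "fa50ff2b-f2e8-45de-83fa-65417f2f49ba" with wmi_parse_guid() yields the
+ * bytes fa 50 ff 2b f2 e8 45 de 83 fa 65 41 7f 2f 49 ba. The GUIDs in the
+ * _WDG buffer store the first three fields little endian, so
+ * wmi_swap_bytes() reorders this to
+ * 2b ff 50 fa e8 f2 de 45 83 fa 65 41 7f 2f 49 ba before find_guid()
+ * compares it with memcmp().
+ */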
+
+static bool find_guid(const char *guid_string, struct wmi_block **out)
+{
+ char tmp[16], guid_input[16];
+ struct wmi_block *wblock;
+ struct guid_block *block;
+ struct list_head *p;
+
+ wmi_parse_guid(guid_string, tmp);
+ wmi_swap_bytes(tmp, guid_input);
+
+ list_for_each(p, &wmi_blocks.list) {
+ wblock = list_entry(p, struct wmi_block, list);
+ block = &wblock->gblock;
+
+ if (memcmp(block->guid, guid_input, 16) == 0) {
+ if (out)
+ *out = wblock;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
+{
+ struct guid_block *block = NULL;
+ char method[5];
+ struct acpi_object_list input;
+ union acpi_object params[1];
+ acpi_status status;
+ acpi_handle handle;
+
+ block = &wblock->gblock;
+ handle = wblock->handle;
+
+ if (!block)
+ return AE_NOT_EXIST;
+
+ input.count = 1;
+ input.pointer = params;
+ params[0].type = ACPI_TYPE_INTEGER;
+ params[0].integer.value = enable;
+
+ snprintf(method, 5, "WE%02X", block->notify_id);
+ status = acpi_evaluate_object(handle, method, &input, NULL);
+
+ if (status != AE_OK && status != AE_NOT_FOUND)
+ return status;
+ else
+ return AE_OK;
+}
+
+/*
+ * Exported WMI functions
+ */
+/**
+ * wmi_evaluate_method - Evaluate a WMI method
+ * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @instance: Instance index
+ * @method_id: Method ID to call
+ * &in: Buffer containing input for the method call
+ * &out: Empty buffer to return the method results
+ *
+ * Call an ACPI-WMI method
+ */
+acpi_status wmi_evaluate_method(const char *guid_string, u8 instance,
+u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
+{
+ struct guid_block *block = NULL;
+ struct wmi_block *wblock = NULL;
+ acpi_handle handle;
+ acpi_status status;
+ struct acpi_object_list input;
+ union acpi_object params[3];
+ char method[4] = "WM";
+
+ if (!find_guid(guid_string, &wblock))
+ return AE_ERROR;
+
+ block = &wblock->gblock;
+ handle = wblock->handle;
+
+ if (!(block->flags & ACPI_WMI_METHOD))
+ return AE_BAD_DATA;
+
+ if (block->instance_count < instance)
+ return AE_BAD_PARAMETER;
+
+ input.count = 2;
+ input.pointer = params;
+ params[0].type = ACPI_TYPE_INTEGER;
+ params[0].integer.value = instance;
+ params[1].type = ACPI_TYPE_INTEGER;
+ params[1].integer.value = method_id;
+
+ if (in) {
+ input.count = 3;
+
+ if (block->flags & ACPI_WMI_STRING) {
+ params[2].type = ACPI_TYPE_STRING;
+ } else {
+ params[2].type = ACPI_TYPE_BUFFER;
+ }
+ params[2].buffer.length = in->length;
+ params[2].buffer.pointer = in->pointer;
+ }
+
+ strncat(method, block->object_id, 2);
+
+ status = acpi_evaluate_object(handle, method, &input, out);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(wmi_evaluate_method);
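+
+/*
+ * Sketch of a caller (the GUID is the placeholder from the kernel-doc above
+ * and the method/instance numbers are made up, for illustration only):
+ * evaluate method 1 of instance 0 with a 32 bit input argument.
+ */
+#if 0
+static acpi_status example_call_wmi_method(void)
+{
+ u32 arg = 1;
+ struct acpi_buffer in = { sizeof(arg), &arg };
+ struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+
+ status = wmi_evaluate_method("fa50ff2b-f2e8-45de-83fa-65417f2f49ba",
+ 0, 1, &in, &out);
+ kfree(out.pointer);
+ return status;
+}
+#endif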
+
+/**
+ * wmi_query_block - Return contents of a WMI block
+ * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @instance: Instance index
+ * &out: Empty buffer to return the contents of the data block to
+ *
+ * Return the contents of an ACPI-WMI data block to a buffer
+ */
+acpi_status wmi_query_block(const char *guid_string, u8 instance,
+struct acpi_buffer *out)
+{
+ struct guid_block *block = NULL;
+ struct wmi_block *wblock = NULL;
+ acpi_handle handle, wc_handle;
+ acpi_status status, wc_status = AE_ERROR;
+ struct acpi_object_list input, wc_input;
+ union acpi_object wc_params[1], wq_params[1];
+ char method[5]; /* room for "WQ"/"WC" + 2 char object ID + NUL */
+ char wc_method[5] = "WC";
+
+ if (!guid_string || !out)
+ return AE_BAD_PARAMETER;
+
+ if (!find_guid(guid_string, &wblock))
+ return AE_ERROR;
+
+ block = &wblock->gblock;
+ handle = wblock->handle;
+
+ if (block->instance_count < instance)
+ return AE_BAD_PARAMETER;
+
+ /* Check GUID is a data block */
+ if (block->flags & (ACPI_WMI_EVENT | ACPI_WMI_METHOD))
+ return AE_ERROR;
+
+ input.count = 1;
+ input.pointer = wq_params;
+ wq_params[0].type = ACPI_TYPE_INTEGER;
+ wq_params[0].integer.value = instance;
+
+ /*
+ * If ACPI_WMI_EXPENSIVE, call the relevant WCxx method first to
+ * enable collection.
+ */
+ if (block->flags & ACPI_WMI_EXPENSIVE) {
+ wc_input.count = 1;
+ wc_input.pointer = wc_params;
+ wc_params[0].type = ACPI_TYPE_INTEGER;
+ wc_params[0].integer.value = 1;
+
+ strncat(wc_method, block->object_id, 2);
+
+ /*
+ * Some GUIDs break the specification by declaring themselves
+ * expensive, but have no corresponding WCxx method. So we
+ * should not fail if this happens.
+ */
+ wc_status = acpi_get_handle(handle, wc_method, &wc_handle);
+ if (ACPI_SUCCESS(wc_status))
+ wc_status = acpi_evaluate_object(handle, wc_method,
+ &wc_input, NULL);
+ }
+
+ strcpy(method, "WQ");
+ strncat(method, block->object_id, 2);
+
+ status = acpi_evaluate_object(handle, method, &input, out);
+
+ /*
+ * If ACPI_WMI_EXPENSIVE, call the relevant WCxx method, even if
+ * the WQxx method failed - we should disable collection anyway.
+ */
+ if ((block->flags & ACPI_WMI_EXPENSIVE) && ACPI_SUCCESS(wc_status)) {
+ wc_params[0].integer.value = 0;
+ status = acpi_evaluate_object(handle,
+ wc_method, &wc_input, NULL);
+ }
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(wmi_query_block);
+
+/**
+ * wmi_set_block - Write to a WMI block
+ * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @instance: Instance index
+ * &in: Buffer containing new values for the data block
+ *
+ * Write the contents of the input buffer to an ACPI-WMI data block
+ */
+acpi_status wmi_set_block(const char *guid_string, u8 instance,
+const struct acpi_buffer *in)
+{
+ struct guid_block *block = NULL;
+ struct wmi_block *wblock = NULL;
+ acpi_handle handle;
+ struct acpi_object_list input;
+ union acpi_object params[2];
+ char method[4] = "WS";
+
+ if (!guid_string || !in)
+ return AE_BAD_DATA;
+
+ if (!find_guid(guid_string, &wblock))
+ return AE_ERROR;
+
+ block = &wblock->gblock;
+ handle = wblock->handle;
+
+ if (block->instance_count < instance)
+ return AE_BAD_PARAMETER;
+
+ /* Check GUID is a data block */
+ if (block->flags & (ACPI_WMI_EVENT | ACPI_WMI_METHOD))
+ return AE_ERROR;
+
+ input.count = 2;
+ input.pointer = params;
+ params[0].type = ACPI_TYPE_INTEGER;
+ params[0].integer.value = instance;
+
+ if (block->flags & ACPI_WMI_STRING) {
+ params[1].type = ACPI_TYPE_STRING;
+ } else {
+ params[1].type = ACPI_TYPE_BUFFER;
+ }
+ params[1].buffer.length = in->length;
+ params[1].buffer.pointer = in->pointer;
+
+ strncat(method, block->object_id, 2);
+
+ return acpi_evaluate_object(handle, method, &input, NULL);
+}
+EXPORT_SYMBOL_GPL(wmi_set_block);
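+
+/*
+ * Sketch of a data block reader (the GUID is the kernel-doc placeholder,
+ * for illustration only): query instance 0 and inspect the returned object.
+ */
+#if 0
+static void example_query_wmi_block(void)
+{
+ struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+
+ if (ACPI_FAILURE(wmi_query_block("fa50ff2b-f2e8-45de-83fa-65417f2f49ba",
+ 0, &out)))
+ return;
+
+ obj = out.pointer;
+ if (obj && obj->type == ACPI_TYPE_BUFFER)
+ printk(KERN_DEBUG "example: data block is %u bytes\n",
+ obj->buffer.length);
+ kfree(out.pointer);
+}
+#endif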
+
+/**
+ * wmi_install_notify_handler - Register handler for WMI events
+ * @guid: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @handler: Function to handle notifications
+ * @data: Data to be returned to handler when event is fired
+ *
+ * Register a handler for events sent to the ACPI-WMI mapper device.
+ */
+acpi_status wmi_install_notify_handler(const char *guid,
+wmi_notify_handler handler, void *data)
+{
+ struct wmi_block *block;
+ acpi_status status;
+
+ if (!guid || !handler)
+ return AE_BAD_PARAMETER;
+
+ if (!find_guid(guid, &block))
+ return AE_NOT_EXIST;
+
+ if (block->handler)
+ return AE_ALREADY_ACQUIRED;
+
+ block->handler = handler;
+ block->handler_data = data;
+
+ status = wmi_method_enable(block, 1);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(wmi_install_notify_handler);
+
+/**
+ * wmi_remove_notify_handler - Unregister handler for WMI events
+ * @guid: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ *
+ * Unregister handler for events sent to the ACPI-WMI mapper device.
+ */
+acpi_status wmi_remove_notify_handler(const char *guid)
+{
+ struct wmi_block *block;
+ acpi_status status;
+
+ if (!guid)
+ return AE_BAD_PARAMETER;
+
+ if (!find_guid(guid, &block))
+ return AE_NOT_EXIST;
+
+ if (!block->handler)
+ return AE_NULL_ENTRY;
+
+ status = wmi_method_enable(block, 0);
+
+ block->handler = NULL;
+ block->handler_data = NULL;
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(wmi_remove_notify_handler);
+
+/**
+ * wmi_get_event_data - Get WMI data associated with an event
+ *
+ * @event: Event to find
+ * @out: Buffer to hold the event data
+ *
+ * Returns extra data associated with an event in WMI.
+ */
+acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out)
+{
+ struct acpi_object_list input;
+ union acpi_object params[1];
+ struct guid_block *gblock;
+ struct wmi_block *wblock;
+ struct list_head *p;
+
+ input.count = 1;
+ input.pointer = params;
+ params[0].type = ACPI_TYPE_INTEGER;
+ params[0].integer.value = event;
+
+ list_for_each(p, &wmi_blocks.list) {
+ wblock = list_entry(p, struct wmi_block, list);
+ gblock = &wblock->gblock;
+
+ if ((gblock->flags & ACPI_WMI_EVENT) &&
+ (gblock->notify_id == event))
+ return acpi_evaluate_object(wblock->handle, "_WED",
+ &input, out);
+ }
+
+ return AE_NOT_FOUND;
+}
+EXPORT_SYMBOL_GPL(wmi_get_event_data);
+
+/**
+ * wmi_has_guid - Check if a GUID is available
+ * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ *
+ * Check if a given GUID is defined by _WDG
+ */
+bool wmi_has_guid(const char *guid_string)
+{
+ return find_guid(guid_string, NULL);
+}
+EXPORT_SYMBOL_GPL(wmi_has_guid);
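+
+/*
+ * Sketch of event handling (the GUID is the kernel-doc placeholder, for
+ * illustration only): verify the GUID exists, register a handler, and fetch
+ * the extra event data from inside it.
+ */
+#if 0
+static void example_wmi_notify(u32 value, void *context)
+{
+ struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ if (ACPI_SUCCESS(wmi_get_event_data(value, &out))) {
+ /* out.pointer now holds a union acpi_object describing the event */
+ kfree(out.pointer);
+ }
+}
+
+static acpi_status example_register_wmi_events(void)
+{
+ if (!wmi_has_guid("fa50ff2b-f2e8-45de-83fa-65417f2f49ba"))
+ return AE_NOT_EXIST;
+
+ return wmi_install_notify_handler("fa50ff2b-f2e8-45de-83fa-65417f2f49ba",
+ example_wmi_notify, NULL);
+}
+#endif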
+
+/*
+ * Parse the _WDG method for the GUID data blocks
+ */
+static acpi_status parse_wdg(acpi_handle handle)
+{
+ struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *obj;
+ struct guid_block *gblock;
+ struct wmi_block *wblock;
+ acpi_status status;
+ u32 i, total;
+
+ status = acpi_evaluate_object(handle, "_WDG", NULL, &out);
+
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = (union acpi_object *) out.pointer;
+
+ if (obj->type != ACPI_TYPE_BUFFER)
+ return AE_ERROR;
+
+ total = obj->buffer.length / sizeof(struct guid_block);
+
+ gblock = kzalloc(obj->buffer.length, GFP_KERNEL);
+ if (!gblock)
+ return AE_NO_MEMORY;
+
+ memcpy(gblock, obj->buffer.pointer, obj->buffer.length);
+
+ for (i = 0; i < total; i++) {
+ wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL);
+ if (!wblock)
+ return AE_NO_MEMORY;
+
+ wblock->gblock = gblock[i];
+ wblock->handle = handle;
+ list_add_tail(&wblock->list, &wmi_blocks.list);
+ }
+
+ kfree(out.pointer);
+ kfree(gblock);
+
+ return status;
+}
+
+/*
+ * WMI can have EmbeddedControl access regions. In which case, we just want to
+ * hand these off to the EC driver.
+ */
+static acpi_status
+acpi_wmi_ec_space_handler(u32 function, acpi_physical_address address,
+ u32 bits, acpi_integer * value,
+ void *handler_context, void *region_context)
+{
+ int result = 0, i = 0;
+ u8 temp = 0;
+
+ if ((address > 0xFF) || !value)
+ return AE_BAD_PARAMETER;
+
+ if (function != ACPI_READ && function != ACPI_WRITE)
+ return AE_BAD_PARAMETER;
+
+ if (bits != 8)
+ return AE_BAD_PARAMETER;
+
+ if (function == ACPI_READ) {
+ result = ec_read(address, &temp);
+ (*value) |= ((acpi_integer)temp) << i;
+ } else {
+ temp = 0xff & ((*value) >> i);
+ result = ec_write(address, temp);
+ }
+
+ switch (result) {
+ case -EINVAL:
+ return AE_BAD_PARAMETER;
+ break;
+ case -ENODEV:
+ return AE_NOT_FOUND;
+ break;
+ case -ETIME:
+ return AE_TIME;
+ break;
+ default:
+ return AE_OK;
+ }
+}
+
+static void acpi_wmi_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct guid_block *block;
+ struct wmi_block *wblock;
+ struct list_head *p;
+ struct acpi_device *device = data;
+
+ list_for_each(p, &wmi_blocks.list) {
+ wblock = list_entry(p, struct wmi_block, list);
+ block = &wblock->gblock;
+
+ if ((block->flags & ACPI_WMI_EVENT) &&
+ (block->notify_id == event)) {
+ if (wblock->handler)
+ wblock->handler(event, wblock->handler_data);
+
+ acpi_bus_generate_netlink_event(
+ device->pnp.device_class, dev_name(&device->dev),
+ event, 0);
+ break;
+ }
+ }
+}
+
+static int acpi_wmi_remove(struct acpi_device *device, int type)
+{
+ acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
+ acpi_wmi_notify);
+
+ acpi_remove_address_space_handler(device->handle,
+ ACPI_ADR_SPACE_EC, &acpi_wmi_ec_space_handler);
+
+ return 0;
+}
+
+static int acpi_wmi_add(struct acpi_device *device)
+{
+ acpi_status status;
+ int result = 0;
+
+ status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
+ acpi_wmi_notify, device);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX "Error installing notify handler\n");
+ return -ENODEV;
+ }
+
+ status = acpi_install_address_space_handler(device->handle,
+ ACPI_ADR_SPACE_EC,
+ &acpi_wmi_ec_space_handler,
+ NULL, NULL);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX "Error installing EC region handler\n");
+ return -ENODEV;
+ }
+
+ status = parse_wdg(device->handle);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX "Error parsing _WDG method\n");
+ return -ENODEV;
+ }
+
+ return result;
+}
+
+static int __init acpi_wmi_init(void)
+{
+ int result;
+
+ INIT_LIST_HEAD(&wmi_blocks.list);
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ result = acpi_bus_register_driver(&acpi_wmi_driver);
+
+ if (result < 0) {
+ printk(KERN_INFO PREFIX "Error loading mapper\n");
+ } else {
+ printk(KERN_INFO PREFIX "Mapper loaded\n");
+ }
+
+ return result;
+}
+
+static void __exit acpi_wmi_exit(void)
+{
+ struct list_head *p, *tmp;
+ struct wmi_block *wblock;
+
+ acpi_bus_unregister_driver(&acpi_wmi_driver);
+
+ list_for_each_safe(p, tmp, &wmi_blocks.list) {
+ wblock = list_entry(p, struct wmi_block, list);
+
+ list_del(p);
+ kfree(wblock);
+ }
+
+ printk(KERN_INFO PREFIX "Mapper unloaded\n");
+}
+
+subsys_initcall(acpi_wmi_init);
+module_exit(acpi_wmi_exit);