author:    Timothy Pearson <tpearson@raptorengineering.com>  2017-08-23 14:45:25 -0500
committer: Timothy Pearson <tpearson@raptorengineering.com>  2017-08-23 14:45:25 -0500
commit:    fcbb27b0ec6dcbc5a5108cb8fb19eae64593d204
tree:      22962a4387943edc841c72a4e636a068c66d58fd /drivers/misc
Initial import of modified Linux 2.6.28 tree
Original upstream URL:
git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git (branch linux-2.6.28.y)
Diffstat (limited to 'drivers/misc')
77 files changed, 43449 insertions, 0 deletions
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
new file mode 100644
index 0000000..fee7304
--- /dev/null
+++ b/drivers/misc/Kconfig
@@ -0,0 +1,503 @@
+#
+# Misc strange devices
+#
+
+menuconfig MISC_DEVICES
+	bool "Misc devices"
+	default y
+	---help---
+	  Say Y here to get to see options for device drivers from various
+	  different categories. This option alone does not add any kernel code.
+
+	  If you say N, all options in this submenu will be skipped and disabled.
+
+if MISC_DEVICES
+
+config ATMEL_PWM
+	tristate "Atmel AT32/AT91 PWM support"
+	depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9
+	help
+	  This option enables device driver support for the PWM channels
+	  on certain Atmel processors. Pulse Width Modulation is used for
+	  purposes including software-controlled power-efficient backlights
+	  on LCD displays, motor control, and waveform generation.
+
+config ATMEL_TCLIB
+	bool "Atmel AT32/AT91 Timer/Counter Library"
+	depends on (AVR32 || ARCH_AT91)
+	help
+	  Select this if you want a library to allocate the Timer/Counter
+	  blocks found on many Atmel processors. This facilitates using
+	  these blocks by different drivers despite processor differences.
+
+config ATMEL_TCB_CLKSRC
+	bool "TC Block Clocksource"
+	depends on ATMEL_TCLIB && GENERIC_TIME
+	default y
+	help
+	  Select this to get a high precision clocksource based on a
+	  TC block with a 5+ MHz base clock rate. Two timer channels
+	  are combined to make a single 32-bit timer.
+
+	  When GENERIC_CLOCKEVENTS is defined, the third timer channel
+	  may be used as a clock event device supporting oneshot mode
+	  (delays of up to two seconds) based on the 32 kHz clock.
+
+config ATMEL_TCB_CLKSRC_BLOCK
+	int
+	depends on ATMEL_TCB_CLKSRC
+	prompt "TC Block" if ARCH_AT91RM9200 || ARCH_AT91SAM9260 || CPU_AT32AP700X
+	default 0
+	range 0 1
+	help
+	  Some chips provide more than one TC block, so you have the
+	  choice of which one to use for the clock framework. The other
+	  TC can be used for other purposes, such as PWM generation and
+	  interval timing.
+
+config IBM_ASM
+	tristate "Device driver for IBM RSA service processor"
+	depends on X86 && PCI && INPUT && EXPERIMENTAL
+	---help---
+	  This option enables device driver support for in-band access to the
+	  IBM RSA (Condor) service processor in eServer xSeries systems.
+	  The ibmasm device driver allows user space applications to access
+	  ASM (Advanced Systems Management) functions on the service
+	  processor. The driver is meant to be used in conjunction with
+	  a user space API.
+	  The ibmasm driver also enables the OS to use the UART on the
+	  service processor board as a regular serial port. To make use of
+	  this feature, serial driver support (CONFIG_SERIAL_8250) must be
+	  enabled.
+
+	  WARNING: This software may not be supported or function
+	  correctly on your IBM server. Please consult the IBM ServerProven
+	  website <http://www.pc.ibm.com/ww/eserver/xseries/serverproven> for
+	  information on the specific driver level and support statement
+	  for your IBM server.
+
+config PHANTOM
+	tristate "Sensable PHANToM (PCI)"
+	depends on PCI
+	help
+	  Say Y here if you want to build a driver for the Sensable PHANToM
+	  device.
+
+	  This driver is only for PCI PHANToMs.
+
+	  If you choose to build it as a module, its name will be phantom.
+	  If unsure, say N here.
+
+config EEPROM_93CX6
+	tristate "EEPROM 93CX6 support"
+	---help---
+	  This is a driver for the EEPROM chipsets 93c46 and 93c66.
+	  The driver supports both read and write commands.
+
+	  If unsure, say N.
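A note on the EEPROM_93CX6 entry above: it builds a helper library, not a self-contained driver. A consumer supplies callbacks that sample and drive the chip's four serial bus lines, and the library runs the 93C46/93C66 read protocol over them. The sketch below shows the consumer side, loosely modelled on how the rt2x00 wireless drivers use this library; the mydev_* names, the 0x30 register offset and the bit layout are invented for illustration.

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/eeprom_93cx6.h>

/*
 * Hypothetical device context -- only what the callbacks need. The
 * MYDEV_* register offset and bit layout are invented; a real driver
 * uses its own EEPROM control register.
 */
struct mydev {
	void __iomem *csr;	/* mapped register window */
	__le16 eeprom[64];	/* shadow copy of the 93C46 contents */
};

#define MYDEV_EEPROM_CSR	0x30
#define MYDEV_EEPROM_DATA_IN	0x01
#define MYDEV_EEPROM_DATA_OUT	0x02
#define MYDEV_EEPROM_DATA_CLOCK	0x04
#define MYDEV_EEPROM_CHIP_SEL	0x08

/* Library -> driver: sample the current state of the bus lines. */
static void mydev_eeprom_register_read(struct eeprom_93cx6 *eeprom)
{
	struct mydev *dev = eeprom->data;
	u32 reg = readl(dev->csr + MYDEV_EEPROM_CSR);

	eeprom->reg_data_in = !!(reg & MYDEV_EEPROM_DATA_IN);
	eeprom->reg_data_out = !!(reg & MYDEV_EEPROM_DATA_OUT);
	eeprom->reg_data_clock = !!(reg & MYDEV_EEPROM_DATA_CLOCK);
	eeprom->reg_chip_select = !!(reg & MYDEV_EEPROM_CHIP_SEL);
}

/* Library -> driver: drive the bus lines from the reg_* fields. */
static void mydev_eeprom_register_write(struct eeprom_93cx6 *eeprom)
{
	struct mydev *dev = eeprom->data;
	u32 reg = 0;

	if (eeprom->reg_data_in)
		reg |= MYDEV_EEPROM_DATA_IN;
	if (eeprom->reg_data_out)
		reg |= MYDEV_EEPROM_DATA_OUT;
	if (eeprom->reg_data_clock)
		reg |= MYDEV_EEPROM_DATA_CLOCK;
	if (eeprom->reg_chip_select)
		reg |= MYDEV_EEPROM_CHIP_SEL;

	writel(reg, dev->csr + MYDEV_EEPROM_CSR);
}

static void mydev_read_eeprom(struct mydev *dev)
{
	struct eeprom_93cx6 eeprom;

	eeprom.data = dev;
	eeprom.register_read = mydev_eeprom_register_read;
	eeprom.register_write = mydev_eeprom_register_write;
	eeprom.width = PCI_EEPROM_WIDTH_93C46;

	/* Read 64 16-bit words starting at word 0. */
	eeprom_93cx6_multiread(&eeprom, 0, dev->eeprom, 64);
}

The same two callbacks serve both directions: eeprom_93cx6_read()/eeprom_93cx6_multiread() toggle the reg_* fields and call register_write() to clock out the command, then poll register_read() to shift the data back in.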
+
+config SGI_IOC4
+	tristate "SGI IOC4 Base IO support"
+	depends on PCI
+	---help---
+	  This option enables basic support for the IOC4 chip on certain
+	  SGI IO controller cards (IO9, IO10, and PCI-RT). This option
+	  does not enable any specific functions on such a card, but provides
+	  necessary infrastructure for other drivers to utilize.
+
+	  If you have an SGI Altix with an IOC4-based card, say Y.
+	  Otherwise say N.
+
+config TIFM_CORE
+	tristate "TI Flash Media interface support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && PCI
+	help
+	  If you want support for Texas Instruments(R) Flash Media adapters
+	  you should select this option and then also choose an appropriate
+	  host adapter, such as 'TI Flash Media PCI74xx/PCI76xx host adapter
+	  support', if you have a TI PCI74xx compatible card reader, for
+	  example.
+	  You will also have to select some flash card format drivers. MMC/SD
+	  cards are supported via 'MMC/SD Card support: TI Flash Media MMC/SD
+	  Interface support (MMC_TIFM_SD)'.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called tifm_core.
+
+config TIFM_7XX1
+	tristate "TI Flash Media PCI74xx/PCI76xx host adapter support (EXPERIMENTAL)"
+	depends on PCI && TIFM_CORE && EXPERIMENTAL
+	default TIFM_CORE
+	help
+	  This option enables support for the Texas Instruments(R) PCI74xx
+	  and PCI76xx families of Flash Media adapters, found in many laptops.
+	  To make actual use of the device, you will have to select some
+	  flash card format drivers, as outlined in the TIFM_CORE Help.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called tifm_7xx1.
+
+config ACER_WMI
+	tristate "Acer WMI Laptop Extras (EXPERIMENTAL)"
+	depends on X86
+	depends on EXPERIMENTAL
+	depends on ACPI
+	depends on LEDS_CLASS
+	depends on NEW_LEDS
+	depends on BACKLIGHT_CLASS_DEVICE
+	depends on SERIO_I8042
+	depends on RFKILL
+	select ACPI_WMI
+	---help---
+	  This is a driver for newer Acer (and Wistron) laptops. It adds
+	  wireless radio and Bluetooth control, and on some laptops,
+	  exposes the mail LED and LCD backlight.
+
+	  For more information about this driver see
+	  <file:Documentation/laptops/acer-wmi.txt>
+
+	  If you have an ACPI-WMI compatible Acer/Wistron laptop, say Y or M
+	  here.
+
+config ASUS_LAPTOP
+	tristate "Asus Laptop Extras (EXPERIMENTAL)"
+	depends on X86
+	depends on ACPI
+	depends on EXPERIMENTAL && !ACPI_ASUS
+	depends on LEDS_CLASS
+	depends on NEW_LEDS
+	depends on BACKLIGHT_CLASS_DEVICE
+	---help---
+	  This is the new Linux driver for Asus laptops. It may also support
+	  some MEDION, JVC or VICTOR laptops. It makes all the extra buttons
+	  generate standard ACPI events that go through /proc/acpi/events.
+	  It also adds support for video output switching, LCD backlight
+	  control, Bluetooth and WLAN control, and most importantly, allows
+	  you to blink those fancy LEDs.
+
+	  For more information and a userspace daemon for handling the extra
+	  buttons see <http://acpi4asus.sf.net/>.
+
+	  If you have an ACPI-compatible ASUS laptop, say Y or M here.
+
+config FUJITSU_LAPTOP
+	tristate "Fujitsu Laptop Extras"
+	depends on X86
+	depends on ACPI
+	depends on INPUT
+	depends on BACKLIGHT_CLASS_DEVICE
+	---help---
+	  This is a driver for laptops built by Fujitsu:
+
+	    * P2xxx/P5xxx/S6xxx/S7xxx series Lifebooks
+	    * Possibly other Fujitsu laptop models
+	    * Tested with S6410 and S7020
+
+	  It adds support for LCD brightness control and some hotkeys.
+
+	  If you have a Fujitsu laptop, say Y or M here.
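Most of the laptop-extras drivers in this file (acer-wmi, asus-laptop, fujitsu-laptop, msi-laptop, compal-laptop, sony-laptop) depend on BACKLIGHT_CLASS_DEVICE because they export LCD brightness through the same backlight class pattern. Below is a condensed sketch of that pattern against the 2.6.28-era API; the mydrv_* names are hypothetical stand-ins for a driver's real firmware accessors (ACPI, WMI or EC reads and writes).

#include <linux/err.h>
#include <linux/backlight.h>
#include <linux/fb.h>

#define MYDRV_MAX_BRIGHTNESS 15

/* Hypothetical firmware hooks; a real driver talks to ACPI/WMI/EC here. */
static int mydrv_hw_get_brightness(void)
{
	return 0;	/* query the firmware */
}

static int mydrv_hw_set_brightness(int level)
{
	return 0;	/* program the firmware */
}

static int mydrv_get_brightness(struct backlight_device *bd)
{
	return mydrv_hw_get_brightness();
}

static int mydrv_update_status(struct backlight_device *bd)
{
	int level = bd->props.brightness;

	/* A blanked framebuffer or powered-down display forces minimum. */
	if (bd->props.power != FB_BLANK_UNBLANK ||
	    bd->props.fb_blank != FB_BLANK_UNBLANK)
		level = 0;

	return mydrv_hw_set_brightness(level);
}

static struct backlight_ops mydrv_bl_ops = {
	.get_brightness = mydrv_get_brightness,
	.update_status  = mydrv_update_status,
};

static struct backlight_device *mydrv_backlight;

static int mydrv_backlight_init(struct device *dev)
{
	struct backlight_device *bd;

	bd = backlight_device_register("mydrv", dev, NULL, &mydrv_bl_ops);
	if (IS_ERR(bd))
		return PTR_ERR(bd);

	/* In 2.6.28 the brightness range is filled in after registration. */
	bd->props.max_brightness = MYDRV_MAX_BRIGHTNESS;
	bd->props.brightness = mydrv_hw_get_brightness();
	backlight_update_status(bd);

	mydrv_backlight = bd;
	return 0;
}

acer-wmi.c further down in this diff implements exactly this shape in acer_backlight_init() and update_bl_status().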
+
+config FUJITSU_LAPTOP_DEBUG
+	bool "Verbose debug mode for Fujitsu Laptop Extras"
+	depends on FUJITSU_LAPTOP
+	default n
+	---help---
+	  Enables extra debug output from the fujitsu extras driver, at the
+	  expense of a slight increase in driver size.
+
+	  If you are not sure, say N here.
+
+config TC1100_WMI
+	tristate "HP Compaq TC1100 Tablet WMI Extras (EXPERIMENTAL)"
+	depends on X86 && !X86_64
+	depends on EXPERIMENTAL
+	depends on ACPI
+	select ACPI_WMI
+	---help---
+	  This is a driver for the WMI extensions (wireless and Bluetooth
+	  power control) of the HP Compaq TC1100 tablet.
+
+config HP_WMI
+	tristate "HP WMI extras"
+	depends on ACPI_WMI
+	depends on INPUT
+	depends on RFKILL
+	help
+	  Say Y here if you want to support WMI-based hotkeys on HP laptops
+	  and to read data from WMI such as docking or ambient light sensor
+	  state.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called hp-wmi.
+
+config ICS932S401
+	tristate "Integrated Circuits ICS932S401"
+	depends on I2C && EXPERIMENTAL
+	help
+	  If you say yes here you get support for the Integrated Circuits
+	  ICS932S401 clock control chips.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called ics932s401.
+
+config MSI_LAPTOP
+	tristate "MSI Laptop Extras"
+	depends on X86
+	depends on ACPI
+	depends on BACKLIGHT_CLASS_DEVICE
+	---help---
+	  This is a driver for laptops built by MSI (MICRO-STAR
+	  INTERNATIONAL):
+
+	    MSI MegaBook S270 (MS-1013)
+	    Cytron/TCM/Medion/Tchibo MD96100/SAM2000
+
+	  It adds support for Bluetooth, WLAN and LCD brightness control.
+
+	  More information about this driver is available at
+	  <http://0pointer.de/lennart/tchibo.html>.
+
+	  If you have an MSI S270 laptop, say Y or M here.
+
+config PANASONIC_LAPTOP
+	tristate "Panasonic Laptop Extras"
+	depends on X86 && INPUT && ACPI
+	depends on BACKLIGHT_CLASS_DEVICE
+	---help---
+	  This driver adds support for access to backlight control and
+	  hotkeys on Panasonic Let's Note laptops.
+
+	  If you have a Panasonic Let's Note laptop (such as the R1 (N
+	  variant), R2, R3, R5, T2, W2 and Y2 series), say Y.
+
+config COMPAL_LAPTOP
+	tristate "Compal Laptop Extras"
+	depends on X86
+	depends on ACPI
+	depends on BACKLIGHT_CLASS_DEVICE
+	---help---
+	  This is a driver for laptops built by Compal:
+
+	    Compal FL90/IFL90
+	    Compal FL91/IFL91
+	    Compal FL92/JFL92
+	    Compal FT00/IFT00
+
+	  It adds support for Bluetooth, WLAN and LCD brightness control.
+
+	  If you have a Compal FL9x/IFL9x/FT00 laptop, say Y or M here.
+
+config SONY_LAPTOP
+	tristate "Sony Laptop Extras"
+	depends on X86 && ACPI
+	select BACKLIGHT_CLASS_DEVICE
+	depends on INPUT
+	---help---
+	  This mini-driver drives the SNC and SPIC devices present in the
+	  ACPI BIOS of the Sony Vaio laptops.
+
+	  It gives access to some extra laptop functionality like Bluetooth,
+	  screen brightness control and Fn keys, and allows powering on/off
+	  some devices.
+
+	  Read <file:Documentation/laptops/sony-laptop.txt> for more information.
+
+config SONYPI_COMPAT
+	bool "Sonypi compatibility"
+	depends on SONY_LAPTOP
+	---help---
+	  Build the sonypi driver compatibility code into the sony-laptop driver.
+
+config THINKPAD_ACPI
+	tristate "ThinkPad ACPI Laptop Extras"
+	depends on X86 && ACPI
+	select BACKLIGHT_LCD_SUPPORT
+	select BACKLIGHT_CLASS_DEVICE
+	select HWMON
+	select NVRAM
+	select INPUT
+	select NEW_LEDS
+	select LEDS_CLASS
+	select NET
+	select RFKILL
+	---help---
+	  This is a driver for the IBM and Lenovo ThinkPad laptops.
+	  It adds support for Fn-Fx key combinations, Bluetooth control,
+	  video output switching, ThinkLight control, UltraBay eject and
+	  more.
+	  For more information about this driver see
+	  <file:Documentation/laptops/thinkpad-acpi.txt> and
+	  <http://ibm-acpi.sf.net/>.
+
+	  This driver was formerly known as ibm-acpi.
+
+	  If you have an IBM or Lenovo ThinkPad laptop, say Y or M here.
+
+config THINKPAD_ACPI_DEBUG
+	bool "Verbose debug mode"
+	depends on THINKPAD_ACPI
+	default n
+	---help---
+	  Enables extra debugging information, at the expense of a slight
+	  increase in driver size.
+
+	  If you are not sure, say N here.
+
+config THINKPAD_ACPI_DOCK
+	bool "Legacy Docking Station Support"
+	depends on THINKPAD_ACPI
+	depends on ACPI_DOCK=n
+	default n
+	---help---
+	  Allows the thinkpad_acpi driver to handle docking station events.
+	  This support was made obsolete by the generic ACPI docking station
+	  support (CONFIG_ACPI_DOCK). It will allow locking and removing the
+	  laptop from the docking station, but will not properly connect PCI
+	  devices.
+
+	  If you are not sure, say N here.
+
+config THINKPAD_ACPI_BAY
+	bool "Legacy Removable Bay Support"
+	depends on THINKPAD_ACPI
+	default y
+	---help---
+	  Allows the thinkpad_acpi driver to handle removable bays. It will
+	  electrically disable the device in the bay, and also generate
+	  notifications when the bay lever is ejected or inserted.
+
+	  If you are not sure, say Y here.
+
+config THINKPAD_ACPI_VIDEO
+	bool "Video output control support"
+	depends on THINKPAD_ACPI
+	default y
+	---help---
+	  Allows the thinkpad_acpi driver to provide an interface to control
+	  the various video output ports.
+
+	  This feature often won't work well, depending on ThinkPad model,
+	  display state, video output devices in use, whether there is an X
+	  server running, phase of the moon, and the current mood of
+	  Schroedinger's cat. If you can use X.org's RandR to control
+	  your ThinkPad's video output ports instead of this feature,
+	  don't think twice: do it and say N here to save some memory.
+
+	  If you are not sure, say Y here.
+
+config THINKPAD_ACPI_HOTKEY_POLL
+	bool "Support NVRAM polling for hot keys"
+	depends on THINKPAD_ACPI
+	default y
+	---help---
+	  Some ThinkPad models benefit from NVRAM polling to detect a few of
+	  the hot key press events. If you know your ThinkPad model does not
+	  need to do NVRAM polling to support any of the hot keys you use,
+	  unselecting this option will save about 1kB of memory.
+
+	  ThinkPads T40 and newer, R52 and newer, and X31 and newer are
+	  unlikely to need NVRAM polling in their latest BIOS versions.
+
+	  NVRAM polling can detect at most the following keys: ThinkPad/Access
+	  IBM, Zoom, Switch Display (Fn+F7), ThinkLight, Volume up/down/mute,
+	  Brightness up/down, Display Expand (Fn+F8), Hibernate (Fn+F12).
+
+	  If you are not sure, say Y here. The driver enables polling only if
+	  it is strictly necessary to do so.
+
+config ATMEL_SSC
+	tristate "Device driver for Atmel SSC peripheral"
+	depends on AVR32 || ARCH_AT91
+	---help---
+	  This option enables device driver support for the Atmel Synchronized
+	  Serial Communication peripheral (SSC).
+
+	  The SSC peripheral supports a wide variety of serial frame based
+	  communications, e.g. I2S, SPI, etc.
+
+	  If unsure, say N.
+
+config INTEL_MENLOW
+	tristate "Thermal Management driver for Intel menlow platform"
+	depends on ACPI_THERMAL
+	select THERMAL
+	depends on X86
+	---help---
+	  ACPI thermal management enhancement driver for the Intel Menlow
+	  platform.
+
+	  If unsure, say N.
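The NVRAM polling described under THINKPAD_ACPI_HOTKEY_POLL above, like the rfkill state polling in acer-wmi.c later in this diff, is built on the kernel's delayed-work machinery: a work item reads the hardware state, compares it with a cached copy, and re-arms itself roughly once a second. A stripped-down sketch of that loop follows; mydrv_read_hw_state() is a hypothetical stand-in for the NVRAM/EC read.

#include <linux/workqueue.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static u32 mydrv_last_state;

static u32 mydrv_read_hw_state(void)
{
	/* Hypothetical: read hotkey/radio state from NVRAM or the EC. */
	return 0;
}

static void mydrv_poll(struct work_struct *work);
static DECLARE_DELAYED_WORK(mydrv_poll_work, mydrv_poll);

static void mydrv_poll(struct work_struct *work)
{
	u32 state = mydrv_read_hw_state();

	if (state != mydrv_last_state) {
		mydrv_last_state = state;
		/* ...report the change (input event, rfkill state, ...). */
	}

	/* Re-arm; round_jiffies_relative() batches wakeups to save power. */
	schedule_delayed_work(&mydrv_poll_work, round_jiffies_relative(HZ));
}

static void mydrv_start_polling(void)
{
	schedule_delayed_work(&mydrv_poll_work, round_jiffies_relative(HZ));
}

static void mydrv_stop_polling(void)
{
	cancel_delayed_work_sync(&mydrv_poll_work);
}

acer-wmi.c below uses this exact shape in acer_rfkill_update(), scheduled from acer_rfkill_init() and torn down with cancel_delayed_work_sync() in acer_rfkill_exit().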
+
+config EEEPC_LAPTOP
+	tristate "Eee PC Hotkey Driver (EXPERIMENTAL)"
+	depends on X86
+	depends on ACPI
+	depends on BACKLIGHT_CLASS_DEVICE
+	depends on HWMON
+	depends on EXPERIMENTAL
+	depends on RFKILL
+	---help---
+	  This driver supports the Fn-Fx keys on Eee PC laptops.
+	  It also adds the ability to switch the camera/WLAN on and off.
+
+	  If you have an Eee PC laptop, say Y or M here.
+
+config ENCLOSURE_SERVICES
+	tristate "Enclosure Services"
+	default n
+	help
+	  Provides support for intelligent enclosures (bays which
+	  contain storage devices). You also need either a host
+	  driver (SCSI/ATA) which supports enclosures
+	  or a SCSI enclosure device (SES) to use these services.
+
+config SGI_XP
+	tristate "Support communication between SGI SSIs"
+	depends on NET
+	depends on (IA64_GENERIC || IA64_SGI_SN2 || IA64_SGI_UV || X86_64) && SMP
+	select IA64_UNCACHED_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
+	select GENERIC_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
+	select SGI_GRU if (IA64_GENERIC || IA64_SGI_UV || X86_64) && SMP
+	---help---
+	  An SGI machine can be divided into multiple Single System
+	  Images which act independently of each other and have
+	  hardware based memory protection from the others. Enabling
+	  this feature will allow for direct communication between SSIs
+	  based on a network adapter and DMA messaging.
+
+config HP_ILO
+	tristate "Channel interface driver for HP iLO/iLO2 processor"
+	depends on PCI
+	default n
+	help
+	  The channel interface driver allows applications to communicate
+	  with iLO/iLO2 management processors present on HP ProLiant
+	  servers. Upon loading, the driver creates /dev/hpilo/dXccbN files,
+	  which can be used to gather data from the management processor,
+	  via read and write system calls.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called hpilo.
+
+config SGI_GRU
+	tristate "SGI GRU driver"
+	depends on (X86_64 || IA64_SGI_UV || IA64_GENERIC) && SMP
+	default n
+	select MMU_NOTIFIER
+	---help---
+	  The GRU is a hardware resource located in the system chipset. The
+	  GRU contains memory that can be mmapped into the user address space.
+	  This memory is used to communicate with the GRU to perform functions
+	  such as load/store, scatter/gather, bcopy, AMOs, etc. The GRU is
+	  directly accessed by user instructions using user virtual addresses.
+	  GRU instructions (e.g., bcopy) use user virtual addresses for
+	  operands.
+
+	  If you are not running on an SGI UV system, say N.
+
+config SGI_GRU_DEBUG
+	bool "SGI GRU driver debug"
+	depends on SGI_GRU
+	default n
+	---help---
+	  This option enables additional debugging code for the SGI GRU
+	  driver. If you are unsure, say N.
+
+source "drivers/misc/c2port/Kconfig"
+
+endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
new file mode 100644
index 0000000..817f7f5
--- /dev/null
+++ b/drivers/misc/Makefile
@@ -0,0 +1,35 @@
+#
+# Makefile for misc devices that really don't fit anywhere else.
+#
+obj- := misc.o	# Dummy rule to force built-in.o to be made
+
+obj-$(CONFIG_IBM_ASM)		+= ibmasm/
+obj-$(CONFIG_HDPU_FEATURES)	+= hdpuftrs/
+obj-$(CONFIG_ASUS_LAPTOP)	+= asus-laptop.o
+obj-$(CONFIG_EEEPC_LAPTOP)	+= eeepc-laptop.o
+obj-$(CONFIG_MSI_LAPTOP)	+= msi-laptop.o
+obj-$(CONFIG_COMPAL_LAPTOP)	+= compal-laptop.o
+obj-$(CONFIG_ACER_WMI)		+= acer-wmi.o
+obj-$(CONFIG_ATMEL_PWM)		+= atmel_pwm.o
+obj-$(CONFIG_ATMEL_SSC)		+= atmel-ssc.o
+obj-$(CONFIG_ATMEL_TCLIB)	+= atmel_tclib.o
+obj-$(CONFIG_HP_WMI)		+= hp-wmi.o
+obj-$(CONFIG_ICS932S401)	+= ics932s401.o
+obj-$(CONFIG_TC1100_WMI)	+= tc1100-wmi.o
+obj-$(CONFIG_LKDTM)		+= lkdtm.o
+obj-$(CONFIG_TIFM_CORE)		+= tifm_core.o
+obj-$(CONFIG_TIFM_7XX1)		+= tifm_7xx1.o
+obj-$(CONFIG_PHANTOM)		+= phantom.o
+obj-$(CONFIG_SGI_IOC4)		+= ioc4.o
+obj-$(CONFIG_SONY_LAPTOP)	+= sony-laptop.o
+obj-$(CONFIG_THINKPAD_ACPI)	+= thinkpad_acpi.o
+obj-$(CONFIG_FUJITSU_LAPTOP)	+= fujitsu-laptop.o
+obj-$(CONFIG_PANASONIC_LAPTOP)	+= panasonic-laptop.o
+obj-$(CONFIG_EEPROM_93CX6)	+= eeprom_93cx6.o
+obj-$(CONFIG_INTEL_MENLOW)	+= intel_menlow.o
+obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
+obj-$(CONFIG_KGDB_TESTS)	+= kgdbts.o
+obj-$(CONFIG_SGI_XP)		+= sgi-xp/
+obj-$(CONFIG_SGI_GRU)		+= sgi-gru/
+obj-$(CONFIG_HP_ILO)		+= hpilo.o
+obj-$(CONFIG_C2PORT)		+= c2port/
diff --git a/drivers/misc/acer-wmi.c b/drivers/misc/acer-wmi.c
new file mode 100644
index 0000000..6bcca61
--- /dev/null
+++ b/drivers/misc/acer-wmi.c
@@ -0,0 +1,1345 @@
+/*
+ * Acer WMI Laptop Extras
+ *
+ * Copyright (C) 2007-2008 Carlos Corbacho <carlos@strangeworlds.co.uk>
+ *
+ * Based on acer_acpi:
+ * Copyright (C) 2005-2007 E.M. Smith
+ * Copyright (C) 2007-2008 Carlos Corbacho <cathectic@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/dmi.h> +#include <linux/fb.h> +#include <linux/backlight.h> +#include <linux/leds.h> +#include <linux/platform_device.h> +#include <linux/acpi.h> +#include <linux/i8042.h> +#include <linux/rfkill.h> +#include <linux/workqueue.h> +#include <linux/debugfs.h> + +#include <acpi/acpi_drivers.h> + +MODULE_AUTHOR("Carlos Corbacho"); +MODULE_DESCRIPTION("Acer Laptop WMI Extras Driver"); +MODULE_LICENSE("GPL"); + +#define ACER_LOGPREFIX "acer-wmi: " +#define ACER_ERR KERN_ERR ACER_LOGPREFIX +#define ACER_NOTICE KERN_NOTICE ACER_LOGPREFIX +#define ACER_INFO KERN_INFO ACER_LOGPREFIX + +/* + * The following defines quirks to get some specific functions to work + * which are known to not be supported over ACPI-WMI (such as the mail LED + * on WMID based Acer's) + */ +struct acer_quirks { + const char *vendor; + const char *model; + u16 quirks; +}; + +/* + * Magic Number + * Meaning is unknown - this number is required for writing to ACPI for AMW0 + * (it's also used in acerhk when directly accessing the BIOS) + */ +#define ACER_AMW0_WRITE 0x9610 + +/* + * Bit masks for the AMW0 interface + */ +#define ACER_AMW0_WIRELESS_MASK 0x35 +#define ACER_AMW0_BLUETOOTH_MASK 0x34 +#define ACER_AMW0_MAILLED_MASK 0x31 + +/* + * Method IDs for WMID interface + */ +#define ACER_WMID_GET_WIRELESS_METHODID 1 +#define ACER_WMID_GET_BLUETOOTH_METHODID 2 +#define ACER_WMID_GET_BRIGHTNESS_METHODID 3 +#define ACER_WMID_SET_WIRELESS_METHODID 4 +#define ACER_WMID_SET_BLUETOOTH_METHODID 5 +#define ACER_WMID_SET_BRIGHTNESS_METHODID 6 +#define ACER_WMID_GET_THREEG_METHODID 10 +#define ACER_WMID_SET_THREEG_METHODID 11 + +/* + * Acer ACPI method GUIDs + */ +#define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB" +#define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C" +#define WMID_GUID1 "6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3" +#define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A" + +MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB"); +MODULE_ALIAS("wmi:6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3"); + +/* Temporary workaround until the WMI sysfs interface goes in */ +MODULE_ALIAS("dmi:*:*Acer*:*:"); + +/* + * Interface capability flags + */ +#define ACER_CAP_MAILLED (1<<0) +#define ACER_CAP_WIRELESS (1<<1) +#define ACER_CAP_BLUETOOTH (1<<2) +#define ACER_CAP_BRIGHTNESS (1<<3) +#define ACER_CAP_THREEG (1<<4) +#define ACER_CAP_ANY (0xFFFFFFFF) + +/* + * Interface type flags + */ +enum interface_flags { + ACER_AMW0, + ACER_AMW0_V2, + ACER_WMID, +}; + +#define ACER_DEFAULT_WIRELESS 0 +#define ACER_DEFAULT_BLUETOOTH 0 +#define ACER_DEFAULT_MAILLED 0 +#define ACER_DEFAULT_THREEG 0 + +static int max_brightness = 0xF; + +static int mailled = -1; +static int brightness = -1; +static int threeg = -1; +static int force_series; + +module_param(mailled, int, 0444); +module_param(brightness, int, 0444); +module_param(threeg, int, 0444); +module_param(force_series, int, 0444); +MODULE_PARM_DESC(mailled, "Set initial state of Mail LED"); +MODULE_PARM_DESC(brightness, "Set initial LCD backlight brightness"); +MODULE_PARM_DESC(threeg, "Set initial state of 3G hardware"); +MODULE_PARM_DESC(force_series, "Force a different laptop series"); + +struct acer_data { + int mailled; + int threeg; + int brightness; 
+}; + +struct acer_debug { + struct dentry *root; + struct dentry *devices; + u32 wmid_devices; +}; + +static struct rfkill *wireless_rfkill; +static struct rfkill *bluetooth_rfkill; + +/* Each low-level interface must define at least some of the following */ +struct wmi_interface { + /* The WMI device type */ + u32 type; + + /* The capabilities this interface provides */ + u32 capability; + + /* Private data for the current interface */ + struct acer_data data; + + /* debugfs entries associated with this interface */ + struct acer_debug debug; +}; + +/* The static interface pointer, points to the currently detected interface */ +static struct wmi_interface *interface; + +/* + * Embedded Controller quirks + * Some laptops require us to directly access the EC to either enable or query + * features that are not available through WMI. + */ + +struct quirk_entry { + u8 wireless; + u8 mailled; + s8 brightness; + u8 bluetooth; +}; + +static struct quirk_entry *quirks; + +static void set_quirks(void) +{ + if (!interface) + return; + + if (quirks->mailled) + interface->capability |= ACER_CAP_MAILLED; + + if (quirks->brightness) + interface->capability |= ACER_CAP_BRIGHTNESS; +} + +static int dmi_matched(const struct dmi_system_id *dmi) +{ + quirks = dmi->driver_data; + return 0; +} + +static struct quirk_entry quirk_unknown = { +}; + +static struct quirk_entry quirk_acer_aspire_1520 = { + .brightness = -1, +}; + +static struct quirk_entry quirk_acer_travelmate_2490 = { + .mailled = 1, +}; + +/* This AMW0 laptop has no bluetooth */ +static struct quirk_entry quirk_medion_md_98300 = { + .wireless = 1, +}; + +static struct quirk_entry quirk_fujitsu_amilo_li_1718 = { + .wireless = 2, +}; + +static struct dmi_system_id acer_quirks[] = { + { + .callback = dmi_matched, + .ident = "Acer Aspire 1360", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"), + }, + .driver_data = &quirk_acer_aspire_1520, + }, + { + .callback = dmi_matched, + .ident = "Acer Aspire 1520", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1520"), + }, + .driver_data = &quirk_acer_aspire_1520, + }, + { + .callback = dmi_matched, + .ident = "Acer Aspire 3100", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3100"), + }, + .driver_data = &quirk_acer_travelmate_2490, + }, + { + .callback = dmi_matched, + .ident = "Acer Aspire 3610", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3610"), + }, + .driver_data = &quirk_acer_travelmate_2490, + }, + { + .callback = dmi_matched, + .ident = "Acer Aspire 5100", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"), + }, + .driver_data = &quirk_acer_travelmate_2490, + }, + { + .callback = dmi_matched, + .ident = "Acer Aspire 5610", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"), + }, + .driver_data = &quirk_acer_travelmate_2490, + }, + { + .callback = dmi_matched, + .ident = "Acer Aspire 5630", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"), + }, + .driver_data = &quirk_acer_travelmate_2490, + }, + { + .callback = dmi_matched, + .ident = "Acer Aspire 5650", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"), + }, + .driver_data = &quirk_acer_travelmate_2490, + }, + { + .callback = dmi_matched, + .ident = "Acer Aspire 5680", + .matches = { + 
DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"), + }, + .driver_data = &quirk_acer_travelmate_2490, + }, + { + .callback = dmi_matched, + .ident = "Acer Aspire 9110", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"), + }, + .driver_data = &quirk_acer_travelmate_2490, + }, + { + .callback = dmi_matched, + .ident = "Acer TravelMate 2490", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"), + }, + .driver_data = &quirk_acer_travelmate_2490, + }, + { + .callback = dmi_matched, + .ident = "Acer TravelMate 4200", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4200"), + }, + .driver_data = &quirk_acer_travelmate_2490, + }, + { + .callback = dmi_matched, + .ident = "Fujitsu Siemens Amilo Li 1718", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Li 1718"), + }, + .driver_data = &quirk_fujitsu_amilo_li_1718, + }, + { + .callback = dmi_matched, + .ident = "Medion MD 98300", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), + DMI_MATCH(DMI_PRODUCT_NAME, "WAM2030"), + }, + .driver_data = &quirk_medion_md_98300, + }, + {} +}; + +/* Find which quirks are needed for a particular vendor/ model pair */ +static void find_quirks(void) +{ + if (!force_series) { + dmi_check_system(acer_quirks); + } else if (force_series == 2490) { + quirks = &quirk_acer_travelmate_2490; + } + + if (quirks == NULL) + quirks = &quirk_unknown; + + set_quirks(); +} + +/* + * General interface convenience methods + */ + +static bool has_cap(u32 cap) +{ + if ((interface->capability & cap) != 0) + return 1; + + return 0; +} + +/* + * AMW0 (V1) interface + */ +struct wmab_args { + u32 eax; + u32 ebx; + u32 ecx; + u32 edx; +}; + +struct wmab_ret { + u32 eax; + u32 ebx; + u32 ecx; + u32 edx; + u32 eex; +}; + +static acpi_status wmab_execute(struct wmab_args *regbuf, +struct acpi_buffer *result) +{ + struct acpi_buffer input; + acpi_status status; + input.length = sizeof(struct wmab_args); + input.pointer = (u8 *)regbuf; + + status = wmi_evaluate_method(AMW0_GUID1, 1, 1, &input, result); + + return status; +} + +static acpi_status AMW0_get_u32(u32 *value, u32 cap, +struct wmi_interface *iface) +{ + int err; + u8 result; + + switch (cap) { + case ACER_CAP_MAILLED: + switch (quirks->mailled) { + default: + err = ec_read(0xA, &result); + if (err) + return AE_ERROR; + *value = (result >> 7) & 0x1; + return AE_OK; + } + break; + case ACER_CAP_WIRELESS: + switch (quirks->wireless) { + case 1: + err = ec_read(0x7B, &result); + if (err) + return AE_ERROR; + *value = result & 0x1; + return AE_OK; + case 2: + err = ec_read(0x71, &result); + if (err) + return AE_ERROR; + *value = result & 0x1; + return AE_OK; + default: + err = ec_read(0xA, &result); + if (err) + return AE_ERROR; + *value = (result >> 2) & 0x1; + return AE_OK; + } + break; + case ACER_CAP_BLUETOOTH: + switch (quirks->bluetooth) { + default: + err = ec_read(0xA, &result); + if (err) + return AE_ERROR; + *value = (result >> 4) & 0x1; + return AE_OK; + } + break; + case ACER_CAP_BRIGHTNESS: + switch (quirks->brightness) { + default: + err = ec_read(0x83, &result); + if (err) + return AE_ERROR; + *value = result; + return AE_OK; + } + break; + default: + return AE_ERROR; + } + return AE_OK; +} + +static acpi_status AMW0_set_u32(u32 value, u32 cap, struct wmi_interface *iface) +{ + struct wmab_args args; + + args.eax = ACER_AMW0_WRITE; + args.ebx = value ? 
(1<<8) : 0; + args.ecx = args.edx = 0; + + switch (cap) { + case ACER_CAP_MAILLED: + if (value > 1) + return AE_BAD_PARAMETER; + args.ebx |= ACER_AMW0_MAILLED_MASK; + break; + case ACER_CAP_WIRELESS: + if (value > 1) + return AE_BAD_PARAMETER; + args.ebx |= ACER_AMW0_WIRELESS_MASK; + break; + case ACER_CAP_BLUETOOTH: + if (value > 1) + return AE_BAD_PARAMETER; + args.ebx |= ACER_AMW0_BLUETOOTH_MASK; + break; + case ACER_CAP_BRIGHTNESS: + if (value > max_brightness) + return AE_BAD_PARAMETER; + switch (quirks->brightness) { + default: + return ec_write(0x83, value); + break; + } + default: + return AE_ERROR; + } + + /* Actually do the set */ + return wmab_execute(&args, NULL); +} + +static acpi_status AMW0_find_mailled(void) +{ + struct wmab_args args; + struct wmab_ret ret; + acpi_status status = AE_OK; + struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *obj; + + args.eax = 0x86; + args.ebx = args.ecx = args.edx = 0; + + status = wmab_execute(&args, &out); + if (ACPI_FAILURE(status)) + return status; + + obj = (union acpi_object *) out.pointer; + if (obj && obj->type == ACPI_TYPE_BUFFER && + obj->buffer.length == sizeof(struct wmab_ret)) { + ret = *((struct wmab_ret *) obj->buffer.pointer); + } else { + return AE_ERROR; + } + + if (ret.eex & 0x1) + interface->capability |= ACER_CAP_MAILLED; + + kfree(out.pointer); + + return AE_OK; +} + +static acpi_status AMW0_set_capabilities(void) +{ + struct wmab_args args; + struct wmab_ret ret; + acpi_status status = AE_OK; + struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *obj; + + /* + * On laptops with this strange GUID (non Acer), normal probing doesn't + * work. + */ + if (wmi_has_guid(AMW0_GUID2)) { + interface->capability |= ACER_CAP_WIRELESS; + return AE_OK; + } + + args.eax = ACER_AMW0_WRITE; + args.ecx = args.edx = 0; + + args.ebx = 0xa2 << 8; + args.ebx |= ACER_AMW0_WIRELESS_MASK; + + status = wmab_execute(&args, &out); + if (ACPI_FAILURE(status)) + return status; + + obj = (union acpi_object *) out.pointer; + if (obj && obj->type == ACPI_TYPE_BUFFER && + obj->buffer.length == sizeof(struct wmab_ret)) { + ret = *((struct wmab_ret *) obj->buffer.pointer); + } else { + return AE_ERROR; + } + + if (ret.eax & 0x1) + interface->capability |= ACER_CAP_WIRELESS; + + args.ebx = 2 << 8; + args.ebx |= ACER_AMW0_BLUETOOTH_MASK; + + status = wmab_execute(&args, &out); + if (ACPI_FAILURE(status)) + return status; + + obj = (union acpi_object *) out.pointer; + if (obj && obj->type == ACPI_TYPE_BUFFER + && obj->buffer.length == sizeof(struct wmab_ret)) { + ret = *((struct wmab_ret *) obj->buffer.pointer); + } else { + return AE_ERROR; + } + + if (ret.eax & 0x1) + interface->capability |= ACER_CAP_BLUETOOTH; + + kfree(out.pointer); + + /* + * This appears to be safe to enable, since all Wistron based laptops + * appear to use the same EC register for brightness, even if they + * differ for wireless, etc + */ + if (quirks->brightness >= 0) + interface->capability |= ACER_CAP_BRIGHTNESS; + + return AE_OK; +} + +static struct wmi_interface AMW0_interface = { + .type = ACER_AMW0, +}; + +static struct wmi_interface AMW0_V2_interface = { + .type = ACER_AMW0_V2, +}; + +/* + * New interface (The WMID interface) + */ +static acpi_status +WMI_execute_u32(u32 method_id, u32 in, u32 *out) +{ + struct acpi_buffer input = { (acpi_size) sizeof(u32), (void *)(&in) }; + struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *obj; + u32 tmp; + acpi_status status; + + status = 
wmi_evaluate_method(WMID_GUID1, 1, method_id, &input, &result); + + if (ACPI_FAILURE(status)) + return status; + + obj = (union acpi_object *) result.pointer; + if (obj && obj->type == ACPI_TYPE_BUFFER && + obj->buffer.length == sizeof(u32)) { + tmp = *((u32 *) obj->buffer.pointer); + } else { + tmp = 0; + } + + if (out) + *out = tmp; + + kfree(result.pointer); + + return status; +} + +static acpi_status WMID_get_u32(u32 *value, u32 cap, +struct wmi_interface *iface) +{ + acpi_status status; + u8 tmp; + u32 result, method_id = 0; + + switch (cap) { + case ACER_CAP_WIRELESS: + method_id = ACER_WMID_GET_WIRELESS_METHODID; + break; + case ACER_CAP_BLUETOOTH: + method_id = ACER_WMID_GET_BLUETOOTH_METHODID; + break; + case ACER_CAP_BRIGHTNESS: + method_id = ACER_WMID_GET_BRIGHTNESS_METHODID; + break; + case ACER_CAP_THREEG: + method_id = ACER_WMID_GET_THREEG_METHODID; + break; + case ACER_CAP_MAILLED: + if (quirks->mailled == 1) { + ec_read(0x9f, &tmp); + *value = tmp & 0x1; + return 0; + } + default: + return AE_ERROR; + } + status = WMI_execute_u32(method_id, 0, &result); + + if (ACPI_SUCCESS(status)) + *value = (u8)result; + + return status; +} + +static acpi_status WMID_set_u32(u32 value, u32 cap, struct wmi_interface *iface) +{ + u32 method_id = 0; + char param; + + switch (cap) { + case ACER_CAP_BRIGHTNESS: + if (value > max_brightness) + return AE_BAD_PARAMETER; + method_id = ACER_WMID_SET_BRIGHTNESS_METHODID; + break; + case ACER_CAP_WIRELESS: + if (value > 1) + return AE_BAD_PARAMETER; + method_id = ACER_WMID_SET_WIRELESS_METHODID; + break; + case ACER_CAP_BLUETOOTH: + if (value > 1) + return AE_BAD_PARAMETER; + method_id = ACER_WMID_SET_BLUETOOTH_METHODID; + break; + case ACER_CAP_THREEG: + if (value > 1) + return AE_BAD_PARAMETER; + method_id = ACER_WMID_SET_THREEG_METHODID; + break; + case ACER_CAP_MAILLED: + if (value > 1) + return AE_BAD_PARAMETER; + if (quirks->mailled == 1) { + param = value ? 
0x92 : 0x93; + i8042_command(¶m, 0x1059); + return 0; + } + break; + default: + return AE_ERROR; + } + return WMI_execute_u32(method_id, (u32)value, NULL); +} + +static acpi_status WMID_set_capabilities(void) +{ + struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL}; + union acpi_object *obj; + acpi_status status; + u32 devices; + + status = wmi_query_block(WMID_GUID2, 1, &out); + if (ACPI_FAILURE(status)) + return status; + + obj = (union acpi_object *) out.pointer; + if (obj && obj->type == ACPI_TYPE_BUFFER && + obj->buffer.length == sizeof(u32)) { + devices = *((u32 *) obj->buffer.pointer); + } else { + return AE_ERROR; + } + + /* Not sure on the meaning of the relevant bits yet to detect these */ + interface->capability |= ACER_CAP_WIRELESS; + interface->capability |= ACER_CAP_THREEG; + + /* WMID always provides brightness methods */ + interface->capability |= ACER_CAP_BRIGHTNESS; + + if (devices & 0x10) + interface->capability |= ACER_CAP_BLUETOOTH; + + if (!(devices & 0x20)) + max_brightness = 0x9; + + return status; +} + +static struct wmi_interface wmid_interface = { + .type = ACER_WMID, +}; + +/* + * Generic Device (interface-independent) + */ + +static acpi_status get_u32(u32 *value, u32 cap) +{ + acpi_status status = AE_ERROR; + + switch (interface->type) { + case ACER_AMW0: + status = AMW0_get_u32(value, cap, interface); + break; + case ACER_AMW0_V2: + if (cap == ACER_CAP_MAILLED) { + status = AMW0_get_u32(value, cap, interface); + break; + } + case ACER_WMID: + status = WMID_get_u32(value, cap, interface); + break; + } + + return status; +} + +static acpi_status set_u32(u32 value, u32 cap) +{ + acpi_status status; + + if (interface->capability & cap) { + switch (interface->type) { + case ACER_AMW0: + return AMW0_set_u32(value, cap, interface); + case ACER_AMW0_V2: + if (cap == ACER_CAP_MAILLED) + return AMW0_set_u32(value, cap, interface); + + /* + * On some models, some WMID methods don't toggle + * properly. For those cases, we want to run the AMW0 + * method afterwards to be certain we've really toggled + * the device state. 
+ */ + if (cap == ACER_CAP_WIRELESS || + cap == ACER_CAP_BLUETOOTH) { + status = WMID_set_u32(value, cap, interface); + if (ACPI_FAILURE(status)) + return status; + + return AMW0_set_u32(value, cap, interface); + } + case ACER_WMID: + return WMID_set_u32(value, cap, interface); + default: + return AE_BAD_PARAMETER; + } + } + return AE_BAD_PARAMETER; +} + +static void __init acer_commandline_init(void) +{ + /* + * These will all fail silently if the value given is invalid, or the + * capability isn't available on the given interface + */ + set_u32(mailled, ACER_CAP_MAILLED); + set_u32(threeg, ACER_CAP_THREEG); + set_u32(brightness, ACER_CAP_BRIGHTNESS); +} + +/* + * LED device (Mail LED only, no other LEDs known yet) + */ +static void mail_led_set(struct led_classdev *led_cdev, +enum led_brightness value) +{ + set_u32(value, ACER_CAP_MAILLED); +} + +static struct led_classdev mail_led = { + .name = "acer-wmi::mail", + .brightness_set = mail_led_set, +}; + +static int __devinit acer_led_init(struct device *dev) +{ + return led_classdev_register(dev, &mail_led); +} + +static void acer_led_exit(void) +{ + led_classdev_unregister(&mail_led); +} + +/* + * Backlight device + */ +static struct backlight_device *acer_backlight_device; + +static int read_brightness(struct backlight_device *bd) +{ + u32 value; + get_u32(&value, ACER_CAP_BRIGHTNESS); + return value; +} + +static int update_bl_status(struct backlight_device *bd) +{ + int intensity = bd->props.brightness; + + if (bd->props.power != FB_BLANK_UNBLANK) + intensity = 0; + if (bd->props.fb_blank != FB_BLANK_UNBLANK) + intensity = 0; + + set_u32(intensity, ACER_CAP_BRIGHTNESS); + + return 0; +} + +static struct backlight_ops acer_bl_ops = { + .get_brightness = read_brightness, + .update_status = update_bl_status, +}; + +static int __devinit acer_backlight_init(struct device *dev) +{ + struct backlight_device *bd; + + bd = backlight_device_register("acer-wmi", dev, NULL, &acer_bl_ops); + if (IS_ERR(bd)) { + printk(ACER_ERR "Could not register Acer backlight device\n"); + acer_backlight_device = NULL; + return PTR_ERR(bd); + } + + acer_backlight_device = bd; + + bd->props.power = FB_BLANK_UNBLANK; + bd->props.brightness = max_brightness; + bd->props.max_brightness = max_brightness; + backlight_update_status(bd); + return 0; +} + +static void acer_backlight_exit(void) +{ + backlight_device_unregister(acer_backlight_device); +} + +/* + * Rfkill devices + */ +static void acer_rfkill_update(struct work_struct *ignored); +static DECLARE_DELAYED_WORK(acer_rfkill_work, acer_rfkill_update); +static void acer_rfkill_update(struct work_struct *ignored) +{ + u32 state; + acpi_status status; + + status = get_u32(&state, ACER_CAP_WIRELESS); + if (ACPI_SUCCESS(status)) + rfkill_force_state(wireless_rfkill, state ? + RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED); + + if (has_cap(ACER_CAP_BLUETOOTH)) { + status = get_u32(&state, ACER_CAP_BLUETOOTH); + if (ACPI_SUCCESS(status)) + rfkill_force_state(bluetooth_rfkill, state ? 
+ RFKILL_STATE_UNBLOCKED : + RFKILL_STATE_SOFT_BLOCKED); + } + + schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ)); +} + +static int acer_rfkill_set(void *data, enum rfkill_state state) +{ + acpi_status status; + u32 *cap = data; + status = set_u32((u32) (state == RFKILL_STATE_UNBLOCKED), *cap); + if (ACPI_FAILURE(status)) + return -ENODEV; + return 0; +} + +static struct rfkill * acer_rfkill_register(struct device *dev, +enum rfkill_type type, char *name, u32 cap) +{ + int err; + u32 state; + u32 *data; + struct rfkill *rfkill_dev; + + rfkill_dev = rfkill_allocate(dev, type); + if (!rfkill_dev) + return ERR_PTR(-ENOMEM); + rfkill_dev->name = name; + get_u32(&state, cap); + rfkill_dev->state = state ? RFKILL_STATE_UNBLOCKED : + RFKILL_STATE_SOFT_BLOCKED; + data = kzalloc(sizeof(u32), GFP_KERNEL); + if (!data) { + rfkill_free(rfkill_dev); + return ERR_PTR(-ENOMEM); + } + *data = cap; + rfkill_dev->data = data; + rfkill_dev->toggle_radio = acer_rfkill_set; + rfkill_dev->user_claim_unsupported = 1; + + err = rfkill_register(rfkill_dev); + if (err) { + kfree(rfkill_dev->data); + rfkill_free(rfkill_dev); + return ERR_PTR(err); + } + return rfkill_dev; +} + +static int acer_rfkill_init(struct device *dev) +{ + wireless_rfkill = acer_rfkill_register(dev, RFKILL_TYPE_WLAN, + "acer-wireless", ACER_CAP_WIRELESS); + if (IS_ERR(wireless_rfkill)) + return PTR_ERR(wireless_rfkill); + + if (has_cap(ACER_CAP_BLUETOOTH)) { + bluetooth_rfkill = acer_rfkill_register(dev, + RFKILL_TYPE_BLUETOOTH, "acer-bluetooth", + ACER_CAP_BLUETOOTH); + if (IS_ERR(bluetooth_rfkill)) { + kfree(wireless_rfkill->data); + rfkill_unregister(wireless_rfkill); + return PTR_ERR(bluetooth_rfkill); + } + } + + schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ)); + + return 0; +} + +static void acer_rfkill_exit(void) +{ + cancel_delayed_work_sync(&acer_rfkill_work); + kfree(wireless_rfkill->data); + rfkill_unregister(wireless_rfkill); + if (has_cap(ACER_CAP_BLUETOOTH)) { + kfree(wireless_rfkill->data); + rfkill_unregister(bluetooth_rfkill); + } + return; +} + +/* + * sysfs interface + */ +static ssize_t show_bool_threeg(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u32 result; \ + acpi_status status = get_u32(&result, ACER_CAP_THREEG); + if (ACPI_SUCCESS(status)) + return sprintf(buf, "%u\n", result); + return sprintf(buf, "Read error\n"); +} + +static ssize_t set_bool_threeg(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + u32 tmp = simple_strtoul(buf, NULL, 10); + acpi_status status = set_u32(tmp, ACER_CAP_THREEG); + if (ACPI_FAILURE(status)) + return -EINVAL; + return count; +} +static DEVICE_ATTR(threeg, S_IWUGO | S_IRUGO | S_IWUSR, show_bool_threeg, + set_bool_threeg); + +static ssize_t show_interface(struct device *dev, struct device_attribute *attr, + char *buf) +{ + switch (interface->type) { + case ACER_AMW0: + return sprintf(buf, "AMW0\n"); + case ACER_AMW0_V2: + return sprintf(buf, "AMW0 v2\n"); + case ACER_WMID: + return sprintf(buf, "WMID\n"); + default: + return sprintf(buf, "Error!\n"); + } +} + +static DEVICE_ATTR(interface, S_IWUGO | S_IRUGO | S_IWUSR, + show_interface, NULL); + +/* + * debugfs functions + */ +static u32 get_wmid_devices(void) +{ + struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL}; + union acpi_object *obj; + acpi_status status; + + status = wmi_query_block(WMID_GUID2, 1, &out); + if (ACPI_FAILURE(status)) + return 0; + + obj = (union acpi_object *) out.pointer; + if (obj && obj->type == 
ACPI_TYPE_BUFFER && + obj->buffer.length == sizeof(u32)) { + return *((u32 *) obj->buffer.pointer); + } else { + return 0; + } +} + +/* + * Platform device + */ +static int __devinit acer_platform_probe(struct platform_device *device) +{ + int err; + + if (has_cap(ACER_CAP_MAILLED)) { + err = acer_led_init(&device->dev); + if (err) + goto error_mailled; + } + + if (has_cap(ACER_CAP_BRIGHTNESS)) { + err = acer_backlight_init(&device->dev); + if (err) + goto error_brightness; + } + + err = acer_rfkill_init(&device->dev); + + return err; + +error_brightness: + acer_led_exit(); +error_mailled: + return err; +} + +static int acer_platform_remove(struct platform_device *device) +{ + if (has_cap(ACER_CAP_MAILLED)) + acer_led_exit(); + if (has_cap(ACER_CAP_BRIGHTNESS)) + acer_backlight_exit(); + + acer_rfkill_exit(); + return 0; +} + +static int acer_platform_suspend(struct platform_device *dev, +pm_message_t state) +{ + u32 value; + struct acer_data *data = &interface->data; + + if (!data) + return -ENOMEM; + + if (has_cap(ACER_CAP_MAILLED)) { + get_u32(&value, ACER_CAP_MAILLED); + data->mailled = value; + } + + if (has_cap(ACER_CAP_BRIGHTNESS)) { + get_u32(&value, ACER_CAP_BRIGHTNESS); + data->brightness = value; + } + + return 0; +} + +static int acer_platform_resume(struct platform_device *device) +{ + struct acer_data *data = &interface->data; + + if (!data) + return -ENOMEM; + + if (has_cap(ACER_CAP_MAILLED)) + set_u32(data->mailled, ACER_CAP_MAILLED); + + if (has_cap(ACER_CAP_BRIGHTNESS)) + set_u32(data->brightness, ACER_CAP_BRIGHTNESS); + + return 0; +} + +static struct platform_driver acer_platform_driver = { + .driver = { + .name = "acer-wmi", + .owner = THIS_MODULE, + }, + .probe = acer_platform_probe, + .remove = acer_platform_remove, + .suspend = acer_platform_suspend, + .resume = acer_platform_resume, +}; + +static struct platform_device *acer_platform_device; + +static int remove_sysfs(struct platform_device *device) +{ + if (has_cap(ACER_CAP_THREEG)) + device_remove_file(&device->dev, &dev_attr_threeg); + + device_remove_file(&device->dev, &dev_attr_interface); + + return 0; +} + +static int create_sysfs(void) +{ + int retval = -ENOMEM; + + if (has_cap(ACER_CAP_THREEG)) { + retval = device_create_file(&acer_platform_device->dev, + &dev_attr_threeg); + if (retval) + goto error_sysfs; + } + + retval = device_create_file(&acer_platform_device->dev, + &dev_attr_interface); + if (retval) + goto error_sysfs; + + return 0; + +error_sysfs: + remove_sysfs(acer_platform_device); + return retval; +} + +static void remove_debugfs(void) +{ + debugfs_remove(interface->debug.devices); + debugfs_remove(interface->debug.root); +} + +static int create_debugfs(void) +{ + interface->debug.root = debugfs_create_dir("acer-wmi", NULL); + if (!interface->debug.root) { + printk(ACER_ERR "Failed to create debugfs directory"); + return -ENOMEM; + } + + interface->debug.devices = debugfs_create_u32("devices", S_IRUGO, + interface->debug.root, + &interface->debug.wmid_devices); + if (!interface->debug.devices) + goto error_debugfs; + + return 0; + +error_debugfs: + remove_debugfs(); + return -ENOMEM; +} + +static int __init acer_wmi_init(void) +{ + int err; + + printk(ACER_INFO "Acer Laptop ACPI-WMI Extras\n"); + + find_quirks(); + + /* + * Detect which ACPI-WMI interface we're using. 
+ */ + if (wmi_has_guid(AMW0_GUID1) && wmi_has_guid(WMID_GUID1)) + interface = &AMW0_V2_interface; + + if (!wmi_has_guid(AMW0_GUID1) && wmi_has_guid(WMID_GUID1)) + interface = &wmid_interface; + + if (wmi_has_guid(WMID_GUID2) && interface) { + if (ACPI_FAILURE(WMID_set_capabilities())) { + printk(ACER_ERR "Unable to detect available WMID " + "devices\n"); + return -ENODEV; + } + } else if (!wmi_has_guid(WMID_GUID2) && interface) { + printk(ACER_ERR "No WMID device detection method found\n"); + return -ENODEV; + } + + if (wmi_has_guid(AMW0_GUID1) && !wmi_has_guid(WMID_GUID1)) { + interface = &AMW0_interface; + + if (ACPI_FAILURE(AMW0_set_capabilities())) { + printk(ACER_ERR "Unable to detect available AMW0 " + "devices\n"); + return -ENODEV; + } + } + + if (wmi_has_guid(AMW0_GUID1)) + AMW0_find_mailled(); + + if (!interface) { + printk(ACER_ERR "No or unsupported WMI interface, unable to " + "load\n"); + return -ENODEV; + } + + set_quirks(); + + if (acpi_video_backlight_support() && has_cap(ACER_CAP_BRIGHTNESS)) { + interface->capability &= ~ACER_CAP_BRIGHTNESS; + printk(ACER_INFO "Brightness must be controlled by " + "generic video driver\n"); + } + + if (platform_driver_register(&acer_platform_driver)) { + printk(ACER_ERR "Unable to register platform driver.\n"); + goto error_platform_register; + } + acer_platform_device = platform_device_alloc("acer-wmi", -1); + platform_device_add(acer_platform_device); + + err = create_sysfs(); + if (err) + return err; + + if (wmi_has_guid(WMID_GUID2)) { + interface->debug.wmid_devices = get_wmid_devices(); + err = create_debugfs(); + if (err) + return err; + } + + /* Override any initial settings with values from the commandline */ + acer_commandline_init(); + + return 0; + +error_platform_register: + return -ENODEV; +} + +static void __exit acer_wmi_exit(void) +{ + remove_sysfs(acer_platform_device); + remove_debugfs(); + platform_device_del(acer_platform_device); + platform_driver_unregister(&acer_platform_driver); + + printk(ACER_INFO "Acer Laptop WMI Extras unloaded\n"); + return; +} + +module_init(acer_wmi_init); +module_exit(acer_wmi_exit); diff --git a/drivers/misc/asus-laptop.c b/drivers/misc/asus-laptop.c new file mode 100644 index 0000000..8fb8b35 --- /dev/null +++ b/drivers/misc/asus-laptop.c @@ -0,0 +1,1266 @@ +/* + * asus-laptop.c - Asus Laptop Support + * + * + * Copyright (C) 2002-2005 Julien Lerouge, 2003-2006 Karol Kozimor + * Copyright (C) 2006-2007 Corentin Chary + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * + * The development page for this driver is located at + * http://sourceforge.net/projects/acpi4asus/ + * + * Credits: + * Pontus Fuchs - Helper functions, cleanup + * Johann Wiesner - Small compile fixes + * John Belmonte - ACPI code for Toshiba laptop was a good starting point. 
+ * Eric Burghard - LED display support for W1N + * Josh Green - Light Sens support + * Thomas Tuttle - His first patch for led support was very helpfull + * Sam Lin - GPS support + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/err.h> +#include <linux/proc_fs.h> +#include <linux/backlight.h> +#include <linux/fb.h> +#include <linux/leds.h> +#include <linux/platform_device.h> +#include <acpi/acpi_drivers.h> +#include <acpi/acpi_bus.h> +#include <asm/uaccess.h> + +#define ASUS_LAPTOP_VERSION "0.42" + +#define ASUS_HOTK_NAME "Asus Laptop Support" +#define ASUS_HOTK_CLASS "hotkey" +#define ASUS_HOTK_DEVICE_NAME "Hotkey" +#define ASUS_HOTK_FILE "asus-laptop" +#define ASUS_HOTK_PREFIX "\\_SB.ATKD." + +/* + * Some events we use, same for all Asus + */ +#define ATKD_BR_UP 0x10 +#define ATKD_BR_DOWN 0x20 +#define ATKD_LCD_ON 0x33 +#define ATKD_LCD_OFF 0x34 + +/* + * Known bits returned by \_SB.ATKD.HWRS + */ +#define WL_HWRS 0x80 +#define BT_HWRS 0x100 + +/* + * Flags for hotk status + * WL_ON and BT_ON are also used for wireless_status() + */ +#define WL_ON 0x01 //internal Wifi +#define BT_ON 0x02 //internal Bluetooth +#define MLED_ON 0x04 //mail LED +#define TLED_ON 0x08 //touchpad LED +#define RLED_ON 0x10 //Record LED +#define PLED_ON 0x20 //Phone LED +#define GLED_ON 0x40 //Gaming LED +#define LCD_ON 0x80 //LCD backlight +#define GPS_ON 0x100 //GPS + +#define ASUS_LOG ASUS_HOTK_FILE ": " +#define ASUS_ERR KERN_ERR ASUS_LOG +#define ASUS_WARNING KERN_WARNING ASUS_LOG +#define ASUS_NOTICE KERN_NOTICE ASUS_LOG +#define ASUS_INFO KERN_INFO ASUS_LOG +#define ASUS_DEBUG KERN_DEBUG ASUS_LOG + +MODULE_AUTHOR("Julien Lerouge, Karol Kozimor, Corentin Chary"); +MODULE_DESCRIPTION(ASUS_HOTK_NAME); +MODULE_LICENSE("GPL"); + +/* WAPF defines the behavior of the Fn+Fx wlan key + * The significance of values is yet to be found, but + * most of the time: + * 0x0 will do nothing + * 0x1 will allow to control the device with Fn+Fx key. + * 0x4 will send an ACPI event (0x88) while pressing the Fn+Fx key + * 0x5 like 0x1 or 0x4 + * So, if something doesn't work as you want, just try other values =) + */ +static uint wapf = 1; +module_param(wapf, uint, 0644); +MODULE_PARM_DESC(wapf, "WAPF value"); + +#define ASUS_HANDLE(object, paths...) \ + static acpi_handle object##_handle = NULL; \ + static char *object##_paths[] = { paths } + +/* LED */ +ASUS_HANDLE(mled_set, ASUS_HOTK_PREFIX "MLED"); +ASUS_HANDLE(tled_set, ASUS_HOTK_PREFIX "TLED"); +ASUS_HANDLE(rled_set, ASUS_HOTK_PREFIX "RLED"); /* W1JC */ +ASUS_HANDLE(pled_set, ASUS_HOTK_PREFIX "PLED"); /* A7J */ +ASUS_HANDLE(gled_set, ASUS_HOTK_PREFIX "GLED"); /* G1, G2 (probably) */ + +/* LEDD */ +ASUS_HANDLE(ledd_set, ASUS_HOTK_PREFIX "SLCM"); + +/* Bluetooth and WLAN + * WLED and BLED are not handled like other XLED, because in some dsdt + * they also control the WLAN/Bluetooth device. 
+ */ +ASUS_HANDLE(wl_switch, ASUS_HOTK_PREFIX "WLED"); +ASUS_HANDLE(bt_switch, ASUS_HOTK_PREFIX "BLED"); +ASUS_HANDLE(wireless_status, ASUS_HOTK_PREFIX "RSTS"); /* All new models */ + +/* Brightness */ +ASUS_HANDLE(brightness_set, ASUS_HOTK_PREFIX "SPLV"); +ASUS_HANDLE(brightness_get, ASUS_HOTK_PREFIX "GPLV"); + +/* Backlight */ +ASUS_HANDLE(lcd_switch, "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */ + "\\_SB.PCI0.ISA.EC0._Q10", /* A1x */ + "\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */ + "\\_SB.PCI0.PX40.EC0.Q10", /* M1A */ + "\\_SB.PCI0.LPCB.EC0._Q10", /* P30 */ + "\\_SB.PCI0.LPCB.EC0._Q0E", /* P30/P35 */ + "\\_SB.PCI0.PX40.Q10", /* S1x */ + "\\Q10"); /* A2x, L2D, L3D, M2E */ + +/* Display */ +ASUS_HANDLE(display_set, ASUS_HOTK_PREFIX "SDSP"); +ASUS_HANDLE(display_get, "\\_SB.PCI0.P0P1.VGA.GETD", /* A6B, A6K A6R A7D F3JM L4R M6R A3G + M6A M6V VX-1 V6J V6V W3Z */ + "\\_SB.PCI0.P0P2.VGA.GETD", /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V + S5A M5A z33A W1Jc W2V G1 */ + "\\_SB.PCI0.P0P3.VGA.GETD", /* A6V A6Q */ + "\\_SB.PCI0.P0PA.VGA.GETD", /* A6T, A6M */ + "\\_SB.PCI0.PCI1.VGAC.NMAP", /* L3C */ + "\\_SB.PCI0.VGA.GETD", /* Z96F */ + "\\ACTD", /* A2D */ + "\\ADVG", /* A4G Z71A W1N W5A W5F M2N M3N M5N M6N S1N S5N */ + "\\DNXT", /* P30 */ + "\\INFB", /* A2H D1 L2D L3D L3H L2E L5D L5C M1A M2E L4L W3V */ + "\\SSTE"); /* A3F A6F A3N A3L M6N W3N W6A */ + +ASUS_HANDLE(ls_switch, ASUS_HOTK_PREFIX "ALSC"); /* Z71A Z71V */ +ASUS_HANDLE(ls_level, ASUS_HOTK_PREFIX "ALSL"); /* Z71A Z71V */ + +/* GPS */ +/* R2H use different handle for GPS on/off */ +ASUS_HANDLE(gps_on, ASUS_HOTK_PREFIX "SDON"); /* R2H */ +ASUS_HANDLE(gps_off, ASUS_HOTK_PREFIX "SDOF"); /* R2H */ +ASUS_HANDLE(gps_status, ASUS_HOTK_PREFIX "GPST"); + +/* + * This is the main structure, we can use it to store anything interesting + * about the hotk device + */ +struct asus_hotk { + char *name; //laptop name + struct acpi_device *device; //the device we are in + acpi_handle handle; //the handle of the hotk device + char status; //status of the hotk, for LEDs, ... + u32 ledd_status; //status of the LED display + u8 light_level; //light sensor level + u8 light_switch; //light sensor switch value + u16 event_count[128]; //count for each event TODO make this better +}; + +/* + * This header is made available to allow proper configuration given model, + * revision number , ... this info cannot go in struct asus_hotk because it is + * available before the hotk + */ +static struct acpi_table_header *asus_info; + +/* The actual device the driver binds to */ +static struct asus_hotk *hotk; + +/* + * The hotkey driver declaration + */ +static const struct acpi_device_id asus_device_ids[] = { + {"ATK0100", 0}, + {"", 0}, +}; +MODULE_DEVICE_TABLE(acpi, asus_device_ids); + +static int asus_hotk_add(struct acpi_device *device); +static int asus_hotk_remove(struct acpi_device *device, int type); +static struct acpi_driver asus_hotk_driver = { + .name = ASUS_HOTK_NAME, + .class = ASUS_HOTK_CLASS, + .ids = asus_device_ids, + .ops = { + .add = asus_hotk_add, + .remove = asus_hotk_remove, + }, +}; + +/* The backlight device /sys/class/backlight */ +static struct backlight_device *asus_backlight_device; + +/* + * The backlight class declaration + */ +static int read_brightness(struct backlight_device *bd); +static int update_bl_status(struct backlight_device *bd); +static struct backlight_ops asusbl_ops = { + .get_brightness = read_brightness, + .update_status = update_bl_status, +}; + +/* These functions actually update the LED's, and are called from a + * workqueue. 
By doing this as separate work rather than when the LED
+ * subsystem asks, we avoid messing with the Asus ACPI stuff during a
+ * potentially bad time, such as a timer interrupt. */
+static struct workqueue_struct *led_workqueue;
+
+#define ASUS_LED(object, ledname) \
+ static void object##_led_set(struct led_classdev *led_cdev, \
+ enum led_brightness value); \
+ static void object##_led_update(struct work_struct *ignored); \
+ static int object##_led_wk; \
+ static DECLARE_WORK(object##_led_work, object##_led_update); \
+ static struct led_classdev object##_led = { \
+ .name = "asus::" ledname, \
+ .brightness_set = object##_led_set, \
+ }
+
+ASUS_LED(mled, "mail");
+ASUS_LED(tled, "touchpad");
+ASUS_LED(rled, "record");
+ASUS_LED(pled, "phone");
+ASUS_LED(gled, "gaming");
+
+/*
+ * This function evaluates an ACPI method, given an int as parameter. The
+ * method is searched within the scope of the handle, which can be NULL. The
+ * output of the method is written to output, which can also be NULL.
+ *
+ * Returns 0 if the write is successful, -1 otherwise.
+ */
+static int write_acpi_int(acpi_handle handle, const char *method, int val,
+ struct acpi_buffer *output)
+{
+ struct acpi_object_list params; //list of input parameters (an int here)
+ union acpi_object in_obj; //the only param we use
+ acpi_status status;
+
+ if (!handle)
+ return 0;
+
+ params.count = 1;
+ params.pointer = &in_obj;
+ in_obj.type = ACPI_TYPE_INTEGER;
+ in_obj.integer.value = val;
+
+ status = acpi_evaluate_object(handle, (char *)method, &params, output);
+ if (status == AE_OK)
+ return 0;
+ else
+ return -1;
+}
+
+static int read_wireless_status(int mask)
+{
+ unsigned long long status;
+ acpi_status rv = AE_OK;
+
+ if (!wireless_status_handle)
+ return (hotk->status & mask) ? 1 : 0;
+
+ rv = acpi_evaluate_integer(wireless_status_handle, NULL, NULL, &status);
+ if (ACPI_FAILURE(rv))
+ printk(ASUS_WARNING "Error reading Wireless status\n");
+ else
+ return (status & mask) ? 1 : 0;
+
+ return (hotk->status & mask) ? 1 : 0;
+}
+
+static int read_gps_status(void)
+{
+ unsigned long long status;
+ acpi_status rv = AE_OK;
+
+ rv = acpi_evaluate_integer(gps_status_handle, NULL, NULL, &status);
+ if (ACPI_FAILURE(rv))
+ printk(ASUS_WARNING "Error reading GPS status\n");
+ else
+ return status ? 1 : 0;
+
+ return (hotk->status & GPS_ON) ? 1 : 0;
+}
+
+/* Generic LED functions */
+static int read_status(int mask)
+{
+ /* There is a special method for both wireless devices */
+ if (mask == BT_ON || mask == WL_ON)
+ return read_wireless_status(mask);
+ else if (mask == GPS_ON)
+ return read_gps_status();
+
+ return (hotk->status & mask) ? 1 : 0;
+}
+
+static void write_status(acpi_handle handle, int out, int mask)
+{
+ hotk->status = (out) ? (hotk->status | mask) : (hotk->status & ~mask);
+
+ switch (mask) {
+ case MLED_ON:
+ out = !(out & 0x1);
+ break;
+ case GLED_ON:
+ out = (out & 0x1) + 1;
+ break;
+ case GPS_ON:
+ handle = (out) ? gps_on_handle : gps_off_handle;
+ out = 0x02;
+ break;
+ default:
+ out &= 0x1;
+ break;
+ }
+
+ if (write_acpi_int(handle, NULL, out, NULL))
+ printk(ASUS_WARNING " write failed %x\n", mask);
+}
+
+/* /sys/class/led handlers */
+#define ASUS_LED_HANDLER(object, mask) \
+ static void object##_led_set(struct led_classdev *led_cdev, \
+ enum led_brightness value) \
+ { \
+ object##_led_wk = (value > 0) ?
1 : 0; \ + queue_work(led_workqueue, &object##_led_work); \ + } \ + static void object##_led_update(struct work_struct *ignored) \ + { \ + int value = object##_led_wk; \ + write_status(object##_set_handle, value, (mask)); \ + } + +ASUS_LED_HANDLER(mled, MLED_ON); +ASUS_LED_HANDLER(pled, PLED_ON); +ASUS_LED_HANDLER(rled, RLED_ON); +ASUS_LED_HANDLER(tled, TLED_ON); +ASUS_LED_HANDLER(gled, GLED_ON); + +static int get_lcd_state(void) +{ + return read_status(LCD_ON); +} + +static int set_lcd_state(int value) +{ + int lcd = 0; + acpi_status status = 0; + + lcd = value ? 1 : 0; + + if (lcd == get_lcd_state()) + return 0; + + if (lcd_switch_handle) { + status = acpi_evaluate_object(lcd_switch_handle, + NULL, NULL, NULL); + + if (ACPI_FAILURE(status)) + printk(ASUS_WARNING "Error switching LCD\n"); + } + + write_status(NULL, lcd, LCD_ON); + return 0; +} + +static void lcd_blank(int blank) +{ + struct backlight_device *bd = asus_backlight_device; + + if (bd) { + bd->props.power = blank; + backlight_update_status(bd); + } +} + +static int read_brightness(struct backlight_device *bd) +{ + unsigned long long value; + acpi_status rv = AE_OK; + + rv = acpi_evaluate_integer(brightness_get_handle, NULL, NULL, &value); + if (ACPI_FAILURE(rv)) + printk(ASUS_WARNING "Error reading brightness\n"); + + return value; +} + +static int set_brightness(struct backlight_device *bd, int value) +{ + int ret = 0; + + value = (0 < value) ? ((15 < value) ? 15 : value) : 0; + /* 0 <= value <= 15 */ + + if (write_acpi_int(brightness_set_handle, NULL, value, NULL)) { + printk(ASUS_WARNING "Error changing brightness\n"); + ret = -EIO; + } + + return ret; +} + +static int update_bl_status(struct backlight_device *bd) +{ + int rv; + int value = bd->props.brightness; + + rv = set_brightness(bd, value); + if (rv) + return rv; + + value = (bd->props.power == FB_BLANK_UNBLANK) ? 1 : 0; + return set_lcd_state(value); +} + +/* + * Platform device handlers + */ + +/* + * We write our info in page, we begin at offset off and cannot write more + * than count bytes. We set eof to 1 if we handle those 2 values. We return the + * number of bytes written in page + */ +static ssize_t show_infos(struct device *dev, + struct device_attribute *attr, char *page) +{ + int len = 0; + unsigned long long temp; + char buf[16]; //enough for all info + acpi_status rv = AE_OK; + + /* + * We use the easy way, we don't care of off and count, so we don't set eof + * to 1 + */ + + len += sprintf(page, ASUS_HOTK_NAME " " ASUS_LAPTOP_VERSION "\n"); + len += sprintf(page + len, "Model reference : %s\n", hotk->name); + /* + * The SFUN method probably allows the original driver to get the list + * of features supported by a given model. For now, 0x0100 or 0x0800 + * bit signifies that the laptop is equipped with a Wi-Fi MiniPCI card. + * The significance of others is yet to be found. + */ + rv = acpi_evaluate_integer(hotk->handle, "SFUN", NULL, &temp); + if (!ACPI_FAILURE(rv)) + len += sprintf(page + len, "SFUN value : 0x%04x\n", + (uint) temp); + /* + * Another value for userspace: the ASYM method returns 0x02 for + * battery low and 0x04 for battery critical, its readings tend to be + * more accurate than those provided by _BST. + * Note: since not all the laptops provide this method, errors are + * silently ignored. 
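+ * (Silently, in the sense that the evaluation below only appends an
+ * "ASYM value" line to the output when it actually succeeded.)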
+ */ + rv = acpi_evaluate_integer(hotk->handle, "ASYM", NULL, &temp); + if (!ACPI_FAILURE(rv)) + len += sprintf(page + len, "ASYM value : 0x%04x\n", + (uint) temp); + if (asus_info) { + snprintf(buf, 16, "%d", asus_info->length); + len += sprintf(page + len, "DSDT length : %s\n", buf); + snprintf(buf, 16, "%d", asus_info->checksum); + len += sprintf(page + len, "DSDT checksum : %s\n", buf); + snprintf(buf, 16, "%d", asus_info->revision); + len += sprintf(page + len, "DSDT revision : %s\n", buf); + snprintf(buf, 7, "%s", asus_info->oem_id); + len += sprintf(page + len, "OEM id : %s\n", buf); + snprintf(buf, 9, "%s", asus_info->oem_table_id); + len += sprintf(page + len, "OEM table id : %s\n", buf); + snprintf(buf, 16, "%x", asus_info->oem_revision); + len += sprintf(page + len, "OEM revision : 0x%s\n", buf); + snprintf(buf, 5, "%s", asus_info->asl_compiler_id); + len += sprintf(page + len, "ASL comp vendor id : %s\n", buf); + snprintf(buf, 16, "%x", asus_info->asl_compiler_revision); + len += sprintf(page + len, "ASL comp revision : 0x%s\n", buf); + } + + return len; +} + +static int parse_arg(const char *buf, unsigned long count, int *val) +{ + if (!count) + return 0; + if (count > 31) + return -EINVAL; + if (sscanf(buf, "%i", val) != 1) + return -EINVAL; + return count; +} + +static ssize_t store_status(const char *buf, size_t count, + acpi_handle handle, int mask) +{ + int rv, value; + int out = 0; + + rv = parse_arg(buf, count, &value); + if (rv > 0) + out = value ? 1 : 0; + + write_status(handle, out, mask); + + return rv; +} + +/* + * LEDD display + */ +static ssize_t show_ledd(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "0x%08x\n", hotk->ledd_status); +} + +static ssize_t store_ledd(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int rv, value; + + rv = parse_arg(buf, count, &value); + if (rv > 0) { + if (write_acpi_int(ledd_set_handle, NULL, value, NULL)) + printk(ASUS_WARNING "LED display write failed\n"); + else + hotk->ledd_status = (u32) value; + } + return rv; +} + +/* + * WLAN + */ +static ssize_t show_wlan(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", read_status(WL_ON)); +} + +static ssize_t store_wlan(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + return store_status(buf, count, wl_switch_handle, WL_ON); +} + +/* + * Bluetooth + */ +static ssize_t show_bluetooth(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", read_status(BT_ON)); +} + +static ssize_t store_bluetooth(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + return store_status(buf, count, bt_switch_handle, BT_ON); +} + +/* + * Display + */ +static void set_display(int value) +{ + /* no sanity check needed for now */ + if (write_acpi_int(display_set_handle, NULL, value, NULL)) + printk(ASUS_WARNING "Error setting display\n"); + return; +} + +static int read_display(void) +{ + unsigned long long value = 0; + acpi_status rv = AE_OK; + + /* In most of the case, we know how to set the display, but sometime + we can't read it */ + if (display_get_handle) { + rv = acpi_evaluate_integer(display_get_handle, NULL, + NULL, &value); + if (ACPI_FAILURE(rv)) + printk(ASUS_WARNING "Error reading display status\n"); + } + + value &= 0x0F; /* needed for some models, shouldn't hurt others */ + + return value; +} + +/* + * Now, *this* one could be more user-friendly, but 
so far, no-one has + * complained. The significance of bits is the same as in store_disp() + */ +static ssize_t show_disp(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", read_display()); +} + +/* + * Experimental support for display switching. As of now: 1 should activate + * the LCD output, 2 should do for CRT, 4 for TV-Out and 8 for DVI. + * Any combination (bitwise) of these will suffice. I never actually tested 4 + * displays hooked up simultaneously, so be warned. See the acpi4asus README + * for more info. + */ +static ssize_t store_disp(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int rv, value; + + rv = parse_arg(buf, count, &value); + if (rv > 0) + set_display(value); + return rv; +} + +/* + * Light Sens + */ +static void set_light_sens_switch(int value) +{ + if (write_acpi_int(ls_switch_handle, NULL, value, NULL)) + printk(ASUS_WARNING "Error setting light sensor switch\n"); + hotk->light_switch = value; +} + +static ssize_t show_lssw(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", hotk->light_switch); +} + +static ssize_t store_lssw(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int rv, value; + + rv = parse_arg(buf, count, &value); + if (rv > 0) + set_light_sens_switch(value ? 1 : 0); + + return rv; +} + +static void set_light_sens_level(int value) +{ + if (write_acpi_int(ls_level_handle, NULL, value, NULL)) + printk(ASUS_WARNING "Error setting light sensor level\n"); + hotk->light_level = value; +} + +static ssize_t show_lslvl(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", hotk->light_level); +} + +static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int rv, value; + + rv = parse_arg(buf, count, &value); + if (rv > 0) { + value = (0 < value) ? ((15 < value) ? 15 : value) : 0; + /* 0 <= value <= 15 */ + set_light_sens_level(value); + } + + return rv; +} + +/* + * GPS + */ +static ssize_t show_gps(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", read_status(GPS_ON)); +} + +static ssize_t store_gps(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + return store_status(buf, count, NULL, GPS_ON); +} + +static void asus_hotk_notify(acpi_handle handle, u32 event, void *data) +{ + /* TODO Find a better way to handle events count. 
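+ * (The 128 counters below are indexed by event % 128, so two event
+ * codes that are 128 apart are folded into the same slot.)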
*/ + if (!hotk) + return; + + /* + * We need to tell the backlight device when the backlight power is + * switched + */ + if (event == ATKD_LCD_ON) { + write_status(NULL, 1, LCD_ON); + lcd_blank(FB_BLANK_UNBLANK); + } else if (event == ATKD_LCD_OFF) { + write_status(NULL, 0, LCD_ON); + lcd_blank(FB_BLANK_POWERDOWN); + } + + acpi_bus_generate_proc_event(hotk->device, event, + hotk->event_count[event % 128]++); + + return; +} + +#define ASUS_CREATE_DEVICE_ATTR(_name) \ + struct device_attribute dev_attr_##_name = { \ + .attr = { \ + .name = __stringify(_name), \ + .mode = 0 }, \ + .show = NULL, \ + .store = NULL, \ + } + +#define ASUS_SET_DEVICE_ATTR(_name, _mode, _show, _store) \ + do { \ + dev_attr_##_name.attr.mode = _mode; \ + dev_attr_##_name.show = _show; \ + dev_attr_##_name.store = _store; \ + } while(0) + +static ASUS_CREATE_DEVICE_ATTR(infos); +static ASUS_CREATE_DEVICE_ATTR(wlan); +static ASUS_CREATE_DEVICE_ATTR(bluetooth); +static ASUS_CREATE_DEVICE_ATTR(display); +static ASUS_CREATE_DEVICE_ATTR(ledd); +static ASUS_CREATE_DEVICE_ATTR(ls_switch); +static ASUS_CREATE_DEVICE_ATTR(ls_level); +static ASUS_CREATE_DEVICE_ATTR(gps); + +static struct attribute *asuspf_attributes[] = { + &dev_attr_infos.attr, + &dev_attr_wlan.attr, + &dev_attr_bluetooth.attr, + &dev_attr_display.attr, + &dev_attr_ledd.attr, + &dev_attr_ls_switch.attr, + &dev_attr_ls_level.attr, + &dev_attr_gps.attr, + NULL +}; + +static struct attribute_group asuspf_attribute_group = { + .attrs = asuspf_attributes +}; + +static struct platform_driver asuspf_driver = { + .driver = { + .name = ASUS_HOTK_FILE, + .owner = THIS_MODULE, + } +}; + +static struct platform_device *asuspf_device; + +static void asus_hotk_add_fs(void) +{ + ASUS_SET_DEVICE_ATTR(infos, 0444, show_infos, NULL); + + if (wl_switch_handle) + ASUS_SET_DEVICE_ATTR(wlan, 0644, show_wlan, store_wlan); + + if (bt_switch_handle) + ASUS_SET_DEVICE_ATTR(bluetooth, 0644, + show_bluetooth, store_bluetooth); + + if (display_set_handle && display_get_handle) + ASUS_SET_DEVICE_ATTR(display, 0644, show_disp, store_disp); + else if (display_set_handle) + ASUS_SET_DEVICE_ATTR(display, 0200, NULL, store_disp); + + if (ledd_set_handle) + ASUS_SET_DEVICE_ATTR(ledd, 0644, show_ledd, store_ledd); + + if (ls_switch_handle && ls_level_handle) { + ASUS_SET_DEVICE_ATTR(ls_level, 0644, show_lslvl, store_lslvl); + ASUS_SET_DEVICE_ATTR(ls_switch, 0644, show_lssw, store_lssw); + } + + if (gps_status_handle && gps_on_handle && gps_off_handle) + ASUS_SET_DEVICE_ATTR(gps, 0644, show_gps, store_gps); +} + +static int asus_handle_init(char *name, acpi_handle * handle, + char **paths, int num_paths) +{ + int i; + acpi_status status; + + for (i = 0; i < num_paths; i++) { + status = acpi_get_handle(NULL, paths[i], handle); + if (ACPI_SUCCESS(status)) + return 0; + } + + *handle = NULL; + return -ENODEV; +} + +#define ASUS_HANDLE_INIT(object) \ + asus_handle_init(#object, &object##_handle, object##_paths, \ + ARRAY_SIZE(object##_paths)) + +/* + * This function is used to initialize the hotk with right values. 
In this
+ * method, we can make all the detection we want, and modify the hotk struct
+ */
+static int asus_hotk_get_info(void)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *model = NULL;
+ unsigned long long bsts_result, hwrs_result;
+ char *string = NULL;
+ acpi_status status;
+
+ /*
+ * Get DSDT headers early enough to allow for differentiating between
+ * models, but late enough to allow acpi_bus_register_driver() to fail
+ * before doing anything ACPI-specific. Should we encounter a machine
+ * which needs special handling (i.e. its hotkey device has a different
+ * HID), this bit will be moved. A global variable asus_info contains
+ * the DSDT header.
+ */
+ status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info);
+ if (ACPI_FAILURE(status))
+ printk(ASUS_WARNING "Couldn't get the DSDT table header\n");
+
+ /* We have to write 0 on init this far for all ASUS models */
+ if (write_acpi_int(hotk->handle, "INIT", 0, &buffer)) {
+ printk(ASUS_ERR "Hotkey initialization failed\n");
+ return -ENODEV;
+ }
+
+ /* This needs to be called for some laptops to init properly */
+ status =
+ acpi_evaluate_integer(hotk->handle, "BSTS", NULL, &bsts_result);
+ if (ACPI_FAILURE(status))
+ printk(ASUS_WARNING "Error calling BSTS\n");
+ else if (bsts_result)
+ printk(ASUS_NOTICE "BSTS called, 0x%02x returned\n",
+ (uint) bsts_result);
+
+ /* This too ... */
+ write_acpi_int(hotk->handle, "CWAP", wapf, NULL);
+
+ /*
+ * Try to match the object returned by INIT to the specific model.
+ * Handle every possible object (or the lack thereof) the DSDT
+ * writers might throw at us. When in trouble, we pass NULL to
+ * asus_model_match() and try something completely different.
+ */
+ if (buffer.pointer) {
+ model = buffer.pointer;
+ switch (model->type) {
+ case ACPI_TYPE_STRING:
+ string = model->string.pointer;
+ break;
+ case ACPI_TYPE_BUFFER:
+ string = model->buffer.pointer;
+ break;
+ default:
+ string = "";
+ break;
+ }
+ }
+ hotk->name = kstrdup(string, GFP_KERNEL);
+ if (!hotk->name)
+ return -ENOMEM;
+
+ if (*string)
+ printk(ASUS_NOTICE " %s model detected\n", string);
+
+ ASUS_HANDLE_INIT(mled_set);
+ ASUS_HANDLE_INIT(tled_set);
+ ASUS_HANDLE_INIT(rled_set);
+ ASUS_HANDLE_INIT(pled_set);
+ ASUS_HANDLE_INIT(gled_set);
+
+ ASUS_HANDLE_INIT(ledd_set);
+
+ /*
+ * The HWRS method returns information about the hardware:
+ * the 0x80 bit is for WLAN, 0x100 for Bluetooth.
+ * The significance of the other bits is yet to be found.
+ * If we don't find the method, we assume the devices are present.
+ */
+ status =
+ acpi_evaluate_integer(hotk->handle, "HRWS", NULL, &hwrs_result);
+ if (ACPI_FAILURE(status))
+ hwrs_result = WL_HWRS | BT_HWRS;
+
+ if (hwrs_result & WL_HWRS)
+ ASUS_HANDLE_INIT(wl_switch);
+ if (hwrs_result & BT_HWRS)
+ ASUS_HANDLE_INIT(bt_switch);
+
+ ASUS_HANDLE_INIT(wireless_status);
+
+ ASUS_HANDLE_INIT(brightness_set);
+ ASUS_HANDLE_INIT(brightness_get);
+
+ ASUS_HANDLE_INIT(lcd_switch);
+
+ ASUS_HANDLE_INIT(display_set);
+ ASUS_HANDLE_INIT(display_get);
+
+ /* There are a lot of models with "ALSL", but only a few have
+ a real light sensor, so we need to check it.
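+ (ASUS_HANDLE_INIT() returns 0 only when the handle was resolved, so
+ ls_level/ALSL is only probed behind a working ALSC switch.)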
*/ + if (!ASUS_HANDLE_INIT(ls_switch)) + ASUS_HANDLE_INIT(ls_level); + + ASUS_HANDLE_INIT(gps_on); + ASUS_HANDLE_INIT(gps_off); + ASUS_HANDLE_INIT(gps_status); + + kfree(model); + + return AE_OK; +} + +static int asus_hotk_check(void) +{ + int result = 0; + + result = acpi_bus_get_status(hotk->device); + if (result) + return result; + + if (hotk->device->status.present) { + result = asus_hotk_get_info(); + } else { + printk(ASUS_ERR "Hotkey device not present, aborting\n"); + return -EINVAL; + } + + return result; +} + +static int asus_hotk_found; + +static int asus_hotk_add(struct acpi_device *device) +{ + acpi_status status = AE_OK; + int result; + + if (!device) + return -EINVAL; + + printk(ASUS_NOTICE "Asus Laptop Support version %s\n", + ASUS_LAPTOP_VERSION); + + hotk = kzalloc(sizeof(struct asus_hotk), GFP_KERNEL); + if (!hotk) + return -ENOMEM; + + hotk->handle = device->handle; + strcpy(acpi_device_name(device), ASUS_HOTK_DEVICE_NAME); + strcpy(acpi_device_class(device), ASUS_HOTK_CLASS); + device->driver_data = hotk; + hotk->device = device; + + result = asus_hotk_check(); + if (result) + goto end; + + asus_hotk_add_fs(); + + /* + * We install the handler, it will receive the hotk in parameter, so, we + * could add other data to the hotk struct + */ + status = acpi_install_notify_handler(hotk->handle, ACPI_ALL_NOTIFY, + asus_hotk_notify, hotk); + if (ACPI_FAILURE(status)) + printk(ASUS_ERR "Error installing notify handler\n"); + + asus_hotk_found = 1; + + /* WLED and BLED are on by default */ + write_status(bt_switch_handle, 1, BT_ON); + write_status(wl_switch_handle, 1, WL_ON); + + /* If the h/w switch is off, we need to check the real status */ + write_status(NULL, read_status(BT_ON), BT_ON); + write_status(NULL, read_status(WL_ON), WL_ON); + + /* LCD Backlight is on by default */ + write_status(NULL, 1, LCD_ON); + + /* LED display is off by default */ + hotk->ledd_status = 0xFFF; + + /* Set initial values of light sensor and level */ + hotk->light_switch = 1; /* Default to light sensor disabled */ + hotk->light_level = 0; /* level 5 for sensor sensitivity */ + + if (ls_switch_handle) + set_light_sens_switch(hotk->light_switch); + + if (ls_level_handle) + set_light_sens_level(hotk->light_level); + + /* GPS is on by default */ + write_status(NULL, 1, GPS_ON); + + end: + if (result) { + kfree(hotk->name); + kfree(hotk); + } + + return result; +} + +static int asus_hotk_remove(struct acpi_device *device, int type) +{ + acpi_status status = 0; + + if (!device || !acpi_driver_data(device)) + return -EINVAL; + + status = acpi_remove_notify_handler(hotk->handle, ACPI_ALL_NOTIFY, + asus_hotk_notify); + if (ACPI_FAILURE(status)) + printk(ASUS_ERR "Error removing notify handler\n"); + + kfree(hotk->name); + kfree(hotk); + + return 0; +} + +static void asus_backlight_exit(void) +{ + if (asus_backlight_device) + backlight_device_unregister(asus_backlight_device); +} + +#define ASUS_LED_UNREGISTER(object) \ + if (object##_led.dev) \ + led_classdev_unregister(&object##_led) + +static void asus_led_exit(void) +{ + destroy_workqueue(led_workqueue); + ASUS_LED_UNREGISTER(mled); + ASUS_LED_UNREGISTER(tled); + ASUS_LED_UNREGISTER(pled); + ASUS_LED_UNREGISTER(rled); + ASUS_LED_UNREGISTER(gled); +} + +static void __exit asus_laptop_exit(void) +{ + asus_backlight_exit(); + asus_led_exit(); + + acpi_bus_unregister_driver(&asus_hotk_driver); + sysfs_remove_group(&asuspf_device->dev.kobj, &asuspf_attribute_group); + platform_device_unregister(asuspf_device); + platform_driver_unregister(&asuspf_driver); 
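+ /* (the sysfs group, platform device and platform driver above are
+ torn down in the reverse order of their creation in asus_laptop_init) */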
+} + +static int asus_backlight_init(struct device *dev) +{ + struct backlight_device *bd; + + if (brightness_set_handle && lcd_switch_handle) { + bd = backlight_device_register(ASUS_HOTK_FILE, dev, + NULL, &asusbl_ops); + if (IS_ERR(bd)) { + printk(ASUS_ERR + "Could not register asus backlight device\n"); + asus_backlight_device = NULL; + return PTR_ERR(bd); + } + + asus_backlight_device = bd; + + bd->props.max_brightness = 15; + bd->props.brightness = read_brightness(NULL); + bd->props.power = FB_BLANK_UNBLANK; + backlight_update_status(bd); + } + return 0; +} + +static int asus_led_register(acpi_handle handle, + struct led_classdev *ldev, struct device *dev) +{ + if (!handle) + return 0; + + return led_classdev_register(dev, ldev); +} + +#define ASUS_LED_REGISTER(object, device) \ + asus_led_register(object##_set_handle, &object##_led, device) + +static int asus_led_init(struct device *dev) +{ + int rv; + + rv = ASUS_LED_REGISTER(mled, dev); + if (rv) + goto out; + + rv = ASUS_LED_REGISTER(tled, dev); + if (rv) + goto out1; + + rv = ASUS_LED_REGISTER(rled, dev); + if (rv) + goto out2; + + rv = ASUS_LED_REGISTER(pled, dev); + if (rv) + goto out3; + + rv = ASUS_LED_REGISTER(gled, dev); + if (rv) + goto out4; + + led_workqueue = create_singlethread_workqueue("led_workqueue"); + if (!led_workqueue) + goto out5; + + return 0; +out5: + rv = -ENOMEM; + ASUS_LED_UNREGISTER(gled); +out4: + ASUS_LED_UNREGISTER(pled); +out3: + ASUS_LED_UNREGISTER(rled); +out2: + ASUS_LED_UNREGISTER(tled); +out1: + ASUS_LED_UNREGISTER(mled); +out: + return rv; +} + +static int __init asus_laptop_init(void) +{ + struct device *dev; + int result; + + if (acpi_disabled) + return -ENODEV; + + result = acpi_bus_register_driver(&asus_hotk_driver); + if (result < 0) + return result; + + /* + * This is a bit of a kludge. We only want this module loaded + * for ASUS systems, but there's currently no way to probe the + * ACPI namespace for ASUS HIDs. So we just return failure if + * we didn't find one, which will cause the module to be + * unloaded. 
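+ * (asus_hotk_found is set only by asus_hotk_add(), i.e. only once
+ * acpi_bus_register_driver() has actually bound to an ATK0100 device.)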
+ */ + if (!asus_hotk_found) { + acpi_bus_unregister_driver(&asus_hotk_driver); + return -ENODEV; + } + + dev = acpi_get_physical_device(hotk->device->handle); + + if (!acpi_video_backlight_support()) { + result = asus_backlight_init(dev); + if (result) + goto fail_backlight; + } else + printk(ASUS_INFO "Brightness ignored, must be controlled by " + "ACPI video driver\n"); + + result = asus_led_init(dev); + if (result) + goto fail_led; + + /* Register platform stuff */ + result = platform_driver_register(&asuspf_driver); + if (result) + goto fail_platform_driver; + + asuspf_device = platform_device_alloc(ASUS_HOTK_FILE, -1); + if (!asuspf_device) { + result = -ENOMEM; + goto fail_platform_device1; + } + + result = platform_device_add(asuspf_device); + if (result) + goto fail_platform_device2; + + result = sysfs_create_group(&asuspf_device->dev.kobj, + &asuspf_attribute_group); + if (result) + goto fail_sysfs; + + return 0; + + fail_sysfs: + platform_device_del(asuspf_device); + + fail_platform_device2: + platform_device_put(asuspf_device); + + fail_platform_device1: + platform_driver_unregister(&asuspf_driver); + + fail_platform_driver: + asus_led_exit(); + + fail_led: + asus_backlight_exit(); + + fail_backlight: + + return result; +} + +module_init(asus_laptop_init); +module_exit(asus_laptop_exit); diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c new file mode 100644 index 0000000..bf5e4d0 --- /dev/null +++ b/drivers/misc/atmel-ssc.c @@ -0,0 +1,175 @@ +/* + * Atmel SSC driver + * + * Copyright (C) 2007 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/platform_device.h> +#include <linux/list.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/spinlock.h> +#include <linux/atmel-ssc.h> + +/* Serialize access to ssc_list and user count */ +static DEFINE_SPINLOCK(user_lock); +static LIST_HEAD(ssc_list); + +struct ssc_device *ssc_request(unsigned int ssc_num) +{ + int ssc_valid = 0; + struct ssc_device *ssc; + + spin_lock(&user_lock); + list_for_each_entry(ssc, &ssc_list, list) { + if (ssc->pdev->id == ssc_num) { + ssc_valid = 1; + break; + } + } + + if (!ssc_valid) { + spin_unlock(&user_lock); + dev_dbg(&ssc->pdev->dev, "could not find requested device\n"); + return ERR_PTR(-ENODEV); + } + + if (ssc->user) { + spin_unlock(&user_lock); + dev_dbg(&ssc->pdev->dev, "module busy\n"); + return ERR_PTR(-EBUSY); + } + ssc->user++; + spin_unlock(&user_lock); + + clk_enable(ssc->clk); + + return ssc; +} +EXPORT_SYMBOL(ssc_request); + +void ssc_free(struct ssc_device *ssc) +{ + spin_lock(&user_lock); + if (ssc->user) { + ssc->user--; + clk_disable(ssc->clk); + } else { + dev_dbg(&ssc->pdev->dev, "device already free\n"); + } + spin_unlock(&user_lock); +} +EXPORT_SYMBOL(ssc_free); + +static int __init ssc_probe(struct platform_device *pdev) +{ + int retval = 0; + struct resource *regs; + struct ssc_device *ssc; + + ssc = kzalloc(sizeof(struct ssc_device), GFP_KERNEL); + if (!ssc) { + dev_dbg(&pdev->dev, "out of memory\n"); + retval = -ENOMEM; + goto out; + } + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) { + dev_dbg(&pdev->dev, "no mmio resource defined\n"); + retval = -ENXIO; + goto out_free; + } + + ssc->clk = clk_get(&pdev->dev, "pclk"); + if (IS_ERR(ssc->clk)) { + dev_dbg(&pdev->dev, "no pclk clock defined\n"); + retval = -ENXIO; + goto 
out_free; + } + + ssc->pdev = pdev; + ssc->regs = ioremap(regs->start, regs->end - regs->start + 1); + if (!ssc->regs) { + dev_dbg(&pdev->dev, "ioremap failed\n"); + retval = -EINVAL; + goto out_clk; + } + + /* disable all interrupts */ + clk_enable(ssc->clk); + ssc_writel(ssc->regs, IDR, ~0UL); + ssc_readl(ssc->regs, SR); + clk_disable(ssc->clk); + + ssc->irq = platform_get_irq(pdev, 0); + if (!ssc->irq) { + dev_dbg(&pdev->dev, "could not get irq\n"); + retval = -ENXIO; + goto out_unmap; + } + + spin_lock(&user_lock); + list_add_tail(&ssc->list, &ssc_list); + spin_unlock(&user_lock); + + platform_set_drvdata(pdev, ssc); + + dev_info(&pdev->dev, "Atmel SSC device at 0x%p (irq %d)\n", + ssc->regs, ssc->irq); + + goto out; + +out_unmap: + iounmap(ssc->regs); +out_clk: + clk_put(ssc->clk); +out_free: + kfree(ssc); +out: + return retval; +} + +static int __devexit ssc_remove(struct platform_device *pdev) +{ + struct ssc_device *ssc = platform_get_drvdata(pdev); + + spin_lock(&user_lock); + iounmap(ssc->regs); + clk_put(ssc->clk); + list_del(&ssc->list); + kfree(ssc); + spin_unlock(&user_lock); + + return 0; +} + +static struct platform_driver ssc_driver = { + .remove = __devexit_p(ssc_remove), + .driver = { + .name = "ssc", + .owner = THIS_MODULE, + }, +}; + +static int __init ssc_init(void) +{ + return platform_driver_probe(&ssc_driver, ssc_probe); +} +module_init(ssc_init); + +static void __exit ssc_exit(void) +{ + platform_driver_unregister(&ssc_driver); +} +module_exit(ssc_exit); + +MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>"); +MODULE_DESCRIPTION("SSC driver for Atmel AVR32 and AT91"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:ssc"); diff --git a/drivers/misc/atmel_pwm.c b/drivers/misc/atmel_pwm.c new file mode 100644 index 0000000..6aa5294 --- /dev/null +++ b/drivers/misc/atmel_pwm.c @@ -0,0 +1,409 @@ +#include <linux/module.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/atmel_pwm.h> + + +/* + * This is a simple driver for the PWM controller found in various newer + * Atmel SOCs, including the AVR32 series and the AT91sam9263. + * + * Chips with current Linux ports have only 4 PWM channels, out of max 32. + * AT32UC3A and AT32UC3B chips have 7 channels (but currently no Linux). + * Docs are inconsistent about the width of the channel counter registers; + * it's at least 16 bits, but several places say 20 bits. 
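+ *
+ * As an illustrative bound only (66 MHz is a made-up MCK figure, not a
+ * datasheet value): a 16-bit channel counter clocked straight from MCK
+ * would roll over after 65536 cycles, i.e. roughly every millisecond,
+ * while a 20-bit counter would stretch that sixteen-fold.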
+ */ +#define PWM_NCHAN 4 /* max 32 */ + +struct pwm { + spinlock_t lock; + struct platform_device *pdev; + u32 mask; + int irq; + void __iomem *base; + struct clk *clk; + struct pwm_channel *channel[PWM_NCHAN]; + void (*handler[PWM_NCHAN])(struct pwm_channel *); +}; + + +/* global PWM controller registers */ +#define PWM_MR 0x00 +#define PWM_ENA 0x04 +#define PWM_DIS 0x08 +#define PWM_SR 0x0c +#define PWM_IER 0x10 +#define PWM_IDR 0x14 +#define PWM_IMR 0x18 +#define PWM_ISR 0x1c + +static inline void pwm_writel(const struct pwm *p, unsigned offset, u32 val) +{ + __raw_writel(val, p->base + offset); +} + +static inline u32 pwm_readl(const struct pwm *p, unsigned offset) +{ + return __raw_readl(p->base + offset); +} + +static inline void __iomem *pwmc_regs(const struct pwm *p, int index) +{ + return p->base + 0x200 + index * 0x20; +} + +static struct pwm *pwm; + +static void pwm_dumpregs(struct pwm_channel *ch, char *tag) +{ + struct device *dev = &pwm->pdev->dev; + + dev_dbg(dev, "%s: mr %08x, sr %08x, imr %08x\n", + tag, + pwm_readl(pwm, PWM_MR), + pwm_readl(pwm, PWM_SR), + pwm_readl(pwm, PWM_IMR)); + dev_dbg(dev, + "pwm ch%d - mr %08x, dty %u, prd %u, cnt %u\n", + ch->index, + pwm_channel_readl(ch, PWM_CMR), + pwm_channel_readl(ch, PWM_CDTY), + pwm_channel_readl(ch, PWM_CPRD), + pwm_channel_readl(ch, PWM_CCNT)); +} + + +/** + * pwm_channel_alloc - allocate an unused PWM channel + * @index: identifies the channel + * @ch: structure to be initialized + * + * Drivers allocate PWM channels according to the board's wiring, and + * matching board-specific setup code. Returns zero or negative errno. + */ +int pwm_channel_alloc(int index, struct pwm_channel *ch) +{ + unsigned long flags; + int status = 0; + + /* insist on PWM init, with this signal pinned out */ + if (!pwm || !(pwm->mask & 1 << index)) + return -ENODEV; + + if (index < 0 || index >= PWM_NCHAN || !ch) + return -EINVAL; + memset(ch, 0, sizeof *ch); + + spin_lock_irqsave(&pwm->lock, flags); + if (pwm->channel[index]) + status = -EBUSY; + else { + clk_enable(pwm->clk); + + ch->regs = pwmc_regs(pwm, index); + ch->index = index; + + /* REVISIT: ap7000 seems to go 2x as fast as we expect!! */ + ch->mck = clk_get_rate(pwm->clk); + + pwm->channel[index] = ch; + pwm->handler[index] = NULL; + + /* channel and irq are always disabled when we return */ + pwm_writel(pwm, PWM_DIS, 1 << index); + pwm_writel(pwm, PWM_IDR, 1 << index); + } + spin_unlock_irqrestore(&pwm->lock, flags); + return status; +} +EXPORT_SYMBOL(pwm_channel_alloc); + +static int pwmcheck(struct pwm_channel *ch) +{ + int index; + + if (!pwm) + return -ENODEV; + if (!ch) + return -EINVAL; + index = ch->index; + if (index < 0 || index >= PWM_NCHAN || pwm->channel[index] != ch) + return -EINVAL; + + return index; +} + +/** + * pwm_channel_free - release a previously allocated channel + * @ch: the channel being released + * + * The channel is completely shut down (counter and IRQ disabled), + * and made available for re-use. Returns zero, or negative errno. 
+ */ +int pwm_channel_free(struct pwm_channel *ch) +{ + unsigned long flags; + int t; + + spin_lock_irqsave(&pwm->lock, flags); + t = pwmcheck(ch); + if (t >= 0) { + pwm->channel[t] = NULL; + pwm->handler[t] = NULL; + + /* channel and irq are always disabled when we return */ + pwm_writel(pwm, PWM_DIS, 1 << t); + pwm_writel(pwm, PWM_IDR, 1 << t); + + clk_disable(pwm->clk); + t = 0; + } + spin_unlock_irqrestore(&pwm->lock, flags); + return t; +} +EXPORT_SYMBOL(pwm_channel_free); + +int __pwm_channel_onoff(struct pwm_channel *ch, int enabled) +{ + unsigned long flags; + int t; + + /* OMITTED FUNCTIONALITY: starting several channels in synch */ + + spin_lock_irqsave(&pwm->lock, flags); + t = pwmcheck(ch); + if (t >= 0) { + pwm_writel(pwm, enabled ? PWM_ENA : PWM_DIS, 1 << t); + t = 0; + pwm_dumpregs(ch, enabled ? "enable" : "disable"); + } + spin_unlock_irqrestore(&pwm->lock, flags); + + return t; +} +EXPORT_SYMBOL(__pwm_channel_onoff); + +/** + * pwm_clk_alloc - allocate and configure CLKA or CLKB + * @prescale: from 0..10, the power of two used to divide MCK + * @div: from 1..255, the linear divisor to use + * + * Returns PWM_CPR_CLKA, PWM_CPR_CLKB, or negative errno. The allocated + * clock will run with a period of (2^prescale * div) / MCK, or twice as + * long if center aligned PWM output is used. The clock must later be + * deconfigured using pwm_clk_free(). + */ +int pwm_clk_alloc(unsigned prescale, unsigned div) +{ + unsigned long flags; + u32 mr; + u32 val = (prescale << 8) | div; + int ret = -EBUSY; + + if (prescale >= 10 || div == 0 || div > 255) + return -EINVAL; + + spin_lock_irqsave(&pwm->lock, flags); + mr = pwm_readl(pwm, PWM_MR); + if ((mr & 0xffff) == 0) { + mr |= val; + ret = PWM_CPR_CLKA; + } else if ((mr & (0xffff << 16)) == 0) { + mr |= val << 16; + ret = PWM_CPR_CLKB; + } + if (ret > 0) + pwm_writel(pwm, PWM_MR, mr); + spin_unlock_irqrestore(&pwm->lock, flags); + return ret; +} +EXPORT_SYMBOL(pwm_clk_alloc); + +/** + * pwm_clk_free - deconfigure and release CLKA or CLKB + * + * Reverses the effect of pwm_clk_alloc(). + */ +void pwm_clk_free(unsigned clk) +{ + unsigned long flags; + u32 mr; + + spin_lock_irqsave(&pwm->lock, flags); + mr = pwm_readl(pwm, PWM_MR); + if (clk == PWM_CPR_CLKA) + pwm_writel(pwm, PWM_MR, mr & ~(0xffff << 0)); + if (clk == PWM_CPR_CLKB) + pwm_writel(pwm, PWM_MR, mr & ~(0xffff << 16)); + spin_unlock_irqrestore(&pwm->lock, flags); +} +EXPORT_SYMBOL(pwm_clk_free); + +/** + * pwm_channel_handler - manage channel's IRQ handler + * @ch: the channel + * @handler: the handler to use, possibly NULL + * + * If the handler is non-null, the handler will be called after every + * period of this PWM channel. If the handler is null, this channel + * won't generate an IRQ. + */ +int pwm_channel_handler(struct pwm_channel *ch, + void (*handler)(struct pwm_channel *ch)) +{ + unsigned long flags; + int t; + + spin_lock_irqsave(&pwm->lock, flags); + t = pwmcheck(ch); + if (t >= 0) { + pwm->handler[t] = handler; + pwm_writel(pwm, handler ? 
PWM_IER : PWM_IDR, 1 << t); + t = 0; + } + spin_unlock_irqrestore(&pwm->lock, flags); + + return t; +} +EXPORT_SYMBOL(pwm_channel_handler); + +static irqreturn_t pwm_irq(int id, void *_pwm) +{ + struct pwm *p = _pwm; + irqreturn_t handled = IRQ_NONE; + u32 irqstat; + int index; + + spin_lock(&p->lock); + + /* ack irqs, then handle them */ + irqstat = pwm_readl(pwm, PWM_ISR); + + while (irqstat) { + struct pwm_channel *ch; + void (*handler)(struct pwm_channel *ch); + + index = ffs(irqstat) - 1; + irqstat &= ~(1 << index); + ch = pwm->channel[index]; + handler = pwm->handler[index]; + if (handler && ch) { + spin_unlock(&p->lock); + handler(ch); + spin_lock(&p->lock); + handled = IRQ_HANDLED; + } + } + + spin_unlock(&p->lock); + return handled; +} + +static int __init pwm_probe(struct platform_device *pdev) +{ + struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + int irq = platform_get_irq(pdev, 0); + u32 *mp = pdev->dev.platform_data; + struct pwm *p; + int status = -EIO; + + if (pwm) + return -EBUSY; + if (!r || irq < 0 || !mp || !*mp) + return -ENODEV; + if (*mp & ~((1<<PWM_NCHAN)-1)) { + dev_warn(&pdev->dev, "mask 0x%x ... more than %d channels\n", + *mp, PWM_NCHAN); + return -EINVAL; + } + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return -ENOMEM; + + spin_lock_init(&p->lock); + p->pdev = pdev; + p->mask = *mp; + p->irq = irq; + p->base = ioremap(r->start, r->end - r->start + 1); + if (!p->base) + goto fail; + p->clk = clk_get(&pdev->dev, "pwm_clk"); + if (IS_ERR(p->clk)) { + status = PTR_ERR(p->clk); + p->clk = NULL; + goto fail; + } + + status = request_irq(irq, pwm_irq, 0, pdev->name, p); + if (status < 0) + goto fail; + + pwm = p; + platform_set_drvdata(pdev, p); + + return 0; + +fail: + if (p->clk) + clk_put(p->clk); + if (p->base) + iounmap(p->base); + + kfree(p); + return status; +} + +static int __exit pwm_remove(struct platform_device *pdev) +{ + struct pwm *p = platform_get_drvdata(pdev); + + if (p != pwm) + return -EINVAL; + + clk_enable(pwm->clk); + pwm_writel(pwm, PWM_DIS, (1 << PWM_NCHAN) - 1); + pwm_writel(pwm, PWM_IDR, (1 << PWM_NCHAN) - 1); + clk_disable(pwm->clk); + + pwm = NULL; + + free_irq(p->irq, p); + clk_put(p->clk); + iounmap(p->base); + kfree(p); + + return 0; +} + +static struct platform_driver atmel_pwm_driver = { + .driver = { + .name = "atmel_pwm", + .owner = THIS_MODULE, + }, + .remove = __exit_p(pwm_remove), + + /* NOTE: PWM can keep running in AVR32 "idle" and "frozen" states; + * and all AT91sam9263 states, albeit at reduced clock rate if + * MCK becomes the slow clock (i.e. what Linux labels STR). 
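+ * (Note that pwm_remove() above masks the IRQs and disables every
+ * channel explicitly rather than relying on a low-power state to
+ * stop the block.)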
+ */ +}; + +static int __init pwm_init(void) +{ + return platform_driver_probe(&atmel_pwm_driver, pwm_probe); +} +module_init(pwm_init); + +static void __exit pwm_exit(void) +{ + platform_driver_unregister(&atmel_pwm_driver); +} +module_exit(pwm_exit); + +MODULE_DESCRIPTION("Driver for AT32/AT91 PWM module"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:atmel_pwm"); diff --git a/drivers/misc/atmel_tclib.c b/drivers/misc/atmel_tclib.c new file mode 100644 index 0000000..05dc8a3 --- /dev/null +++ b/drivers/misc/atmel_tclib.c @@ -0,0 +1,161 @@ +#include <linux/atmel_tc.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> + +/* Number of bytes to reserve for the iomem resource */ +#define ATMEL_TC_IOMEM_SIZE 256 + + +/* + * This is a thin library to solve the problem of how to portably allocate + * one of the TC blocks. For simplicity, it doesn't currently expect to + * share individual timers between different drivers. + */ + +#if defined(CONFIG_AVR32) +/* AVR32 has these divide PBB */ +const u8 atmel_tc_divisors[5] = { 0, 4, 8, 16, 32, }; +EXPORT_SYMBOL(atmel_tc_divisors); + +#elif defined(CONFIG_ARCH_AT91) +/* AT91 has these divide MCK */ +const u8 atmel_tc_divisors[5] = { 2, 8, 32, 128, 0, }; +EXPORT_SYMBOL(atmel_tc_divisors); + +#endif + +static DEFINE_SPINLOCK(tc_list_lock); +static LIST_HEAD(tc_list); + +/** + * atmel_tc_alloc - allocate a specified TC block + * @block: which block to allocate + * @name: name to be associated with the iomem resource + * + * Caller allocates a block. If it is available, a pointer to a + * pre-initialized struct atmel_tc is returned. The caller can access + * the registers directly through the "regs" field. + */ +struct atmel_tc *atmel_tc_alloc(unsigned block, const char *name) +{ + struct atmel_tc *tc; + struct platform_device *pdev = NULL; + struct resource *r; + + spin_lock(&tc_list_lock); + list_for_each_entry(tc, &tc_list, node) { + if (tc->pdev->id == block) { + pdev = tc->pdev; + break; + } + } + + if (!pdev || tc->iomem) + goto fail; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + r = request_mem_region(r->start, ATMEL_TC_IOMEM_SIZE, name); + if (!r) + goto fail; + + tc->regs = ioremap(r->start, ATMEL_TC_IOMEM_SIZE); + if (!tc->regs) + goto fail_ioremap; + + tc->iomem = r; + +out: + spin_unlock(&tc_list_lock); + return tc; + +fail_ioremap: + release_resource(r); +fail: + tc = NULL; + goto out; +} +EXPORT_SYMBOL_GPL(atmel_tc_alloc); + +/** + * atmel_tc_free - release a specified TC block + * @tc: Timer/counter block that was returned by atmel_tc_alloc() + * + * This reverses the effect of atmel_tc_alloc(), unmapping the I/O + * registers, invalidating the resource returned by that routine and + * making the TC available to other drivers. 
+ */ +void atmel_tc_free(struct atmel_tc *tc) +{ + spin_lock(&tc_list_lock); + if (tc->regs) { + iounmap(tc->regs); + release_resource(tc->iomem); + tc->regs = NULL; + tc->iomem = NULL; + } + spin_unlock(&tc_list_lock); +} +EXPORT_SYMBOL_GPL(atmel_tc_free); + +static int __init tc_probe(struct platform_device *pdev) +{ + struct atmel_tc *tc; + struct clk *clk; + int irq; + + if (!platform_get_resource(pdev, IORESOURCE_MEM, 0)) + return -EINVAL; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return -EINVAL; + + tc = kzalloc(sizeof(struct atmel_tc), GFP_KERNEL); + if (!tc) + return -ENOMEM; + + tc->pdev = pdev; + + clk = clk_get(&pdev->dev, "t0_clk"); + if (IS_ERR(clk)) { + kfree(tc); + return -EINVAL; + } + + tc->clk[0] = clk; + tc->clk[1] = clk_get(&pdev->dev, "t1_clk"); + if (IS_ERR(tc->clk[1])) + tc->clk[1] = clk; + tc->clk[2] = clk_get(&pdev->dev, "t2_clk"); + if (IS_ERR(tc->clk[2])) + tc->clk[2] = clk; + + tc->irq[0] = irq; + tc->irq[1] = platform_get_irq(pdev, 1); + if (tc->irq[1] < 0) + tc->irq[1] = irq; + tc->irq[2] = platform_get_irq(pdev, 2); + if (tc->irq[2] < 0) + tc->irq[2] = irq; + + spin_lock(&tc_list_lock); + list_add_tail(&tc->node, &tc_list); + spin_unlock(&tc_list_lock); + + return 0; +} + +static struct platform_driver tc_driver = { + .driver.name = "atmel_tcb", +}; + +static int __init tc_init(void) +{ + return platform_driver_probe(&tc_driver, tc_probe); +} +arch_initcall(tc_init); diff --git a/drivers/misc/c2port/Kconfig b/drivers/misc/c2port/Kconfig new file mode 100644 index 0000000..e46af9a --- /dev/null +++ b/drivers/misc/c2port/Kconfig @@ -0,0 +1,35 @@ +# +# C2 port devices +# + +menuconfig C2PORT + tristate "Silicon Labs C2 port support (EXPERIMENTAL)" + depends on EXPERIMENTAL + default no + help + This option enables support for Silicon Labs C2 port used to + program Silicon micro controller chips (and other 8051 compatible). + + If your board have no such micro controllers you don't need this + interface at all. + + To compile this driver as a module, choose M here: the module will + be called c2port_core. Note that you also need a client module + usually called c2port-*. + + If you are not sure, say N here. + +if C2PORT + +config C2PORT_DURAMAR_2150 + tristate "C2 port support for Eurotech's Duramar 2150 (EXPERIMENTAL)" + depends on X86 && C2PORT + default no + help + This option enables C2 support for the Eurotech's Duramar 2150 + on board micro controller. + + To compile this driver as a module, choose M here: the module will + be called c2port-duramar2150. + +endif # C2PORT diff --git a/drivers/misc/c2port/Makefile b/drivers/misc/c2port/Makefile new file mode 100644 index 0000000..3b2cf43 --- /dev/null +++ b/drivers/misc/c2port/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_C2PORT) += core.o + +obj-$(CONFIG_C2PORT_DURAMAR_2150) += c2port-duramar2150.o diff --git a/drivers/misc/c2port/c2port-duramar2150.c b/drivers/misc/c2port/c2port-duramar2150.c new file mode 100644 index 0000000..338dcc1 --- /dev/null +++ b/drivers/misc/c2port/c2port-duramar2150.c @@ -0,0 +1,158 @@ +/* + * Silicon Labs C2 port Linux support for Eurotech Duramar 2150 + * + * Copyright (c) 2008 Rodolfo Giometti <giometti@linux.it> + * Copyright (c) 2008 Eurotech S.p.A. 
<info@eurotech.it> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation + */ + +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/c2port.h> + +#define DATA_PORT 0x325 +#define DIR_PORT 0x326 +#define C2D (1 << 0) +#define C2CK (1 << 1) + +static DEFINE_MUTEX(update_lock); + +/* + * C2 port operations + */ + +static void duramar2150_c2port_access(struct c2port_device *dev, int status) +{ + u8 v; + + mutex_lock(&update_lock); + + v = inb(DIR_PORT); + + /* 0 = input, 1 = output */ + if (status) + outb(v | (C2D | C2CK), DIR_PORT); + else + /* When access is "off" is important that both lines are set + * as inputs or hi-impedence */ + outb(v & ~(C2D | C2CK), DIR_PORT); + + mutex_unlock(&update_lock); +} + +static void duramar2150_c2port_c2d_dir(struct c2port_device *dev, int dir) +{ + u8 v; + + mutex_lock(&update_lock); + + v = inb(DIR_PORT); + + if (dir) + outb(v & ~C2D, DIR_PORT); + else + outb(v | C2D, DIR_PORT); + + mutex_unlock(&update_lock); +} + +static int duramar2150_c2port_c2d_get(struct c2port_device *dev) +{ + return inb(DATA_PORT) & C2D; +} + +static void duramar2150_c2port_c2d_set(struct c2port_device *dev, int status) +{ + u8 v; + + mutex_lock(&update_lock); + + v = inb(DATA_PORT); + + if (status) + outb(v | C2D, DATA_PORT); + else + outb(v & ~C2D, DATA_PORT); + + mutex_unlock(&update_lock); +} + +static void duramar2150_c2port_c2ck_set(struct c2port_device *dev, int status) +{ + u8 v; + + mutex_lock(&update_lock); + + v = inb(DATA_PORT); + + if (status) + outb(v | C2CK, DATA_PORT); + else + outb(v & ~C2CK, DATA_PORT); + + mutex_unlock(&update_lock); +} + +static struct c2port_ops duramar2150_c2port_ops = { + .block_size = 512, /* bytes */ + .blocks_num = 30, /* total flash size: 15360 bytes */ + + .access = duramar2150_c2port_access, + .c2d_dir = duramar2150_c2port_c2d_dir, + .c2d_get = duramar2150_c2port_c2d_get, + .c2d_set = duramar2150_c2port_c2d_set, + .c2ck_set = duramar2150_c2port_c2ck_set, +}; + +static struct c2port_device *duramar2150_c2port_dev; + +/* + * Module stuff + */ + +static int __init duramar2150_c2port_init(void) +{ + struct resource *res; + int ret = 0; + + res = request_region(0x325, 2, "c2port"); + if (!res) + return -EBUSY; + + duramar2150_c2port_dev = c2port_device_register("uc", + &duramar2150_c2port_ops, NULL); + if (!duramar2150_c2port_dev) { + ret = -ENODEV; + goto free_region; + } + + return 0; + +free_region: + release_region(0x325, 2); + return ret; +} + +static void __exit duramar2150_c2port_exit(void) +{ + /* Setup the GPIOs as input by default (access = 0) */ + duramar2150_c2port_access(duramar2150_c2port_dev, 0); + + c2port_device_unregister(duramar2150_c2port_dev); + + release_region(0x325, 2); +} + +module_init(duramar2150_c2port_init); +module_exit(duramar2150_c2port_exit); + +MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>"); +MODULE_DESCRIPTION("Silicon Labs C2 port Linux support for Duramar 2150"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c new file mode 100644 index 0000000..0207dd5 --- /dev/null +++ b/drivers/misc/c2port/core.c @@ -0,0 +1,1003 @@ +/* + * Silicon Labs C2 port core Linux support + * + * Copyright (c) 2007 Rodolfo Giometti <giometti@linux.it> + * Copyright (c) 2007 Eurotech S.p.A. 
<info@eurotech.it> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/ctype.h> +#include <linux/delay.h> +#include <linux/idr.h> +#include <linux/sched.h> + +#include <linux/c2port.h> + +#define DRIVER_NAME "c2port" +#define DRIVER_VERSION "0.51.0" + +static DEFINE_SPINLOCK(c2port_idr_lock); +static DEFINE_IDR(c2port_idr); + +/* + * Local variables + */ + +static struct class *c2port_class; + +/* + * C2 registers & commands defines + */ + +/* C2 registers */ +#define C2PORT_DEVICEID 0x00 +#define C2PORT_REVID 0x01 +#define C2PORT_FPCTL 0x02 +#define C2PORT_FPDAT 0xB4 + +/* C2 interface commands */ +#define C2PORT_GET_VERSION 0x01 +#define C2PORT_DEVICE_ERASE 0x03 +#define C2PORT_BLOCK_READ 0x06 +#define C2PORT_BLOCK_WRITE 0x07 +#define C2PORT_PAGE_ERASE 0x08 + +/* C2 status return codes */ +#define C2PORT_INVALID_COMMAND 0x00 +#define C2PORT_COMMAND_FAILED 0x02 +#define C2PORT_COMMAND_OK 0x0d + +/* + * C2 port low level signal managements + */ + +static void c2port_reset(struct c2port_device *dev) +{ + struct c2port_ops *ops = dev->ops; + + /* To reset the device we have to keep clock line low for at least + * 20us. + */ + local_irq_disable(); + ops->c2ck_set(dev, 0); + udelay(25); + ops->c2ck_set(dev, 1); + local_irq_enable(); + + udelay(1); +} + +static void c2port_strobe_ck(struct c2port_device *dev) +{ + struct c2port_ops *ops = dev->ops; + + /* During hi-low-hi transition we disable local IRQs to avoid + * interructions since C2 port specification says that it must be + * shorter than 5us, otherwise the microcontroller may consider + * it as a reset signal! 
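+ * (Compare with c2port_reset() above, which deliberately holds C2CK
+ * low for 25us, comfortably past the 20us reset threshold, whereas
+ * this strobe keeps the line low for only about 1us.)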
+ */ + local_irq_disable(); + ops->c2ck_set(dev, 0); + udelay(1); + ops->c2ck_set(dev, 1); + local_irq_enable(); + + udelay(1); +} + +/* + * C2 port basic functions + */ + +static void c2port_write_ar(struct c2port_device *dev, u8 addr) +{ + struct c2port_ops *ops = dev->ops; + int i; + + /* START field */ + c2port_strobe_ck(dev); + + /* INS field (11b, LSB first) */ + ops->c2d_dir(dev, 0); + ops->c2d_set(dev, 1); + c2port_strobe_ck(dev); + ops->c2d_set(dev, 1); + c2port_strobe_ck(dev); + + /* ADDRESS field */ + for (i = 0; i < 8; i++) { + ops->c2d_set(dev, addr & 0x01); + c2port_strobe_ck(dev); + + addr >>= 1; + } + + /* STOP field */ + ops->c2d_dir(dev, 1); + c2port_strobe_ck(dev); +} + +static int c2port_read_ar(struct c2port_device *dev, u8 *addr) +{ + struct c2port_ops *ops = dev->ops; + int i; + + /* START field */ + c2port_strobe_ck(dev); + + /* INS field (10b, LSB first) */ + ops->c2d_dir(dev, 0); + ops->c2d_set(dev, 0); + c2port_strobe_ck(dev); + ops->c2d_set(dev, 1); + c2port_strobe_ck(dev); + + /* ADDRESS field */ + ops->c2d_dir(dev, 1); + *addr = 0; + for (i = 0; i < 8; i++) { + *addr >>= 1; /* shift in 8-bit ADDRESS field LSB first */ + + c2port_strobe_ck(dev); + if (ops->c2d_get(dev)) + *addr |= 0x80; + } + + /* STOP field */ + c2port_strobe_ck(dev); + + return 0; +} + +static int c2port_write_dr(struct c2port_device *dev, u8 data) +{ + struct c2port_ops *ops = dev->ops; + int timeout, i; + + /* START field */ + c2port_strobe_ck(dev); + + /* INS field (01b, LSB first) */ + ops->c2d_dir(dev, 0); + ops->c2d_set(dev, 1); + c2port_strobe_ck(dev); + ops->c2d_set(dev, 0); + c2port_strobe_ck(dev); + + /* LENGTH field (00b, LSB first -> 1 byte) */ + ops->c2d_set(dev, 0); + c2port_strobe_ck(dev); + ops->c2d_set(dev, 0); + c2port_strobe_ck(dev); + + /* DATA field */ + for (i = 0; i < 8; i++) { + ops->c2d_set(dev, data & 0x01); + c2port_strobe_ck(dev); + + data >>= 1; + } + + /* WAIT field */ + ops->c2d_dir(dev, 1); + timeout = 20; + do { + c2port_strobe_ck(dev); + if (ops->c2d_get(dev)) + break; + + udelay(1); + } while (--timeout > 0); + if (timeout == 0) + return -EIO; + + /* STOP field */ + c2port_strobe_ck(dev); + + return 0; +} + +static int c2port_read_dr(struct c2port_device *dev, u8 *data) +{ + struct c2port_ops *ops = dev->ops; + int timeout, i; + + /* START field */ + c2port_strobe_ck(dev); + + /* INS field (00b, LSB first) */ + ops->c2d_dir(dev, 0); + ops->c2d_set(dev, 0); + c2port_strobe_ck(dev); + ops->c2d_set(dev, 0); + c2port_strobe_ck(dev); + + /* LENGTH field (00b, LSB first -> 1 byte) */ + ops->c2d_set(dev, 0); + c2port_strobe_ck(dev); + ops->c2d_set(dev, 0); + c2port_strobe_ck(dev); + + /* WAIT field */ + ops->c2d_dir(dev, 1); + timeout = 20; + do { + c2port_strobe_ck(dev); + if (ops->c2d_get(dev)) + break; + + udelay(1); + } while (--timeout > 0); + if (timeout == 0) + return -EIO; + + /* DATA field */ + *data = 0; + for (i = 0; i < 8; i++) { + *data >>= 1; /* shift in 8-bit DATA field LSB first */ + + c2port_strobe_ck(dev); + if (ops->c2d_get(dev)) + *data |= 0x80; + } + + /* STOP field */ + c2port_strobe_ck(dev); + + return 0; +} + +static int c2port_poll_in_busy(struct c2port_device *dev) +{ + u8 addr; + int ret, timeout = 20; + + do { + ret = (c2port_read_ar(dev, &addr)); + if (ret < 0) + return -EIO; + + if (!(addr & 0x02)) + break; + + udelay(1); + } while (--timeout > 0); + if (timeout == 0) + return -EIO; + + return 0; +} + +static int c2port_poll_out_ready(struct c2port_device *dev) +{ + u8 addr; + int ret, timeout = 10000; /* erase flash needs long time... 
*/ + + do { + ret = (c2port_read_ar(dev, &addr)); + if (ret < 0) + return -EIO; + + if (addr & 0x01) + break; + + udelay(1); + } while (--timeout > 0); + if (timeout == 0) + return -EIO; + + return 0; +} + +/* + * sysfs methods + */ + +static ssize_t c2port_show_name(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct c2port_device *c2dev = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", c2dev->name); +} + +static ssize_t c2port_show_flash_blocks_num(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct c2port_device *c2dev = dev_get_drvdata(dev); + struct c2port_ops *ops = c2dev->ops; + + return sprintf(buf, "%d\n", ops->blocks_num); +} + +static ssize_t c2port_show_flash_block_size(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct c2port_device *c2dev = dev_get_drvdata(dev); + struct c2port_ops *ops = c2dev->ops; + + return sprintf(buf, "%d\n", ops->block_size); +} + +static ssize_t c2port_show_flash_size(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct c2port_device *c2dev = dev_get_drvdata(dev); + struct c2port_ops *ops = c2dev->ops; + + return sprintf(buf, "%d\n", ops->blocks_num * ops->block_size); +} + +static ssize_t c2port_show_access(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct c2port_device *c2dev = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", c2dev->access); +} + +static ssize_t c2port_store_access(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct c2port_device *c2dev = dev_get_drvdata(dev); + struct c2port_ops *ops = c2dev->ops; + int status, ret; + + ret = sscanf(buf, "%d", &status); + if (ret != 1) + return -EINVAL; + + mutex_lock(&c2dev->mutex); + + c2dev->access = !!status; + + /* If access is "on" clock should be HIGH _before_ setting the line + * as output and data line should be set as INPUT anyway */ + if (c2dev->access) + ops->c2ck_set(c2dev, 1); + ops->access(c2dev, c2dev->access); + if (c2dev->access) + ops->c2d_dir(c2dev, 1); + + mutex_unlock(&c2dev->mutex); + + return count; +} + +static ssize_t c2port_store_reset(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct c2port_device *c2dev = dev_get_drvdata(dev); + + /* Check the device access status */ + if (!c2dev->access) + return -EBUSY; + + mutex_lock(&c2dev->mutex); + + c2port_reset(c2dev); + c2dev->flash_access = 0; + + mutex_unlock(&c2dev->mutex); + + return count; +} + +static ssize_t __c2port_show_dev_id(struct c2port_device *dev, char *buf) +{ + u8 data; + int ret; + + /* Select DEVICEID register for C2 data register accesses */ + c2port_write_ar(dev, C2PORT_DEVICEID); + + /* Read and return the device ID register */ + ret = c2port_read_dr(dev, &data); + if (ret < 0) + return ret; + + return sprintf(buf, "%d\n", data); +} + +static ssize_t c2port_show_dev_id(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct c2port_device *c2dev = dev_get_drvdata(dev); + ssize_t ret; + + /* Check the device access status */ + if (!c2dev->access) + return -EBUSY; + + mutex_lock(&c2dev->mutex); + ret = __c2port_show_dev_id(c2dev, buf); + mutex_unlock(&c2dev->mutex); + + if (ret < 0) + dev_err(dev, "cannot read from %s\n", c2dev->name); + + return ret; +} + +static ssize_t __c2port_show_rev_id(struct c2port_device *dev, char *buf) +{ + u8 data; + int ret; + + /* Select REVID register for C2 data register accesses */ + c2port_write_ar(dev, C2PORT_REVID); + + /* Read 
and return the revision ID register */ + ret = c2port_read_dr(dev, &data); + if (ret < 0) + return ret; + + return sprintf(buf, "%d\n", data); +} + +static ssize_t c2port_show_rev_id(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct c2port_device *c2dev = dev_get_drvdata(dev); + ssize_t ret; + + /* Check the device access status */ + if (!c2dev->access) + return -EBUSY; + + mutex_lock(&c2dev->mutex); + ret = __c2port_show_rev_id(c2dev, buf); + mutex_unlock(&c2dev->mutex); + + if (ret < 0) + dev_err(c2dev->dev, "cannot read from %s\n", c2dev->name); + + return ret; +} + +static ssize_t c2port_show_flash_access(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct c2port_device *c2dev = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", c2dev->flash_access); +} + +static ssize_t __c2port_store_flash_access(struct c2port_device *dev, + int status) +{ + int ret; + + /* Check the device access status */ + if (!dev->access) + return -EBUSY; + + dev->flash_access = !!status; + + /* If flash_access is off we have nothing to do... */ + if (dev->flash_access == 0) + return 0; + + /* Target the C2 flash programming control register for C2 data + * register access */ + c2port_write_ar(dev, C2PORT_FPCTL); + + /* Write the first keycode to enable C2 Flash programming */ + ret = c2port_write_dr(dev, 0x02); + if (ret < 0) + return ret; + + /* Write the second keycode to enable C2 Flash programming */ + ret = c2port_write_dr(dev, 0x01); + if (ret < 0) + return ret; + + /* Delay for at least 20ms to ensure the target is ready for + * C2 flash programming */ + mdelay(25); + + return 0; +} + +static ssize_t c2port_store_flash_access(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct c2port_device *c2dev = dev_get_drvdata(dev); + int status; + ssize_t ret; + + ret = sscanf(buf, "%d", &status); + if (ret != 1) + return -EINVAL; + + mutex_lock(&c2dev->mutex); + ret = __c2port_store_flash_access(c2dev, status); + mutex_unlock(&c2dev->mutex); + + if (ret < 0) { + dev_err(c2dev->dev, "cannot enable %s flash programming\n", + c2dev->name); + return ret; + } + + return count; +} + +static ssize_t __c2port_write_flash_erase(struct c2port_device *dev) +{ + u8 status; + int ret; + + /* Target the C2 flash programming data register for C2 data register + * access. + */ + c2port_write_ar(dev, C2PORT_FPDAT); + + /* Send device erase command */ + c2port_write_dr(dev, C2PORT_DEVICE_ERASE); + + /* Wait for input acknowledge */ + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + + /* Should check status before starting FLASH access sequence */ + + /* Wait for status information */ + ret = c2port_poll_out_ready(dev); + if (ret < 0) + return ret; + + /* Read flash programming interface status */ + ret = c2port_read_dr(dev, &status); + if (ret < 0) + return ret; + if (status != C2PORT_COMMAND_OK) + return -EBUSY; + + /* Send a three-byte arming sequence to enable the device erase. + * If the sequence is not received correctly, the command will be + * ignored. + * Sequence is: 0xde, 0xad, 0xa5. 
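 + * Each key byte below is followed by the same c2port_poll_in_busy() + * handshake used for ordinary data writes, so a timed-out poll aborts + * the erase with -EIO before the next byte is sent.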
+ */ + c2port_write_dr(dev, 0xde); + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + c2port_write_dr(dev, 0xad); + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + c2port_write_dr(dev, 0xa5); + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + + ret = c2port_poll_out_ready(dev); + if (ret < 0) + return ret; + + return 0; +} + +static ssize_t c2port_store_flash_erase(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct c2port_device *c2dev = dev_get_drvdata(dev); + int ret; + + /* Check the device and flash access status */ + if (!c2dev->access || !c2dev->flash_access) + return -EBUSY; + + mutex_lock(&c2dev->mutex); + ret = __c2port_write_flash_erase(c2dev); + mutex_unlock(&c2dev->mutex); + + if (ret < 0) { + dev_err(c2dev->dev, "cannot erase %s flash\n", c2dev->name); + return ret; + } + + return count; +} + +static ssize_t __c2port_read_flash_data(struct c2port_device *dev, + char *buffer, loff_t offset, size_t count) +{ + struct c2port_ops *ops = dev->ops; + u8 status, nread = 128; + int i, ret; + + /* Check for flash end */ + if (offset >= ops->block_size * ops->blocks_num) + return 0; + + if (ops->block_size * ops->blocks_num - offset < nread) + nread = ops->block_size * ops->blocks_num - offset; + if (count < nread) + nread = count; + if (nread == 0) + return nread; + + /* Target the C2 flash programming data register for C2 data register + * access */ + c2port_write_ar(dev, C2PORT_FPDAT); + + /* Send flash block read command */ + c2port_write_dr(dev, C2PORT_BLOCK_READ); + + /* Wait for input acknowledge */ + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + + /* Should check status before starting FLASH access sequence */ + + /* Wait for status information */ + ret = c2port_poll_out_ready(dev); + if (ret < 0) + return ret; + + /* Read flash programming interface status */ + ret = c2port_read_dr(dev, &status); + if (ret < 0) + return ret; + if (status != C2PORT_COMMAND_OK) + return -EBUSY; + + /* Send address high byte */ + c2port_write_dr(dev, offset >> 8); + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + + /* Send address low byte */ + c2port_write_dr(dev, offset & 0x00ff); + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + + /* Send address block size */ + c2port_write_dr(dev, nread); + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + + /* Should check status before reading FLASH block */ + + /* Wait for status information */ + ret = c2port_poll_out_ready(dev); + if (ret < 0) + return ret; + + /* Read flash programming interface status */ + ret = c2port_read_dr(dev, &status); + if (ret < 0) + return ret; + if (status != C2PORT_COMMAND_OK) + return -EBUSY; + + /* Read flash block */ + for (i = 0; i < nread; i++) { + ret = c2port_poll_out_ready(dev); + if (ret < 0) + return ret; + + ret = c2port_read_dr(dev, buffer+i); + if (ret < 0) + return ret; + } + + return nread; +} + +static ssize_t c2port_read_flash_data(struct kobject *kobj, + struct bin_attribute *attr, + char *buffer, loff_t offset, size_t count) +{ + struct c2port_device *c2dev = + dev_get_drvdata(container_of(kobj, + struct device, kobj)); + ssize_t ret; + + /* Check the device and flash access status */ + if (!c2dev->access || !c2dev->flash_access) + return -EBUSY; + + mutex_lock(&c2dev->mutex); + ret = __c2port_read_flash_data(c2dev, buffer, offset, count); + mutex_unlock(&c2dev->mutex); + + if (ret < 0) + dev_err(c2dev->dev, "cannot read %s flash\n", c2dev->name); + + return 
ret; +} + +static ssize_t __c2port_write_flash_data(struct c2port_device *dev, + char *buffer, loff_t offset, size_t count) +{ + struct c2port_ops *ops = dev->ops; + u8 status, nwrite = 128; + int i, ret; + + if (nwrite > count) + nwrite = count; + if (ops->block_size * ops->blocks_num - offset < nwrite) + nwrite = ops->block_size * ops->blocks_num - offset; + + /* Check for flash end */ + if (offset >= ops->block_size * ops->blocks_num) + return -EINVAL; + + /* Target the C2 flash programming data register for C2 data register + * access */ + c2port_write_ar(dev, C2PORT_FPDAT); + + /* Send flash block write command */ + c2port_write_dr(dev, C2PORT_BLOCK_WRITE); + + /* Wait for input acknowledge */ + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + + /* Should check status before starting FLASH access sequence */ + + /* Wait for status information */ + ret = c2port_poll_out_ready(dev); + if (ret < 0) + return ret; + + /* Read flash programming interface status */ + ret = c2port_read_dr(dev, &status); + if (ret < 0) + return ret; + if (status != C2PORT_COMMAND_OK) + return -EBUSY; + + /* Send address high byte */ + c2port_write_dr(dev, offset >> 8); + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + + /* Send address low byte */ + c2port_write_dr(dev, offset & 0x00ff); + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + + /* Send address block size */ + c2port_write_dr(dev, nwrite); + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + + /* Should check status before writing FLASH block */ + + /* Wait for status information */ + ret = c2port_poll_out_ready(dev); + if (ret < 0) + return ret; + + /* Read flash programming interface status */ + ret = c2port_read_dr(dev, &status); + if (ret < 0) + return ret; + if (status != C2PORT_COMMAND_OK) + return -EBUSY; + + /* Write flash block */ + for (i = 0; i < nwrite; i++) { + ret = c2port_write_dr(dev, *(buffer+i)); + if (ret < 0) + return ret; + + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + + } + + /* Wait for last flash write to complete */ + ret = c2port_poll_out_ready(dev); + if (ret < 0) + return ret; + + return nwrite; +} + +static ssize_t c2port_write_flash_data(struct kobject *kobj, + struct bin_attribute *attr, + char *buffer, loff_t offset, size_t count) +{ + struct c2port_device *c2dev = + dev_get_drvdata(container_of(kobj, + struct device, kobj)); + int ret; + + /* Check the device access status */ + if (!c2dev->access || !c2dev->flash_access) + return -EBUSY; + + mutex_lock(&c2dev->mutex); + ret = __c2port_write_flash_data(c2dev, buffer, offset, count); + mutex_unlock(&c2dev->mutex); + + if (ret < 0) + dev_err(c2dev->dev, "cannot write %s flash\n", c2dev->name); + + return ret; +} + +/* + * Class attributes + */ + +static struct device_attribute c2port_attrs[] = { + __ATTR(name, 0444, c2port_show_name, NULL), + __ATTR(flash_blocks_num, 0444, c2port_show_flash_blocks_num, NULL), + __ATTR(flash_block_size, 0444, c2port_show_flash_block_size, NULL), + __ATTR(flash_size, 0444, c2port_show_flash_size, NULL), + __ATTR(access, 0644, c2port_show_access, c2port_store_access), + __ATTR(reset, 0200, NULL, c2port_store_reset), + __ATTR(dev_id, 0444, c2port_show_dev_id, NULL), + __ATTR(rev_id, 0444, c2port_show_rev_id, NULL), + + __ATTR(flash_access, 0644, c2port_show_flash_access, + c2port_store_flash_access), + __ATTR(flash_erase, 0200, NULL, c2port_store_flash_erase), + __ATTR_NULL, +}; + +static struct bin_attribute c2port_bin_attrs = { + .attr = { + .name = "flash_data", + 
.mode = 0644 + }, + .read = c2port_read_flash_data, + .write = c2port_write_flash_data, + /* .size is computed at run-time */ +}; + +/* + * Exported functions + */ + +struct c2port_device *c2port_device_register(char *name, + struct c2port_ops *ops, void *devdata) +{ + struct c2port_device *c2dev; + int id, ret; + + if (unlikely(!ops) || unlikely(!ops->access) || + unlikely(!ops->c2d_dir) || unlikely(!ops->c2ck_set) || + unlikely(!ops->c2d_get) || unlikely(!ops->c2d_set)) + return ERR_PTR(-EINVAL); + + c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL); + if (unlikely(!c2dev)) + return ERR_PTR(-ENOMEM); + + ret = idr_pre_get(&c2port_idr, GFP_KERNEL); + if (!ret) { + ret = -ENOMEM; + goto error_idr_get_new; + } + + spin_lock_irq(&c2port_idr_lock); + ret = idr_get_new(&c2port_idr, c2dev, &id); + spin_unlock_irq(&c2port_idr_lock); + + if (ret < 0) + goto error_idr_get_new; + c2dev->id = id; + + c2dev->dev = device_create(c2port_class, NULL, 0, c2dev, + "c2port%d", id); + if (unlikely(IS_ERR(c2dev->dev))) { + ret = PTR_ERR(c2dev->dev); + goto error_device_create; + } + dev_set_drvdata(c2dev->dev, c2dev); + + strncpy(c2dev->name, name, C2PORT_NAME_LEN); + c2dev->ops = ops; + mutex_init(&c2dev->mutex); + + /* Create binary file */ + c2port_bin_attrs.size = ops->blocks_num * ops->block_size; + ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs); + if (unlikely(ret)) + goto error_device_create_bin_file; + + /* By default C2 port access is off */ + c2dev->access = c2dev->flash_access = 0; + ops->access(c2dev, 0); + + dev_info(c2dev->dev, "C2 port %s added\n", name); + dev_info(c2dev->dev, "%s flash has %d blocks x %d bytes " + "(%d bytes total)\n", + name, ops->blocks_num, ops->block_size, + ops->blocks_num * ops->block_size); + + return c2dev; + +error_device_create_bin_file: + device_destroy(c2port_class, 0); + +error_device_create: + spin_lock_irq(&c2port_idr_lock); + idr_remove(&c2port_idr, id); + spin_unlock_irq(&c2port_idr_lock); + +error_idr_get_new: + kfree(c2dev); + + return ERR_PTR(ret); +} +EXPORT_SYMBOL(c2port_device_register); + +void c2port_device_unregister(struct c2port_device *c2dev) +{ + if (!c2dev) + return; + + dev_info(c2dev->dev, "C2 port %s removed\n", c2dev->name); + + device_remove_bin_file(c2dev->dev, &c2port_bin_attrs); + spin_lock_irq(&c2port_idr_lock); + idr_remove(&c2port_idr, c2dev->id); + spin_unlock_irq(&c2port_idr_lock); + + device_destroy(c2port_class, c2dev->id); + + kfree(c2dev); +} +EXPORT_SYMBOL(c2port_device_unregister); + +/* + * Module stuff + */ + +static int __init c2port_init(void) +{ + printk(KERN_INFO "Silicon Labs C2 port support v. " DRIVER_VERSION + " - (C) 2007 Rodolfo Giometti\n"); + + c2port_class = class_create(THIS_MODULE, "c2port"); + if (IS_ERR(c2port_class)) { + printk(KERN_ERR "c2port: failed to allocate class\n"); + return PTR_ERR(c2port_class); + } + c2port_class->dev_attrs = c2port_attrs; + + return 0; +} + +static void __exit c2port_exit(void) +{ + class_destroy(c2port_class); +} + +module_init(c2port_init); +module_exit(c2port_exit); + +MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>"); +MODULE_DESCRIPTION("Silicon Labs C2 port support v. 
" DRIVER_VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/compal-laptop.c b/drivers/misc/compal-laptop.c new file mode 100644 index 0000000..11003bb --- /dev/null +++ b/drivers/misc/compal-laptop.c @@ -0,0 +1,406 @@ +/*-*-linux-c-*-*/ + +/* + Copyright (C) 2008 Cezary Jackiewicz <cezary.jackiewicz (at) gmail.com> + + based on MSI driver + + Copyright (C) 2006 Lennart Poettering <mzxreary (at) 0pointer (dot) de> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + */ + +/* + * comapl-laptop.c - Compal laptop support. + * + * This driver exports a few files in /sys/devices/platform/compal-laptop/: + * + * wlan - wlan subsystem state: contains 0 or 1 (rw) + * + * bluetooth - Bluetooth subsystem state: contains 0 or 1 (rw) + * + * raw - raw value taken from embedded controller register (ro) + * + * In addition to these platform device attributes the driver + * registers itself in the Linux backlight control subsystem and is + * available to userspace under /sys/class/backlight/compal-laptop/. + * + * This driver might work on other laptops produced by Compal. If you + * want to try it you can pass force=1 as argument to the module which + * will force it to load even when the DMI data doesn't identify the + * laptop as FL9x. 
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/acpi.h> +#include <linux/dmi.h> +#include <linux/backlight.h> +#include <linux/platform_device.h> +#include <linux/autoconf.h> + +#define COMPAL_DRIVER_VERSION "0.2.6" + +#define COMPAL_LCD_LEVEL_MAX 8 + +#define COMPAL_EC_COMMAND_WIRELESS 0xBB +#define COMPAL_EC_COMMAND_LCD_LEVEL 0xB9 + +#define KILLSWITCH_MASK 0x10 +#define WLAN_MASK 0x01 +#define BT_MASK 0x02 + +static int force; +module_param(force, bool, 0); +MODULE_PARM_DESC(force, "Force driver load, ignore DMI data"); + +/* Hardware access */ + +static int set_lcd_level(int level) +{ + if (level < 0 || level >= COMPAL_LCD_LEVEL_MAX) + return -EINVAL; + + ec_write(COMPAL_EC_COMMAND_LCD_LEVEL, level); + + return 0; +} + +static int get_lcd_level(void) +{ + u8 result; + + ec_read(COMPAL_EC_COMMAND_LCD_LEVEL, &result); + + return (int) result; +} + +static int set_wlan_state(int state) +{ + u8 result, value; + + ec_read(COMPAL_EC_COMMAND_WIRELESS, &result); + + if ((result & KILLSWITCH_MASK) == 0) + return -EINVAL; + else { + if (state) + value = (u8) (result | WLAN_MASK); + else + value = (u8) (result & ~WLAN_MASK); + ec_write(COMPAL_EC_COMMAND_WIRELESS, value); + } + + return 0; +} + +static int set_bluetooth_state(int state) +{ + u8 result, value; + + ec_read(COMPAL_EC_COMMAND_WIRELESS, &result); + + if ((result & KILLSWITCH_MASK) == 0) + return -EINVAL; + else { + if (state) + value = (u8) (result | BT_MASK); + else + value = (u8) (result & ~BT_MASK); + ec_write(COMPAL_EC_COMMAND_WIRELESS, value); + } + + return 0; +} + +static int get_wireless_state(int *wlan, int *bluetooth) +{ + u8 result; + + ec_read(COMPAL_EC_COMMAND_WIRELESS, &result); + + if (wlan) { + if ((result & KILLSWITCH_MASK) == 0) + *wlan = 0; + else + *wlan = result & WLAN_MASK; + } + + if (bluetooth) { + if ((result & KILLSWITCH_MASK) == 0) + *bluetooth = 0; + else + *bluetooth = (result & BT_MASK) >> 1; + } + + return 0; +} + +/* Backlight device stuff */ + +static int bl_get_brightness(struct backlight_device *b) +{ + return get_lcd_level(); +} + + +static int bl_update_status(struct backlight_device *b) +{ + return set_lcd_level(b->props.brightness); +} + +static struct backlight_ops compalbl_ops = { + .get_brightness = bl_get_brightness, + .update_status = bl_update_status, +}; + +static struct backlight_device *compalbl_device; + +/* Platform device */ + +static ssize_t show_wlan(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret, enabled; + + ret = get_wireless_state(&enabled, NULL); + if (ret < 0) + return ret; + + return sprintf(buf, "%i\n", enabled); +} + +static ssize_t show_raw(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u8 result; + + ec_read(COMPAL_EC_COMMAND_WIRELESS, &result); + + return sprintf(buf, "%i\n", result); +} + +static ssize_t show_bluetooth(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret, enabled; + + ret = get_wireless_state(NULL, &enabled); + if (ret < 0) + return ret; + + return sprintf(buf, "%i\n", enabled); +} + +static ssize_t store_wlan_state(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int state, ret; + + if (sscanf(buf, "%i", &state) != 1 || (state < 0 || state > 1)) + return -EINVAL; + + ret = set_wlan_state(state); + if (ret < 0) + return ret; + + return count; +} + +static ssize_t store_bluetooth_state(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int state, 
ret; + + if (sscanf(buf, "%i", &state) != 1 || (state < 0 || state > 1)) + return -EINVAL; + + ret = set_bluetooth_state(state); + if (ret < 0) + return ret; + + return count; +} + +static DEVICE_ATTR(bluetooth, 0644, show_bluetooth, store_bluetooth_state); +static DEVICE_ATTR(wlan, 0644, show_wlan, store_wlan_state); +static DEVICE_ATTR(raw, 0444, show_raw, NULL); + +static struct attribute *compal_attributes[] = { + &dev_attr_bluetooth.attr, + &dev_attr_wlan.attr, + &dev_attr_raw.attr, + NULL +}; + +static struct attribute_group compal_attribute_group = { + .attrs = compal_attributes +}; + +static struct platform_driver compal_driver = { + .driver = { + .name = "compal-laptop", + .owner = THIS_MODULE, + } +}; + +static struct platform_device *compal_device; + +/* Initialization */ + +static int dmi_check_cb(const struct dmi_system_id *id) +{ + printk(KERN_INFO "compal-laptop: Identified laptop model '%s'.\n", + id->ident); + + return 0; +} + +static struct dmi_system_id __initdata compal_dmi_table[] = { + { + .ident = "FL90/IFL90", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "IFL90"), + DMI_MATCH(DMI_BOARD_VERSION, "IFT00"), + }, + .callback = dmi_check_cb + }, + { + .ident = "FL90/IFL90", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "IFL90"), + DMI_MATCH(DMI_BOARD_VERSION, "REFERENCE"), + }, + .callback = dmi_check_cb + }, + { + .ident = "FL91/IFL91", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "IFL91"), + DMI_MATCH(DMI_BOARD_VERSION, "IFT00"), + }, + .callback = dmi_check_cb + }, + { + .ident = "FL92/JFL92", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "JFL92"), + DMI_MATCH(DMI_BOARD_VERSION, "IFT00"), + }, + .callback = dmi_check_cb + }, + { + .ident = "FT00/IFT00", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "IFT00"), + DMI_MATCH(DMI_BOARD_VERSION, "IFT00"), + }, + .callback = dmi_check_cb + }, + { } +}; + +static int __init compal_init(void) +{ + int ret; + + if (acpi_disabled) + return -ENODEV; + + if (!force && !dmi_check_system(compal_dmi_table)) + return -ENODEV; + + /* Register backlight stuff */ + + if (!acpi_video_backlight_support()) { + compalbl_device = backlight_device_register("compal-laptop", NULL, NULL, + &compalbl_ops); + if (IS_ERR(compalbl_device)) + return PTR_ERR(compalbl_device); + + compalbl_device->props.max_brightness = COMPAL_LCD_LEVEL_MAX-1; + } + + ret = platform_driver_register(&compal_driver); + if (ret) + goto fail_backlight; + + /* Register platform stuff */ + + compal_device = platform_device_alloc("compal-laptop", -1); + if (!compal_device) { + ret = -ENOMEM; + goto fail_platform_driver; + } + + ret = platform_device_add(compal_device); + if (ret) + goto fail_platform_device1; + + ret = sysfs_create_group(&compal_device->dev.kobj, + &compal_attribute_group); + if (ret) + goto fail_platform_device2; + + printk(KERN_INFO "compal-laptop: driver "COMPAL_DRIVER_VERSION + " successfully loaded.\n"); + + return 0; + +fail_platform_device2: + + platform_device_del(compal_device); + +fail_platform_device1: + + platform_device_put(compal_device); + +fail_platform_driver: + + platform_driver_unregister(&compal_driver); + +fail_backlight: + + backlight_device_unregister(compalbl_device); + + return ret; +} + +static void __exit compal_cleanup(void) +{ + + sysfs_remove_group(&compal_device->dev.kobj, &compal_attribute_group); + platform_device_unregister(compal_device); + platform_driver_unregister(&compal_driver); + backlight_device_unregister(compalbl_device); + + printk(KERN_INFO "compal-laptop: driver unloaded.\n"); +} + +module_init(compal_init); 
+module_exit(compal_cleanup); + +MODULE_AUTHOR("Cezary Jackiewicz"); +MODULE_DESCRIPTION("Compal Laptop Support"); +MODULE_VERSION(COMPAL_DRIVER_VERSION); +MODULE_LICENSE("GPL"); + +MODULE_ALIAS("dmi:*:rnIFL90:rvrIFT00:*"); +MODULE_ALIAS("dmi:*:rnIFL90:rvrREFERENCE:*"); +MODULE_ALIAS("dmi:*:rnIFL91:rvrIFT00:*"); +MODULE_ALIAS("dmi:*:rnJFL92:rvrIFT00:*"); +MODULE_ALIAS("dmi:*:rnIFT00:rvrIFT00:*"); diff --git a/drivers/misc/eeepc-laptop.c b/drivers/misc/eeepc-laptop.c new file mode 100644 index 0000000..1583aa7 --- /dev/null +++ b/drivers/misc/eeepc-laptop.c @@ -0,0 +1,877 @@ +/* + * eeepc-laptop.c - Asus Eee PC extras + * + * Based on asus_acpi.c as patched for the Eee PC by Asus: + * ftp://ftp.asus.com/pub/ASUS/EeePC/701/ASUS_ACPI_071126.rar + * Based on eee.c from eeepc-linux + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/platform_device.h> +#include <linux/backlight.h> +#include <linux/fb.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include <acpi/acpi_drivers.h> +#include <acpi/acpi_bus.h> +#include <linux/uaccess.h> +#include <linux/input.h> +#include <linux/rfkill.h> + +#define EEEPC_LAPTOP_VERSION "0.1" + +#define EEEPC_HOTK_NAME "Eee PC Hotkey Driver" +#define EEEPC_HOTK_FILE "eeepc" +#define EEEPC_HOTK_CLASS "hotkey" +#define EEEPC_HOTK_DEVICE_NAME "Hotkey" +#define EEEPC_HOTK_HID "ASUS010" + +#define EEEPC_LOG EEEPC_HOTK_FILE ": " +#define EEEPC_ERR KERN_ERR EEEPC_LOG +#define EEEPC_WARNING KERN_WARNING EEEPC_LOG +#define EEEPC_NOTICE KERN_NOTICE EEEPC_LOG +#define EEEPC_INFO KERN_INFO EEEPC_LOG + +/* + * Definitions for Asus EeePC + */ +#define NOTIFY_WLAN_ON 0x10 +#define NOTIFY_BRN_MIN 0x20 +#define NOTIFY_BRN_MAX 0x2f + +enum { + DISABLE_ASL_WLAN = 0x0001, + DISABLE_ASL_BLUETOOTH = 0x0002, + DISABLE_ASL_IRDA = 0x0004, + DISABLE_ASL_CAMERA = 0x0008, + DISABLE_ASL_TV = 0x0010, + DISABLE_ASL_GPS = 0x0020, + DISABLE_ASL_DISPLAYSWITCH = 0x0040, + DISABLE_ASL_MODEM = 0x0080, + DISABLE_ASL_CARDREADER = 0x0100 +}; + +enum { + CM_ASL_WLAN = 0, + CM_ASL_BLUETOOTH, + CM_ASL_IRDA, + CM_ASL_1394, + CM_ASL_CAMERA, + CM_ASL_TV, + CM_ASL_GPS, + CM_ASL_DVDROM, + CM_ASL_DISPLAYSWITCH, + CM_ASL_PANELBRIGHT, + CM_ASL_BIOSFLASH, + CM_ASL_ACPIFLASH, + CM_ASL_CPUFV, + CM_ASL_CPUTEMPERATURE, + CM_ASL_FANCPU, + CM_ASL_FANCHASSIS, + CM_ASL_USBPORT1, + CM_ASL_USBPORT2, + CM_ASL_USBPORT3, + CM_ASL_MODEM, + CM_ASL_CARDREADER, + CM_ASL_LID +}; + +static const char *cm_getv[] = { + "WLDG", NULL, NULL, NULL, + "CAMG", NULL, NULL, NULL, + NULL, "PBLG", NULL, NULL, + "CFVG", NULL, NULL, NULL, + "USBG", NULL, NULL, "MODG", + "CRDG", "LIDG" +}; + +static const char *cm_setv[] = { + "WLDS", NULL, NULL, NULL, + "CAMS", NULL, NULL, NULL, + "SDSP", "PBLS", "HDPS", NULL, + "CFVS", NULL, NULL, NULL, + "USBG", NULL, NULL, "MODS", + "CRDS", NULL +}; + +#define EEEPC_EC "\\_SB.PCI0.SBRG.EC0." 
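 + +/* + * Two addressing schemes are used below: the EEEPC_EC-prefixed strings + * name ACPI EC fields read with read_acpi_int(), while the bare offsets + * (0x63, 0xD3) are raw EC registers written with ec_write() in + * eeepc_set_fan_pwm() and eeepc_set_fan_ctrl(). + */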
+ +#define EEEPC_EC_FAN_PWM EEEPC_EC "SC02" /* Fan PWM duty cycle (%) */ +#define EEEPC_EC_SC02 0x63 +#define EEEPC_EC_FAN_HRPM EEEPC_EC "SC05" /* High byte, fan speed (RPM) */ +#define EEEPC_EC_FAN_LRPM EEEPC_EC "SC06" /* Low byte, fan speed (RPM) */ +#define EEEPC_EC_FAN_CTRL EEEPC_EC "SFB3" /* Byte containing SF25 */ +#define EEEPC_EC_SFB3 0xD3 + +/* + * This is the main structure, we can use it to store useful information + * about the hotk device + */ +struct eeepc_hotk { + struct acpi_device *device; /* the device we are in */ + acpi_handle handle; /* the handle of the hotk device */ + u32 cm_supported; /* the control methods supported + by this BIOS */ + uint init_flag; /* Init flags */ + u16 event_count[128]; /* count for each event */ + struct input_dev *inputdev; + u16 *keycode_map; + struct rfkill *eeepc_wlan_rfkill; + struct rfkill *eeepc_bluetooth_rfkill; +}; + +/* The actual device the driver binds to */ +static struct eeepc_hotk *ehotk; + +/* Platform device/driver */ +static struct platform_driver platform_driver = { + .driver = { + .name = EEEPC_HOTK_FILE, + .owner = THIS_MODULE, + } +}; + +static struct platform_device *platform_device; + +struct key_entry { + char type; + u8 code; + u16 keycode; +}; + +enum { KE_KEY, KE_END }; + +static struct key_entry eeepc_keymap[] = { + /* Sleep already handled via generic ACPI code */ + {KE_KEY, 0x10, KEY_WLAN }, + {KE_KEY, 0x12, KEY_PROG1 }, + {KE_KEY, 0x13, KEY_MUTE }, + {KE_KEY, 0x14, KEY_VOLUMEDOWN }, + {KE_KEY, 0x15, KEY_VOLUMEUP }, + {KE_KEY, 0x1a, KEY_COFFEE }, + {KE_KEY, 0x1b, KEY_ZOOM }, + {KE_KEY, 0x1c, KEY_PROG2 }, + {KE_KEY, 0x1d, KEY_PROG3 }, + {KE_KEY, 0x30, KEY_SWITCHVIDEOMODE }, + {KE_KEY, 0x31, KEY_SWITCHVIDEOMODE }, + {KE_KEY, 0x32, KEY_SWITCHVIDEOMODE }, + {KE_END, 0}, +}; + +/* + * The hotkey driver declaration + */ +static int eeepc_hotk_add(struct acpi_device *device); +static int eeepc_hotk_remove(struct acpi_device *device, int type); + +static const struct acpi_device_id eeepc_device_ids[] = { + {EEEPC_HOTK_HID, 0}, + {"", 0}, +}; +MODULE_DEVICE_TABLE(acpi, eeepc_device_ids); + +static struct acpi_driver eeepc_hotk_driver = { + .name = EEEPC_HOTK_NAME, + .class = EEEPC_HOTK_CLASS, + .ids = eeepc_device_ids, + .ops = { + .add = eeepc_hotk_add, + .remove = eeepc_hotk_remove, + }, +}; + +/* The backlight device /sys/class/backlight */ +static struct backlight_device *eeepc_backlight_device; + +/* The hwmon device */ +static struct device *eeepc_hwmon_device; + +/* + * The backlight class declaration + */ +static int read_brightness(struct backlight_device *bd); +static int update_bl_status(struct backlight_device *bd); +static struct backlight_ops eeepcbl_ops = { + .get_brightness = read_brightness, + .update_status = update_bl_status, +}; + +MODULE_AUTHOR("Corentin Chary, Eric Cooper"); +MODULE_DESCRIPTION(EEEPC_HOTK_NAME); +MODULE_LICENSE("GPL"); + +/* + * ACPI Helpers + */ +static int write_acpi_int(acpi_handle handle, const char *method, int val, + struct acpi_buffer *output) +{ + struct acpi_object_list params; + union acpi_object in_obj; + acpi_status status; + + params.count = 1; + params.pointer = &in_obj; + in_obj.type = ACPI_TYPE_INTEGER; + in_obj.integer.value = val; + + status = acpi_evaluate_object(handle, (char *)method, ¶ms, output); + return (status == AE_OK ? 
0 : -1); +} + +static int read_acpi_int(acpi_handle handle, const char *method, int *val) +{ + acpi_status status; + unsigned long long result; + + status = acpi_evaluate_integer(handle, (char *)method, NULL, &result); + if (ACPI_FAILURE(status)) { + *val = -1; + return -1; + } else { + *val = result; + return 0; + } +} + +static int set_acpi(int cm, int value) +{ + if (ehotk->cm_supported & (0x1 << cm)) { + const char *method = cm_setv[cm]; + if (method == NULL) + return -ENODEV; + if (write_acpi_int(ehotk->handle, method, value, NULL)) + printk(EEEPC_WARNING "Error writing %s\n", method); + } + return 0; +} + +static int get_acpi(int cm) +{ + int value = -1; + if ((ehotk->cm_supported & (0x1 << cm))) { + const char *method = cm_getv[cm]; + if (method == NULL) + return -ENODEV; + if (read_acpi_int(ehotk->handle, method, &value)) + printk(EEEPC_WARNING "Error reading %s\n", method); + } + return value; +} + +/* + * Backlight + */ +static int read_brightness(struct backlight_device *bd) +{ + return get_acpi(CM_ASL_PANELBRIGHT); +} + +static int set_brightness(struct backlight_device *bd, int value) +{ + value = max(0, min(15, value)); + return set_acpi(CM_ASL_PANELBRIGHT, value); +} + +static int update_bl_status(struct backlight_device *bd) +{ + return set_brightness(bd, bd->props.brightness); +} + +/* + * Rfkill helpers + */ + +static int eeepc_wlan_rfkill_set(void *data, enum rfkill_state state) +{ + if (state == RFKILL_STATE_SOFT_BLOCKED) + return set_acpi(CM_ASL_WLAN, 0); + else + return set_acpi(CM_ASL_WLAN, 1); +} + +static int eeepc_wlan_rfkill_state(void *data, enum rfkill_state *state) +{ + if (get_acpi(CM_ASL_WLAN) == 1) + *state = RFKILL_STATE_UNBLOCKED; + else + *state = RFKILL_STATE_SOFT_BLOCKED; + return 0; +} + +static int eeepc_bluetooth_rfkill_set(void *data, enum rfkill_state state) +{ + if (state == RFKILL_STATE_SOFT_BLOCKED) + return set_acpi(CM_ASL_BLUETOOTH, 0); + else + return set_acpi(CM_ASL_BLUETOOTH, 1); +} + +static int eeepc_bluetooth_rfkill_state(void *data, enum rfkill_state *state) +{ + if (get_acpi(CM_ASL_BLUETOOTH) == 1) + *state = RFKILL_STATE_UNBLOCKED; + else + *state = RFKILL_STATE_SOFT_BLOCKED; + return 0; +} + +/* + * Sys helpers + */ +static int parse_arg(const char *buf, unsigned long count, int *val) +{ + if (!count) + return 0; + if (sscanf(buf, "%i", val) != 1) + return -EINVAL; + return count; +} + +static ssize_t store_sys_acpi(int cm, const char *buf, size_t count) +{ + int rv, value; + + rv = parse_arg(buf, count, &value); + if (rv > 0) + set_acpi(cm, value); + return rv; +} + +static ssize_t show_sys_acpi(int cm, char *buf) +{ + return sprintf(buf, "%d\n", get_acpi(cm)); +} + +#define EEEPC_CREATE_DEVICE_ATTR(_name, _cm) \ + static ssize_t show_##_name(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ + { \ + return show_sys_acpi(_cm, buf); \ + } \ + static ssize_t store_##_name(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t count) \ + { \ + return store_sys_acpi(_cm, buf, count); \ + } \ + static struct device_attribute dev_attr_##_name = { \ + .attr = { \ + .name = __stringify(_name), \ + .mode = 0644 }, \ + .show = show_##_name, \ + .store = store_##_name, \ + } + +EEEPC_CREATE_DEVICE_ATTR(camera, CM_ASL_CAMERA); +EEEPC_CREATE_DEVICE_ATTR(cardr, CM_ASL_CARDREADER); +EEEPC_CREATE_DEVICE_ATTR(disp, CM_ASL_DISPLAYSWITCH); + +static struct attribute *platform_attributes[] = { + &dev_attr_camera.attr, + &dev_attr_cardr.attr, + &dev_attr_disp.attr, + NULL +}; + +static struct 
attribute_group platform_attribute_group = { + .attrs = platform_attributes +}; + +/* + * Hotkey functions + */ +static struct key_entry *eepc_get_entry_by_scancode(int code) +{ + struct key_entry *key; + + for (key = eeepc_keymap; key->type != KE_END; key++) + if (code == key->code) + return key; + + return NULL; +} + +static struct key_entry *eepc_get_entry_by_keycode(int code) +{ + struct key_entry *key; + + for (key = eeepc_keymap; key->type != KE_END; key++) + if (code == key->keycode && key->type == KE_KEY) + return key; + + return NULL; +} + +static int eeepc_getkeycode(struct input_dev *dev, int scancode, int *keycode) +{ + struct key_entry *key = eepc_get_entry_by_scancode(scancode); + + if (key && key->type == KE_KEY) { + *keycode = key->keycode; + return 0; + } + + return -EINVAL; +} + +static int eeepc_setkeycode(struct input_dev *dev, int scancode, int keycode) +{ + struct key_entry *key; + int old_keycode; + + if (keycode < 0 || keycode > KEY_MAX) + return -EINVAL; + + key = eepc_get_entry_by_scancode(scancode); + if (key && key->type == KE_KEY) { + old_keycode = key->keycode; + key->keycode = keycode; + set_bit(keycode, dev->keybit); + if (!eepc_get_entry_by_keycode(old_keycode)) + clear_bit(old_keycode, dev->keybit); + return 0; + } + + return -EINVAL; +} + +static int eeepc_hotk_check(void) +{ + const struct key_entry *key; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + int result; + + result = acpi_bus_get_status(ehotk->device); + if (result) + return result; + if (ehotk->device->status.present) { + if (write_acpi_int(ehotk->handle, "INIT", ehotk->init_flag, + &buffer)) { + printk(EEEPC_ERR "Hotkey initialization failed\n"); + return -ENODEV; + } else { + printk(EEEPC_NOTICE "Hotkey init flags 0x%x\n", + ehotk->init_flag); + } + /* get control methods supported */ + if (read_acpi_int(ehotk->handle, "CMSG" + , &ehotk->cm_supported)) { + printk(EEEPC_ERR + "Get control methods supported failed\n"); + return -ENODEV; + } else { + printk(EEEPC_INFO + "Get control methods supported: 0x%x\n", + ehotk->cm_supported); + } + ehotk->inputdev = input_allocate_device(); + if (!ehotk->inputdev) { + printk(EEEPC_INFO "Unable to allocate input device\n"); + return 0; + } + ehotk->inputdev->name = "Asus EeePC extra buttons"; + ehotk->inputdev->phys = EEEPC_HOTK_FILE "/input0"; + ehotk->inputdev->id.bustype = BUS_HOST; + ehotk->inputdev->getkeycode = eeepc_getkeycode; + ehotk->inputdev->setkeycode = eeepc_setkeycode; + + for (key = eeepc_keymap; key->type != KE_END; key++) { + switch (key->type) { + case KE_KEY: + set_bit(EV_KEY, ehotk->inputdev->evbit); + set_bit(key->keycode, ehotk->inputdev->keybit); + break; + } + } + result = input_register_device(ehotk->inputdev); + if (result) { + printk(EEEPC_INFO "Unable to register input device\n"); + input_free_device(ehotk->inputdev); + return 0; + } + } else { + printk(EEEPC_ERR "Hotkey device not present, aborting\n"); + return -EINVAL; + } + return 0; +} + +static void notify_brn(void) +{ + struct backlight_device *bd = eeepc_backlight_device; + if (bd) + bd->props.brightness = read_brightness(bd); +} + +static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data) +{ + static struct key_entry *key; + if (!ehotk) + return; + if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX) + notify_brn(); + acpi_bus_generate_proc_event(ehotk->device, event, + ehotk->event_count[event % 128]++); + if (ehotk->inputdev) { + key = eepc_get_entry_by_scancode(event); + if (key) { + switch (key->type) { + case KE_KEY: + 
input_report_key(ehotk->inputdev, key->keycode, + 1); + input_sync(ehotk->inputdev); + input_report_key(ehotk->inputdev, key->keycode, + 0); + input_sync(ehotk->inputdev); + break; + } + } + } +} + +static int eeepc_hotk_add(struct acpi_device *device) +{ + acpi_status status = AE_OK; + int result; + + if (!device) + return -EINVAL; + printk(EEEPC_NOTICE EEEPC_HOTK_NAME "\n"); + ehotk = kzalloc(sizeof(struct eeepc_hotk), GFP_KERNEL); + if (!ehotk) + return -ENOMEM; + ehotk->init_flag = DISABLE_ASL_WLAN | DISABLE_ASL_DISPLAYSWITCH; + ehotk->handle = device->handle; + strcpy(acpi_device_name(device), EEEPC_HOTK_DEVICE_NAME); + strcpy(acpi_device_class(device), EEEPC_HOTK_CLASS); + device->driver_data = ehotk; + ehotk->device = device; + result = eeepc_hotk_check(); + if (result) + goto end; + status = acpi_install_notify_handler(ehotk->handle, ACPI_SYSTEM_NOTIFY, + eeepc_hotk_notify, ehotk); + if (ACPI_FAILURE(status)) + printk(EEEPC_ERR "Error installing notify handler\n"); + + if (get_acpi(CM_ASL_WLAN) != -1) { + ehotk->eeepc_wlan_rfkill = rfkill_allocate(&device->dev, + RFKILL_TYPE_WLAN); + + if (!ehotk->eeepc_wlan_rfkill) + goto end; + + ehotk->eeepc_wlan_rfkill->name = "eeepc-wlan"; + ehotk->eeepc_wlan_rfkill->toggle_radio = eeepc_wlan_rfkill_set; + ehotk->eeepc_wlan_rfkill->get_state = eeepc_wlan_rfkill_state; + if (get_acpi(CM_ASL_WLAN) == 1) + ehotk->eeepc_wlan_rfkill->state = + RFKILL_STATE_UNBLOCKED; + else + ehotk->eeepc_wlan_rfkill->state = + RFKILL_STATE_SOFT_BLOCKED; + rfkill_register(ehotk->eeepc_wlan_rfkill); + } + + if (get_acpi(CM_ASL_BLUETOOTH) != -1) { + ehotk->eeepc_bluetooth_rfkill = + rfkill_allocate(&device->dev, RFKILL_TYPE_BLUETOOTH); + + if (!ehotk->eeepc_bluetooth_rfkill) + goto end; + + ehotk->eeepc_bluetooth_rfkill->name = "eeepc-bluetooth"; + ehotk->eeepc_bluetooth_rfkill->toggle_radio = + eeepc_bluetooth_rfkill_set; + ehotk->eeepc_bluetooth_rfkill->get_state = + eeepc_bluetooth_rfkill_state; + if (get_acpi(CM_ASL_BLUETOOTH) == 1) + ehotk->eeepc_bluetooth_rfkill->state = + RFKILL_STATE_UNBLOCKED; + else + ehotk->eeepc_bluetooth_rfkill->state = + RFKILL_STATE_SOFT_BLOCKED; + rfkill_register(ehotk->eeepc_bluetooth_rfkill); + } + + end: + if (result) { + kfree(ehotk); + ehotk = NULL; + } + return result; +} + +static int eeepc_hotk_remove(struct acpi_device *device, int type) +{ + acpi_status status = 0; + + if (!device || !acpi_driver_data(device)) + return -EINVAL; + status = acpi_remove_notify_handler(ehotk->handle, ACPI_SYSTEM_NOTIFY, + eeepc_hotk_notify); + if (ACPI_FAILURE(status)) + printk(EEEPC_ERR "Error removing notify handler\n"); + kfree(ehotk); + return 0; +} + +/* + * Hwmon + */ +static int eeepc_get_fan_pwm(void) +{ + int value = 0; + + read_acpi_int(NULL, EEEPC_EC_FAN_PWM, &value); + value = value * 255 / 100; + return (value); +} + +static void eeepc_set_fan_pwm(int value) +{ + value = SENSORS_LIMIT(value, 0, 255); + value = value * 100 / 255; + ec_write(EEEPC_EC_SC02, value); +} + +static int eeepc_get_fan_rpm(void) +{ + int high = 0; + int low = 0; + + read_acpi_int(NULL, EEEPC_EC_FAN_HRPM, &high); + read_acpi_int(NULL, EEEPC_EC_FAN_LRPM, &low); + return (high << 8 | low); +} + +static int eeepc_get_fan_ctrl(void) +{ + int value = 0; + + read_acpi_int(NULL, EEEPC_EC_FAN_CTRL, &value); + return ((value & 0x02 ? 
1 : 0)); +} + +static void eeepc_set_fan_ctrl(int manual) +{ + int value = 0; + + read_acpi_int(NULL, EEEPC_EC_FAN_CTRL, &value); + if (manual) + value |= 0x02; + else + value &= ~0x02; + ec_write(EEEPC_EC_SFB3, value); +} + +static ssize_t store_sys_hwmon(void (*set)(int), const char *buf, size_t count) +{ + int rv, value; + + rv = parse_arg(buf, count, &value); + if (rv > 0) + set(value); + return rv; +} + +static ssize_t show_sys_hwmon(int (*get)(void), char *buf) +{ + return sprintf(buf, "%d\n", get()); +} + +#define EEEPC_CREATE_SENSOR_ATTR(_name, _mode, _get, _set) \ + static ssize_t show_##_name(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ + { \ + return show_sys_hwmon(_get, buf); \ + } \ + static ssize_t store_##_name(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t count) \ + { \ + return store_sys_hwmon(_set, buf, count); \ + } \ + static SENSOR_DEVICE_ATTR(_name, _mode, show_##_name, store_##_name, 0); + +EEEPC_CREATE_SENSOR_ATTR(fan1_input, S_IRUGO, eeepc_get_fan_rpm, NULL); +EEEPC_CREATE_SENSOR_ATTR(pwm1, S_IRUGO | S_IWUSR, + eeepc_get_fan_pwm, eeepc_set_fan_pwm); +EEEPC_CREATE_SENSOR_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, + eeepc_get_fan_ctrl, eeepc_set_fan_ctrl); + +static ssize_t +show_name(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "eeepc\n"); +} +static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0); + +static struct attribute *hwmon_attributes[] = { + &sensor_dev_attr_pwm1.dev_attr.attr, + &sensor_dev_attr_fan1_input.dev_attr.attr, + &sensor_dev_attr_pwm1_enable.dev_attr.attr, + &sensor_dev_attr_name.dev_attr.attr, + NULL +}; + +static struct attribute_group hwmon_attribute_group = { + .attrs = hwmon_attributes +}; + +/* + * exit/init + */ +static void eeepc_backlight_exit(void) +{ + if (eeepc_backlight_device) + backlight_device_unregister(eeepc_backlight_device); + if (ehotk->inputdev) + input_unregister_device(ehotk->inputdev); + if (ehotk->eeepc_wlan_rfkill) + rfkill_unregister(ehotk->eeepc_wlan_rfkill); + if (ehotk->eeepc_bluetooth_rfkill) + rfkill_unregister(ehotk->eeepc_bluetooth_rfkill); + eeepc_backlight_device = NULL; +} + +static void eeepc_hwmon_exit(void) +{ + struct device *hwmon; + + hwmon = eeepc_hwmon_device; + if (!hwmon) + return; + sysfs_remove_group(&hwmon->kobj, + &hwmon_attribute_group); + hwmon_device_unregister(hwmon); + eeepc_hwmon_device = NULL; +} + +static void __exit eeepc_laptop_exit(void) +{ + eeepc_backlight_exit(); + eeepc_hwmon_exit(); + acpi_bus_unregister_driver(&eeepc_hotk_driver); + sysfs_remove_group(&platform_device->dev.kobj, + &platform_attribute_group); + platform_device_unregister(platform_device); + platform_driver_unregister(&platform_driver); +} + +static int eeepc_backlight_init(struct device *dev) +{ + struct backlight_device *bd; + + bd = backlight_device_register(EEEPC_HOTK_FILE, dev, + NULL, &eeepcbl_ops); + if (IS_ERR(bd)) { + printk(EEEPC_ERR + "Could not register eeepc backlight device\n"); + eeepc_backlight_device = NULL; + return PTR_ERR(bd); + } + eeepc_backlight_device = bd; + bd->props.max_brightness = 15; + bd->props.brightness = read_brightness(NULL); + bd->props.power = FB_BLANK_UNBLANK; + backlight_update_status(bd); + return 0; +} + +static int eeepc_hwmon_init(struct device *dev) +{ + struct device *hwmon; + int result; + + hwmon = hwmon_device_register(dev); + if (IS_ERR(hwmon)) { + printk(EEEPC_ERR + "Could not register eeepc hwmon device\n"); + eeepc_hwmon_device = NULL; + return 
PTR_ERR(hwmon); + } + eeepc_hwmon_device = hwmon; + result = sysfs_create_group(&hwmon->kobj, + &hwmon_attribute_group); + if (result) + eeepc_hwmon_exit(); + return result; +} + +static int __init eeepc_laptop_init(void) +{ + struct device *dev; + int result; + + if (acpi_disabled) + return -ENODEV; + result = acpi_bus_register_driver(&eeepc_hotk_driver); + if (result < 0) + return result; + if (!ehotk) { + acpi_bus_unregister_driver(&eeepc_hotk_driver); + return -ENODEV; + } + dev = acpi_get_physical_device(ehotk->device->handle); + + if (!acpi_video_backlight_support()) { + result = eeepc_backlight_init(dev); + if (result) + goto fail_backlight; + } else + printk(EEEPC_INFO "Backlight controlled by ACPI video " + "driver\n"); + + result = eeepc_hwmon_init(dev); + if (result) + goto fail_hwmon; + /* Register platform stuff */ + result = platform_driver_register(&platform_driver); + if (result) + goto fail_platform_driver; + platform_device = platform_device_alloc(EEEPC_HOTK_FILE, -1); + if (!platform_device) { + result = -ENOMEM; + goto fail_platform_device1; + } + result = platform_device_add(platform_device); + if (result) + goto fail_platform_device2; + result = sysfs_create_group(&platform_device->dev.kobj, + &platform_attribute_group); + if (result) + goto fail_sysfs; + return 0; +fail_sysfs: + platform_device_del(platform_device); +fail_platform_device2: + platform_device_put(platform_device); +fail_platform_device1: + platform_driver_unregister(&platform_driver); +fail_platform_driver: + eeepc_hwmon_exit(); +fail_hwmon: + eeepc_backlight_exit(); +fail_backlight: + return result; +} + +module_init(eeepc_laptop_init); +module_exit(eeepc_laptop_exit); diff --git a/drivers/misc/eeprom_93cx6.c b/drivers/misc/eeprom_93cx6.c new file mode 100644 index 0000000..15b1780 --- /dev/null +++ b/drivers/misc/eeprom_93cx6.c @@ -0,0 +1,240 @@ +/* + Copyright (C) 2004 - 2006 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: eeprom_93cx6 + Abstract: EEPROM reader routines for 93cx6 chipsets. + Supported chipsets: 93c46 & 93c66. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/eeprom_93cx6.h> + +MODULE_AUTHOR("http://rt2x00.serialmonkey.com"); +MODULE_VERSION("1.0"); +MODULE_DESCRIPTION("EEPROM 93cx6 chip driver"); +MODULE_LICENSE("GPL"); + +static inline void eeprom_93cx6_pulse_high(struct eeprom_93cx6 *eeprom) +{ + eeprom->reg_data_clock = 1; + eeprom->register_write(eeprom); + + /* + * Add a short delay for the pulse to work. + * According to the specifications the "maximum minimum" + * time should be 450ns. + */ + ndelay(450); +} + +static inline void eeprom_93cx6_pulse_low(struct eeprom_93cx6 *eeprom) +{ + eeprom->reg_data_clock = 0; + eeprom->register_write(eeprom); + + /* + * Add a short delay for the pulse to work. 
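 + * As in eeprom_93cx6_pulse_high() above, the delay fixes how long the + * clock line is held in its new state.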
+ * According to the specifications the "maximum minimum" + * time should be 450ns. + */ + ndelay(450); +} + +static void eeprom_93cx6_startup(struct eeprom_93cx6 *eeprom) +{ + /* + * Clear all flags, and enable chip select. + */ + eeprom->register_read(eeprom); + eeprom->reg_data_in = 0; + eeprom->reg_data_out = 0; + eeprom->reg_data_clock = 0; + eeprom->reg_chip_select = 1; + eeprom->register_write(eeprom); + + /* + * kick a pulse. + */ + eeprom_93cx6_pulse_high(eeprom); + eeprom_93cx6_pulse_low(eeprom); +} + +static void eeprom_93cx6_cleanup(struct eeprom_93cx6 *eeprom) +{ + /* + * Clear chip_select and data_in flags. + */ + eeprom->register_read(eeprom); + eeprom->reg_data_in = 0; + eeprom->reg_chip_select = 0; + eeprom->register_write(eeprom); + + /* + * kick a pulse. + */ + eeprom_93cx6_pulse_high(eeprom); + eeprom_93cx6_pulse_low(eeprom); +} + +static void eeprom_93cx6_write_bits(struct eeprom_93cx6 *eeprom, + const u16 data, const u16 count) +{ + unsigned int i; + + eeprom->register_read(eeprom); + + /* + * Clear data flags. + */ + eeprom->reg_data_in = 0; + eeprom->reg_data_out = 0; + + /* + * Start writing all bits. + */ + for (i = count; i > 0; i--) { + /* + * Check if this bit needs to be set. + */ + eeprom->reg_data_in = !!(data & (1 << (i - 1))); + + /* + * Write the bit to the eeprom register. + */ + eeprom->register_write(eeprom); + + /* + * Kick a pulse. + */ + eeprom_93cx6_pulse_high(eeprom); + eeprom_93cx6_pulse_low(eeprom); + } + + eeprom->reg_data_in = 0; + eeprom->register_write(eeprom); +} + +static void eeprom_93cx6_read_bits(struct eeprom_93cx6 *eeprom, + u16 *data, const u16 count) +{ + unsigned int i; + u16 buf = 0; + + eeprom->register_read(eeprom); + + /* + * Clear data flags. + */ + eeprom->reg_data_in = 0; + eeprom->reg_data_out = 0; + + /* + * Start reading all bits. + */ + for (i = count; i > 0; i--) { + eeprom_93cx6_pulse_high(eeprom); + + eeprom->register_read(eeprom); + + /* + * Clear data_in flag. + */ + eeprom->reg_data_in = 0; + + /* + * Read if the bit has been set. + */ + if (eeprom->reg_data_out) + buf |= (1 << (i - 1)); + + eeprom_93cx6_pulse_low(eeprom); + } + + *data = buf; +} + +/** + * eeprom_93cx6_read - Read a word from eeprom + * @eeprom: Pointer to eeprom structure + * @word: Word index of the word to read + * @data: target pointer where the read word will be stored + * + * This function will read the eeprom data as host-endian word + * into the given data pointer. + */ +void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom, const u8 word, + u16 *data) +{ + u16 command; + + /* + * Initialize the eeprom register + */ + eeprom_93cx6_startup(eeprom); + + /* + * Select the read opcode and the word to be read. + */ + command = (PCI_EEPROM_READ_OPCODE << eeprom->width) | word; + eeprom_93cx6_write_bits(eeprom, command, + PCI_EEPROM_WIDTH_OPCODE + eeprom->width); + + /* + * Read the requested 16 bits. + */ + eeprom_93cx6_read_bits(eeprom, data, 16); + + /* + * Cleanup eeprom register. + */ + eeprom_93cx6_cleanup(eeprom); +} +EXPORT_SYMBOL_GPL(eeprom_93cx6_read); + +/** + * eeprom_93cx6_multiread - Read multiple words from eeprom + * @eeprom: Pointer to eeprom structure + * @word: Word index from where we should start reading + * @data: target pointer where the read words will be stored + * @words: Number of words that should be read. + * + * This function will read all requested words from the eeprom; + * this is done by calling eeprom_93cx6_read() multiple times. 
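 + * + * A hypothetical caller reading four words starting at word 0: + * + *	__le16 buf[4]; + * + *	eeprom_93cx6_multiread(&eeprom, 0, buf, 4); + *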
+ * Unlike eeprom_93cx6_read(), which returns a host-ordered word, this + * function stores little-endian words. + */ +void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word, + __le16 *data, const u16 words) +{ + unsigned int i; + u16 tmp; + + for (i = 0; i < words; i++) { + tmp = 0; + eeprom_93cx6_read(eeprom, word + i, &tmp); + data[i] = cpu_to_le16(tmp); + } +} +EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread); + diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c new file mode 100644 index 0000000..0736cff --- /dev/null +++ b/drivers/misc/enclosure.c @@ -0,0 +1,536 @@ +/* + * Enclosure Services + * + * Copyright (C) 2008 James Bottomley <James.Bottomley@HansenPartnership.com> + * +**----------------------------------------------------------------------------- +** +** This program is free software; you can redistribute it and/or +** modify it under the terms of the GNU General Public License +** version 2 as published by the Free Software Foundation. +** +** This program is distributed in the hope that it will be useful, +** but WITHOUT ANY WARRANTY; without even the implied warranty of +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +** GNU General Public License for more details. +** +** You should have received a copy of the GNU General Public License +** along with this program; if not, write to the Free Software +** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +** +**----------------------------------------------------------------------------- +*/ +#include <linux/device.h> +#include <linux/enclosure.h> +#include <linux/err.h> +#include <linux/list.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> + +static LIST_HEAD(container_list); +static DEFINE_MUTEX(container_list_lock); +static struct class enclosure_class; + +/** + * enclosure_find - find an enclosure given a device + * @dev: the device for which to find an enclosure + * + * Looks through the list of registered enclosures to see + * if it can find a match for a device. Returns NULL if no + * enclosure is found. Obtains a reference to the enclosure class + * device which must be released with put_device(). + */ +struct enclosure_device *enclosure_find(struct device *dev) +{ + struct enclosure_device *edev; + + mutex_lock(&container_list_lock); + list_for_each_entry(edev, &container_list, node) { + if (edev->edev.parent == dev) { + get_device(&edev->edev); + mutex_unlock(&container_list_lock); + return edev; + } + } + mutex_unlock(&container_list_lock); + + return NULL; +} +EXPORT_SYMBOL_GPL(enclosure_find); + +/** + * enclosure_for_each_device - calls a function for each enclosure + * @fn: the function to call + * @data: the data to pass to each call + * + * Loops over all the enclosures calling the function. 
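 + * + * A hypothetical callback that counts the registered enclosures: + * + *	static int count_enclosures(struct enclosure_device *edev, void *data) + *	{ + *		(*(int *)data)++; + *		return 0; + *	}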
+ * + * Note: this function holds a mutex across calls to + * @fn, so it must be called from non-atomic context, and @fn may + * (although it should not) sleep or otherwise cause the mutex to be + * held for indefinite periods. + */ +int enclosure_for_each_device(int (*fn)(struct enclosure_device *, void *), + void *data) +{ + int error = 0; + struct enclosure_device *edev; + + mutex_lock(&container_list_lock); + list_for_each_entry(edev, &container_list, node) { + error = fn(edev, data); + if (error) + break; + } + mutex_unlock(&container_list_lock); + + return error; +} +EXPORT_SYMBOL_GPL(enclosure_for_each_device); + +/** + * enclosure_register - register device as an enclosure + * + * @dev: device containing the enclosure + * @name: the name of the enclosure + * @components: number of components in the enclosure + * @cb: platform callbacks for the enclosure components + * + * This sets up the device for being an enclosure. Note that @dev does + * not have to be a dedicated enclosure device. It may be some other type + * of device that additionally responds to enclosure services. + */ +struct enclosure_device * +enclosure_register(struct device *dev, const char *name, int components, + struct enclosure_component_callbacks *cb) +{ + struct enclosure_device *edev = + kzalloc(sizeof(struct enclosure_device) + + sizeof(struct enclosure_component)*components, + GFP_KERNEL); + int err, i; + + BUG_ON(!cb); + + if (!edev) + return ERR_PTR(-ENOMEM); + + edev->components = components; + + edev->edev.class = &enclosure_class; + edev->edev.parent = get_device(dev); + edev->cb = cb; + snprintf(edev->edev.bus_id, BUS_ID_SIZE, "%s", name); + err = device_register(&edev->edev); + if (err) + goto err; + + for (i = 0; i < components; i++) + edev->component[i].number = -1; + + mutex_lock(&container_list_lock); + list_add_tail(&edev->node, &container_list); + mutex_unlock(&container_list_lock); + + return edev; + + err: + put_device(edev->edev.parent); + kfree(edev); + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(enclosure_register); + +static struct enclosure_component_callbacks enclosure_null_callbacks; + +/** + * enclosure_unregister - remove an enclosure + * + * @edev: the registered enclosure to remove + */ +void enclosure_unregister(struct enclosure_device *edev) +{ + int i; + + mutex_lock(&container_list_lock); + list_del(&edev->node); + mutex_unlock(&container_list_lock); + + for (i = 0; i < edev->components; i++) + if (edev->component[i].number != -1) + device_unregister(&edev->component[i].cdev); + + /* prevent any callbacks into service user */ + edev->cb = &enclosure_null_callbacks; + device_unregister(&edev->edev); +} +EXPORT_SYMBOL_GPL(enclosure_unregister); + +#define ENCLOSURE_NAME_SIZE 64 + +static void enclosure_link_name(struct enclosure_component *cdev, char *name) +{ + strcpy(name, "enclosure_device:"); + strcat(name, cdev->cdev.bus_id); +} + +static void enclosure_remove_links(struct enclosure_component *cdev) +{ + char name[ENCLOSURE_NAME_SIZE]; + + enclosure_link_name(cdev, name); + sysfs_remove_link(&cdev->dev->kobj, name); + sysfs_remove_link(&cdev->cdev.kobj, "device"); +} + +static int enclosure_add_links(struct enclosure_component *cdev) +{ + int error; + char name[ENCLOSURE_NAME_SIZE]; + + error = sysfs_create_link(&cdev->cdev.kobj, &cdev->dev->kobj, "device"); + if (error) + return error; + + enclosure_link_name(cdev, name); + error = sysfs_create_link(&cdev->dev->kobj, &cdev->cdev.kobj, name); + if (error) + sysfs_remove_link(&cdev->cdev.kobj, "device"); + + return error; +} + +static void enclosure_release(struct device *cdev) +{ + struct enclosure_device *edev 
= to_enclosure_device(cdev); + + put_device(cdev->parent); + kfree(edev); +} + +static void enclosure_component_release(struct device *dev) +{ + struct enclosure_component *cdev = to_enclosure_component(dev); + + if (cdev->dev) { + enclosure_remove_links(cdev); + put_device(cdev->dev); + } + put_device(dev->parent); +} + +static struct attribute_group *enclosure_groups[]; + +/** + * enclosure_component_register - add a particular component to an enclosure + * @edev: the enclosure to add the component to + * @number: the component number + * @type: the type of component being added + * @name: an optional name to appear in sysfs (leave NULL if none) + * + * Registers the component. The name is optional for enclosures that + * give their components a unique name. If not, leave the field NULL + * and a name will be assigned. + * + * Returns a pointer to the enclosure component or an error. + */ +struct enclosure_component * +enclosure_component_register(struct enclosure_device *edev, + unsigned int number, + enum enclosure_component_type type, + const char *name) +{ + struct enclosure_component *ecomp; + struct device *cdev; + int err; + + if (number >= edev->components) + return ERR_PTR(-EINVAL); + + ecomp = &edev->component[number]; + + if (ecomp->number != -1) + return ERR_PTR(-EINVAL); + + ecomp->type = type; + ecomp->number = number; + cdev = &ecomp->cdev; + cdev->parent = get_device(&edev->edev); + if (name) + snprintf(cdev->bus_id, BUS_ID_SIZE, "%s", name); + else + snprintf(cdev->bus_id, BUS_ID_SIZE, "%u", number); + + cdev->release = enclosure_component_release; + cdev->groups = enclosure_groups; + + err = device_register(cdev); + if (err) + return ERR_PTR(err); + + return ecomp; +} +EXPORT_SYMBOL_GPL(enclosure_component_register); + +/** + * enclosure_add_device - add a device as being part of an enclosure + * @edev: the enclosure device being added to. + * @component: the number of the component + * @dev: the device being added + * + * Declares a real device to reside in slot (or identifier) @component + * of an enclosure. This will cause the relevant sysfs links to appear. + * This function may also be used to change a device associated with + * an enclosure without having to call enclosure_remove_device() in + * between. + * + * Returns zero on success or an error. + */ +int enclosure_add_device(struct enclosure_device *edev, int component, + struct device *dev) +{ + struct enclosure_component *cdev; + + if (!edev || component >= edev->components) + return -EINVAL; + + cdev = &edev->component[component]; + + if (cdev->dev) + enclosure_remove_links(cdev); + + put_device(cdev->dev); + cdev->dev = get_device(dev); + return enclosure_add_links(cdev); +} +EXPORT_SYMBOL_GPL(enclosure_add_device); + +/** + * enclosure_remove_device - remove a device from an enclosure + * @edev: the enclosure device + * @component: the number of the component to remove + * + * Returns zero on success or an error. 
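 + * + * Hypothetical pairing with enclosure_add_device() for slot 0: + * + *	enclosure_add_device(edev, 0, dev); + *	... + *	enclosure_remove_device(edev, 0);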
+ * + */ +int enclosure_remove_device(struct enclosure_device *edev, int component) +{ + struct enclosure_component *cdev; + + if (!edev || component >= edev->components) + return -EINVAL; + + cdev = &edev->component[component]; + + device_del(&cdev->cdev); + put_device(cdev->dev); + cdev->dev = NULL; + return device_add(&cdev->cdev); +} +EXPORT_SYMBOL_GPL(enclosure_remove_device); + +/* + * sysfs pieces below + */ + +static ssize_t enclosure_show_components(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + struct enclosure_device *edev = to_enclosure_device(cdev); + + return snprintf(buf, 40, "%d\n", edev->components); +} + +static struct device_attribute enclosure_attrs[] = { + __ATTR(components, S_IRUGO, enclosure_show_components, NULL), + __ATTR_NULL +}; + +static struct class enclosure_class = { + .name = "enclosure", + .owner = THIS_MODULE, + .dev_release = enclosure_release, + .dev_attrs = enclosure_attrs, +}; + +static const char *const enclosure_status [] = { + [ENCLOSURE_STATUS_UNSUPPORTED] = "unsupported", + [ENCLOSURE_STATUS_OK] = "OK", + [ENCLOSURE_STATUS_CRITICAL] = "critical", + [ENCLOSURE_STATUS_NON_CRITICAL] = "non-critical", + [ENCLOSURE_STATUS_UNRECOVERABLE] = "unrecoverable", + [ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed", + [ENCLOSURE_STATUS_UNKNOWN] = "unknown", + [ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable", +}; + +static const char *const enclosure_type [] = { + [ENCLOSURE_COMPONENT_DEVICE] = "device", + [ENCLOSURE_COMPONENT_ARRAY_DEVICE] = "array device", +}; + +static ssize_t get_component_fault(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct enclosure_device *edev = to_enclosure_device(cdev->parent); + struct enclosure_component *ecomp = to_enclosure_component(cdev); + + if (edev->cb->get_fault) + edev->cb->get_fault(edev, ecomp); + return snprintf(buf, 40, "%d\n", ecomp->fault); +} + +static ssize_t set_component_fault(struct device *cdev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct enclosure_device *edev = to_enclosure_device(cdev->parent); + struct enclosure_component *ecomp = to_enclosure_component(cdev); + int val = simple_strtoul(buf, NULL, 0); + + if (edev->cb->set_fault) + edev->cb->set_fault(edev, ecomp, val); + return count; +} + +static ssize_t get_component_status(struct device *cdev, + struct device_attribute *attr,char *buf) +{ + struct enclosure_device *edev = to_enclosure_device(cdev->parent); + struct enclosure_component *ecomp = to_enclosure_component(cdev); + + if (edev->cb->get_status) + edev->cb->get_status(edev, ecomp); + return snprintf(buf, 40, "%s\n", enclosure_status[ecomp->status]); +} + +static ssize_t set_component_status(struct device *cdev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct enclosure_device *edev = to_enclosure_device(cdev->parent); + struct enclosure_component *ecomp = to_enclosure_component(cdev); + int i; + + for (i = 0; enclosure_status[i]; i++) { + if (strncmp(buf, enclosure_status[i], + strlen(enclosure_status[i])) == 0 && + (buf[strlen(enclosure_status[i])] == '\n' || + buf[strlen(enclosure_status[i])] == '\0')) + break; + } + + if (enclosure_status[i] && edev->cb->set_status) { + edev->cb->set_status(edev, ecomp, i); + return count; + } else + return -EINVAL; +} + +static ssize_t get_component_active(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct enclosure_device *edev = to_enclosure_device(cdev->parent); + struct enclosure_component *ecomp = 
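+/*
+ * e.g. `echo critical > /sys/class/enclosure/<encl>/<slot>/status`
+ * (path illustrative) lands in set_component_status() above, which
+ * matches the written string against enclosure_status[] and passes
+ * the resulting index to the registered ->set_status() callback.
+ */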
to_enclosure_component(cdev); + + if (edev->cb->get_active) + edev->cb->get_active(edev, ecomp); + return snprintf(buf, 40, "%d\n", ecomp->active); +} + +static ssize_t set_component_active(struct device *cdev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct enclosure_device *edev = to_enclosure_device(cdev->parent); + struct enclosure_component *ecomp = to_enclosure_component(cdev); + int val = simple_strtoul(buf, NULL, 0); + + if (edev->cb->set_active) + edev->cb->set_active(edev, ecomp, val); + return count; +} + +static ssize_t get_component_locate(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct enclosure_device *edev = to_enclosure_device(cdev->parent); + struct enclosure_component *ecomp = to_enclosure_component(cdev); + + if (edev->cb->get_locate) + edev->cb->get_locate(edev, ecomp); + return snprintf(buf, 40, "%d\n", ecomp->locate); +} + +static ssize_t set_component_locate(struct device *cdev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct enclosure_device *edev = to_enclosure_device(cdev->parent); + struct enclosure_component *ecomp = to_enclosure_component(cdev); + int val = simple_strtoul(buf, NULL, 0); + + if (edev->cb->set_locate) + edev->cb->set_locate(edev, ecomp, val); + return count; +} + +static ssize_t get_component_type(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct enclosure_component *ecomp = to_enclosure_component(cdev); + + return snprintf(buf, 40, "%s\n", enclosure_type[ecomp->type]); +} + + +static DEVICE_ATTR(fault, S_IRUGO | S_IWUSR, get_component_fault, + set_component_fault); +static DEVICE_ATTR(status, S_IRUGO | S_IWUSR, get_component_status, + set_component_status); +static DEVICE_ATTR(active, S_IRUGO | S_IWUSR, get_component_active, + set_component_active); +static DEVICE_ATTR(locate, S_IRUGO | S_IWUSR, get_component_locate, + set_component_locate); +static DEVICE_ATTR(type, S_IRUGO, get_component_type, NULL); + +static struct attribute *enclosure_component_attrs[] = { + &dev_attr_fault.attr, + &dev_attr_status.attr, + &dev_attr_active.attr, + &dev_attr_locate.attr, + &dev_attr_type.attr, + NULL +}; + +static struct attribute_group enclosure_group = { + .attrs = enclosure_component_attrs, +}; + +static struct attribute_group *enclosure_groups[] = { + &enclosure_group, + NULL +}; + +static int __init enclosure_init(void) +{ + int err; + + err = class_register(&enclosure_class); + if (err) + return err; + + return 0; +} + +static void __exit enclosure_exit(void) +{ + class_unregister(&enclosure_class); +} + +module_init(enclosure_init); +module_exit(enclosure_exit); + +MODULE_AUTHOR("James Bottomley"); +MODULE_DESCRIPTION("Enclosure Services"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/fujitsu-laptop.c b/drivers/misc/fujitsu-laptop.c new file mode 100644 index 0000000..a7dd3e9 --- /dev/null +++ b/drivers/misc/fujitsu-laptop.c @@ -0,0 +1,1126 @@ +/*-*-linux-c-*-*/ + +/* + Copyright (C) 2007,2008 Jonathan Woithe <jwoithe@physics.adelaide.edu.au> + Copyright (C) 2008 Peter Gruber <nokos@gmx.net> + Based on earlier work: + Copyright (C) 2003 Shane Spencer <shane@bogomip.com> + Adrian Yee <brewt-fujitsu@brewt.org> + + Templated from msi-laptop.c and thinkpad_acpi.c which is copyright + by its respective authors. 
+ + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + */ + +/* + * fujitsu-laptop.c - Fujitsu laptop support, providing access to additional + * features made available on a range of Fujitsu laptops including the + * P2xxx/P5xxx/S6xxx/S7xxx series. + * + * This driver exports a few files in /sys/devices/platform/fujitsu-laptop/; + * others may be added at a later date. + * + * lcd_level - Screen brightness: contains a single integer in the + * range 0..7. (rw) + * + * In addition to these platform device attributes the driver + * registers itself in the Linux backlight control subsystem and is + * available to userspace under /sys/class/backlight/fujitsu-laptop/. + * + * Hotkeys present on certain Fujitsu laptops (eg: the S6xxx series) are + * also supported by this driver. + * + * This driver has been tested on a Fujitsu Lifebook S6410, S7020 and + * P8010. It should work on most P-series and S-series Lifebooks, but + * YMMV. + * + * The module parameter use_alt_lcd_levels switches between different ACPI + * brightness controls which are used by different Fujitsu laptops. In most + * cases the correct method is automatically detected. "use_alt_lcd_levels=1" + * is applicable for a Fujitsu Lifebook S6410 if autodetection fails. + * + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/acpi.h> +#include <linux/dmi.h> +#include <linux/backlight.h> +#include <linux/input.h> +#include <linux/kfifo.h> +#include <linux/video_output.h> +#include <linux/platform_device.h> + +#define FUJITSU_DRIVER_VERSION "0.4.3" + +#define FUJITSU_LCD_N_LEVELS 8 + +#define ACPI_FUJITSU_CLASS "fujitsu" +#define ACPI_FUJITSU_HID "FUJ02B1" +#define ACPI_FUJITSU_DRIVER_NAME "Fujitsu laptop FUJ02B1 ACPI brightness driver" +#define ACPI_FUJITSU_DEVICE_NAME "Fujitsu FUJ02B1" +#define ACPI_FUJITSU_HOTKEY_HID "FUJ02E3" +#define ACPI_FUJITSU_HOTKEY_DRIVER_NAME "Fujitsu laptop FUJ02E3 ACPI hotkeys driver" +#define ACPI_FUJITSU_HOTKEY_DEVICE_NAME "Fujitsu FUJ02E3" + +#define ACPI_FUJITSU_NOTIFY_CODE1 0x80 + +#define ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS 0x86 +#define ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS 0x87 + +/* Hotkey details */ +#define KEY1_CODE 0x410 /* codes for the keys in the GIRB register */ +#define KEY2_CODE 0x411 +#define KEY3_CODE 0x412 +#define KEY4_CODE 0x413 + +#define MAX_HOTKEY_RINGBUFFER_SIZE 100 +#define RINGBUFFERSIZE 40 + +/* Debugging */ +#define FUJLAPTOP_LOG ACPI_FUJITSU_HID ": " +#define FUJLAPTOP_ERR KERN_ERR FUJLAPTOP_LOG +#define FUJLAPTOP_NOTICE KERN_NOTICE FUJLAPTOP_LOG +#define FUJLAPTOP_INFO KERN_INFO FUJLAPTOP_LOG +#define FUJLAPTOP_DEBUG KERN_DEBUG FUJLAPTOP_LOG + +#define FUJLAPTOP_DBG_ALL 0xffff +#define FUJLAPTOP_DBG_ERROR 0x0001 +#define FUJLAPTOP_DBG_WARN 0x0002 +#define FUJLAPTOP_DBG_INFO 0x0004 +#define FUJLAPTOP_DBG_TRACE 0x0008 + +#define dbg_printk(a_dbg_level, format, arg...) 
\ + do { if (dbg_level & a_dbg_level) \ + printk(FUJLAPTOP_DEBUG "%s: " format, __func__ , ## arg); \ + } while (0) +#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG +#define vdbg_printk(a_dbg_level, format, arg...) \ + dbg_printk(a_dbg_level, format, ## arg) +#else +#define vdbg_printk(a_dbg_level, format, arg...) +#endif + +/* Device controlling the backlight and associated keys */ +struct fujitsu_t { + acpi_handle acpi_handle; + struct acpi_device *dev; + struct input_dev *input; + char phys[32]; + struct backlight_device *bl_device; + struct platform_device *pf_device; + int keycode1, keycode2, keycode3, keycode4; + + unsigned int max_brightness; + unsigned int brightness_changed; + unsigned int brightness_level; +}; + +static struct fujitsu_t *fujitsu; +static int use_alt_lcd_levels = -1; +static int disable_brightness_keys = -1; +static int disable_brightness_adjust = -1; + +/* Device used to access other hotkeys on the laptop */ +struct fujitsu_hotkey_t { + acpi_handle acpi_handle; + struct acpi_device *dev; + struct input_dev *input; + char phys[32]; + struct platform_device *pf_device; + struct kfifo *fifo; + spinlock_t fifo_lock; + + unsigned int irb; /* info about the pressed buttons */ +}; + +static struct fujitsu_hotkey_t *fujitsu_hotkey; + +static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event, + void *data); + +#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG +static u32 dbg_level = 0x03; +#endif + +static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data); + +/* Hardware access for LCD brightness control */ + +static int set_lcd_level(int level) +{ + acpi_status status = AE_OK; + union acpi_object arg0 = { ACPI_TYPE_INTEGER }; + struct acpi_object_list arg_list = { 1, &arg0 }; + acpi_handle handle = NULL; + + vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBLL [%d]\n", + level); + + if (level < 0 || level >= fujitsu->max_brightness) + return -EINVAL; + + if (!fujitsu) + return -EINVAL; + + status = acpi_get_handle(fujitsu->acpi_handle, "SBLL", &handle); + if (ACPI_FAILURE(status)) { + vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBLL not present\n"); + return -ENODEV; + } + + arg0.integer.value = level; + + status = acpi_evaluate_object(handle, NULL, &arg_list, NULL); + if (ACPI_FAILURE(status)) + return -ENODEV; + + return 0; +} + +static int set_lcd_level_alt(int level) +{ + acpi_status status = AE_OK; + union acpi_object arg0 = { ACPI_TYPE_INTEGER }; + struct acpi_object_list arg_list = { 1, &arg0 }; + acpi_handle handle = NULL; + + vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBL2 [%d]\n", + level); + + if (level < 0 || level >= fujitsu->max_brightness) + return -EINVAL; + + if (!fujitsu) + return -EINVAL; + + status = acpi_get_handle(fujitsu->acpi_handle, "SBL2", &handle); + if (ACPI_FAILURE(status)) { + vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBL2 not present\n"); + return -ENODEV; + } + + arg0.integer.value = level; + + status = acpi_evaluate_object(handle, NULL, &arg_list, NULL); + if (ACPI_FAILURE(status)) + return -ENODEV; + + return 0; +} + +static int get_lcd_level(void) +{ + unsigned long long state = 0; + acpi_status status = AE_OK; + + vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLL\n"); + + status = + acpi_evaluate_integer(fujitsu->acpi_handle, "GBLL", NULL, &state); + if (status < 0) + return status; + + fujitsu->brightness_level = state & 0x0fffffff; + + if (state & 0x80000000) + fujitsu->brightness_changed = 1; + else + fujitsu->brightness_changed = 0; + + return fujitsu->brightness_level; +} + +static int get_max_brightness(void) +{ + unsigned 
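+/*
+ * GBLL/GBLS result layout, as decoded in get_lcd_level() above:
+ *   bits 0..27 - current brightness level
+ *   bit  31    - level was changed (e.g. by a brightness hotkey)
+ * so a value of 0x80000003 means level 3 with the changed flag set.
+ */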
long long state = 0; + acpi_status status = AE_OK; + + vdbg_printk(FUJLAPTOP_DBG_TRACE, "get max lcd level via RBLL\n"); + + status = + acpi_evaluate_integer(fujitsu->acpi_handle, "RBLL", NULL, &state); + if (status < 0) + return status; + + fujitsu->max_brightness = state; + + return fujitsu->max_brightness; +} + +static int get_lcd_level_alt(void) +{ + unsigned long long state = 0; + acpi_status status = AE_OK; + + vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLS\n"); + + status = + acpi_evaluate_integer(fujitsu->acpi_handle, "GBLS", NULL, &state); + if (status < 0) + return status; + + fujitsu->brightness_level = state & 0x0fffffff; + + if (state & 0x80000000) + fujitsu->brightness_changed = 1; + else + fujitsu->brightness_changed = 0; + + return fujitsu->brightness_level; +} + +/* Backlight device stuff */ + +static int bl_get_brightness(struct backlight_device *b) +{ + if (use_alt_lcd_levels) + return get_lcd_level_alt(); + else + return get_lcd_level(); +} + +static int bl_update_status(struct backlight_device *b) +{ + if (use_alt_lcd_levels) + return set_lcd_level_alt(b->props.brightness); + else + return set_lcd_level(b->props.brightness); +} + +static struct backlight_ops fujitsubl_ops = { + .get_brightness = bl_get_brightness, + .update_status = bl_update_status, +}; + +/* Platform LCD brightness device */ + +static ssize_t +show_max_brightness(struct device *dev, + struct device_attribute *attr, char *buf) +{ + + int ret; + + ret = get_max_brightness(); + if (ret < 0) + return ret; + + return sprintf(buf, "%i\n", ret); +} + +static ssize_t +show_brightness_changed(struct device *dev, + struct device_attribute *attr, char *buf) +{ + + int ret; + + ret = fujitsu->brightness_changed; + if (ret < 0) + return ret; + + return sprintf(buf, "%i\n", ret); +} + +static ssize_t show_lcd_level(struct device *dev, + struct device_attribute *attr, char *buf) +{ + + int ret; + + if (use_alt_lcd_levels) + ret = get_lcd_level_alt(); + else + ret = get_lcd_level(); + if (ret < 0) + return ret; + + return sprintf(buf, "%i\n", ret); +} + +static ssize_t store_lcd_level(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + + int level, ret; + + if (sscanf(buf, "%i", &level) != 1 + || (level < 0 || level >= fujitsu->max_brightness)) + return -EINVAL; + + if (use_alt_lcd_levels) + ret = set_lcd_level_alt(level); + else + ret = set_lcd_level(level); + if (ret < 0) + return ret; + + if (use_alt_lcd_levels) + ret = get_lcd_level_alt(); + else + ret = get_lcd_level(); + if (ret < 0) + return ret; + + return count; +} + +/* Hardware access for hotkey device */ + +static int get_irb(void) +{ + unsigned long long state = 0; + acpi_status status = AE_OK; + + vdbg_printk(FUJLAPTOP_DBG_TRACE, "Get irb\n"); + + status = + acpi_evaluate_integer(fujitsu_hotkey->acpi_handle, "GIRB", NULL, + &state); + if (status < 0) + return status; + + fujitsu_hotkey->irb = state; + + return fujitsu_hotkey->irb; +} + +static ssize_t +ignore_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return count; +} + +static DEVICE_ATTR(max_brightness, 0444, show_max_brightness, ignore_store); +static DEVICE_ATTR(brightness_changed, 0444, show_brightness_changed, + ignore_store); +static DEVICE_ATTR(lcd_level, 0644, show_lcd_level, store_lcd_level); + +static struct attribute *fujitsupf_attributes[] = { + &dev_attr_brightness_changed.attr, + &dev_attr_max_brightness.attr, + &dev_attr_lcd_level.attr, + NULL +}; + +static struct attribute_group 
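+/*
+ * The attributes grouped below appear under
+ * /sys/devices/platform/fujitsu-laptop/. Illustrative session
+ * (values assumed):
+ *   cat max_brightness   # e.g. 8
+ *   echo 4 > lcd_level   # routed to set_lcd_level(4) or set_lcd_level_alt(4)
+ */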
fujitsupf_attribute_group = { + .attrs = fujitsupf_attributes +}; + +static struct platform_driver fujitsupf_driver = { + .driver = { + .name = "fujitsu-laptop", + .owner = THIS_MODULE, + } +}; + +static void dmi_check_cb_common(const struct dmi_system_id *id) +{ + acpi_handle handle; + int have_blnf; + printk(KERN_INFO "fujitsu-laptop: Identified laptop model '%s'.\n", + id->ident); + have_blnf = ACPI_SUCCESS + (acpi_get_handle(NULL, "\\_SB.PCI0.GFX0.LCD.BLNF", &handle)); + if (use_alt_lcd_levels == -1) { + vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detecting usealt\n"); + use_alt_lcd_levels = 1; + } + if (disable_brightness_keys == -1) { + vdbg_printk(FUJLAPTOP_DBG_TRACE, + "auto-detecting disable_keys\n"); + disable_brightness_keys = have_blnf ? 1 : 0; + } + if (disable_brightness_adjust == -1) { + vdbg_printk(FUJLAPTOP_DBG_TRACE, + "auto-detecting disable_adjust\n"); + disable_brightness_adjust = have_blnf ? 0 : 1; + } +} + +static int dmi_check_cb_s6410(const struct dmi_system_id *id) +{ + dmi_check_cb_common(id); + fujitsu->keycode1 = KEY_SCREENLOCK; /* "Lock" */ + fujitsu->keycode2 = KEY_HELP; /* "Mobility Center" */ + return 0; +} + +static int dmi_check_cb_s6420(const struct dmi_system_id *id) +{ + dmi_check_cb_common(id); + fujitsu->keycode1 = KEY_SCREENLOCK; /* "Lock" */ + fujitsu->keycode2 = KEY_HELP; /* "Mobility Center" */ + return 0; +} + +static int dmi_check_cb_p8010(const struct dmi_system_id *id) +{ + dmi_check_cb_common(id); + fujitsu->keycode1 = KEY_HELP; /* "Support" */ + fujitsu->keycode3 = KEY_SWITCHVIDEOMODE; /* "Presentation" */ + fujitsu->keycode4 = KEY_WWW; /* "Internet" */ + return 0; +} + +static struct dmi_system_id fujitsu_dmi_table[] = { + { + .ident = "Fujitsu Siemens S6410", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S6410"), + }, + .callback = dmi_check_cb_s6410}, + { + .ident = "Fujitsu Siemens S6420", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S6420"), + }, + .callback = dmi_check_cb_s6420}, + { + .ident = "Fujitsu LifeBook P8010", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P8010"), + }, + .callback = dmi_check_cb_p8010}, + {} +}; + +/* ACPI device for LCD brightness control */ + +static int acpi_fujitsu_add(struct acpi_device *device) +{ + acpi_status status; + acpi_handle handle; + int result = 0; + int state = 0; + struct input_dev *input; + int error; + + if (!device) + return -EINVAL; + + fujitsu->acpi_handle = device->handle; + sprintf(acpi_device_name(device), "%s", ACPI_FUJITSU_DEVICE_NAME); + sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS); + device->driver_data = fujitsu; + + status = acpi_install_notify_handler(device->handle, + ACPI_DEVICE_NOTIFY, + acpi_fujitsu_notify, fujitsu); + + if (ACPI_FAILURE(status)) { + printk(KERN_ERR "Error installing notify handler\n"); + error = -ENODEV; + goto err_stop; + } + + fujitsu->input = input = input_allocate_device(); + if (!input) { + error = -ENOMEM; + goto err_uninstall_notify; + } + + snprintf(fujitsu->phys, sizeof(fujitsu->phys), + "%s/video/input0", acpi_device_hid(device)); + + input->name = acpi_device_name(device); + input->phys = fujitsu->phys; + input->id.bustype = BUS_HOST; + input->id.product = 0x06; + input->dev.parent = &device->dev; + input->evbit[0] = BIT(EV_KEY); + set_bit(KEY_BRIGHTNESSUP, input->keybit); + set_bit(KEY_BRIGHTNESSDOWN, input->keybit); + set_bit(KEY_UNKNOWN, input->keybit); + + error = 
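+/*
+ * The input device set up above advertises only BRIGHTNESSUP/DOWN,
+ * reported from acpi_fujitsu_notify(), plus KEY_UNKNOWN as a
+ * catch-all for events the driver cannot classify.
+ */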
input_register_device(input); + if (error) + goto err_free_input_dev; + + result = acpi_bus_get_power(fujitsu->acpi_handle, &state); + if (result) { + printk(KERN_ERR "Error reading power state\n"); + goto end; + } + + printk(KERN_INFO PREFIX "%s [%s] (%s)\n", + acpi_device_name(device), acpi_device_bid(device), + !device->power.state ? "on" : "off"); + + fujitsu->dev = device; + + if (ACPI_SUCCESS + (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) { + vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n"); + if (ACPI_FAILURE + (acpi_evaluate_object + (device->handle, METHOD_NAME__INI, NULL, NULL))) + printk(KERN_ERR "_INI Method failed\n"); + } + + /* do config (detect defaults) */ + use_alt_lcd_levels = use_alt_lcd_levels == 1 ? 1 : 0; + disable_brightness_keys = disable_brightness_keys == 1 ? 1 : 0; + disable_brightness_adjust = disable_brightness_adjust == 1 ? 1 : 0; + vdbg_printk(FUJLAPTOP_DBG_INFO, + "config: [alt interface: %d], [key disable: %d], [adjust disable: %d]\n", + use_alt_lcd_levels, disable_brightness_keys, + disable_brightness_adjust); + + if (get_max_brightness() <= 0) + fujitsu->max_brightness = FUJITSU_LCD_N_LEVELS; + if (use_alt_lcd_levels) + get_lcd_level_alt(); + else + get_lcd_level(); + + return result; + +end: +err_free_input_dev: + input_free_device(input); +err_uninstall_notify: + acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, + acpi_fujitsu_notify); +err_stop: + + return result; +} + +static int acpi_fujitsu_remove(struct acpi_device *device, int type) +{ + acpi_status status; + struct fujitsu_t *fujitsu = NULL; + + if (!device || !acpi_driver_data(device)) + return -EINVAL; + + fujitsu = acpi_driver_data(device); + + status = acpi_remove_notify_handler(fujitsu->acpi_handle, + ACPI_DEVICE_NOTIFY, + acpi_fujitsu_notify); + + if (!device || !acpi_driver_data(device)) + return -EINVAL; + + fujitsu->acpi_handle = NULL; + + return 0; +} + +/* Brightness notify */ + +static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data) +{ + struct input_dev *input; + int keycode; + int oldb, newb; + + input = fujitsu->input; + + switch (event) { + case ACPI_FUJITSU_NOTIFY_CODE1: + keycode = 0; + oldb = fujitsu->brightness_level; + get_lcd_level(); /* the alt version always yields changed */ + newb = fujitsu->brightness_level; + + vdbg_printk(FUJLAPTOP_DBG_TRACE, + "brightness button event [%i -> %i (%i)]\n", + oldb, newb, fujitsu->brightness_changed); + + if (oldb == newb && fujitsu->brightness_changed) { + keycode = 0; + if (disable_brightness_keys != 1) { + if (oldb == 0) { + acpi_bus_generate_proc_event + (fujitsu->dev, + ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS, + 0); + keycode = KEY_BRIGHTNESSDOWN; + } else if (oldb == + (fujitsu->max_brightness) - 1) { + acpi_bus_generate_proc_event + (fujitsu->dev, + ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS, + 0); + keycode = KEY_BRIGHTNESSUP; + } + } + } else if (oldb < newb) { + if (disable_brightness_adjust != 1) { + if (use_alt_lcd_levels) + set_lcd_level_alt(newb); + else + set_lcd_level(newb); + } + if (disable_brightness_keys != 1) { + acpi_bus_generate_proc_event(fujitsu->dev, + ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS, 0); + keycode = KEY_BRIGHTNESSUP; + } + } else if (oldb > newb) { + if (disable_brightness_adjust != 1) { + if (use_alt_lcd_levels) + set_lcd_level_alt(newb); + else + set_lcd_level(newb); + } + if (disable_brightness_keys != 1) { + acpi_bus_generate_proc_event(fujitsu->dev, + ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS, 0); + keycode = KEY_BRIGHTNESSDOWN; + } + } else { + keycode = KEY_UNKNOWN; + } + 
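+		/*
+		 * Recap of the branches above: an unchanged level with the
+		 * "changed" flag set means a key press at a range limit;
+		 * otherwise the sign of the delta picks BRIGHTNESSUP or
+		 * BRIGHTNESSDOWN and, unless disabled, the new level is
+		 * written back to the firmware.
+		 */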
break; + default: + keycode = KEY_UNKNOWN; + vdbg_printk(FUJLAPTOP_DBG_WARN, + "unsupported event [0x%x]\n", event); + break; + } + + if (keycode != 0) { + input_report_key(input, keycode, 1); + input_sync(input); + input_report_key(input, keycode, 0); + input_sync(input); + } + + return; +} + +/* ACPI device for hotkey handling */ + +static int acpi_fujitsu_hotkey_add(struct acpi_device *device) +{ + acpi_status status; + acpi_handle handle; + int result = 0; + int state = 0; + struct input_dev *input; + int error; + int i; + + if (!device) + return -EINVAL; + + fujitsu_hotkey->acpi_handle = device->handle; + sprintf(acpi_device_name(device), "%s", + ACPI_FUJITSU_HOTKEY_DEVICE_NAME); + sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS); + device->driver_data = fujitsu_hotkey; + + status = acpi_install_notify_handler(device->handle, + ACPI_DEVICE_NOTIFY, + acpi_fujitsu_hotkey_notify, + fujitsu_hotkey); + + if (ACPI_FAILURE(status)) { + printk(KERN_ERR "Error installing notify handler\n"); + error = -ENODEV; + goto err_stop; + } + + /* kfifo */ + spin_lock_init(&fujitsu_hotkey->fifo_lock); + fujitsu_hotkey->fifo = + kfifo_alloc(RINGBUFFERSIZE * sizeof(int), GFP_KERNEL, + &fujitsu_hotkey->fifo_lock); + if (IS_ERR(fujitsu_hotkey->fifo)) { + printk(KERN_ERR "kfifo_alloc failed\n"); + error = PTR_ERR(fujitsu_hotkey->fifo); + goto err_stop; + } + + fujitsu_hotkey->input = input = input_allocate_device(); + if (!input) { + error = -ENOMEM; + goto err_uninstall_notify; + } + + snprintf(fujitsu_hotkey->phys, sizeof(fujitsu_hotkey->phys), + "%s/video/input0", acpi_device_hid(device)); + + input->name = acpi_device_name(device); + input->phys = fujitsu_hotkey->phys; + input->id.bustype = BUS_HOST; + input->id.product = 0x06; + input->dev.parent = &device->dev; + input->evbit[0] = BIT(EV_KEY); + set_bit(fujitsu->keycode1, input->keybit); + set_bit(fujitsu->keycode2, input->keybit); + set_bit(fujitsu->keycode3, input->keybit); + set_bit(fujitsu->keycode4, input->keybit); + set_bit(KEY_UNKNOWN, input->keybit); + + error = input_register_device(input); + if (error) + goto err_free_input_dev; + + result = acpi_bus_get_power(fujitsu_hotkey->acpi_handle, &state); + if (result) { + printk(KERN_ERR "Error reading power state\n"); + goto end; + } + + printk(KERN_INFO PREFIX "%s [%s] (%s)\n", + acpi_device_name(device), acpi_device_bid(device), + !device->power.state ? 
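+/*
+ * Hotkey codes travel through the kfifo allocated above. This uses
+ * the 2.6.28-era kfifo API, where kfifo_alloc() takes the spinlock
+ * and kfifo_put()/kfifo_get() copy raw bytes (see the notify handler
+ * below).
+ */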
"on" : "off"); + + fujitsu_hotkey->dev = device; + + if (ACPI_SUCCESS + (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) { + vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n"); + if (ACPI_FAILURE + (acpi_evaluate_object + (device->handle, METHOD_NAME__INI, NULL, NULL))) + printk(KERN_ERR "_INI Method failed\n"); + } + + i = 0; /* Discard hotkey ringbuffer */ + while (get_irb() != 0 && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) ; + vdbg_printk(FUJLAPTOP_DBG_INFO, "Discarded %i ringbuffer entries\n", i); + + return result; + +end: +err_free_input_dev: + input_free_device(input); +err_uninstall_notify: + acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, + acpi_fujitsu_hotkey_notify); + kfifo_free(fujitsu_hotkey->fifo); +err_stop: + + return result; +} + +static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type) +{ + acpi_status status; + struct fujitsu_hotkey_t *fujitsu_hotkey = NULL; + + if (!device || !acpi_driver_data(device)) + return -EINVAL; + + fujitsu_hotkey = acpi_driver_data(device); + + status = acpi_remove_notify_handler(fujitsu_hotkey->acpi_handle, + ACPI_DEVICE_NOTIFY, + acpi_fujitsu_hotkey_notify); + + fujitsu_hotkey->acpi_handle = NULL; + + kfifo_free(fujitsu_hotkey->fifo); + + return 0; +} + +static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event, + void *data) +{ + struct input_dev *input; + int keycode, keycode_r; + unsigned int irb = 1; + int i, status; + + input = fujitsu_hotkey->input; + + vdbg_printk(FUJLAPTOP_DBG_TRACE, "Hotkey event\n"); + + switch (event) { + case ACPI_FUJITSU_NOTIFY_CODE1: + i = 0; + while ((irb = get_irb()) != 0 + && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) { + vdbg_printk(FUJLAPTOP_DBG_TRACE, "GIRB result [%x]\n", + irb); + + switch (irb & 0x4ff) { + case KEY1_CODE: + keycode = fujitsu->keycode1; + break; + case KEY2_CODE: + keycode = fujitsu->keycode2; + break; + case KEY3_CODE: + keycode = fujitsu->keycode3; + break; + case KEY4_CODE: + keycode = fujitsu->keycode4; + break; + case 0: + keycode = 0; + break; + default: + vdbg_printk(FUJLAPTOP_DBG_WARN, + "Unknown GIRB result [%x]\n", irb); + keycode = -1; + break; + } + if (keycode > 0) { + vdbg_printk(FUJLAPTOP_DBG_TRACE, + "Push keycode into ringbuffer [%d]\n", + keycode); + status = kfifo_put(fujitsu_hotkey->fifo, + (unsigned char *)&keycode, + sizeof(keycode)); + if (status != sizeof(keycode)) { + vdbg_printk(FUJLAPTOP_DBG_WARN, + "Could not push keycode [0x%x]\n", + keycode); + } else { + input_report_key(input, keycode, 1); + input_sync(input); + } + } else if (keycode == 0) { + while ((status = + kfifo_get + (fujitsu_hotkey->fifo, (unsigned char *) + &keycode_r, + sizeof + (keycode_r))) == sizeof(keycode_r)) { + input_report_key(input, keycode_r, 0); + input_sync(input); + vdbg_printk(FUJLAPTOP_DBG_TRACE, + "Pop keycode from ringbuffer [%d]\n", + keycode_r); + } + } + } + + break; + default: + keycode = KEY_UNKNOWN; + vdbg_printk(FUJLAPTOP_DBG_WARN, + "Unsupported event [0x%x]\n", event); + input_report_key(input, keycode, 1); + input_sync(input); + input_report_key(input, keycode, 0); + input_sync(input); + break; + } + + return; +} + +/* Initialization */ + +static const struct acpi_device_id fujitsu_device_ids[] = { + {ACPI_FUJITSU_HID, 0}, + {"", 0}, +}; + +static struct acpi_driver acpi_fujitsu_driver = { + .name = ACPI_FUJITSU_DRIVER_NAME, + .class = ACPI_FUJITSU_CLASS, + .ids = fujitsu_device_ids, + .ops = { + .add = acpi_fujitsu_add, + .remove = acpi_fujitsu_remove, + }, +}; + +static const struct acpi_device_id 
fujitsu_hotkey_device_ids[] = { + {ACPI_FUJITSU_HOTKEY_HID, 0}, + {"", 0}, +}; + +static struct acpi_driver acpi_fujitsu_hotkey_driver = { + .name = ACPI_FUJITSU_HOTKEY_DRIVER_NAME, + .class = ACPI_FUJITSU_CLASS, + .ids = fujitsu_hotkey_device_ids, + .ops = { + .add = acpi_fujitsu_hotkey_add, + .remove = acpi_fujitsu_hotkey_remove, + }, +}; + +static int __init fujitsu_init(void) +{ + int ret, result, max_brightness; + + if (acpi_disabled) + return -ENODEV; + + fujitsu = kmalloc(sizeof(struct fujitsu_t), GFP_KERNEL); + if (!fujitsu) + return -ENOMEM; + memset(fujitsu, 0, sizeof(struct fujitsu_t)); + fujitsu->keycode1 = KEY_PROG1; + fujitsu->keycode2 = KEY_PROG2; + fujitsu->keycode3 = KEY_PROG3; + fujitsu->keycode4 = KEY_PROG4; + dmi_check_system(fujitsu_dmi_table); + + result = acpi_bus_register_driver(&acpi_fujitsu_driver); + if (result < 0) { + ret = -ENODEV; + goto fail_acpi; + } + + /* Register platform stuff */ + + fujitsu->pf_device = platform_device_alloc("fujitsu-laptop", -1); + if (!fujitsu->pf_device) { + ret = -ENOMEM; + goto fail_platform_driver; + } + + ret = platform_device_add(fujitsu->pf_device); + if (ret) + goto fail_platform_device1; + + ret = + sysfs_create_group(&fujitsu->pf_device->dev.kobj, + &fujitsupf_attribute_group); + if (ret) + goto fail_platform_device2; + + /* Register backlight stuff */ + + if (!acpi_video_backlight_support()) { + fujitsu->bl_device = + backlight_device_register("fujitsu-laptop", NULL, NULL, + &fujitsubl_ops); + if (IS_ERR(fujitsu->bl_device)) + return PTR_ERR(fujitsu->bl_device); + max_brightness = fujitsu->max_brightness; + fujitsu->bl_device->props.max_brightness = max_brightness - 1; + fujitsu->bl_device->props.brightness = fujitsu->brightness_level; + } + + ret = platform_driver_register(&fujitsupf_driver); + if (ret) + goto fail_backlight; + + /* Register hotkey driver */ + + fujitsu_hotkey = kmalloc(sizeof(struct fujitsu_hotkey_t), GFP_KERNEL); + if (!fujitsu_hotkey) { + ret = -ENOMEM; + goto fail_hotkey; + } + memset(fujitsu_hotkey, 0, sizeof(struct fujitsu_hotkey_t)); + + result = acpi_bus_register_driver(&acpi_fujitsu_hotkey_driver); + if (result < 0) { + ret = -ENODEV; + goto fail_hotkey1; + } + + printk(KERN_INFO "fujitsu-laptop: driver " FUJITSU_DRIVER_VERSION + " successfully loaded.\n"); + + return 0; + +fail_hotkey1: + + kfree(fujitsu_hotkey); + +fail_hotkey: + + platform_driver_unregister(&fujitsupf_driver); + +fail_backlight: + + if (fujitsu->bl_device) + backlight_device_unregister(fujitsu->bl_device); + +fail_platform_device2: + + platform_device_del(fujitsu->pf_device); + +fail_platform_device1: + + platform_device_put(fujitsu->pf_device); + +fail_platform_driver: + + acpi_bus_unregister_driver(&acpi_fujitsu_driver); + +fail_acpi: + + kfree(fujitsu); + + return ret; +} + +static void __exit fujitsu_cleanup(void) +{ + sysfs_remove_group(&fujitsu->pf_device->dev.kobj, + &fujitsupf_attribute_group); + platform_device_unregister(fujitsu->pf_device); + platform_driver_unregister(&fujitsupf_driver); + if (fujitsu->bl_device) + backlight_device_unregister(fujitsu->bl_device); + + acpi_bus_unregister_driver(&acpi_fujitsu_driver); + + kfree(fujitsu); + + acpi_bus_unregister_driver(&acpi_fujitsu_hotkey_driver); + + kfree(fujitsu_hotkey); + + printk(KERN_INFO "fujitsu-laptop: driver unloaded.\n"); +} + +module_init(fujitsu_init); +module_exit(fujitsu_cleanup); + +module_param(use_alt_lcd_levels, uint, 0644); +MODULE_PARM_DESC(use_alt_lcd_levels, + "Use alternative interface for lcd_levels (needed for Lifebook s6410)."); 
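+/*
+ * Illustrative load-time usage of the parameters declared here:
+ *
+ *   modprobe fujitsu-laptop use_alt_lcd_levels=1 disable_brightness_keys=1
+ */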
+module_param(disable_brightness_keys, uint, 0644); +MODULE_PARM_DESC(disable_brightness_keys, + "Disable brightness keys (eg. if they are already handled by the generic ACPI_VIDEO device)."); +module_param(disable_brightness_adjust, uint, 0644); +MODULE_PARM_DESC(disable_brightness_adjust, "Disable brightness adjustment ."); +#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG +module_param_named(debug, dbg_level, uint, 0644); +MODULE_PARM_DESC(debug, "Sets debug level bit-mask"); +#endif + +MODULE_AUTHOR("Jonathan Woithe, Peter Gruber"); +MODULE_DESCRIPTION("Fujitsu laptop extras support"); +MODULE_VERSION(FUJITSU_DRIVER_VERSION); +MODULE_LICENSE("GPL"); + +MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*"); +MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*"); + +static struct pnp_device_id pnp_ids[] = { + {.id = "FUJ02bf"}, + {.id = "FUJ02B1"}, + {.id = "FUJ02E3"}, + {.id = ""} +}; + +MODULE_DEVICE_TABLE(pnp, pnp_ids); diff --git a/drivers/misc/hdpuftrs/Makefile b/drivers/misc/hdpuftrs/Makefile new file mode 100644 index 0000000..ac74ae6 --- /dev/null +++ b/drivers/misc/hdpuftrs/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_HDPU_FEATURES) := hdpu_cpustate.o hdpu_nexus.o diff --git a/drivers/misc/hdpuftrs/hdpu_cpustate.c b/drivers/misc/hdpuftrs/hdpu_cpustate.c new file mode 100644 index 0000000..176fe4e --- /dev/null +++ b/drivers/misc/hdpuftrs/hdpu_cpustate.c @@ -0,0 +1,256 @@ +/* + * Sky CPU State Driver + * + * Copyright (C) 2002 Brian Waite + * + * This driver allows use of the CPU state bits + * It exports the /dev/sky_cpustate and also + * /proc/sky_cpustate pseudo-file for status information. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
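+ *
+ * Illustrative use of the interfaces named above (values assumed):
+ *   printf '\005' > /dev/sky_cpustate   # write one state byte
+ *   cat /proc/sky_cpustate              # -> "CPU State: 0005"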
+ * + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/smp_lock.h> +#include <linux/miscdevice.h> +#include <linux/proc_fs.h> +#include <linux/hdpu_features.h> +#include <linux/platform_device.h> +#include <asm/uaccess.h> +#include <linux/seq_file.h> +#include <asm/io.h> + +#define SKY_CPUSTATE_VERSION "1.1" + +static int hdpu_cpustate_probe(struct platform_device *pdev); +static int hdpu_cpustate_remove(struct platform_device *pdev); + +static unsigned char cpustate_get_state(void); +static int cpustate_proc_open(struct inode *inode, struct file *file); +static int cpustate_proc_read(struct seq_file *seq, void *offset); + +static struct cpustate_t cpustate; + +static const struct file_operations proc_cpustate = { + .open = cpustate_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int cpustate_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, cpustate_proc_read, NULL); +} + +static int cpustate_proc_read(struct seq_file *seq, void *offset) +{ + seq_printf(seq, "CPU State: %04x\n", cpustate_get_state()); + return 0; +} + +static int cpustate_get_ref(int excl) +{ + + int retval = -EBUSY; + + spin_lock(&cpustate.lock); + + if (cpustate.excl) + goto out_busy; + + if (excl) { + if (cpustate.open_count) + goto out_busy; + cpustate.excl = 1; + } + + cpustate.open_count++; + retval = 0; + + out_busy: + spin_unlock(&cpustate.lock); + return retval; +} + +static int cpustate_free_ref(void) +{ + + spin_lock(&cpustate.lock); + + cpustate.excl = 0; + cpustate.open_count--; + + spin_unlock(&cpustate.lock); + return 0; +} + +static unsigned char cpustate_get_state(void) +{ + + return cpustate.cached_val; +} + +static void cpustate_set_state(unsigned char new_state) +{ + unsigned int state = (new_state << 21); + +#ifdef DEBUG_CPUSTATE + printk("CPUSTATE -> 0x%x\n", new_state); +#endif + spin_lock(&cpustate.lock); + cpustate.cached_val = new_state; + writel((0xff << 21), cpustate.clr_addr); + writel(state, cpustate.set_addr); + spin_unlock(&cpustate.lock); +} + +/* + * Now all the various file operations that we export. + */ + +static ssize_t cpustate_read(struct file *file, char *buf, + size_t count, loff_t * ppos) +{ + unsigned char data; + + if (count < 0) + return -EFAULT; + if (count == 0) + return 0; + + data = cpustate_get_state(); + if (copy_to_user(buf, &data, sizeof(unsigned char))) + return -EFAULT; + return sizeof(unsigned char); +} + +static ssize_t cpustate_write(struct file *file, const char *buf, + size_t count, loff_t * ppos) +{ + unsigned char data; + + if (count < 0) + return -EFAULT; + + if (count == 0) + return 0; + + if (copy_from_user((unsigned char *)&data, buf, sizeof(unsigned char))) + return -EFAULT; + + cpustate_set_state(data); + return sizeof(unsigned char); +} + +static int cpustate_open(struct inode *inode, struct file *file) +{ + int ret; + + lock_kernel(); + ret = cpustate_get_ref((file->f_flags & O_EXCL)); + unlock_kernel(); + + return ret; +} + +static int cpustate_release(struct inode *inode, struct file *file) +{ + return cpustate_free_ref(); +} + +static struct platform_driver hdpu_cpustate_driver = { + .probe = hdpu_cpustate_probe, + .remove = hdpu_cpustate_remove, + .driver = { + .name = HDPU_CPUSTATE_NAME, + .owner = THIS_MODULE, + }, +}; + +/* + * The various file operations we support. 
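+ * Both read() and write() on /dev/sky_cpustate transfer exactly one
+ * state byte per call (see cpustate_read()/cpustate_write() above).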
+ */ +static const struct file_operations cpustate_fops = { + .owner = THIS_MODULE, + .open = cpustate_open, + .release = cpustate_release, + .read = cpustate_read, + .write = cpustate_write, + .llseek = no_llseek, +}; + +static struct miscdevice cpustate_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "sky_cpustate", + .fops = &cpustate_fops, +}; + +static int hdpu_cpustate_probe(struct platform_device *pdev) +{ + struct resource *res; + struct proc_dir_entry *proc_de; + int ret; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + printk(KERN_ERR "sky_cpustate: " + "Invalid memory resource.\n"); + return -EINVAL; + } + cpustate.set_addr = (unsigned long *)res->start; + cpustate.clr_addr = (unsigned long *)res->end - 1; + + ret = misc_register(&cpustate_dev); + if (ret) { + printk(KERN_WARNING "sky_cpustate: " + "Unable to register misc device.\n"); + cpustate.set_addr = NULL; + cpustate.clr_addr = NULL; + return ret; + } + + proc_de = proc_create("sky_cpustate", 0666, NULL, &proc_cpustate); + if (!proc_de) { + printk(KERN_WARNING "sky_cpustate: " + "Unable to create proc entry\n"); + } + + printk(KERN_INFO "Sky CPU State Driver v" SKY_CPUSTATE_VERSION "\n"); + return 0; +} + +static int hdpu_cpustate_remove(struct platform_device *pdev) +{ + cpustate.set_addr = NULL; + cpustate.clr_addr = NULL; + + remove_proc_entry("sky_cpustate", NULL); + misc_deregister(&cpustate_dev); + + return 0; +} + +static int __init cpustate_init(void) +{ + return platform_driver_register(&hdpu_cpustate_driver); +} + +static void __exit cpustate_exit(void) +{ + platform_driver_unregister(&hdpu_cpustate_driver); +} + +module_init(cpustate_init); +module_exit(cpustate_exit); + +MODULE_AUTHOR("Brian Waite"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" HDPU_CPUSTATE_NAME); diff --git a/drivers/misc/hdpuftrs/hdpu_nexus.c b/drivers/misc/hdpuftrs/hdpu_nexus.c new file mode 100644 index 0000000..ce39fa5 --- /dev/null +++ b/drivers/misc/hdpuftrs/hdpu_nexus.c @@ -0,0 +1,149 @@ +/* + * Sky Nexus Register Driver + * + * Copyright (C) 2002 Brian Waite + * + * This driver allows reading the Nexus register + * It exports the /proc/sky_chassis_id and also + * /proc/sky_slot_id pseudo-file for status information. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
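+ *
+ * Illustrative reads of the pseudo-files named above (values assumed):
+ *   cat /proc/sky_slot_id      # e.g. 4
+ *   cat /proc/sky_chassis_id   # e.g. 17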
+ * + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/proc_fs.h> +#include <linux/hdpu_features.h> +#include <linux/platform_device.h> +#include <linux/seq_file.h> +#include <asm/io.h> + +static int hdpu_nexus_probe(struct platform_device *pdev); +static int hdpu_nexus_remove(struct platform_device *pdev); +static int hdpu_slot_id_open(struct inode *inode, struct file *file); +static int hdpu_slot_id_read(struct seq_file *seq, void *offset); +static int hdpu_chassis_id_open(struct inode *inode, struct file *file); +static int hdpu_chassis_id_read(struct seq_file *seq, void *offset); + +static struct proc_dir_entry *hdpu_slot_id; +static struct proc_dir_entry *hdpu_chassis_id; +static int slot_id = -1; +static int chassis_id = -1; + +static const struct file_operations proc_slot_id = { + .open = hdpu_slot_id_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static const struct file_operations proc_chassis_id = { + .open = hdpu_chassis_id_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static struct platform_driver hdpu_nexus_driver = { + .probe = hdpu_nexus_probe, + .remove = hdpu_nexus_remove, + .driver = { + .name = HDPU_NEXUS_NAME, + .owner = THIS_MODULE, + }, +}; + +static int hdpu_slot_id_open(struct inode *inode, struct file *file) +{ + return single_open(file, hdpu_slot_id_read, NULL); +} + +static int hdpu_slot_id_read(struct seq_file *seq, void *offset) +{ + seq_printf(seq, "%d\n", slot_id); + return 0; +} + +static int hdpu_chassis_id_open(struct inode *inode, struct file *file) +{ + return single_open(file, hdpu_chassis_id_read, NULL); +} + +static int hdpu_chassis_id_read(struct seq_file *seq, void *offset) +{ + seq_printf(seq, "%d\n", chassis_id); + return 0; +} + +static int hdpu_nexus_probe(struct platform_device *pdev) +{ + struct resource *res; + int *nexus_id_addr; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + printk(KERN_ERR "sky_nexus: " + "Invalid memory resource.\n"); + return -EINVAL; + } + nexus_id_addr = ioremap(res->start, + (unsigned long)(res->end - res->start)); + if (nexus_id_addr) { + slot_id = (*nexus_id_addr >> 8) & 0x1f; + chassis_id = *nexus_id_addr & 0xff; + iounmap(nexus_id_addr); + } else { + printk(KERN_ERR "sky_nexus: Could not map slot id\n"); + } + + hdpu_slot_id = proc_create("sky_slot_id", 0666, NULL, &proc_slot_id); + if (!hdpu_slot_id) { + printk(KERN_WARNING "sky_nexus: " + "Unable to create proc dir entry: sky_slot_id\n"); + } + + hdpu_chassis_id = proc_create("sky_chassis_id", 0666, NULL, + &proc_chassis_id); + if (!hdpu_chassis_id) + printk(KERN_WARNING "sky_nexus: " + "Unable to create proc dir entry: sky_chassis_id\n"); + + return 0; +} + +static int hdpu_nexus_remove(struct platform_device *pdev) +{ + slot_id = -1; + chassis_id = -1; + + remove_proc_entry("sky_slot_id", NULL); + remove_proc_entry("sky_chassis_id", NULL); + + hdpu_slot_id = 0; + hdpu_chassis_id = 0; + + return 0; +} + +static int __init nexus_init(void) +{ + return platform_driver_register(&hdpu_nexus_driver); +} + +static void __exit nexus_exit(void) +{ + platform_driver_unregister(&hdpu_nexus_driver); +} + +module_init(nexus_init); +module_exit(nexus_exit); + +MODULE_AUTHOR("Brian Waite"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" HDPU_NEXUS_NAME); diff --git a/drivers/misc/hp-wmi.c b/drivers/misc/hp-wmi.c new file mode 100644 index 0000000..4b7c24c --- /dev/null +++ b/drivers/misc/hp-wmi.c 
@@ -0,0 +1,512 @@ +/* + * HP WMI hotkeys + * + * Copyright (C) 2008 Red Hat <mjg@redhat.com> + * + * Portions based on wistron_btns.c: + * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz> + * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org> + * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/input.h> +#include <acpi/acpi_drivers.h> +#include <linux/platform_device.h> +#include <linux/acpi.h> +#include <linux/rfkill.h> +#include <linux/string.h> + +MODULE_AUTHOR("Matthew Garrett <mjg59@srcf.ucam.org>"); +MODULE_DESCRIPTION("HP laptop WMI hotkeys driver"); +MODULE_LICENSE("GPL"); + +MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C"); +MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4"); + +#define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C" +#define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4" + +#define HPWMI_DISPLAY_QUERY 0x1 +#define HPWMI_HDDTEMP_QUERY 0x2 +#define HPWMI_ALS_QUERY 0x3 +#define HPWMI_DOCK_QUERY 0x4 +#define HPWMI_WIRELESS_QUERY 0x5 +#define HPWMI_HOTKEY_QUERY 0xc + +static int __init hp_wmi_bios_setup(struct platform_device *device); +static int __exit hp_wmi_bios_remove(struct platform_device *device); + +struct bios_args { + u32 signature; + u32 command; + u32 commandtype; + u32 datasize; + u32 data; +}; + +struct bios_return { + u32 sigpass; + u32 return_code; + u32 value; +}; + +struct key_entry { + char type; /* See KE_* below */ + u16 code; + u16 keycode; +}; + +enum { KE_KEY, KE_SW, KE_END }; + +static struct key_entry hp_wmi_keymap[] = { + {KE_SW, 0x01, SW_DOCK}, + {KE_KEY, 0x02, KEY_BRIGHTNESSUP}, + {KE_KEY, 0x03, KEY_BRIGHTNESSDOWN}, + {KE_KEY, 0x20e6, KEY_PROG1}, + {KE_KEY, 0x2142, KEY_MEDIA}, + {KE_KEY, 0x213b, KEY_INFO}, + {KE_KEY, 0x231b, KEY_HELP}, + {KE_END, 0} +}; + +static struct input_dev *hp_wmi_input_dev; +static struct platform_device *hp_wmi_platform_dev; + +static struct rfkill *wifi_rfkill; +static struct rfkill *bluetooth_rfkill; +static struct rfkill *wwan_rfkill; + +static struct platform_driver hp_wmi_driver = { + .driver = { + .name = "hp-wmi", + .owner = THIS_MODULE, + }, + .probe = hp_wmi_bios_setup, + .remove = hp_wmi_bios_remove, +}; + +static int hp_wmi_perform_query(int query, int write, int value) +{ + struct bios_return bios_return; + acpi_status status; + union acpi_object *obj; + struct bios_args args = { + .signature = 0x55434553, + .command = write ? 0x2 : 0x1, + .commandtype = query, + .datasize = write ? 
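+		/*
+		 * The signature 0x55434553 is "SECU" in little-endian byte
+		 * order; .command 0x1 requests a read and 0x2 a write of
+		 * the selected command type.
+		 */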
0x4 : 0, + .data = value, + }; + struct acpi_buffer input = { sizeof(struct bios_args), &args }; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + + status = wmi_evaluate_method(HPWMI_BIOS_GUID, 0, 0x3, &input, &output); + + obj = output.pointer; + + if (!obj || obj->type != ACPI_TYPE_BUFFER) + return -EINVAL; + + bios_return = *((struct bios_return *)obj->buffer.pointer); + if (bios_return.return_code > 0) + return bios_return.return_code * -1; + else + return bios_return.value; +} + +static int hp_wmi_display_state(void) +{ + return hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, 0); +} + +static int hp_wmi_hddtemp_state(void) +{ + return hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, 0); +} + +static int hp_wmi_als_state(void) +{ + return hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, 0); +} + +static int hp_wmi_dock_state(void) +{ + return hp_wmi_perform_query(HPWMI_DOCK_QUERY, 0, 0); +} + +static int hp_wmi_wifi_set(void *data, enum rfkill_state state) +{ + if (state) + return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x101); + else + return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x100); +} + +static int hp_wmi_bluetooth_set(void *data, enum rfkill_state state) +{ + if (state) + return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x202); + else + return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x200); +} + +static int hp_wmi_wwan_set(void *data, enum rfkill_state state) +{ + if (state) + return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x404); + else + return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x400); +} + +static int hp_wmi_wifi_state(void) +{ + int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0); + + if (wireless & 0x100) + return RFKILL_STATE_UNBLOCKED; + else + return RFKILL_STATE_SOFT_BLOCKED; +} + +static int hp_wmi_bluetooth_state(void) +{ + int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0); + + if (wireless & 0x10000) + return RFKILL_STATE_UNBLOCKED; + else + return RFKILL_STATE_SOFT_BLOCKED; +} + +static int hp_wmi_wwan_state(void) +{ + int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0); + + if (wireless & 0x1000000) + return RFKILL_STATE_UNBLOCKED; + else + return RFKILL_STATE_SOFT_BLOCKED; +} + +static ssize_t show_display(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int value = hp_wmi_display_state(); + if (value < 0) + return -EINVAL; + return sprintf(buf, "%d\n", value); +} + +static ssize_t show_hddtemp(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int value = hp_wmi_hddtemp_state(); + if (value < 0) + return -EINVAL; + return sprintf(buf, "%d\n", value); +} + +static ssize_t show_als(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int value = hp_wmi_als_state(); + if (value < 0) + return -EINVAL; + return sprintf(buf, "%d\n", value); +} + +static ssize_t show_dock(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int value = hp_wmi_dock_state(); + if (value < 0) + return -EINVAL; + return sprintf(buf, "%d\n", value); +} + +static ssize_t set_als(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + u32 tmp = simple_strtoul(buf, NULL, 10); + hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, tmp); + return count; +} + +static DEVICE_ATTR(display, S_IRUGO, show_display, NULL); +static DEVICE_ATTR(hddtemp, S_IRUGO, show_hddtemp, NULL); +static DEVICE_ATTR(als, S_IRUGO | S_IWUSR, show_als, set_als); +static DEVICE_ATTR(dock, S_IRUGO, show_dock, NULL); + +static struct key_entry 
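+/*
+ * Bit layout assumed by the rfkill helpers above (one byte per radio
+ * in the wireless query result):
+ *   0x100      wifi enabled        (writes: 0x101 on, 0x100 off)
+ *   0x10000    bluetooth enabled   (writes: 0x202 on, 0x200 off)
+ *   0x1000000  wwan enabled        (writes: 0x404 on, 0x400 off)
+ */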
*hp_wmi_get_entry_by_scancode(int code) +{ + struct key_entry *key; + + for (key = hp_wmi_keymap; key->type != KE_END; key++) + if (code == key->code) + return key; + + return NULL; +} + +static struct key_entry *hp_wmi_get_entry_by_keycode(int keycode) +{ + struct key_entry *key; + + for (key = hp_wmi_keymap; key->type != KE_END; key++) + if (key->type == KE_KEY && keycode == key->keycode) + return key; + + return NULL; +} + +static int hp_wmi_getkeycode(struct input_dev *dev, int scancode, int *keycode) +{ + struct key_entry *key = hp_wmi_get_entry_by_scancode(scancode); + + if (key && key->type == KE_KEY) { + *keycode = key->keycode; + return 0; + } + + return -EINVAL; +} + +static int hp_wmi_setkeycode(struct input_dev *dev, int scancode, int keycode) +{ + struct key_entry *key; + int old_keycode; + + if (keycode < 0 || keycode > KEY_MAX) + return -EINVAL; + + key = hp_wmi_get_entry_by_scancode(scancode); + if (key && key->type == KE_KEY) { + old_keycode = key->keycode; + key->keycode = keycode; + set_bit(keycode, dev->keybit); + if (!hp_wmi_get_entry_by_keycode(old_keycode)) + clear_bit(old_keycode, dev->keybit); + return 0; + } + + return -EINVAL; +} + +static void hp_wmi_notify(u32 value, void *context) +{ + struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; + static struct key_entry *key; + union acpi_object *obj; + + wmi_get_event_data(value, &response); + + obj = (union acpi_object *)response.pointer; + + if (obj && obj->type == ACPI_TYPE_BUFFER && obj->buffer.length == 8) { + int eventcode = *((u8 *) obj->buffer.pointer); + if (eventcode == 0x4) + eventcode = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0, + 0); + key = hp_wmi_get_entry_by_scancode(eventcode); + if (key) { + switch (key->type) { + case KE_KEY: + input_report_key(hp_wmi_input_dev, + key->keycode, 1); + input_sync(hp_wmi_input_dev); + input_report_key(hp_wmi_input_dev, + key->keycode, 0); + input_sync(hp_wmi_input_dev); + break; + case KE_SW: + input_report_switch(hp_wmi_input_dev, + key->keycode, + hp_wmi_dock_state()); + input_sync(hp_wmi_input_dev); + break; + } + } else if (eventcode == 0x5) { + if (wifi_rfkill) + rfkill_force_state(wifi_rfkill, + hp_wmi_wifi_state()); + if (bluetooth_rfkill) + rfkill_force_state(bluetooth_rfkill, + hp_wmi_bluetooth_state()); + if (wwan_rfkill) + rfkill_force_state(wwan_rfkill, + hp_wmi_wwan_state()); + } else + printk(KERN_INFO "HP WMI: Unknown key pressed - %x\n", + eventcode); + } else + printk(KERN_INFO "HP WMI: Unknown response received\n"); +} + +static int __init hp_wmi_input_setup(void) +{ + struct key_entry *key; + int err; + + hp_wmi_input_dev = input_allocate_device(); + + hp_wmi_input_dev->name = "HP WMI hotkeys"; + hp_wmi_input_dev->phys = "wmi/input0"; + hp_wmi_input_dev->id.bustype = BUS_HOST; + hp_wmi_input_dev->getkeycode = hp_wmi_getkeycode; + hp_wmi_input_dev->setkeycode = hp_wmi_setkeycode; + + for (key = hp_wmi_keymap; key->type != KE_END; key++) { + switch (key->type) { + case KE_KEY: + set_bit(EV_KEY, hp_wmi_input_dev->evbit); + set_bit(key->keycode, hp_wmi_input_dev->keybit); + break; + case KE_SW: + set_bit(EV_SW, hp_wmi_input_dev->evbit); + set_bit(key->keycode, hp_wmi_input_dev->swbit); + break; + } + } + + err = input_register_device(hp_wmi_input_dev); + + if (err) { + input_free_device(hp_wmi_input_dev); + return err; + } + + return 0; +} + +static void cleanup_sysfs(struct platform_device *device) +{ + device_remove_file(&device->dev, &dev_attr_display); + device_remove_file(&device->dev, &dev_attr_hddtemp); + 
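+	/* removal mirrors the device_create_file() sequence in
+	 * hp_wmi_bios_setup() below */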
device_remove_file(&device->dev, &dev_attr_als); + device_remove_file(&device->dev, &dev_attr_dock); +} + +static int __init hp_wmi_bios_setup(struct platform_device *device) +{ + int err; + int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0); + + err = device_create_file(&device->dev, &dev_attr_display); + if (err) + goto add_sysfs_error; + err = device_create_file(&device->dev, &dev_attr_hddtemp); + if (err) + goto add_sysfs_error; + err = device_create_file(&device->dev, &dev_attr_als); + if (err) + goto add_sysfs_error; + err = device_create_file(&device->dev, &dev_attr_dock); + if (err) + goto add_sysfs_error; + + if (wireless & 0x1) { + wifi_rfkill = rfkill_allocate(&device->dev, RFKILL_TYPE_WLAN); + wifi_rfkill->name = "hp-wifi"; + wifi_rfkill->state = hp_wmi_wifi_state(); + wifi_rfkill->toggle_radio = hp_wmi_wifi_set; + wifi_rfkill->user_claim_unsupported = 1; + rfkill_register(wifi_rfkill); + } + + if (wireless & 0x2) { + bluetooth_rfkill = rfkill_allocate(&device->dev, + RFKILL_TYPE_BLUETOOTH); + bluetooth_rfkill->name = "hp-bluetooth"; + bluetooth_rfkill->state = hp_wmi_bluetooth_state(); + bluetooth_rfkill->toggle_radio = hp_wmi_bluetooth_set; + bluetooth_rfkill->user_claim_unsupported = 1; + rfkill_register(bluetooth_rfkill); + } + + if (wireless & 0x4) { + wwan_rfkill = rfkill_allocate(&device->dev, RFKILL_TYPE_WWAN); + wwan_rfkill->name = "hp-wwan"; + wwan_rfkill->state = hp_wmi_wwan_state(); + wwan_rfkill->toggle_radio = hp_wmi_wwan_set; + wwan_rfkill->user_claim_unsupported = 1; + rfkill_register(wwan_rfkill); + } + + return 0; +add_sysfs_error: + cleanup_sysfs(device); + return err; +} + +static int __exit hp_wmi_bios_remove(struct platform_device *device) +{ + cleanup_sysfs(device); + + if (wifi_rfkill) + rfkill_unregister(wifi_rfkill); + if (bluetooth_rfkill) + rfkill_unregister(bluetooth_rfkill); + if (wwan_rfkill) + rfkill_unregister(wwan_rfkill); + + return 0; +} + +static int __init hp_wmi_init(void) +{ + int err; + + if (wmi_has_guid(HPWMI_EVENT_GUID)) { + err = wmi_install_notify_handler(HPWMI_EVENT_GUID, + hp_wmi_notify, NULL); + if (!err) + hp_wmi_input_setup(); + } + + if (wmi_has_guid(HPWMI_BIOS_GUID)) { + err = platform_driver_register(&hp_wmi_driver); + if (err) + return 0; + hp_wmi_platform_dev = platform_device_alloc("hp-wmi", -1); + if (!hp_wmi_platform_dev) { + platform_driver_unregister(&hp_wmi_driver); + return 0; + } + platform_device_add(hp_wmi_platform_dev); + } + + return 0; +} + +static void __exit hp_wmi_exit(void) +{ + if (wmi_has_guid(HPWMI_EVENT_GUID)) { + wmi_remove_notify_handler(HPWMI_EVENT_GUID); + input_unregister_device(hp_wmi_input_dev); + } + if (hp_wmi_platform_dev) { + platform_device_del(hp_wmi_platform_dev); + platform_driver_unregister(&hp_wmi_driver); + } +} + +module_init(hp_wmi_init); +module_exit(hp_wmi_exit); diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c new file mode 100644 index 0000000..174a35f --- /dev/null +++ b/drivers/misc/hpilo.c @@ -0,0 +1,769 @@ +/* + * Driver for HP iLO/iLO2 management processor. + * + * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. + * David Altobelli <david.altobelli@hp.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/pci.h> +#include <linux/ioport.h> +#include <linux/device.h> +#include <linux/file.h> +#include <linux/cdev.h> +#include <linux/spinlock.h> +#include <linux/delay.h> +#include <linux/uaccess.h> +#include <linux/io.h> +#include "hpilo.h" + +static struct class *ilo_class; +static unsigned int ilo_major; +static char ilo_hwdev[MAX_ILO_DEV]; + +static inline int get_entry_id(int entry) +{ + return (entry & ENTRY_MASK_DESCRIPTOR) >> ENTRY_BITPOS_DESCRIPTOR; +} + +static inline int get_entry_len(int entry) +{ + return ((entry & ENTRY_MASK_QWORDS) >> ENTRY_BITPOS_QWORDS) << 3; +} + +static inline int mk_entry(int id, int len) +{ + int qlen = len & 7 ? (len >> 3) + 1 : len >> 3; + return id << ENTRY_BITPOS_DESCRIPTOR | qlen << ENTRY_BITPOS_QWORDS; +} + +static inline int desc_mem_sz(int nr_entry) +{ + return nr_entry << L2_QENTRY_SZ; +} + +/* + * FIFO queues, shared with hardware. + * + * If a queue has empty slots, an entry is added to the queue tail, + * and that entry is marked as occupied. + * Entries can be dequeued from the head of the list, when the device + * has marked the entry as consumed. + * + * Returns true on successful queue/dequeue, false on failure. + */ +static int fifo_enqueue(struct ilo_hwinfo *hw, char *fifobar, int entry) +{ + struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar); + int ret = 0; + + spin_lock(&hw->fifo_lock); + if (!(fifo_q->fifobar[(fifo_q->tail + 1) & fifo_q->imask] + & ENTRY_MASK_O)) { + fifo_q->fifobar[fifo_q->tail & fifo_q->imask] |= + (entry & ENTRY_MASK_NOSTATE) | fifo_q->merge; + fifo_q->tail += 1; + ret = 1; + } + spin_unlock(&hw->fifo_lock); + + return ret; +} + +static int fifo_dequeue(struct ilo_hwinfo *hw, char *fifobar, int *entry) +{ + struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar); + int ret = 0; + u64 c; + + spin_lock(&hw->fifo_lock); + c = fifo_q->fifobar[fifo_q->head & fifo_q->imask]; + if (c & ENTRY_MASK_C) { + if (entry) + *entry = c & ENTRY_MASK_NOSTATE; + + fifo_q->fifobar[fifo_q->head & fifo_q->imask] = + (c | ENTRY_MASK) + 1; + fifo_q->head += 1; + ret = 1; + } + spin_unlock(&hw->fifo_lock); + + return ret; +} + +static int ilo_pkt_enqueue(struct ilo_hwinfo *hw, struct ccb *ccb, + int dir, int id, int len) +{ + char *fifobar; + int entry; + + if (dir == SENDQ) + fifobar = ccb->ccb_u1.send_fifobar; + else + fifobar = ccb->ccb_u3.recv_fifobar; + + entry = mk_entry(id, len); + return fifo_enqueue(hw, fifobar, entry); +} + +static int ilo_pkt_dequeue(struct ilo_hwinfo *hw, struct ccb *ccb, + int dir, int *id, int *len, void **pkt) +{ + char *fifobar, *desc; + int entry = 0, pkt_id = 0; + int ret; + + if (dir == SENDQ) { + fifobar = ccb->ccb_u1.send_fifobar; + desc = ccb->ccb_u2.send_desc; + } else { + fifobar = ccb->ccb_u3.recv_fifobar; + desc = ccb->ccb_u4.recv_desc; + } + + ret = fifo_dequeue(hw, fifobar, &entry); + if (ret) { + pkt_id = get_entry_id(entry); + if (id) + *id = pkt_id; + if (len) + *len = get_entry_len(entry); + if (pkt) + *pkt = (void *)(desc + desc_mem_sz(pkt_id)); + } + + return ret; +} + +static inline void doorbell_set(struct ccb *ccb) +{ + iowrite8(1, ccb->ccb_u5.db_base); +} + +static inline void doorbell_clr(struct ccb *ccb) +{ + iowrite8(2, ccb->ccb_u5.db_base); +} +static inline int ctrl_set(int l2sz, int idxmask, int desclim) +{ + int active = 0, go = 1; + return l2sz << CTRL_BITPOS_L2SZ | + idxmask << CTRL_BITPOS_FIFOINDEXMASK | + desclim << CTRL_BITPOS_DESCLIMIT | + active << CTRL_BITPOS_A | 
+ go << CTRL_BITPOS_G; +} +static void ctrl_setup(struct ccb *ccb, int nr_desc, int l2desc_sz) +{ + /* for simplicity, use the same parameters for send and recv ctrls */ + ccb->send_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1); + ccb->recv_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1); +} + +static inline int fifo_sz(int nr_entry) +{ + /* size of a fifo is determined by the number of entries it contains */ + return (nr_entry * sizeof(u64)) + FIFOHANDLESIZE; +} + +static void fifo_setup(void *base_addr, int nr_entry) +{ + struct fifo *fifo_q = base_addr; + int i; + + /* set up an empty fifo */ + fifo_q->head = 0; + fifo_q->tail = 0; + fifo_q->reset = 0; + fifo_q->nrents = nr_entry; + fifo_q->imask = nr_entry - 1; + fifo_q->merge = ENTRY_MASK_O; + + for (i = 0; i < nr_entry; i++) + fifo_q->fifobar[i] = 0; +} + +static void ilo_ccb_close(struct pci_dev *pdev, struct ccb_data *data) +{ + struct ccb *driver_ccb; + struct ccb __iomem *device_ccb; + int retries; + + driver_ccb = &data->driver_ccb; + device_ccb = data->mapped_ccb; + + /* complicated dance to tell the hw we are stopping */ + doorbell_clr(driver_ccb); + iowrite32(ioread32(&device_ccb->send_ctrl) & ~(1 << CTRL_BITPOS_G), + &device_ccb->send_ctrl); + iowrite32(ioread32(&device_ccb->recv_ctrl) & ~(1 << CTRL_BITPOS_G), + &device_ccb->recv_ctrl); + + /* give iLO some time to process stop request */ + for (retries = 1000; retries > 0; retries--) { + doorbell_set(driver_ccb); + udelay(1); + if (!(ioread32(&device_ccb->send_ctrl) & (1 << CTRL_BITPOS_A)) + && + !(ioread32(&device_ccb->recv_ctrl) & (1 << CTRL_BITPOS_A))) + break; + } + if (retries == 0) + dev_err(&pdev->dev, "Closing, but controller still active\n"); + + /* clear the hw ccb */ + memset_io(device_ccb, 0, sizeof(struct ccb)); + + /* free resources used to back send/recv queues */ + pci_free_consistent(pdev, data->dma_size, data->dma_va, data->dma_pa); +} + +static int ilo_ccb_open(struct ilo_hwinfo *hw, struct ccb_data *data, int slot) +{ + char *dma_va, *dma_pa; + int pkt_id, pkt_sz, i, error; + struct ccb *driver_ccb, *ilo_ccb; + struct pci_dev *pdev; + + driver_ccb = &data->driver_ccb; + ilo_ccb = &data->ilo_ccb; + pdev = hw->ilo_dev; + + data->dma_size = 2 * fifo_sz(NR_QENTRY) + + 2 * desc_mem_sz(NR_QENTRY) + + ILO_START_ALIGN + ILO_CACHE_SZ; + + error = -ENOMEM; + data->dma_va = pci_alloc_consistent(pdev, data->dma_size, + &data->dma_pa); + if (!data->dma_va) + goto out; + + dma_va = (char *)data->dma_va; + dma_pa = (char *)data->dma_pa; + + memset(dma_va, 0, data->dma_size); + + dma_va = (char *)roundup((unsigned long)dma_va, ILO_START_ALIGN); + dma_pa = (char *)roundup((unsigned long)dma_pa, ILO_START_ALIGN); + + /* + * Create two ccb's, one with virt addrs, one with phys addrs. + * Copy the phys addr ccb to device shared mem. 
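+ *
+ * Each queue entry packs a descriptor index and a length in qwords
+ * (see mk_entry()/get_entry_len() above): id 3 with a 100-byte packet
+ * gives qlen = (100 >> 3) + 1 = 13, so entry = 3 << 10 | 13, and
+ * get_entry_len() later recovers 13 << 3 = 104 bytes -- lengths are
+ * rounded up to whole qwords.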
+ */ + ctrl_setup(driver_ccb, NR_QENTRY, L2_QENTRY_SZ); + ctrl_setup(ilo_ccb, NR_QENTRY, L2_QENTRY_SZ); + + fifo_setup(dma_va, NR_QENTRY); + driver_ccb->ccb_u1.send_fifobar = dma_va + FIFOHANDLESIZE; + ilo_ccb->ccb_u1.send_fifobar = dma_pa + FIFOHANDLESIZE; + dma_va += fifo_sz(NR_QENTRY); + dma_pa += fifo_sz(NR_QENTRY); + + dma_va = (char *)roundup((unsigned long)dma_va, ILO_CACHE_SZ); + dma_pa = (char *)roundup((unsigned long)dma_pa, ILO_CACHE_SZ); + + fifo_setup(dma_va, NR_QENTRY); + driver_ccb->ccb_u3.recv_fifobar = dma_va + FIFOHANDLESIZE; + ilo_ccb->ccb_u3.recv_fifobar = dma_pa + FIFOHANDLESIZE; + dma_va += fifo_sz(NR_QENTRY); + dma_pa += fifo_sz(NR_QENTRY); + + driver_ccb->ccb_u2.send_desc = dma_va; + ilo_ccb->ccb_u2.send_desc = dma_pa; + dma_pa += desc_mem_sz(NR_QENTRY); + dma_va += desc_mem_sz(NR_QENTRY); + + driver_ccb->ccb_u4.recv_desc = dma_va; + ilo_ccb->ccb_u4.recv_desc = dma_pa; + + driver_ccb->channel = slot; + ilo_ccb->channel = slot; + + driver_ccb->ccb_u5.db_base = hw->db_vaddr + (slot << L2_DB_SIZE); + ilo_ccb->ccb_u5.db_base = NULL; /* hw ccb's doorbell is not used */ + + /* copy the ccb with physical addrs to device memory */ + data->mapped_ccb = (struct ccb __iomem *) + (hw->ram_vaddr + (slot * ILOHW_CCB_SZ)); + memcpy_toio(data->mapped_ccb, ilo_ccb, sizeof(struct ccb)); + + /* put packets on the send and receive queues */ + pkt_sz = 0; + for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++) { + ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, pkt_sz); + doorbell_set(driver_ccb); + } + + pkt_sz = desc_mem_sz(1); + for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++) + ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, pkt_sz); + + doorbell_clr(driver_ccb); + + /* make sure iLO is really handling requests */ + for (i = 1000; i > 0; i--) { + if (ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, NULL, NULL)) + break; + udelay(1); + } + + if (i) { + ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, 0); + doorbell_set(driver_ccb); + } else { + dev_err(&pdev->dev, "Open could not dequeue a packet\n"); + error = -EBUSY; + goto free; + } + + return 0; +free: + pci_free_consistent(pdev, data->dma_size, data->dma_va, data->dma_pa); +out: + return error; +} + +static inline int is_channel_reset(struct ccb *ccb) +{ + /* check for this particular channel needing a reset */ + return FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset; +} + +static inline void set_channel_reset(struct ccb *ccb) +{ + /* set a flag indicating this channel needs a reset */ + FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset = 1; +} + +static inline int is_device_reset(struct ilo_hwinfo *hw) +{ + /* check for global reset condition */ + return ioread32(&hw->mmio_vaddr[DB_OUT]) & (1 << DB_RESET); +} + +static inline void clear_device(struct ilo_hwinfo *hw) +{ + /* clear the device (reset bits, pending channel entries) */ + iowrite32(-1, &hw->mmio_vaddr[DB_OUT]); +} + +static void ilo_locked_reset(struct ilo_hwinfo *hw) +{ + int slot; + + /* + * Mapped memory is zeroed on ilo reset, so set a per ccb flag + * to indicate that this ccb needs to be closed and reopened. 
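+ *
+ * User space observes the pending reset as -ENODEV from read() and
+ * write() (see ilo_read() below) and must close and reopen its file
+ * descriptor to get a freshly initialized channel.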
+ */ + for (slot = 0; slot < MAX_CCB; slot++) { + if (!hw->ccb_alloc[slot]) + continue; + set_channel_reset(&hw->ccb_alloc[slot]->driver_ccb); + } + + clear_device(hw); +} + +static void ilo_reset(struct ilo_hwinfo *hw) +{ + spin_lock(&hw->alloc_lock); + + /* reset might have been handled after lock was taken */ + if (is_device_reset(hw)) + ilo_locked_reset(hw); + + spin_unlock(&hw->alloc_lock); +} + +static ssize_t ilo_read(struct file *fp, char __user *buf, + size_t len, loff_t *off) +{ + int err, found, cnt, pkt_id, pkt_len; + struct ccb_data *data; + struct ccb *driver_ccb; + struct ilo_hwinfo *hw; + void *pkt; + + data = fp->private_data; + driver_ccb = &data->driver_ccb; + hw = data->ilo_hw; + + if (is_device_reset(hw) || is_channel_reset(driver_ccb)) { + /* + * If the device has been reset, applications + * need to close and reopen all ccbs. + */ + ilo_reset(hw); + return -ENODEV; + } + + /* + * This function is to be called when data is expected + * in the channel, and will return an error if no packet is found + * during the loop below. The sleep/retry logic is to allow + * applications to call read() immediately post write(), + * and give iLO some time to process the sent packet. + */ + cnt = 20; + do { + /* look for a received packet */ + found = ilo_pkt_dequeue(hw, driver_ccb, RECVQ, &pkt_id, + &pkt_len, &pkt); + if (found) + break; + cnt--; + msleep(100); + } while (!found && cnt); + + if (!found) + return -EAGAIN; + + /* only copy the length of the received packet */ + if (pkt_len < len) + len = pkt_len; + + err = copy_to_user(buf, pkt, len); + + /* return the received packet to the queue */ + ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, desc_mem_sz(1)); + + return err ? -EFAULT : len; +} + +static ssize_t ilo_write(struct file *fp, const char __user *buf, + size_t len, loff_t *off) +{ + int err, pkt_id, pkt_len; + struct ccb_data *data; + struct ccb *driver_ccb; + struct ilo_hwinfo *hw; + void *pkt; + + data = fp->private_data; + driver_ccb = &data->driver_ccb; + hw = data->ilo_hw; + + if (is_device_reset(hw) || is_channel_reset(driver_ccb)) { + /* + * If the device has been reset, applications + * need to close and reopen all ccbs. + */ + ilo_reset(hw); + return -ENODEV; + } + + /* get a packet to send the user command */ + if (!ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, &pkt_len, &pkt)) + return -EBUSY; + + /* limit the length to the length of the packet */ + if (pkt_len < len) + len = pkt_len; + + /* on failure, set the len to 0 to return empty packet to the device */ + err = copy_from_user(pkt, buf, len); + if (err) + len = 0; + + /* send the packet */ + ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, len); + doorbell_set(driver_ccb); + + return err ? 
-EFAULT : len; +} + +static int ilo_close(struct inode *ip, struct file *fp) +{ + int slot; + struct ccb_data *data; + struct ilo_hwinfo *hw; + + slot = iminor(ip) % MAX_CCB; + hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev); + + spin_lock(&hw->alloc_lock); + + if (is_device_reset(hw)) + ilo_locked_reset(hw); + + if (hw->ccb_alloc[slot]->ccb_cnt == 1) { + + data = fp->private_data; + + ilo_ccb_close(hw->ilo_dev, data); + + kfree(data); + hw->ccb_alloc[slot] = NULL; + } else + hw->ccb_alloc[slot]->ccb_cnt--; + + spin_unlock(&hw->alloc_lock); + + return 0; +} + +static int ilo_open(struct inode *ip, struct file *fp) +{ + int slot, error; + struct ccb_data *data; + struct ilo_hwinfo *hw; + + slot = iminor(ip) % MAX_CCB; + hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev); + + /* new ccb allocation */ + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + spin_lock(&hw->alloc_lock); + + if (is_device_reset(hw)) + ilo_locked_reset(hw); + + /* each fd private_data holds sw/hw view of ccb */ + if (hw->ccb_alloc[slot] == NULL) { + /* create a channel control block for this minor */ + error = ilo_ccb_open(hw, data, slot); + if (!error) { + hw->ccb_alloc[slot] = data; + hw->ccb_alloc[slot]->ccb_cnt = 1; + hw->ccb_alloc[slot]->ccb_excl = fp->f_flags & O_EXCL; + hw->ccb_alloc[slot]->ilo_hw = hw; + } else + kfree(data); + } else { + kfree(data); + if (fp->f_flags & O_EXCL || hw->ccb_alloc[slot]->ccb_excl) { + /* + * The channel exists, and either this open + * or a previous open of this channel wants + * exclusive access. + */ + error = -EBUSY; + } else { + hw->ccb_alloc[slot]->ccb_cnt++; + error = 0; + } + } + spin_unlock(&hw->alloc_lock); + + if (!error) + fp->private_data = hw->ccb_alloc[slot]; + + return error; +} + +static const struct file_operations ilo_fops = { + .owner = THIS_MODULE, + .read = ilo_read, + .write = ilo_write, + .open = ilo_open, + .release = ilo_close, +}; + +static void ilo_unmap_device(struct pci_dev *pdev, struct ilo_hwinfo *hw) +{ + pci_iounmap(pdev, hw->db_vaddr); + pci_iounmap(pdev, hw->ram_vaddr); + pci_iounmap(pdev, hw->mmio_vaddr); +} + +static int __devinit ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw) +{ + int error = -ENOMEM; + + /* map the memory mapped i/o registers */ + hw->mmio_vaddr = pci_iomap(pdev, 1, 0); + if (hw->mmio_vaddr == NULL) { + dev_err(&pdev->dev, "Error mapping mmio\n"); + goto out; + } + + /* map the adapter shared memory region */ + hw->ram_vaddr = pci_iomap(pdev, 2, MAX_CCB * ILOHW_CCB_SZ); + if (hw->ram_vaddr == NULL) { + dev_err(&pdev->dev, "Error mapping shared mem\n"); + goto mmio_free; + } + + /* map the doorbell aperture */ + hw->db_vaddr = pci_iomap(pdev, 3, MAX_CCB * ONE_DB_SIZE); + if (hw->db_vaddr == NULL) { + dev_err(&pdev->dev, "Error mapping doorbell\n"); + goto ram_free; + } + + return 0; +ram_free: + pci_iounmap(pdev, hw->ram_vaddr); +mmio_free: + pci_iounmap(pdev, hw->mmio_vaddr); +out: + return error; +} + +static void ilo_remove(struct pci_dev *pdev) +{ + int i, minor; + struct ilo_hwinfo *ilo_hw = pci_get_drvdata(pdev); + + clear_device(ilo_hw); + + minor = MINOR(ilo_hw->cdev.dev); + for (i = minor; i < minor + MAX_CCB; i++) + device_destroy(ilo_class, MKDEV(ilo_major, i)); + + cdev_del(&ilo_hw->cdev); + ilo_unmap_device(pdev, ilo_hw); + pci_release_regions(pdev); + pci_disable_device(pdev); + kfree(ilo_hw); + ilo_hwdev[(minor / MAX_CCB)] = 0; +} + +static int __devinit ilo_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int devnum, minor, start, error; + 
struct ilo_hwinfo *ilo_hw; + + /* find a free range for device files */ + for (devnum = 0; devnum < MAX_ILO_DEV; devnum++) { + if (ilo_hwdev[devnum] == 0) { + ilo_hwdev[devnum] = 1; + break; + } + } + + if (devnum == MAX_ILO_DEV) { + dev_err(&pdev->dev, "Error finding free device\n"); + return -ENODEV; + } + + /* track global allocations for this device */ + error = -ENOMEM; + ilo_hw = kzalloc(sizeof(*ilo_hw), GFP_KERNEL); + if (!ilo_hw) + goto out; + + ilo_hw->ilo_dev = pdev; + spin_lock_init(&ilo_hw->alloc_lock); + spin_lock_init(&ilo_hw->fifo_lock); + + error = pci_enable_device(pdev); + if (error) + goto free; + + pci_set_master(pdev); + + error = pci_request_regions(pdev, ILO_NAME); + if (error) + goto disable; + + error = ilo_map_device(pdev, ilo_hw); + if (error) + goto free_regions; + + pci_set_drvdata(pdev, ilo_hw); + clear_device(ilo_hw); + + cdev_init(&ilo_hw->cdev, &ilo_fops); + ilo_hw->cdev.owner = THIS_MODULE; + start = devnum * MAX_CCB; + error = cdev_add(&ilo_hw->cdev, MKDEV(ilo_major, start), MAX_CCB); + if (error) { + dev_err(&pdev->dev, "Could not add cdev\n"); + goto unmap; + } + + for (minor = 0 ; minor < MAX_CCB; minor++) { + struct device *dev; + dev = device_create(ilo_class, &pdev->dev, + MKDEV(ilo_major, minor), NULL, + "hpilo!d%dccb%d", devnum, minor); + if (IS_ERR(dev)) + dev_err(&pdev->dev, "Could not create files\n"); + } + + return 0; +unmap: + ilo_unmap_device(pdev, ilo_hw); +free_regions: + pci_release_regions(pdev); +disable: + pci_disable_device(pdev); +free: + kfree(ilo_hw); +out: + ilo_hwdev[devnum] = 0; + return error; +} + +static struct pci_device_id ilo_devices[] = { + { PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB204) }, + { PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3307) }, + { } +}; +MODULE_DEVICE_TABLE(pci, ilo_devices); + +static struct pci_driver ilo_driver = { + .name = ILO_NAME, + .id_table = ilo_devices, + .probe = ilo_probe, + .remove = __devexit_p(ilo_remove), +}; + +static int __init ilo_init(void) +{ + int error; + dev_t dev; + + ilo_class = class_create(THIS_MODULE, "iLO"); + if (IS_ERR(ilo_class)) { + error = PTR_ERR(ilo_class); + goto out; + } + + error = alloc_chrdev_region(&dev, 0, MAX_OPEN, ILO_NAME); + if (error) + goto class_destroy; + + ilo_major = MAJOR(dev); + + error = pci_register_driver(&ilo_driver); + if (error) + goto chr_remove; + + return 0; +chr_remove: + unregister_chrdev_region(dev, MAX_OPEN); +class_destroy: + class_destroy(ilo_class); +out: + return error; +} + +static void __exit ilo_exit(void) +{ + pci_unregister_driver(&ilo_driver); + unregister_chrdev_region(MKDEV(ilo_major, 0), MAX_OPEN); + class_destroy(ilo_class); +} + +MODULE_VERSION("0.05"); +MODULE_ALIAS(ILO_NAME); +MODULE_DESCRIPTION(ILO_NAME); +MODULE_AUTHOR("David Altobelli <david.altobelli@hp.com>"); +MODULE_LICENSE("GPL v2"); + +module_init(ilo_init); +module_exit(ilo_exit); diff --git a/drivers/misc/hpilo.h b/drivers/misc/hpilo.h new file mode 100644 index 0000000..a281207 --- /dev/null +++ b/drivers/misc/hpilo.h @@ -0,0 +1,189 @@ +/* + * linux/drivers/char/hpilo.h + * + * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. + * David Altobelli <david.altobelli@hp.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
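+ *
+ * Typical user-space use of the character devices created by this
+ * driver, as a sketch (the node path assumes udev maps the "!" in the
+ * "hpilo!d%dccb%d" device name to a subdirectory):
+ *
+ *	fd = open("/dev/hpilo/d0ccb0", O_RDWR);
+ *	write(fd, req, req_len);           - queue one packet to iLO
+ *	n = read(fd, resp, sizeof(resp));  - fails with EAGAIN if no
+ *	                                     reply arrives, ENODEV after
+ *	                                     an iLO reset (close and
+ *	                                     reopen the fd)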
+ */ +#ifndef __HPILO_H +#define __HPILO_H + +#define ILO_NAME "hpilo" + +/* max number of open channel control blocks per device, hw limited to 32 */ +#define MAX_CCB 8 +/* max number of supported devices */ +#define MAX_ILO_DEV 1 +/* max number of files */ +#define MAX_OPEN (MAX_CCB * MAX_ILO_DEV) + +/* + * Per device, used to track global memory allocations. + */ +struct ilo_hwinfo { + /* mmio registers on device */ + char __iomem *mmio_vaddr; + + /* doorbell registers on device */ + char __iomem *db_vaddr; + + /* shared memory on device used for channel control blocks */ + char __iomem *ram_vaddr; + + /* files corresponding to this device */ + struct ccb_data *ccb_alloc[MAX_CCB]; + + struct pci_dev *ilo_dev; + + spinlock_t alloc_lock; + spinlock_t fifo_lock; + + struct cdev cdev; +}; + +/* offset from mmio_vaddr */ +#define DB_OUT 0xD4 +/* DB_OUT reset bit */ +#define DB_RESET 26 + +/* + * Channel control block. Used to manage hardware queues. + * The format must match hw's version. The hw ccb is 128 bytes, + * but the context area shouldn't be touched by the driver. + */ +#define ILOSW_CCB_SZ 64 +#define ILOHW_CCB_SZ 128 +struct ccb { + union { + char *send_fifobar; + u64 padding1; + } ccb_u1; + union { + char *send_desc; + u64 padding2; + } ccb_u2; + u64 send_ctrl; + + union { + char *recv_fifobar; + u64 padding3; + } ccb_u3; + union { + char *recv_desc; + u64 padding4; + } ccb_u4; + u64 recv_ctrl; + + union { + char __iomem *db_base; + u64 padding5; + } ccb_u5; + + u64 channel; + + /* unused context area (64 bytes) */ +}; + +/* ccb queue parameters */ +#define SENDQ 1 +#define RECVQ 2 +#define NR_QENTRY 4 +#define L2_QENTRY_SZ 12 + +/* ccb ctrl bitfields */ +#define CTRL_BITPOS_L2SZ 0 +#define CTRL_BITPOS_FIFOINDEXMASK 4 +#define CTRL_BITPOS_DESCLIMIT 18 +#define CTRL_BITPOS_A 30 +#define CTRL_BITPOS_G 31 + +/* ccb doorbell macros */ +#define L2_DB_SIZE 14 +#define ONE_DB_SIZE (1 << L2_DB_SIZE) + +/* + * Per fd structure used to track the ccb allocated to that dev file. + */ +struct ccb_data { + /* software version of ccb, using virtual addrs */ + struct ccb driver_ccb; + + /* hardware version of ccb, using physical addrs */ + struct ccb ilo_ccb; + + /* hardware ccb is written to this shared mapped device memory */ + struct ccb __iomem *mapped_ccb; + + /* dma'able memory used for send/recv queues */ + void *dma_va; + dma_addr_t dma_pa; + size_t dma_size; + + /* pointer to hardware device info */ + struct ilo_hwinfo *ilo_hw; + + /* usage count, to allow for shared ccb's */ + int ccb_cnt; + + /* open wanted exclusive access to this ccb */ + int ccb_excl; +}; + +/* + * FIFO queue structure, shared with hw. 
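+ *
+ * The pad_ members below keep head, tail and the entry array on
+ * separate ILO_CACHE_SZ-byte cache lines so driver and device updates
+ * do not false-share. The layout contract with the hardware could be
+ * pinned down at build time, e.g. (a sketch, not present in the
+ * driver):
+ *
+ *	BUILD_BUG_ON(sizeof(struct ccb) != ILOSW_CCB_SZ);
+ *	BUILD_BUG_ON(offsetof(struct fifo, head) != ILO_CACHE_SZ);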
+ */ +#define ILO_START_ALIGN 4096 +#define ILO_CACHE_SZ 128 +struct fifo { + u64 nrents; /* user requested number of fifo entries */ + u64 imask; /* mask to extract valid fifo index */ + u64 merge; /* O/C bits to merge in during enqueue operation */ + u64 reset; /* set to non-zero when the target device resets */ + u8 pad_0[ILO_CACHE_SZ - (sizeof(u64) * 4)]; + + u64 head; + u8 pad_1[ILO_CACHE_SZ - (sizeof(u64))]; + + u64 tail; + u8 pad_2[ILO_CACHE_SZ - (sizeof(u64))]; + + u64 fifobar[1]; +}; + +/* convert between struct fifo, and the fifobar, which is saved in the ccb */ +#define FIFOHANDLESIZE (sizeof(struct fifo) - sizeof(u64)) +#define FIFOBARTOHANDLE(_fifo) \ + ((struct fifo *)(((char *)(_fifo)) - FIFOHANDLESIZE)) + +/* the number of qwords to consume from the entry descriptor */ +#define ENTRY_BITPOS_QWORDS 0 +/* descriptor index number (within a specified queue) */ +#define ENTRY_BITPOS_DESCRIPTOR 10 +/* state bit, fifo entry consumed by consumer */ +#define ENTRY_BITPOS_C 22 +/* state bit, fifo entry is occupied */ +#define ENTRY_BITPOS_O 23 + +#define ENTRY_BITS_QWORDS 10 +#define ENTRY_BITS_DESCRIPTOR 12 +#define ENTRY_BITS_C 1 +#define ENTRY_BITS_O 1 +#define ENTRY_BITS_TOTAL \ + (ENTRY_BITS_C + ENTRY_BITS_O + \ + ENTRY_BITS_QWORDS + ENTRY_BITS_DESCRIPTOR) + +/* extract various entry fields */ +#define ENTRY_MASK ((1 << ENTRY_BITS_TOTAL) - 1) +#define ENTRY_MASK_C (((1 << ENTRY_BITS_C) - 1) << ENTRY_BITPOS_C) +#define ENTRY_MASK_O (((1 << ENTRY_BITS_O) - 1) << ENTRY_BITPOS_O) +#define ENTRY_MASK_QWORDS \ + (((1 << ENTRY_BITS_QWORDS) - 1) << ENTRY_BITPOS_QWORDS) +#define ENTRY_MASK_DESCRIPTOR \ + (((1 << ENTRY_BITS_DESCRIPTOR) - 1) << ENTRY_BITPOS_DESCRIPTOR) + +#define ENTRY_MASK_NOSTATE (ENTRY_MASK >> (ENTRY_BITS_C + ENTRY_BITS_O)) + +#endif /* __HPILO_H */ diff --git a/drivers/misc/ibmasm/Makefile b/drivers/misc/ibmasm/Makefile new file mode 100644 index 0000000..9e63ade --- /dev/null +++ b/drivers/misc/ibmasm/Makefile @@ -0,0 +1,15 @@ + +obj-$(CONFIG_IBM_ASM) := ibmasm.o + +ibmasm-y := module.o \ + ibmasmfs.o \ + event.o \ + command.o \ + remote.o \ + heartbeat.o \ + r_heartbeat.o \ + dot_command.o \ + lowlevel.o + +ibmasm-$(CONFIG_SERIAL_8250) += uart.o + diff --git a/drivers/misc/ibmasm/command.c b/drivers/misc/ibmasm/command.c new file mode 100644 index 0000000..276d3fb --- /dev/null +++ b/drivers/misc/ibmasm/command.c @@ -0,0 +1,185 @@ + +/* + * IBM ASM Service Processor Device Driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ *
+ */

+#include "ibmasm.h"
+#include "lowlevel.h"
+
+static void exec_next_command(struct service_processor *sp);
+
+static atomic_t command_count = ATOMIC_INIT(0);
+
+struct command *ibmasm_new_command(struct service_processor *sp, size_t buffer_size)
+{
+	struct command *cmd;
+
+	if (buffer_size > IBMASM_CMD_MAX_BUFFER_SIZE)
+		return NULL;
+
+	cmd = kzalloc(sizeof(struct command), GFP_KERNEL);
+	if (cmd == NULL)
+		return NULL;
+
+
+	cmd->buffer = kzalloc(buffer_size, GFP_KERNEL);
+	if (cmd->buffer == NULL) {
+		kfree(cmd);
+		return NULL;
+	}
+	cmd->buffer_size = buffer_size;
+
+	kref_init(&cmd->kref);
+	cmd->lock = &sp->lock;
+
+	cmd->status = IBMASM_CMD_PENDING;
+	init_waitqueue_head(&cmd->wait);
+	INIT_LIST_HEAD(&cmd->queue_node);
+
+	atomic_inc(&command_count);
+	dbg("command count: %d\n", atomic_read(&command_count));
+
+	return cmd;
+}
+
+void ibmasm_free_command(struct kref *kref)
+{
+	struct command *cmd = to_command(kref);
+
+	list_del(&cmd->queue_node);
+	atomic_dec(&command_count);
+	dbg("command count: %d\n", atomic_read(&command_count));
+	kfree(cmd->buffer);
+	kfree(cmd);
+}
+
+static void enqueue_command(struct service_processor *sp, struct command *cmd)
+{
+	list_add_tail(&cmd->queue_node, &sp->command_queue);
+}
+
+static struct command *dequeue_command(struct service_processor *sp)
+{
+	struct command *cmd;
+	struct list_head *next;
+
+	if (list_empty(&sp->command_queue))
+		return NULL;
+
+	next = sp->command_queue.next;
+	list_del_init(next);
+	cmd = list_entry(next, struct command, queue_node);
+
+	return cmd;
+}
+
+static inline void do_exec_command(struct service_processor *sp)
+{
+	char tsbuf[32];
+
+	dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
+
+	if (ibmasm_send_i2o_message(sp)) {
+		sp->current_command->status = IBMASM_CMD_FAILED;
+		wake_up(&sp->current_command->wait);
+		command_put(sp->current_command);
+		exec_next_command(sp);
+	}
+}
+
+/**
+ * exec_command
+ * send a command to a service processor
+ * Commands are executed sequentially. One command (sp->current_command)
+ * is sent to the service processor. Once the interrupt handler gets a
+ * message of type command_response, the message is copied into
+ * the current command's buffer.
+ */
+void ibmasm_exec_command(struct service_processor *sp, struct command *cmd)
+{
+	unsigned long flags;
+	char tsbuf[32];
+
+	dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
+
+	spin_lock_irqsave(&sp->lock, flags);
+
+	if (!sp->current_command) {
+		sp->current_command = cmd;
+		command_get(sp->current_command);
+		spin_unlock_irqrestore(&sp->lock, flags);
+		do_exec_command(sp);
+	} else {
+		enqueue_command(sp, cmd);
+		spin_unlock_irqrestore(&sp->lock, flags);
+	}
+}
+
+static void exec_next_command(struct service_processor *sp)
+{
+	unsigned long flags;
+	char tsbuf[32];
+
+	dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
+
+	spin_lock_irqsave(&sp->lock, flags);
+	sp->current_command = dequeue_command(sp);
+	if (sp->current_command) {
+		command_get(sp->current_command);
+		spin_unlock_irqrestore(&sp->lock, flags);
+		do_exec_command(sp);
+	} else {
+		spin_unlock_irqrestore(&sp->lock, flags);
+	}
+}
+
+/**
+ * Sleep until a command has failed or a response has been received
+ * and the command status has been updated by the interrupt handler
+ * (see ibmasm_receive_command_response()).
+ */
+void ibmasm_wait_for_response(struct command *cmd, int timeout)
+{
+	wait_event_interruptible_timeout(cmd->wait,
+				cmd->status == IBMASM_CMD_COMPLETE ||
+				cmd->status == IBMASM_CMD_FAILED,
+				timeout * HZ);
+}
+
+/**
+ * receive_command_response
+ * Called by the interrupt handler when a dot command of type
+ * command_response is received.
+ */
+void ibmasm_receive_command_response(struct service_processor *sp, void *response, size_t size)
+{
+	struct command *cmd = sp->current_command;
+
+	if (!sp->current_command)
+		return;
+
+	memcpy_fromio(cmd->buffer, response, min(size, cmd->buffer_size));
+	cmd->status = IBMASM_CMD_COMPLETE;
+	wake_up(&sp->current_command->wait);
+	command_put(sp->current_command);
+	exec_next_command(sp);
+}
diff --git a/drivers/misc/ibmasm/dot_command.c b/drivers/misc/ibmasm/dot_command.c
new file mode 100644
index 0000000..3dd2dfb
--- /dev/null
+++ b/drivers/misc/ibmasm/dot_command.c
@@ -0,0 +1,152 @@
+/*
+ * IBM ASM Service Processor Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ *
+ */
+
+#include "ibmasm.h"
+#include "dot_command.h"
+
+/**
+ * Dispatch an incoming message to the handler specific to its
+ * message type. Called from interrupt context.
+ */ +void ibmasm_receive_message(struct service_processor *sp, void *message, int message_size) +{ + u32 size; + struct dot_command_header *header = (struct dot_command_header *)message; + + if (message_size == 0) + return; + + size = get_dot_command_size(message); + if (size == 0) + return; + + if (size > message_size) + size = message_size; + + switch (header->type) { + case sp_event: + ibmasm_receive_event(sp, message, size); + break; + case sp_command_response: + ibmasm_receive_command_response(sp, message, size); + break; + case sp_heartbeat: + ibmasm_receive_heartbeat(sp, message, size); + break; + default: + dev_err(sp->dev, "Received unknown message from service processor\n"); + } +} + + +#define INIT_BUFFER_SIZE 32 + + +/** + * send the 4.3.5.10 dot command (driver VPD) to the service processor + */ +int ibmasm_send_driver_vpd(struct service_processor *sp) +{ + struct command *command; + struct dot_command_header *header; + u8 *vpd_command; + u8 *vpd_data; + int result = 0; + + command = ibmasm_new_command(sp, INIT_BUFFER_SIZE); + if (command == NULL) + return -ENOMEM; + + header = (struct dot_command_header *)command->buffer; + header->type = sp_write; + header->command_size = 4; + header->data_size = 16; + header->status = 0; + header->reserved = 0; + + vpd_command = command->buffer + sizeof(struct dot_command_header); + vpd_command[0] = 0x4; + vpd_command[1] = 0x3; + vpd_command[2] = 0x5; + vpd_command[3] = 0xa; + + vpd_data = vpd_command + header->command_size; + vpd_data[0] = 0; + strcat(vpd_data, IBMASM_DRIVER_VPD); + vpd_data[10] = 0; + vpd_data[15] = 0; + + ibmasm_exec_command(sp, command); + ibmasm_wait_for_response(command, IBMASM_CMD_TIMEOUT_NORMAL); + + if (command->status != IBMASM_CMD_COMPLETE) + result = -ENODEV; + + command_put(command); + + return result; +} + +struct os_state_command { + struct dot_command_header header; + unsigned char command[3]; + unsigned char data; +}; + +/** + * send the 4.3.6 dot command (os state) to the service processor + * During driver init this function is called with os state "up". + * This causes the service processor to start sending heartbeats the + * driver. + * During driver exit the function is called with os state "down", + * causing the service processor to stop the heartbeats. 
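+ *
+ * On the wire the complete 4.3.6 message is 10 bytes: the packed
+ * 6-byte header (type = sp_write, command_size = 3, data_size = 1,
+ * i.e. 00 03 01 00 00 00), the command bytes 04 03 06, and one data
+ * byte carrying the os state (SYSTEM_STATE_OS_UP = 5,
+ * SYSTEM_STATE_OS_DOWN = 4).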
+ */
+int ibmasm_send_os_state(struct service_processor *sp, int os_state)
+{
+	struct command *cmd;
+	struct os_state_command *os_state_cmd;
+	int result = 0;
+
+	cmd = ibmasm_new_command(sp, sizeof(struct os_state_command));
+	if (cmd == NULL)
+		return -ENOMEM;
+
+	os_state_cmd = (struct os_state_command *)cmd->buffer;
+	os_state_cmd->header.type = sp_write;
+	os_state_cmd->header.command_size = 3;
+	os_state_cmd->header.data_size = 1;
+	os_state_cmd->header.status = 0;
+	os_state_cmd->command[0] = 4;
+	os_state_cmd->command[1] = 3;
+	os_state_cmd->command[2] = 6;
+	os_state_cmd->data = os_state;
+
+	ibmasm_exec_command(sp, cmd);
+	ibmasm_wait_for_response(cmd, IBMASM_CMD_TIMEOUT_NORMAL);
+
+	if (cmd->status != IBMASM_CMD_COMPLETE)
+		result = -ENODEV;
+
+	command_put(cmd);
+	return result;
+}
diff --git a/drivers/misc/ibmasm/dot_command.h b/drivers/misc/ibmasm/dot_command.h
new file mode 100644
index 0000000..6cbba1a
--- /dev/null
+++ b/drivers/misc/ibmasm/dot_command.h
@@ -0,0 +1,78 @@
+/*
+ * IBM ASM Service Processor Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ *
+ */
+
+#ifndef __DOT_COMMAND_H__
+#define __DOT_COMMAND_H__
+
+/*
+ * dot commands are the protocol used to communicate with the service
+ * processor.
+ * They consist of a header, a command of variable length and data of
+ * variable length.
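+ *
+ * For example, the driver-VPD command built in dot_command.c has
+ * command_size = 4 and data_size = 16, so get_dot_command_size()
+ * below reports sizeof(struct dot_command_header) + 4 + 16 = 26
+ * bytes.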
+ */
+
+/* dot command types */
+#define sp_write		0
+#define sp_write_next		1
+#define sp_read			2
+#define sp_read_next		3
+#define sp_command_response	4
+#define sp_event		5
+#define sp_heartbeat		6
+
+#pragma pack(1)
+struct dot_command_header {
+	u8	type;
+	u8	command_size;
+	u16	data_size;
+	u8	status;
+	u8	reserved;
+};
+#pragma pack()
+
+static inline size_t get_dot_command_size(void *buffer)
+{
+	struct dot_command_header *cmd = (struct dot_command_header *)buffer;
+	return sizeof(struct dot_command_header) + cmd->command_size + cmd->data_size;
+}
+
+static inline unsigned int get_dot_command_timeout(void *buffer)
+{
+	struct dot_command_header *header = (struct dot_command_header *)buffer;
+	unsigned char *cmd = buffer + sizeof(struct dot_command_header);
+
+	/* dot commands 6.3.1, 7.1 and 8.x need a longer timeout */
+
+	if (header->command_size == 3) {
+		if ((cmd[0] == 6) && (cmd[1] == 3) && (cmd[2] == 1))
+			return IBMASM_CMD_TIMEOUT_EXTRA;
+	} else if (header->command_size == 2) {
+		if ((cmd[0] == 7) && (cmd[1] == 1))
+			return IBMASM_CMD_TIMEOUT_EXTRA;
+		if (cmd[0] == 8)
+			return IBMASM_CMD_TIMEOUT_EXTRA;
+	}
+	return IBMASM_CMD_TIMEOUT_NORMAL;
+}
+
+#endif /* __DOT_COMMAND_H__ */
diff --git a/drivers/misc/ibmasm/event.c b/drivers/misc/ibmasm/event.c
new file mode 100644
index 0000000..fda6a4d
--- /dev/null
+++ b/drivers/misc/ibmasm/event.c
@@ -0,0 +1,175 @@
+
+/*
+ * IBM ASM Service Processor Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ *
+ */
+
+#include "ibmasm.h"
+#include "lowlevel.h"
+
+/*
+ * ASM service processor event handling routines.
+ *
+ * Events are signalled to the device drivers through interrupts.
+ * They have the format of dot commands, with the type field set to
+ * sp_event.
+ * The driver does not interpret the events, it simply stores them in a
+ * circular buffer.
+ */
+
+static void wake_up_event_readers(struct service_processor *sp)
+{
+	struct event_reader *reader;
+
+	list_for_each_entry(reader, &sp->event_buffer->readers, node)
+		wake_up_interruptible(&reader->wait);
+}
+
+/**
+ * receive_event
+ * Called by the interrupt handler when a dot command of type sp_event is
+ * received.
+ * Store the event in the circular event buffer, wake up any sleeping
+ * event readers.
+ * There is no reader marker in the buffer, therefore readers are
+ * responsible for keeping up with the writer, or they will lose events.
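+ *
+ * With IBMASM_NUM_EVENTS == 10, for example, a reader waiting for
+ * serial number 18 while the writer has advanced to 31 has lost
+ * events 18-20; the catch-up scan in ibmasm_get_next_event() hands it
+ * event 21, the oldest still in the buffer.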
+ */ +void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int data_size) +{ + struct event_buffer *buffer = sp->event_buffer; + struct ibmasm_event *event; + unsigned long flags; + + data_size = min(data_size, IBMASM_EVENT_MAX_SIZE); + + spin_lock_irqsave(&sp->lock, flags); + /* copy the event into the next slot in the circular buffer */ + event = &buffer->events[buffer->next_index]; + memcpy_fromio(event->data, data, data_size); + event->data_size = data_size; + event->serial_number = buffer->next_serial_number; + + /* advance indices in the buffer */ + buffer->next_index = (buffer->next_index + 1) % IBMASM_NUM_EVENTS; + buffer->next_serial_number++; + spin_unlock_irqrestore(&sp->lock, flags); + + wake_up_event_readers(sp); +} + +static inline int event_available(struct event_buffer *b, struct event_reader *r) +{ + return (r->next_serial_number < b->next_serial_number); +} + +/** + * get_next_event + * Called by event readers (initiated from user space through the file + * system). + * Sleeps until a new event is available. + */ +int ibmasm_get_next_event(struct service_processor *sp, struct event_reader *reader) +{ + struct event_buffer *buffer = sp->event_buffer; + struct ibmasm_event *event; + unsigned int index; + unsigned long flags; + + reader->cancelled = 0; + + if (wait_event_interruptible(reader->wait, + event_available(buffer, reader) || reader->cancelled)) + return -ERESTARTSYS; + + if (!event_available(buffer, reader)) + return 0; + + spin_lock_irqsave(&sp->lock, flags); + + index = buffer->next_index; + event = &buffer->events[index]; + while (event->serial_number < reader->next_serial_number) { + index = (index + 1) % IBMASM_NUM_EVENTS; + event = &buffer->events[index]; + } + memcpy(reader->data, event->data, event->data_size); + reader->data_size = event->data_size; + reader->next_serial_number = event->serial_number + 1; + + spin_unlock_irqrestore(&sp->lock, flags); + + return event->data_size; +} + +void ibmasm_cancel_next_event(struct event_reader *reader) +{ + reader->cancelled = 1; + wake_up_interruptible(&reader->wait); +} + +void ibmasm_event_reader_register(struct service_processor *sp, struct event_reader *reader) +{ + unsigned long flags; + + reader->next_serial_number = sp->event_buffer->next_serial_number; + init_waitqueue_head(&reader->wait); + spin_lock_irqsave(&sp->lock, flags); + list_add(&reader->node, &sp->event_buffer->readers); + spin_unlock_irqrestore(&sp->lock, flags); +} + +void ibmasm_event_reader_unregister(struct service_processor *sp, struct event_reader *reader) +{ + unsigned long flags; + + spin_lock_irqsave(&sp->lock, flags); + list_del(&reader->node); + spin_unlock_irqrestore(&sp->lock, flags); +} + +int ibmasm_event_buffer_init(struct service_processor *sp) +{ + struct event_buffer *buffer; + struct ibmasm_event *event; + int i; + + buffer = kmalloc(sizeof(struct event_buffer), GFP_KERNEL); + if (!buffer) + return 1; + + buffer->next_index = 0; + buffer->next_serial_number = 1; + + event = buffer->events; + for (i=0; i<IBMASM_NUM_EVENTS; i++, event++) + event->serial_number = 0; + + INIT_LIST_HEAD(&buffer->readers); + + sp->event_buffer = buffer; + + return 0; +} + +void ibmasm_event_buffer_exit(struct service_processor *sp) +{ + kfree(sp->event_buffer); +} diff --git a/drivers/misc/ibmasm/heartbeat.c b/drivers/misc/ibmasm/heartbeat.c new file mode 100644 index 0000000..1bc4306 --- /dev/null +++ b/drivers/misc/ibmasm/heartbeat.c @@ -0,0 +1,101 @@ + +/* + * IBM ASM Service Processor Device Driver + * + * This program 
is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ *
+ */
+
+#include <linux/notifier.h>
+#include "ibmasm.h"
+#include "dot_command.h"
+#include "lowlevel.h"
+
+static int suspend_heartbeats = 0;
+
+/*
+ * Once the driver indicates to the service processor that it is running
+ * - see send_os_state() - the service processor sends periodic heartbeats
+ * to the driver. The driver must respond to the heartbeats or else the OS
+ * will be rebooted.
+ * In the case of a panic the interrupt handler continues to work and thus
+ * continues to respond to heartbeats, making the service processor believe
+ * the OS is still running and thus preventing a reboot.
+ * To prevent this from happening a callback is added to the
+ * panic_notifier_list.
+ * Before responding to a heartbeat the driver checks if a panic has
+ * happened; if so, it suspends heartbeats, causing the service processor
+ * to reboot the OS as expected.
+ */
+static int panic_happened(struct notifier_block *n, unsigned long val, void *v)
+{
+	suspend_heartbeats = 1;
+	return 0;
+}
+
+static struct notifier_block panic_notifier = { panic_happened, NULL, 1 };
+
+void ibmasm_register_panic_notifier(void)
+{
+	atomic_notifier_chain_register(&panic_notifier_list, &panic_notifier);
+}
+
+void ibmasm_unregister_panic_notifier(void)
+{
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+			&panic_notifier);
+}
+
+
+int ibmasm_heartbeat_init(struct service_processor *sp)
+{
+	sp->heartbeat = ibmasm_new_command(sp, HEARTBEAT_BUFFER_SIZE);
+	if (sp->heartbeat == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void ibmasm_heartbeat_exit(struct service_processor *sp)
+{
+	char tsbuf[32];
+
+	dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
+	ibmasm_wait_for_response(sp->heartbeat, IBMASM_CMD_TIMEOUT_NORMAL);
+	dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
+	suspend_heartbeats = 1;
+	command_put(sp->heartbeat);
+}
+
+void ibmasm_receive_heartbeat(struct service_processor *sp, void *message, size_t size)
+{
+	struct command *cmd = sp->heartbeat;
+	struct dot_command_header *header = (struct dot_command_header *)cmd->buffer;
+	char tsbuf[32];
+
+	dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
+	if (suspend_heartbeats)
+		return;
+
+	/* return the received dot command to sender */
+	cmd->status = IBMASM_CMD_PENDING;
+	size = min(size, cmd->buffer_size);
+	memcpy_fromio(cmd->buffer, message, size);
+	header->type = sp_write;
+	ibmasm_exec_command(sp, cmd);
+}
diff --git a/drivers/misc/ibmasm/i2o.h b/drivers/misc/ibmasm/i2o.h
new file mode 100644
index 0000000..bf2c738
--- /dev/null
+++ b/drivers/misc/ibmasm/i2o.h
@@ -0,0 +1,77 @@
+/*
+ * IBM ASM Service Processor Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of
the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2004 + * + * Author: Max Asböck <amax@us.ibm.com> + * + */ + +#pragma pack(1) +struct i2o_header { + u8 version; + u8 message_flags; + u16 message_size; + u8 target; + u8 initiator_and_target; + u8 initiator; + u8 function; + u32 initiator_context; +}; +#pragma pack() + +#define I2O_HEADER_TEMPLATE \ + { .version = 0x01, \ + .message_flags = 0x00, \ + .function = 0xFF, \ + .initiator = 0x00, \ + .initiator_and_target = 0x40, \ + .target = 0x00, \ + .initiator_context = 0x0 } + +#define I2O_MESSAGE_SIZE 0x1000 +#define I2O_COMMAND_SIZE (I2O_MESSAGE_SIZE - sizeof(struct i2o_header)) + +#pragma pack(1) +struct i2o_message { + struct i2o_header header; + void *data; +}; +#pragma pack() + +static inline unsigned short outgoing_message_size(unsigned int data_size) +{ + unsigned int size; + unsigned short i2o_size; + + if (data_size > I2O_COMMAND_SIZE) + data_size = I2O_COMMAND_SIZE; + + size = sizeof(struct i2o_header) + data_size; + + i2o_size = size / sizeof(u32); + + if (size % sizeof(u32)) + i2o_size++; + + return i2o_size; +} + +static inline u32 incoming_data_size(struct i2o_message *i2o_message) +{ + return (sizeof(u32) * i2o_message->header.message_size); +} diff --git a/drivers/misc/ibmasm/ibmasm.h b/drivers/misc/ibmasm/ibmasm.h new file mode 100644 index 0000000..4d8a4e2 --- /dev/null +++ b/drivers/misc/ibmasm/ibmasm.h @@ -0,0 +1,220 @@ + +/* + * IBM ASM Service Processor Device Driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
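+ *
+ * (On the i2o.h helpers above: message sizes are counted in 32-bit
+ * words. outgoing_message_size() turns a 26-byte dot command into
+ * 12 + 26 = 38 bytes, i.e. 9 words with 2 bytes left over, rounded up
+ * to 10 words; incoming_data_size() converts back, 10 * 4 = 40
+ * bytes.)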
+ * + * Copyright (C) IBM Corporation, 2004 + * + * Author: Max Asböck <amax@us.ibm.com> + * + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/list.h> +#include <linux/wait.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/kref.h> +#include <linux/device.h> +#include <linux/input.h> + +/* Driver identification */ +#define DRIVER_NAME "ibmasm" +#define DRIVER_VERSION "1.0" +#define DRIVER_AUTHOR "Max Asbock <masbock@us.ibm.com>, Vernon Mauery <vernux@us.ibm.com>" +#define DRIVER_DESC "IBM ASM Service Processor Driver" + +#define err(msg) printk(KERN_ERR "%s: " msg "\n", DRIVER_NAME) +#define info(msg) printk(KERN_INFO "%s: " msg "\n", DRIVER_NAME) + +extern int ibmasm_debug; +#define dbg(STR, ARGS...) \ + do { \ + if (ibmasm_debug) \ + printk(KERN_DEBUG STR , ##ARGS); \ + } while (0) + +static inline char *get_timestamp(char *buf) +{ + struct timeval now; + do_gettimeofday(&now); + sprintf(buf, "%lu.%lu", now.tv_sec, now.tv_usec); + return buf; +} + +#define IBMASM_CMD_PENDING 0 +#define IBMASM_CMD_COMPLETE 1 +#define IBMASM_CMD_FAILED 2 + +#define IBMASM_CMD_TIMEOUT_NORMAL 45 +#define IBMASM_CMD_TIMEOUT_EXTRA 240 + +#define IBMASM_CMD_MAX_BUFFER_SIZE 0x8000 + +#define REVERSE_HEARTBEAT_TIMEOUT 120 + +#define HEARTBEAT_BUFFER_SIZE 0x400 + +#ifdef IA64 +#define IBMASM_DRIVER_VPD "Lin64 6.08 " +#else +#define IBMASM_DRIVER_VPD "Lin32 6.08 " +#endif + +#define SYSTEM_STATE_OS_UP 5 +#define SYSTEM_STATE_OS_DOWN 4 + +#define IBMASM_NAME_SIZE 16 + +#define IBMASM_NUM_EVENTS 10 +#define IBMASM_EVENT_MAX_SIZE 2048u + + +struct command { + struct list_head queue_node; + wait_queue_head_t wait; + unsigned char *buffer; + size_t buffer_size; + int status; + struct kref kref; + spinlock_t *lock; +}; +#define to_command(c) container_of(c, struct command, kref) + +void ibmasm_free_command(struct kref *kref); +static inline void command_put(struct command *cmd) +{ + unsigned long flags; + spinlock_t *lock = cmd->lock; + + spin_lock_irqsave(lock, flags); + kref_put(&cmd->kref, ibmasm_free_command); + spin_unlock_irqrestore(lock, flags); +} + +static inline void command_get(struct command *cmd) +{ + kref_get(&cmd->kref); +} + + +struct ibmasm_event { + unsigned int serial_number; + unsigned int data_size; + unsigned char data[IBMASM_EVENT_MAX_SIZE]; +}; + +struct event_buffer { + struct ibmasm_event events[IBMASM_NUM_EVENTS]; + unsigned int next_serial_number; + unsigned int next_index; + struct list_head readers; +}; + +struct event_reader { + int cancelled; + unsigned int next_serial_number; + wait_queue_head_t wait; + struct list_head node; + unsigned int data_size; + unsigned char data[IBMASM_EVENT_MAX_SIZE]; +}; + +struct reverse_heartbeat { + wait_queue_head_t wait; + unsigned int stopped; +}; + +struct ibmasm_remote { + struct input_dev *keybd_dev; + struct input_dev *mouse_dev; +}; + +struct service_processor { + struct list_head node; + spinlock_t lock; + void __iomem *base_address; + unsigned int irq; + struct command *current_command; + struct command *heartbeat; + struct list_head command_queue; + struct event_buffer *event_buffer; + char dirname[IBMASM_NAME_SIZE]; + char devname[IBMASM_NAME_SIZE]; + unsigned int number; + struct ibmasm_remote remote; + int serial_line; + struct device *dev; +}; + +/* command processing */ +struct command *ibmasm_new_command(struct service_processor *sp, size_t buffer_size); +void ibmasm_exec_command(struct service_processor *sp, 
struct command *cmd); +void ibmasm_wait_for_response(struct command *cmd, int timeout); +void ibmasm_receive_command_response(struct service_processor *sp, void *response, size_t size); + +/* event processing */ +int ibmasm_event_buffer_init(struct service_processor *sp); +void ibmasm_event_buffer_exit(struct service_processor *sp); +void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int data_size); +void ibmasm_event_reader_register(struct service_processor *sp, struct event_reader *reader); +void ibmasm_event_reader_unregister(struct service_processor *sp, struct event_reader *reader); +int ibmasm_get_next_event(struct service_processor *sp, struct event_reader *reader); +void ibmasm_cancel_next_event(struct event_reader *reader); + +/* heartbeat - from SP to OS */ +void ibmasm_register_panic_notifier(void); +void ibmasm_unregister_panic_notifier(void); +int ibmasm_heartbeat_init(struct service_processor *sp); +void ibmasm_heartbeat_exit(struct service_processor *sp); +void ibmasm_receive_heartbeat(struct service_processor *sp, void *message, size_t size); + +/* reverse heartbeat - from OS to SP */ +void ibmasm_init_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb); +int ibmasm_start_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb); +void ibmasm_stop_reverse_heartbeat(struct reverse_heartbeat *rhb); + +/* dot commands */ +void ibmasm_receive_message(struct service_processor *sp, void *data, int data_size); +int ibmasm_send_driver_vpd(struct service_processor *sp); +int ibmasm_send_os_state(struct service_processor *sp, int os_state); + +/* low level message processing */ +int ibmasm_send_i2o_message(struct service_processor *sp); +irqreturn_t ibmasm_interrupt_handler(int irq, void * dev_id); + +/* remote console */ +void ibmasm_handle_mouse_interrupt(struct service_processor *sp); +int ibmasm_init_remote_input_dev(struct service_processor *sp); +void ibmasm_free_remote_input_dev(struct service_processor *sp); + +/* file system */ +int ibmasmfs_register(void); +void ibmasmfs_unregister(void); +void ibmasmfs_add_sp(struct service_processor *sp); + +/* uart */ +#ifdef CONFIG_SERIAL_8250 +void ibmasm_register_uart(struct service_processor *sp); +void ibmasm_unregister_uart(struct service_processor *sp); +#else +#define ibmasm_register_uart(sp) do { } while(0) +#define ibmasm_unregister_uart(sp) do { } while(0) +#endif diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c new file mode 100644 index 0000000..22a7e8b --- /dev/null +++ b/drivers/misc/ibmasm/ibmasmfs.c @@ -0,0 +1,637 @@ +/* + * IBM ASM Service Processor Device Driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + * Copyright (C) IBM Corporation, 2004 + * + * Author: Max Asböck <amax@us.ibm.com> + * + */ + +/* + * Parts of this code are based on an article by Jonathan Corbet + * that appeared in Linux Weekly News. + */ + + +/* + * The IBMASM file virtual filesystem. It creates the following hierarchy + * dymamically when mounted from user space: + * + * /ibmasm + * |-- 0 + * | |-- command + * | |-- event + * | |-- reverse_heartbeat + * | `-- remote_video + * | |-- depth + * | |-- height + * | `-- width + * . + * . + * . + * `-- n + * |-- command + * |-- event + * |-- reverse_heartbeat + * `-- remote_video + * |-- depth + * |-- height + * `-- width + * + * For each service processor the following files are created: + * + * command: execute dot commands + * write: execute a dot command on the service processor + * read: return the result of a previously executed dot command + * + * events: listen for service processor events + * read: sleep (interruptible) until an event occurs + * write: wakeup sleeping event listener + * + * reverse_heartbeat: send a heartbeat to the service processor + * read: sleep (interruptible) until the reverse heartbeat fails + * write: wakeup sleeping heartbeat listener + * + * remote_video/width + * remote_video/height + * remote_video/width: control remote display settings + * write: set value + * read: read value + */ + +#include <linux/fs.h> +#include <linux/pagemap.h> +#include <asm/uaccess.h> +#include <asm/io.h> +#include "ibmasm.h" +#include "remote.h" +#include "dot_command.h" + +#define IBMASMFS_MAGIC 0x66726f67 + +static LIST_HEAD(service_processors); + +static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode); +static void ibmasmfs_create_files (struct super_block *sb, struct dentry *root); +static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent); + + +static int ibmasmfs_get_super(struct file_system_type *fst, + int flags, const char *name, void *data, + struct vfsmount *mnt) +{ + return get_sb_single(fst, flags, data, ibmasmfs_fill_super, mnt); +} + +static struct super_operations ibmasmfs_s_ops = { + .statfs = simple_statfs, + .drop_inode = generic_delete_inode, +}; + +static const struct file_operations *ibmasmfs_dir_ops = &simple_dir_operations; + +static struct file_system_type ibmasmfs_type = { + .owner = THIS_MODULE, + .name = "ibmasmfs", + .get_sb = ibmasmfs_get_super, + .kill_sb = kill_litter_super, +}; + +static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent) +{ + struct inode *root; + struct dentry *root_dentry; + + sb->s_blocksize = PAGE_CACHE_SIZE; + sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_magic = IBMASMFS_MAGIC; + sb->s_op = &ibmasmfs_s_ops; + sb->s_time_gran = 1; + + root = ibmasmfs_make_inode (sb, S_IFDIR | 0500); + if (!root) + return -ENOMEM; + + root->i_op = &simple_dir_inode_operations; + root->i_fop = ibmasmfs_dir_ops; + + root_dentry = d_alloc_root(root); + if (!root_dentry) { + iput(root); + return -ENOMEM; + } + sb->s_root = root_dentry; + + ibmasmfs_create_files(sb, root_dentry); + return 0; +} + +static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode) +{ + struct inode *ret = new_inode(sb); + + if (ret) { + ret->i_mode = mode; + ret->i_uid = ret->i_gid = 0; + ret->i_blocks = 0; + ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME; + } + return ret; +} + +static struct dentry *ibmasmfs_create_file (struct super_block *sb, + struct dentry *parent, + const char *name, + const struct file_operations *fops, + void *data, + int mode) +{ 
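
The command file semantics sketched in the hierarchy comment above (write
executes a dot command, read returns its response) make the user-space
round trip short. A sketch, assuming the filesystem is mounted at /ibmasm
with service processor 0 present; the paths and the dot command bytes are
illustrative, not a documented request:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/mount.h>

int main(void)
{
        /* hypothetical dot command: packed 6-byte header (type sp_read,
         * command_size 3, data_size 0) followed by command bytes 4.3.6 */
        unsigned char cmd[9] = { 2, 3, 0, 0, 0, 0, 4, 3, 6 };
        unsigned char resp[32];
        ssize_t n;
        int fd;

        if (mount("none", "/ibmasm", "ibmasmfs", 0, NULL) < 0)
                perror("mount");        /* may already be mounted */

        fd = open("/ibmasm/0/command", O_RDWR);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, cmd, sizeof(cmd)) < 0)    /* executes the command */
                perror("write");
        n = read(fd, resp, sizeof(resp));       /* fetches the response */
        if (n > 0)
                printf("got %zd response bytes\n", n);
        close(fd);
        return 0;
}
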
+ struct dentry *dentry; + struct inode *inode; + + dentry = d_alloc_name(parent, name); + if (!dentry) + return NULL; + + inode = ibmasmfs_make_inode(sb, S_IFREG | mode); + if (!inode) { + dput(dentry); + return NULL; + } + + inode->i_fop = fops; + inode->i_private = data; + + d_add(dentry, inode); + return dentry; +} + +static struct dentry *ibmasmfs_create_dir (struct super_block *sb, + struct dentry *parent, + const char *name) +{ + struct dentry *dentry; + struct inode *inode; + + dentry = d_alloc_name(parent, name); + if (!dentry) + return NULL; + + inode = ibmasmfs_make_inode(sb, S_IFDIR | 0500); + if (!inode) { + dput(dentry); + return NULL; + } + + inode->i_op = &simple_dir_inode_operations; + inode->i_fop = ibmasmfs_dir_ops; + + d_add(dentry, inode); + return dentry; +} + +int ibmasmfs_register(void) +{ + return register_filesystem(&ibmasmfs_type); +} + +void ibmasmfs_unregister(void) +{ + unregister_filesystem(&ibmasmfs_type); +} + +void ibmasmfs_add_sp(struct service_processor *sp) +{ + list_add(&sp->node, &service_processors); +} + +/* struct to save state between command file operations */ +struct ibmasmfs_command_data { + struct service_processor *sp; + struct command *command; +}; + +/* struct to save state between event file operations */ +struct ibmasmfs_event_data { + struct service_processor *sp; + struct event_reader reader; + int active; +}; + +/* struct to save state between reverse heartbeat file operations */ +struct ibmasmfs_heartbeat_data { + struct service_processor *sp; + struct reverse_heartbeat heartbeat; + int active; +}; + +static int command_file_open(struct inode *inode, struct file *file) +{ + struct ibmasmfs_command_data *command_data; + + if (!inode->i_private) + return -ENODEV; + + command_data = kmalloc(sizeof(struct ibmasmfs_command_data), GFP_KERNEL); + if (!command_data) + return -ENOMEM; + + command_data->command = NULL; + command_data->sp = inode->i_private; + file->private_data = command_data; + return 0; +} + +static int command_file_close(struct inode *inode, struct file *file) +{ + struct ibmasmfs_command_data *command_data = file->private_data; + + if (command_data->command) + command_put(command_data->command); + + kfree(command_data); + return 0; +} + +static ssize_t command_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset) +{ + struct ibmasmfs_command_data *command_data = file->private_data; + struct command *cmd; + int len; + unsigned long flags; + + if (*offset < 0) + return -EINVAL; + if (count == 0 || count > IBMASM_CMD_MAX_BUFFER_SIZE) + return 0; + if (*offset != 0) + return 0; + + spin_lock_irqsave(&command_data->sp->lock, flags); + cmd = command_data->command; + if (cmd == NULL) { + spin_unlock_irqrestore(&command_data->sp->lock, flags); + return 0; + } + command_data->command = NULL; + spin_unlock_irqrestore(&command_data->sp->lock, flags); + + if (cmd->status != IBMASM_CMD_COMPLETE) { + command_put(cmd); + return -EIO; + } + len = min(count, cmd->buffer_size); + if (copy_to_user(buf, cmd->buffer, len)) { + command_put(cmd); + return -EFAULT; + } + command_put(cmd); + + return len; +} + +static ssize_t command_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset) +{ + struct ibmasmfs_command_data *command_data = file->private_data; + struct command *cmd; + unsigned long flags; + + if (*offset < 0) + return -EINVAL; + if (count == 0 || count > IBMASM_CMD_MAX_BUFFER_SIZE) + return 0; + if (*offset != 0) + return 0; + + /* commands are executed sequentially, only one 
command at a time */ + if (command_data->command) + return -EAGAIN; + + cmd = ibmasm_new_command(command_data->sp, count); + if (!cmd) + return -ENOMEM; + + if (copy_from_user(cmd->buffer, ubuff, count)) { + command_put(cmd); + return -EFAULT; + } + + spin_lock_irqsave(&command_data->sp->lock, flags); + if (command_data->command) { + spin_unlock_irqrestore(&command_data->sp->lock, flags); + command_put(cmd); + return -EAGAIN; + } + command_data->command = cmd; + spin_unlock_irqrestore(&command_data->sp->lock, flags); + + ibmasm_exec_command(command_data->sp, cmd); + ibmasm_wait_for_response(cmd, get_dot_command_timeout(cmd->buffer)); + + return count; +} + +static int event_file_open(struct inode *inode, struct file *file) +{ + struct ibmasmfs_event_data *event_data; + struct service_processor *sp; + + if (!inode->i_private) + return -ENODEV; + + sp = inode->i_private; + + event_data = kmalloc(sizeof(struct ibmasmfs_event_data), GFP_KERNEL); + if (!event_data) + return -ENOMEM; + + ibmasm_event_reader_register(sp, &event_data->reader); + + event_data->sp = sp; + event_data->active = 0; + file->private_data = event_data; + return 0; +} + +static int event_file_close(struct inode *inode, struct file *file) +{ + struct ibmasmfs_event_data *event_data = file->private_data; + + ibmasm_event_reader_unregister(event_data->sp, &event_data->reader); + kfree(event_data); + return 0; +} + +static ssize_t event_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset) +{ + struct ibmasmfs_event_data *event_data = file->private_data; + struct event_reader *reader = &event_data->reader; + struct service_processor *sp = event_data->sp; + int ret; + unsigned long flags; + + if (*offset < 0) + return -EINVAL; + if (count == 0 || count > IBMASM_EVENT_MAX_SIZE) + return 0; + if (*offset != 0) + return 0; + + spin_lock_irqsave(&sp->lock, flags); + if (event_data->active) { + spin_unlock_irqrestore(&sp->lock, flags); + return -EBUSY; + } + event_data->active = 1; + spin_unlock_irqrestore(&sp->lock, flags); + + ret = ibmasm_get_next_event(sp, reader); + if (ret <= 0) + goto out; + + if (count < reader->data_size) { + ret = -EINVAL; + goto out; + } + + if (copy_to_user(buf, reader->data, reader->data_size)) { + ret = -EFAULT; + goto out; + } + ret = reader->data_size; + +out: + event_data->active = 0; + return ret; +} + +static ssize_t event_file_write(struct file *file, const char __user *buf, size_t count, loff_t *offset) +{ + struct ibmasmfs_event_data *event_data = file->private_data; + + if (*offset < 0) + return -EINVAL; + if (count != 1) + return 0; + if (*offset != 0) + return 0; + + ibmasm_cancel_next_event(&event_data->reader); + return 0; +} + +static int r_heartbeat_file_open(struct inode *inode, struct file *file) +{ + struct ibmasmfs_heartbeat_data *rhbeat; + + if (!inode->i_private) + return -ENODEV; + + rhbeat = kmalloc(sizeof(struct ibmasmfs_heartbeat_data), GFP_KERNEL); + if (!rhbeat) + return -ENOMEM; + + rhbeat->sp = inode->i_private; + rhbeat->active = 0; + ibmasm_init_reverse_heartbeat(rhbeat->sp, &rhbeat->heartbeat); + file->private_data = rhbeat; + return 0; +} + +static int r_heartbeat_file_close(struct inode *inode, struct file *file) +{ + struct ibmasmfs_heartbeat_data *rhbeat = file->private_data; + + kfree(rhbeat); + return 0; +} + +static ssize_t r_heartbeat_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset) +{ + struct ibmasmfs_heartbeat_data *rhbeat = file->private_data; + unsigned long flags; + int result; + + if (*offset < 0) + return 
-EINVAL; + if (count == 0 || count > 1024) + return 0; + if (*offset != 0) + return 0; + + /* allow only one reverse heartbeat per process */ + spin_lock_irqsave(&rhbeat->sp->lock, flags); + if (rhbeat->active) { + spin_unlock_irqrestore(&rhbeat->sp->lock, flags); + return -EBUSY; + } + rhbeat->active = 1; + spin_unlock_irqrestore(&rhbeat->sp->lock, flags); + + result = ibmasm_start_reverse_heartbeat(rhbeat->sp, &rhbeat->heartbeat); + rhbeat->active = 0; + + return result; +} + +static ssize_t r_heartbeat_file_write(struct file *file, const char __user *buf, size_t count, loff_t *offset) +{ + struct ibmasmfs_heartbeat_data *rhbeat = file->private_data; + + if (*offset < 0) + return -EINVAL; + if (count != 1) + return 0; + if (*offset != 0) + return 0; + + if (rhbeat->active) + ibmasm_stop_reverse_heartbeat(&rhbeat->heartbeat); + + return 1; +} + +static int remote_settings_file_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static int remote_settings_file_close(struct inode *inode, struct file *file) +{ + return 0; +} + +static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset) +{ + void __iomem *address = (void __iomem *)file->private_data; + unsigned char *page; + int retval; + int len = 0; + unsigned int value; + + if (*offset < 0) + return -EINVAL; + if (count == 0 || count > 1024) + return 0; + if (*offset != 0) + return 0; + + page = (unsigned char *)__get_free_page(GFP_KERNEL); + if (!page) + return -ENOMEM; + + value = readl(address); + len = sprintf(page, "%d\n", value); + + if (copy_to_user(buf, page, len)) { + retval = -EFAULT; + goto exit; + } + *offset += len; + retval = len; + +exit: + free_page((unsigned long)page); + return retval; +} + +static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset) +{ + void __iomem *address = (void __iomem *)file->private_data; + char *buff; + unsigned int value; + + if (*offset < 0) + return -EINVAL; + if (count == 0 || count > 1024) + return 0; + if (*offset != 0) + return 0; + + buff = kzalloc (count + 1, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + + if (copy_from_user(buff, ubuff, count)) { + kfree(buff); + return -EFAULT; + } + + value = simple_strtoul(buff, NULL, 10); + writel(value, address); + kfree(buff); + + return count; +} + +static const struct file_operations command_fops = { + .open = command_file_open, + .release = command_file_close, + .read = command_file_read, + .write = command_file_write, +}; + +static const struct file_operations event_fops = { + .open = event_file_open, + .release = event_file_close, + .read = event_file_read, + .write = event_file_write, +}; + +static const struct file_operations r_heartbeat_fops = { + .open = r_heartbeat_file_open, + .release = r_heartbeat_file_close, + .read = r_heartbeat_file_read, + .write = r_heartbeat_file_write, +}; + +static const struct file_operations remote_settings_fops = { + .open = remote_settings_file_open, + .release = remote_settings_file_close, + .read = remote_settings_file_read, + .write = remote_settings_file_write, +}; + + +static void ibmasmfs_create_files (struct super_block *sb, struct dentry *root) +{ + struct list_head *entry; + struct service_processor *sp; + + list_for_each(entry, &service_processors) { + struct dentry *dir; + struct dentry *remote_dir; + sp = list_entry(entry, struct service_processor, node); + dir = ibmasmfs_create_dir(sb, root, sp->dirname); + if (!dir) + continue; + + 
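+		/*
+		 * Each service processor directory gets its three control
+		 * files; remote_video gets one settings file per display
+		 * parameter, each handed the register address it controls.
+		 */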
ibmasmfs_create_file(sb, dir, "command", &command_fops, sp, S_IRUSR|S_IWUSR); + ibmasmfs_create_file(sb, dir, "event", &event_fops, sp, S_IRUSR|S_IWUSR); + ibmasmfs_create_file(sb, dir, "reverse_heartbeat", &r_heartbeat_fops, sp, S_IRUSR|S_IWUSR); + + remote_dir = ibmasmfs_create_dir(sb, dir, "remote_video"); + if (!remote_dir) + continue; + + ibmasmfs_create_file(sb, remote_dir, "width", &remote_settings_fops, (void *)display_width(sp), S_IRUSR|S_IWUSR); + ibmasmfs_create_file(sb, remote_dir, "height", &remote_settings_fops, (void *)display_height(sp), S_IRUSR|S_IWUSR); + ibmasmfs_create_file(sb, remote_dir, "depth", &remote_settings_fops, (void *)display_depth(sp), S_IRUSR|S_IWUSR); + } +} diff --git a/drivers/misc/ibmasm/lowlevel.c b/drivers/misc/ibmasm/lowlevel.c new file mode 100644 index 0000000..4b2398e --- /dev/null +++ b/drivers/misc/ibmasm/lowlevel.c @@ -0,0 +1,85 @@ +/* + * IBM ASM Service Processor Device Driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2004 + * + * Author: Max Asböck <amax@us.ibm.com> + * + */ + +#include "ibmasm.h" +#include "lowlevel.h" +#include "i2o.h" +#include "dot_command.h" +#include "remote.h" + +static struct i2o_header header = I2O_HEADER_TEMPLATE; + + +int ibmasm_send_i2o_message(struct service_processor *sp) +{ + u32 mfa; + unsigned int command_size; + struct i2o_message *message; + struct command *command = sp->current_command; + + mfa = get_mfa_inbound(sp->base_address); + if (!mfa) + return 1; + + command_size = get_dot_command_size(command->buffer); + header.message_size = outgoing_message_size(command_size); + + message = get_i2o_message(sp->base_address, mfa); + + memcpy_toio(&message->header, &header, sizeof(struct i2o_header)); + memcpy_toio(&message->data, command->buffer, command_size); + + set_mfa_inbound(sp->base_address, mfa); + + return 0; +} + +irqreturn_t ibmasm_interrupt_handler(int irq, void * dev_id) +{ + u32 mfa; + struct service_processor *sp = (struct service_processor *)dev_id; + void __iomem *base_address = sp->base_address; + char tsbuf[32]; + + if (!sp_interrupt_pending(base_address)) + return IRQ_NONE; + + dbg("respond to interrupt at %s\n", get_timestamp(tsbuf)); + + if (mouse_interrupt_pending(sp)) { + ibmasm_handle_mouse_interrupt(sp); + clear_mouse_interrupt(sp); + } + + mfa = get_mfa_outbound(base_address); + if (valid_mfa(mfa)) { + struct i2o_message *msg = get_i2o_message(base_address, mfa); + ibmasm_receive_message(sp, &msg->data, incoming_data_size(msg)); + } else + dbg("didn't get a valid MFA\n"); + + set_mfa_outbound(base_address, mfa); + dbg("finished interrupt at %s\n", get_timestamp(tsbuf)); + + return IRQ_HANDLED; +} diff --git a/drivers/misc/ibmasm/lowlevel.h b/drivers/misc/ibmasm/lowlevel.h new file mode 100644 index 0000000..7667665 --- /dev/null +++ b/drivers/misc/ibmasm/lowlevel.h @@ -0,0 
+1,137 @@ +/* + * IBM ASM Service Processor Device Driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2004 + * + * Author: Max Asböck <amax@us.ibm.com> + * + */ + +/* Condor service processor specific hardware definitions */ + +#ifndef __IBMASM_CONDOR_H__ +#define __IBMASM_CONDOR_H__ + +#include <asm/io.h> + +#define VENDORID_IBM 0x1014 +#define DEVICEID_RSA 0x010F + +#define GET_MFA_ADDR(x) (x & 0xFFFFFF00) + +#define MAILBOX_FULL(x) (x & 0x00000001) + +#define NO_MFAS_AVAILABLE 0xFFFFFFFF + + +#define INBOUND_QUEUE_PORT 0x40 /* contains address of next free MFA */ +#define OUTBOUND_QUEUE_PORT 0x44 /* contains address of posted MFA */ + +#define SP_INTR_MASK 0x00000008 +#define UART_INTR_MASK 0x00000010 + +#define INTR_STATUS_REGISTER 0x13A0 +#define INTR_CONTROL_REGISTER 0x13A4 + +#define SCOUT_COM_A_BASE 0x0000 +#define SCOUT_COM_B_BASE 0x0100 +#define SCOUT_COM_C_BASE 0x0200 +#define SCOUT_COM_D_BASE 0x0300 + +static inline int sp_interrupt_pending(void __iomem *base_address) +{ + return SP_INTR_MASK & readl(base_address + INTR_STATUS_REGISTER); +} + +static inline int uart_interrupt_pending(void __iomem *base_address) +{ + return UART_INTR_MASK & readl(base_address + INTR_STATUS_REGISTER); +} + +static inline void ibmasm_enable_interrupts(void __iomem *base_address, int mask) +{ + void __iomem *ctrl_reg = base_address + INTR_CONTROL_REGISTER; + writel( readl(ctrl_reg) & ~mask, ctrl_reg); +} + +static inline void ibmasm_disable_interrupts(void __iomem *base_address, int mask) +{ + void __iomem *ctrl_reg = base_address + INTR_CONTROL_REGISTER; + writel( readl(ctrl_reg) | mask, ctrl_reg); +} + +static inline void enable_sp_interrupts(void __iomem *base_address) +{ + ibmasm_enable_interrupts(base_address, SP_INTR_MASK); +} + +static inline void disable_sp_interrupts(void __iomem *base_address) +{ + ibmasm_disable_interrupts(base_address, SP_INTR_MASK); +} + +static inline void enable_uart_interrupts(void __iomem *base_address) +{ + ibmasm_enable_interrupts(base_address, UART_INTR_MASK); +} + +static inline void disable_uart_interrupts(void __iomem *base_address) +{ + ibmasm_disable_interrupts(base_address, UART_INTR_MASK); +} + +#define valid_mfa(mfa) ( (mfa) != NO_MFAS_AVAILABLE ) + +static inline u32 get_mfa_outbound(void __iomem *base_address) +{ + int retry; + u32 mfa; + + for (retry=0; retry<=10; retry++) { + mfa = readl(base_address + OUTBOUND_QUEUE_PORT); + if (valid_mfa(mfa)) + break; + } + return mfa; +} + +static inline void set_mfa_outbound(void __iomem *base_address, u32 mfa) +{ + writel(mfa, base_address + OUTBOUND_QUEUE_PORT); +} + +static inline u32 get_mfa_inbound(void __iomem *base_address) +{ + u32 mfa = readl(base_address + INBOUND_QUEUE_PORT); + + if (MAILBOX_FULL(mfa)) + return 0; + + return mfa; +} + +static inline void set_mfa_inbound(void __iomem 
*base_address, u32 mfa)
+{
+	writel(mfa, base_address + INBOUND_QUEUE_PORT);
+}
+
+static inline struct i2o_message *get_i2o_message(void __iomem *base_address, u32 mfa)
+{
+	return (struct i2o_message *)(GET_MFA_ADDR(mfa) + base_address);
+}
+
+#endif /* __IBMASM_CONDOR_H__ */
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
new file mode 100644
index 0000000..b5f6add
--- /dev/null
+++ b/drivers/misc/ibmasm/module.c
@@ -0,0 +1,238 @@
+
+/*
+ * IBM ASM Service Processor Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ *
+ * This driver is based on code originally written by Pete Reynolds
+ * and others.
+ *
+ */
+
+/*
+ * The ASM device driver does the following things:
+ *
+ * 1) When loaded it sends a message to the service processor,
+ * indicating that an OS is running. This causes the service processor
+ * to send periodic heartbeats to the OS.
+ *
+ * 2) Answers the periodic heartbeats sent by the service processor.
+ * Failure to do so would result in system reboot.
+ *
+ * 3) Acts as a pass through for dot commands sent from user applications.
+ * The interface for this is the ibmasmfs file system.
+ *
+ * 4) Allows user applications to register for event notification. Events
+ * are sent to the driver through interrupts. They can be read from user
+ * space through the ibmasmfs file system.
+ *
+ * 5) Allows user space applications to send heartbeats to the service
+ * processor (aka reverse heartbeats). Again this happens through ibmasmfs.
+ *
+ * 6) Handles remote mouse and keyboard event interrupts and makes them
+ * available to user applications through ibmasmfs.
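+ *
+ * The ibmasmfs mount point is not fixed by the driver; as an
+ * illustration (not a mandated path), user space might run
+ *
+ *	mount -t ibmasmfs none /tmp/ibmasm
+ *
+ * and then issue dot commands through /tmp/ibmasm/0/command.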
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/init.h>
+#include "ibmasm.h"
+#include "lowlevel.h"
+#include "remote.h"
+
+int ibmasm_debug = 0;
+module_param(ibmasm_debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ibmasm_debug, "Set debug mode on or off");
+
+
+static int __devinit ibmasm_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	int result;
+	struct service_processor *sp;
+
+	if ((result = pci_enable_device(pdev))) {
+		dev_err(&pdev->dev, "Failed to enable PCI device\n");
+		return result;
+	}
+	if ((result = pci_request_regions(pdev, DRIVER_NAME))) {
+		dev_err(&pdev->dev, "Failed to allocate PCI resources\n");
+		goto error_resources;
+	}
+	/* vnc client won't work without bus-mastering */
+	pci_set_master(pdev);
+
+	sp = kzalloc(sizeof(struct service_processor), GFP_KERNEL);
+	if (sp == NULL) {
+		dev_err(&pdev->dev, "Failed to allocate memory\n");
+		result = -ENOMEM;
+		goto error_kmalloc;
+	}
+
+	spin_lock_init(&sp->lock);
+	INIT_LIST_HEAD(&sp->command_queue);
+
+	pci_set_drvdata(pdev, (void *)sp);
+	sp->dev = &pdev->dev;
+	sp->number = pdev->bus->number;
+	snprintf(sp->dirname, IBMASM_NAME_SIZE, "%d", sp->number);
+	snprintf(sp->devname, IBMASM_NAME_SIZE, "%s%d", DRIVER_NAME, sp->number);
+
+	if (ibmasm_event_buffer_init(sp)) {
+		dev_err(sp->dev, "Failed to allocate event buffer\n");
+		goto error_eventbuffer;
+	}
+
+	if (ibmasm_heartbeat_init(sp)) {
+		dev_err(sp->dev, "Failed to allocate heartbeat command\n");
+		goto error_heartbeat;
+	}
+
+	sp->irq = pdev->irq;
+	sp->base_address = ioremap(pci_resource_start(pdev, 0),
+					pci_resource_len(pdev, 0));
+	if (!sp->base_address) {
+		dev_err(sp->dev, "Failed to ioremap pci memory\n");
+		result = -ENODEV;
+		goto error_ioremap;
+	}
+
+	result = request_irq(sp->irq, ibmasm_interrupt_handler, IRQF_SHARED, sp->devname, (void*)sp);
+	if (result) {
+		dev_err(sp->dev, "Failed to register interrupt handler\n");
+		goto error_request_irq;
+	}
+
+	enable_sp_interrupts(sp->base_address);
+
+	result = ibmasm_init_remote_input_dev(sp);
+	if (result) {
+		dev_err(sp->dev, "Failed to initialize remote queue\n");
+		goto error_send_message;
+	}
+
+	result = ibmasm_send_driver_vpd(sp);
+	if (result) {
+		dev_err(sp->dev, "Failed to send driver VPD to service processor\n");
+		goto error_send_message;
+	}
+	result = ibmasm_send_os_state(sp, SYSTEM_STATE_OS_UP);
+	if (result) {
+		dev_err(sp->dev, "Failed to send OS state to service processor\n");
+		goto error_send_message;
+	}
+	ibmasmfs_add_sp(sp);
+
+	ibmasm_register_uart(sp);
+
+	return 0;
+
+error_send_message:
+	disable_sp_interrupts(sp->base_address);
+	ibmasm_free_remote_input_dev(sp);
+	free_irq(sp->irq, (void *)sp);
+error_request_irq:
+	iounmap(sp->base_address);
+error_ioremap:
+	ibmasm_heartbeat_exit(sp);
+error_heartbeat:
+	ibmasm_event_buffer_exit(sp);
+error_eventbuffer:
+	pci_set_drvdata(pdev, NULL);
+	kfree(sp);
+error_kmalloc:
+	pci_release_regions(pdev);
+error_resources:
+	pci_disable_device(pdev);
+
+	return result;
+}
+
+static void __devexit ibmasm_remove_one(struct pci_dev *pdev)
+{
+	struct service_processor *sp = (struct service_processor *)pci_get_drvdata(pdev);
+
+	dbg("Unregistering UART\n");
+	ibmasm_unregister_uart(sp);
+	dbg("Sending OS down message\n");
+	if (ibmasm_send_os_state(sp, SYSTEM_STATE_OS_DOWN))
+		err("failed to get response to 'Send OS State' command\n");
+	dbg("Disabling heartbeats\n");
+	ibmasm_heartbeat_exit(sp);
+	dbg("Disabling interrupts\n");
+	disable_sp_interrupts(sp->base_address);
+	dbg("Freeing SP irq\n");
+	free_irq(sp->irq,
(void *)sp); + dbg("Cleaning up\n"); + ibmasm_free_remote_input_dev(sp); + iounmap(sp->base_address); + ibmasm_event_buffer_exit(sp); + pci_set_drvdata(pdev, NULL); + kfree(sp); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static struct pci_device_id ibmasm_pci_table[] = +{ + { PCI_DEVICE(VENDORID_IBM, DEVICEID_RSA) }, + {}, +}; + +static struct pci_driver ibmasm_driver = { + .name = DRIVER_NAME, + .id_table = ibmasm_pci_table, + .probe = ibmasm_init_one, + .remove = __devexit_p(ibmasm_remove_one), +}; + +static void __exit ibmasm_exit (void) +{ + ibmasm_unregister_panic_notifier(); + ibmasmfs_unregister(); + pci_unregister_driver(&ibmasm_driver); + info(DRIVER_DESC " version " DRIVER_VERSION " unloaded"); +} + +static int __init ibmasm_init(void) +{ + int result; + + result = ibmasmfs_register(); + if (result) { + err("Failed to register ibmasmfs file system"); + return result; + } + result = pci_register_driver(&ibmasm_driver); + if (result) { + ibmasmfs_unregister(); + return result; + } + ibmasm_register_panic_notifier(); + info(DRIVER_DESC " version " DRIVER_VERSION " loaded"); + return 0; +} + +module_init(ibmasm_init); +module_exit(ibmasm_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, ibmasm_pci_table); + diff --git a/drivers/misc/ibmasm/r_heartbeat.c b/drivers/misc/ibmasm/r_heartbeat.c new file mode 100644 index 0000000..bec9e2c --- /dev/null +++ b/drivers/misc/ibmasm/r_heartbeat.c @@ -0,0 +1,98 @@ + +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2004 + * + * Author: Max Asböck <amax@us.ibm.com> + * + */ + +#include "ibmasm.h" +#include "dot_command.h" + +/* + * Reverse Heartbeat, i.e. heartbeats sent from the driver to the + * service processor. + * These heartbeats are initiated by user level programs. + */ + +/* the reverse heartbeat dot command */ +#pragma pack(1) +static struct { + struct dot_command_header header; + unsigned char command[3]; +} rhb_dot_cmd = { + .header = { + .type = sp_read, + .command_size = 3, + .data_size = 0, + .status = 0 + }, + .command = { 4, 3, 6 } +}; +#pragma pack() + +void ibmasm_init_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb) +{ + init_waitqueue_head(&rhb->wait); + rhb->stopped = 0; +} + +/** + * start_reverse_heartbeat + * Loop forever, sending a reverse heartbeat dot command to the service + * processor, then sleeping. The loop comes to an end if the service + * processor fails to respond 3 times or we were interrupted. 
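+ *
+ * Returns -ENOMEM if the command buffer cannot be allocated, -EINTR
+ * when stopped by a signal or by ibmasm_stop_reverse_heartbeat(), and
+ * 1 once the service processor has failed to respond 3 times.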
+ */ +int ibmasm_start_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb) +{ + struct command *cmd; + int times_failed = 0; + int result = 1; + + cmd = ibmasm_new_command(sp, sizeof rhb_dot_cmd); + if (!cmd) + return -ENOMEM; + + while (times_failed < 3) { + memcpy(cmd->buffer, (void *)&rhb_dot_cmd, sizeof rhb_dot_cmd); + cmd->status = IBMASM_CMD_PENDING; + ibmasm_exec_command(sp, cmd); + ibmasm_wait_for_response(cmd, IBMASM_CMD_TIMEOUT_NORMAL); + + if (cmd->status != IBMASM_CMD_COMPLETE) + times_failed++; + + wait_event_interruptible_timeout(rhb->wait, + rhb->stopped, + REVERSE_HEARTBEAT_TIMEOUT * HZ); + + if (signal_pending(current) || rhb->stopped) { + result = -EINTR; + break; + } + } + command_put(cmd); + rhb->stopped = 0; + + return result; +} + +void ibmasm_stop_reverse_heartbeat(struct reverse_heartbeat *rhb) +{ + rhb->stopped = 1; + wake_up_interruptible(&rhb->wait); +} diff --git a/drivers/misc/ibmasm/remote.c b/drivers/misc/ibmasm/remote.c new file mode 100644 index 0000000..477bb43 --- /dev/null +++ b/drivers/misc/ibmasm/remote.c @@ -0,0 +1,282 @@ +/* + * IBM ASM Service Processor Device Driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + * Copyright (C) IBM Corporation, 2004 + * + * Authors: Max Asböck <amax@us.ibm.com> + * Vernon Mauery <vernux@us.ibm.com> + * + */ + +/* Remote mouse and keyboard event handling functions */ + +#include <linux/pci.h> +#include "ibmasm.h" +#include "remote.h" + +#define MOUSE_X_MAX 1600 +#define MOUSE_Y_MAX 1200 + +static const unsigned short xlate_high[XLATE_SIZE] = { + [KEY_SYM_ENTER & 0xff] = KEY_ENTER, + [KEY_SYM_KPSLASH & 0xff] = KEY_KPSLASH, + [KEY_SYM_KPSTAR & 0xff] = KEY_KPASTERISK, + [KEY_SYM_KPMINUS & 0xff] = KEY_KPMINUS, + [KEY_SYM_KPDOT & 0xff] = KEY_KPDOT, + [KEY_SYM_KPPLUS & 0xff] = KEY_KPPLUS, + [KEY_SYM_KP0 & 0xff] = KEY_KP0, + [KEY_SYM_KP1 & 0xff] = KEY_KP1, + [KEY_SYM_KP2 & 0xff] = KEY_KP2, [KEY_SYM_KPDOWN & 0xff] = KEY_KP2, + [KEY_SYM_KP3 & 0xff] = KEY_KP3, + [KEY_SYM_KP4 & 0xff] = KEY_KP4, [KEY_SYM_KPLEFT & 0xff] = KEY_KP4, + [KEY_SYM_KP5 & 0xff] = KEY_KP5, + [KEY_SYM_KP6 & 0xff] = KEY_KP6, [KEY_SYM_KPRIGHT & 0xff] = KEY_KP6, + [KEY_SYM_KP7 & 0xff] = KEY_KP7, + [KEY_SYM_KP8 & 0xff] = KEY_KP8, [KEY_SYM_KPUP & 0xff] = KEY_KP8, + [KEY_SYM_KP9 & 0xff] = KEY_KP9, + [KEY_SYM_BK_SPC & 0xff] = KEY_BACKSPACE, + [KEY_SYM_TAB & 0xff] = KEY_TAB, + [KEY_SYM_CTRL & 0xff] = KEY_LEFTCTRL, + [KEY_SYM_ALT & 0xff] = KEY_LEFTALT, + [KEY_SYM_INSERT & 0xff] = KEY_INSERT, + [KEY_SYM_DELETE & 0xff] = KEY_DELETE, + [KEY_SYM_SHIFT & 0xff] = KEY_LEFTSHIFT, + [KEY_SYM_UARROW & 0xff] = KEY_UP, + [KEY_SYM_DARROW & 0xff] = KEY_DOWN, + [KEY_SYM_LARROW & 0xff] = KEY_LEFT, + [KEY_SYM_RARROW & 0xff] = KEY_RIGHT, + [KEY_SYM_ESCAPE & 0xff] = KEY_ESC, + [KEY_SYM_PAGEUP & 0xff] = KEY_PAGEUP, + [KEY_SYM_PAGEDOWN & 0xff] = KEY_PAGEDOWN, + [KEY_SYM_HOME & 0xff] = KEY_HOME, + [KEY_SYM_END & 0xff] = KEY_END, + [KEY_SYM_F1 & 0xff] = KEY_F1, + [KEY_SYM_F2 & 0xff] = KEY_F2, + [KEY_SYM_F3 & 0xff] = KEY_F3, + [KEY_SYM_F4 & 0xff] = KEY_F4, + [KEY_SYM_F5 & 0xff] = KEY_F5, + [KEY_SYM_F6 & 0xff] = KEY_F6, + [KEY_SYM_F7 & 0xff] = KEY_F7, + [KEY_SYM_F8 & 0xff] = KEY_F8, + [KEY_SYM_F9 & 0xff] = KEY_F9, + [KEY_SYM_F10 & 0xff] = KEY_F10, + [KEY_SYM_F11 & 0xff] = KEY_F11, + [KEY_SYM_F12 & 0xff] = KEY_F12, + [KEY_SYM_CAP_LOCK & 0xff] = KEY_CAPSLOCK, + [KEY_SYM_NUM_LOCK & 0xff] = KEY_NUMLOCK, + [KEY_SYM_SCR_LOCK & 0xff] = KEY_SCROLLLOCK, +}; + +static const unsigned short xlate[XLATE_SIZE] = { + [NO_KEYCODE] = KEY_RESERVED, + [KEY_SYM_SPACE] = KEY_SPACE, + [KEY_SYM_TILDE] = KEY_GRAVE, [KEY_SYM_BKTIC] = KEY_GRAVE, + [KEY_SYM_ONE] = KEY_1, [KEY_SYM_BANG] = KEY_1, + [KEY_SYM_TWO] = KEY_2, [KEY_SYM_AT] = KEY_2, + [KEY_SYM_THREE] = KEY_3, [KEY_SYM_POUND] = KEY_3, + [KEY_SYM_FOUR] = KEY_4, [KEY_SYM_DOLLAR] = KEY_4, + [KEY_SYM_FIVE] = KEY_5, [KEY_SYM_PERCENT] = KEY_5, + [KEY_SYM_SIX] = KEY_6, [KEY_SYM_CARAT] = KEY_6, + [KEY_SYM_SEVEN] = KEY_7, [KEY_SYM_AMPER] = KEY_7, + [KEY_SYM_EIGHT] = KEY_8, [KEY_SYM_STAR] = KEY_8, + [KEY_SYM_NINE] = KEY_9, [KEY_SYM_LPAREN] = KEY_9, + [KEY_SYM_ZERO] = KEY_0, [KEY_SYM_RPAREN] = KEY_0, + [KEY_SYM_MINUS] = KEY_MINUS, [KEY_SYM_USCORE] = KEY_MINUS, + [KEY_SYM_EQUAL] = KEY_EQUAL, [KEY_SYM_PLUS] = KEY_EQUAL, + [KEY_SYM_LBRKT] = KEY_LEFTBRACE, [KEY_SYM_LCURLY] = KEY_LEFTBRACE, + [KEY_SYM_RBRKT] = KEY_RIGHTBRACE, [KEY_SYM_RCURLY] = KEY_RIGHTBRACE, + [KEY_SYM_SLASH] = KEY_BACKSLASH, [KEY_SYM_PIPE] = KEY_BACKSLASH, + [KEY_SYM_TIC] = KEY_APOSTROPHE, [KEY_SYM_QUOTE] = KEY_APOSTROPHE, + [KEY_SYM_SEMIC] = KEY_SEMICOLON, [KEY_SYM_COLON] = KEY_SEMICOLON, + [KEY_SYM_COMMA] = KEY_COMMA, [KEY_SYM_LT] = KEY_COMMA, + [KEY_SYM_PERIOD] = KEY_DOT, [KEY_SYM_GT] = KEY_DOT, + [KEY_SYM_BSLASH] = KEY_SLASH, [KEY_SYM_QMARK] = 
KEY_SLASH, + [KEY_SYM_A] = KEY_A, [KEY_SYM_a] = KEY_A, + [KEY_SYM_B] = KEY_B, [KEY_SYM_b] = KEY_B, + [KEY_SYM_C] = KEY_C, [KEY_SYM_c] = KEY_C, + [KEY_SYM_D] = KEY_D, [KEY_SYM_d] = KEY_D, + [KEY_SYM_E] = KEY_E, [KEY_SYM_e] = KEY_E, + [KEY_SYM_F] = KEY_F, [KEY_SYM_f] = KEY_F, + [KEY_SYM_G] = KEY_G, [KEY_SYM_g] = KEY_G, + [KEY_SYM_H] = KEY_H, [KEY_SYM_h] = KEY_H, + [KEY_SYM_I] = KEY_I, [KEY_SYM_i] = KEY_I, + [KEY_SYM_J] = KEY_J, [KEY_SYM_j] = KEY_J, + [KEY_SYM_K] = KEY_K, [KEY_SYM_k] = KEY_K, + [KEY_SYM_L] = KEY_L, [KEY_SYM_l] = KEY_L, + [KEY_SYM_M] = KEY_M, [KEY_SYM_m] = KEY_M, + [KEY_SYM_N] = KEY_N, [KEY_SYM_n] = KEY_N, + [KEY_SYM_O] = KEY_O, [KEY_SYM_o] = KEY_O, + [KEY_SYM_P] = KEY_P, [KEY_SYM_p] = KEY_P, + [KEY_SYM_Q] = KEY_Q, [KEY_SYM_q] = KEY_Q, + [KEY_SYM_R] = KEY_R, [KEY_SYM_r] = KEY_R, + [KEY_SYM_S] = KEY_S, [KEY_SYM_s] = KEY_S, + [KEY_SYM_T] = KEY_T, [KEY_SYM_t] = KEY_T, + [KEY_SYM_U] = KEY_U, [KEY_SYM_u] = KEY_U, + [KEY_SYM_V] = KEY_V, [KEY_SYM_v] = KEY_V, + [KEY_SYM_W] = KEY_W, [KEY_SYM_w] = KEY_W, + [KEY_SYM_X] = KEY_X, [KEY_SYM_x] = KEY_X, + [KEY_SYM_Y] = KEY_Y, [KEY_SYM_y] = KEY_Y, + [KEY_SYM_Z] = KEY_Z, [KEY_SYM_z] = KEY_Z, +}; + +static void print_input(struct remote_input *input) +{ + if (input->type == INPUT_TYPE_MOUSE) { + unsigned char buttons = input->mouse_buttons; + dbg("remote mouse movement: (x,y)=(%d,%d)%s%s%s%s\n", + input->data.mouse.x, input->data.mouse.y, + (buttons) ? " -- buttons:" : "", + (buttons & REMOTE_BUTTON_LEFT) ? "left " : "", + (buttons & REMOTE_BUTTON_MIDDLE) ? "middle " : "", + (buttons & REMOTE_BUTTON_RIGHT) ? "right" : "" + ); + } else { + dbg("remote keypress (code, flag, down):" + "%d (0x%x) [0x%x] [0x%x]\n", + input->data.keyboard.key_code, + input->data.keyboard.key_code, + input->data.keyboard.key_flag, + input->data.keyboard.key_down + ); + } +} + +static void send_mouse_event(struct input_dev *dev, struct remote_input *input) +{ + unsigned char buttons = input->mouse_buttons; + + input_report_abs(dev, ABS_X, input->data.mouse.x); + input_report_abs(dev, ABS_Y, input->data.mouse.y); + input_report_key(dev, BTN_LEFT, buttons & REMOTE_BUTTON_LEFT); + input_report_key(dev, BTN_MIDDLE, buttons & REMOTE_BUTTON_MIDDLE); + input_report_key(dev, BTN_RIGHT, buttons & REMOTE_BUTTON_RIGHT); + input_sync(dev); +} + +static void send_keyboard_event(struct input_dev *dev, + struct remote_input *input) +{ + unsigned int key; + unsigned short code = input->data.keyboard.key_code; + + if (code & 0xff00) + key = xlate_high[code & 0xff]; + else + key = xlate[code]; + input_report_key(dev, key, input->data.keyboard.key_down); + input_sync(dev); +} + +void ibmasm_handle_mouse_interrupt(struct service_processor *sp) +{ + unsigned long reader; + unsigned long writer; + struct remote_input input; + + reader = get_queue_reader(sp); + writer = get_queue_writer(sp); + + while (reader != writer) { + memcpy_fromio(&input, get_queue_entry(sp, reader), + sizeof(struct remote_input)); + + print_input(&input); + if (input.type == INPUT_TYPE_MOUSE) { + send_mouse_event(sp->remote.mouse_dev, &input); + } else if (input.type == INPUT_TYPE_KEYBOARD) { + send_keyboard_event(sp->remote.keybd_dev, &input); + } else + break; + + reader = advance_queue_reader(sp, reader); + writer = get_queue_writer(sp); + } +} + +int ibmasm_init_remote_input_dev(struct service_processor *sp) +{ + /* set up the mouse input device */ + struct input_dev *mouse_dev, *keybd_dev; + struct pci_dev *pdev = to_pci_dev(sp->dev); + int error = -ENOMEM; + int i; + + sp->remote.mouse_dev = mouse_dev = 
input_allocate_device(); + sp->remote.keybd_dev = keybd_dev = input_allocate_device(); + + if (!mouse_dev || !keybd_dev) + goto err_free_devices; + + mouse_dev->id.bustype = BUS_PCI; + mouse_dev->id.vendor = pdev->vendor; + mouse_dev->id.product = pdev->device; + mouse_dev->id.version = 1; + mouse_dev->dev.parent = sp->dev; + mouse_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); + mouse_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_LEFT) | + BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE); + set_bit(BTN_TOUCH, mouse_dev->keybit); + mouse_dev->name = "ibmasm RSA I remote mouse"; + input_set_abs_params(mouse_dev, ABS_X, 0, MOUSE_X_MAX, 0, 0); + input_set_abs_params(mouse_dev, ABS_Y, 0, MOUSE_Y_MAX, 0, 0); + + keybd_dev->id.bustype = BUS_PCI; + keybd_dev->id.vendor = pdev->vendor; + keybd_dev->id.product = pdev->device; + keybd_dev->id.version = 2; + keybd_dev->dev.parent = sp->dev; + keybd_dev->evbit[0] = BIT_MASK(EV_KEY); + keybd_dev->name = "ibmasm RSA I remote keyboard"; + + for (i = 0; i < XLATE_SIZE; i++) { + if (xlate_high[i]) + set_bit(xlate_high[i], keybd_dev->keybit); + if (xlate[i]) + set_bit(xlate[i], keybd_dev->keybit); + } + + error = input_register_device(mouse_dev); + if (error) + goto err_free_devices; + + error = input_register_device(keybd_dev); + if (error) + goto err_unregister_mouse_dev; + + enable_mouse_interrupts(sp); + + printk(KERN_INFO "ibmasm remote responding to events on RSA card %d\n", sp->number); + + return 0; + + err_unregister_mouse_dev: + input_unregister_device(mouse_dev); + mouse_dev = NULL; /* so we don't try to free it again below */ + err_free_devices: + input_free_device(mouse_dev); + input_free_device(keybd_dev); + + return error; +} + +void ibmasm_free_remote_input_dev(struct service_processor *sp) +{ + disable_mouse_interrupts(sp); + input_unregister_device(sp->remote.mouse_dev); + input_unregister_device(sp->remote.keybd_dev); +} + diff --git a/drivers/misc/ibmasm/remote.h b/drivers/misc/ibmasm/remote.h new file mode 100644 index 0000000..72acf5a --- /dev/null +++ b/drivers/misc/ibmasm/remote.h @@ -0,0 +1,270 @@ + +/* + * IBM ASM Service Processor Device Driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ *
+ * Originally written by Pete Reynolds
+ */
+
+#ifndef _IBMASM_REMOTE_H_
+#define _IBMASM_REMOTE_H_
+
+#include <asm/io.h>
+
+/* PCI offsets */
+#define CONDOR_MOUSE_DATA		0x000AC000
+#define CONDOR_MOUSE_ISR_CONTROL	0x00
+#define CONDOR_MOUSE_ISR_STATUS		0x04
+#define CONDOR_MOUSE_Q_READER		0x08
+#define CONDOR_MOUSE_Q_WRITER		0x0C
+#define CONDOR_MOUSE_Q_BEGIN		0x10
+#define CONDOR_MOUSE_MAX_X		0x14
+#define CONDOR_MOUSE_MAX_Y		0x18
+
+#define CONDOR_INPUT_DESKTOP_INFO	0x1F0
+#define CONDOR_INPUT_DISPLAY_RESX	0x1F4
+#define CONDOR_INPUT_DISPLAY_RESY	0x1F8
+#define CONDOR_INPUT_DISPLAY_BITS	0x1FC
+#define CONDOR_OUTPUT_VNC_STATUS	0x200
+
+#define CONDOR_MOUSE_INTR_STATUS_MASK	0x00000001
+
+#define INPUT_TYPE_MOUSE	0x1
+#define INPUT_TYPE_KEYBOARD	0x2
+
+
+/* mouse button states received from SP */
+#define REMOTE_DOUBLE_CLICK	0xF0
+#define REMOTE_BUTTON_LEFT	0x01
+#define REMOTE_BUTTON_MIDDLE	0x02
+#define REMOTE_BUTTON_RIGHT	0x04
+
+/* size of keysym/keycode translation matrices */
+#define XLATE_SIZE 256
+
+struct mouse_input {
+	unsigned short	y;
+	unsigned short	x;
+};
+
+
+struct keyboard_input {
+	unsigned short	key_code;
+	unsigned char	key_flag;
+	unsigned char	key_down;
+};
+
+
+
+struct remote_input {
+	union {
+		struct mouse_input	mouse;
+		struct keyboard_input	keyboard;
+	} data;
+
+	unsigned char	type;
+	unsigned char	pad1;
+	unsigned char	mouse_buttons;
+	unsigned char	pad3;
+};
+
+#define mouse_addr(sp)		(sp->base_address + CONDOR_MOUSE_DATA)
+#define display_width(sp)	(mouse_addr(sp) + CONDOR_INPUT_DISPLAY_RESX)
+#define display_height(sp)	(mouse_addr(sp) + CONDOR_INPUT_DISPLAY_RESY)
+#define display_depth(sp)	(mouse_addr(sp) + CONDOR_INPUT_DISPLAY_BITS)
+#define desktop_info(sp)	(mouse_addr(sp) + CONDOR_INPUT_DESKTOP_INFO)
+#define vnc_status(sp)		(mouse_addr(sp) + CONDOR_OUTPUT_VNC_STATUS)
+#define isr_control(sp)		(mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL)
+
+#define mouse_interrupt_pending(sp)	readl(mouse_addr(sp) + CONDOR_MOUSE_ISR_STATUS)
+#define clear_mouse_interrupt(sp)	writel(0, mouse_addr(sp) + CONDOR_MOUSE_ISR_STATUS)
+#define enable_mouse_interrupts(sp)	writel(1, mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL)
+#define disable_mouse_interrupts(sp)	writel(0, mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL)
+
+/* remote input queue operations */
+#define REMOTE_QUEUE_SIZE	60
+
+#define get_queue_writer(sp)	readl(mouse_addr(sp) + CONDOR_MOUSE_Q_WRITER)
+#define get_queue_reader(sp)	readl(mouse_addr(sp) + CONDOR_MOUSE_Q_READER)
+#define set_queue_reader(sp, reader)	writel(reader, mouse_addr(sp) + CONDOR_MOUSE_Q_READER)
+
+#define queue_begin	(mouse_addr(sp) + CONDOR_MOUSE_Q_BEGIN)
+
+#define get_queue_entry(sp, read_index) \
+	((void*)(queue_begin + read_index * sizeof(struct remote_input)))
+
+static inline int advance_queue_reader(struct service_processor *sp, unsigned long reader)
+{
+	reader++;
+	if (reader == REMOTE_QUEUE_SIZE)
+		reader = 0;
+
+	set_queue_reader(sp, reader);
+	return reader;
+}
+
+#define NO_KEYCODE		0
+#define KEY_SYM_BK_SPC		0xFF08
+#define KEY_SYM_TAB		0xFF09
+#define KEY_SYM_ENTER		0xFF0D
+#define KEY_SYM_SCR_LOCK	0xFF14
+#define KEY_SYM_ESCAPE		0xFF1B
+#define KEY_SYM_HOME		0xFF50
+#define KEY_SYM_LARROW		0xFF51
+#define KEY_SYM_UARROW		0xFF52
+#define KEY_SYM_RARROW		0xFF53
+#define KEY_SYM_DARROW		0xFF54
+#define KEY_SYM_PAGEUP		0xFF55
+#define KEY_SYM_PAGEDOWN	0xFF56
+#define KEY_SYM_END		0xFF57
+#define KEY_SYM_INSERT		0xFF63
+#define KEY_SYM_NUM_LOCK	0xFF7F
+#define
KEY_SYM_KPSTAR 0xFFAA +#define KEY_SYM_KPPLUS 0xFFAB +#define KEY_SYM_KPMINUS 0xFFAD +#define KEY_SYM_KPDOT 0xFFAE +#define KEY_SYM_KPSLASH 0xFFAF +#define KEY_SYM_KPRIGHT 0xFF96 +#define KEY_SYM_KPUP 0xFF97 +#define KEY_SYM_KPLEFT 0xFF98 +#define KEY_SYM_KPDOWN 0xFF99 +#define KEY_SYM_KP0 0xFFB0 +#define KEY_SYM_KP1 0xFFB1 +#define KEY_SYM_KP2 0xFFB2 +#define KEY_SYM_KP3 0xFFB3 +#define KEY_SYM_KP4 0xFFB4 +#define KEY_SYM_KP5 0xFFB5 +#define KEY_SYM_KP6 0xFFB6 +#define KEY_SYM_KP7 0xFFB7 +#define KEY_SYM_KP8 0xFFB8 +#define KEY_SYM_KP9 0xFFB9 +#define KEY_SYM_F1 0xFFBE // 1B 5B 5B 41 +#define KEY_SYM_F2 0xFFBF // 1B 5B 5B 42 +#define KEY_SYM_F3 0xFFC0 // 1B 5B 5B 43 +#define KEY_SYM_F4 0xFFC1 // 1B 5B 5B 44 +#define KEY_SYM_F5 0xFFC2 // 1B 5B 5B 45 +#define KEY_SYM_F6 0xFFC3 // 1B 5B 31 37 7E +#define KEY_SYM_F7 0xFFC4 // 1B 5B 31 38 7E +#define KEY_SYM_F8 0xFFC5 // 1B 5B 31 39 7E +#define KEY_SYM_F9 0xFFC6 // 1B 5B 32 30 7E +#define KEY_SYM_F10 0xFFC7 // 1B 5B 32 31 7E +#define KEY_SYM_F11 0xFFC8 // 1B 5B 32 33 7E +#define KEY_SYM_F12 0xFFC9 // 1B 5B 32 34 7E +#define KEY_SYM_SHIFT 0xFFE1 +#define KEY_SYM_CTRL 0xFFE3 +#define KEY_SYM_ALT 0xFFE9 +#define KEY_SYM_CAP_LOCK 0xFFE5 +#define KEY_SYM_DELETE 0xFFFF +#define KEY_SYM_TILDE 0x60 +#define KEY_SYM_BKTIC 0x7E +#define KEY_SYM_ONE 0x31 +#define KEY_SYM_BANG 0x21 +#define KEY_SYM_TWO 0x32 +#define KEY_SYM_AT 0x40 +#define KEY_SYM_THREE 0x33 +#define KEY_SYM_POUND 0x23 +#define KEY_SYM_FOUR 0x34 +#define KEY_SYM_DOLLAR 0x24 +#define KEY_SYM_FIVE 0x35 +#define KEY_SYM_PERCENT 0x25 +#define KEY_SYM_SIX 0x36 +#define KEY_SYM_CARAT 0x5E +#define KEY_SYM_SEVEN 0x37 +#define KEY_SYM_AMPER 0x26 +#define KEY_SYM_EIGHT 0x38 +#define KEY_SYM_STAR 0x2A +#define KEY_SYM_NINE 0x39 +#define KEY_SYM_LPAREN 0x28 +#define KEY_SYM_ZERO 0x30 +#define KEY_SYM_RPAREN 0x29 +#define KEY_SYM_MINUS 0x2D +#define KEY_SYM_USCORE 0x5F +#define KEY_SYM_EQUAL 0x2B +#define KEY_SYM_PLUS 0x3D +#define KEY_SYM_LBRKT 0x5B +#define KEY_SYM_LCURLY 0x7B +#define KEY_SYM_RBRKT 0x5D +#define KEY_SYM_RCURLY 0x7D +#define KEY_SYM_SLASH 0x5C +#define KEY_SYM_PIPE 0x7C +#define KEY_SYM_TIC 0x27 +#define KEY_SYM_QUOTE 0x22 +#define KEY_SYM_SEMIC 0x3B +#define KEY_SYM_COLON 0x3A +#define KEY_SYM_COMMA 0x2C +#define KEY_SYM_LT 0x3C +#define KEY_SYM_PERIOD 0x2E +#define KEY_SYM_GT 0x3E +#define KEY_SYM_BSLASH 0x2F +#define KEY_SYM_QMARK 0x3F +#define KEY_SYM_A 0x41 +#define KEY_SYM_B 0x42 +#define KEY_SYM_C 0x43 +#define KEY_SYM_D 0x44 +#define KEY_SYM_E 0x45 +#define KEY_SYM_F 0x46 +#define KEY_SYM_G 0x47 +#define KEY_SYM_H 0x48 +#define KEY_SYM_I 0x49 +#define KEY_SYM_J 0x4A +#define KEY_SYM_K 0x4B +#define KEY_SYM_L 0x4C +#define KEY_SYM_M 0x4D +#define KEY_SYM_N 0x4E +#define KEY_SYM_O 0x4F +#define KEY_SYM_P 0x50 +#define KEY_SYM_Q 0x51 +#define KEY_SYM_R 0x52 +#define KEY_SYM_S 0x53 +#define KEY_SYM_T 0x54 +#define KEY_SYM_U 0x55 +#define KEY_SYM_V 0x56 +#define KEY_SYM_W 0x57 +#define KEY_SYM_X 0x58 +#define KEY_SYM_Y 0x59 +#define KEY_SYM_Z 0x5A +#define KEY_SYM_a 0x61 +#define KEY_SYM_b 0x62 +#define KEY_SYM_c 0x63 +#define KEY_SYM_d 0x64 +#define KEY_SYM_e 0x65 +#define KEY_SYM_f 0x66 +#define KEY_SYM_g 0x67 +#define KEY_SYM_h 0x68 +#define KEY_SYM_i 0x69 +#define KEY_SYM_j 0x6A +#define KEY_SYM_k 0x6B +#define KEY_SYM_l 0x6C +#define KEY_SYM_m 0x6D +#define KEY_SYM_n 0x6E +#define KEY_SYM_o 0x6F +#define KEY_SYM_p 0x70 +#define KEY_SYM_q 0x71 +#define KEY_SYM_r 0x72 +#define KEY_SYM_s 0x73 +#define KEY_SYM_t 0x74 +#define KEY_SYM_u 0x75 +#define KEY_SYM_v 0x76 +#define 
KEY_SYM_w 0x77 +#define KEY_SYM_x 0x78 +#define KEY_SYM_y 0x79 +#define KEY_SYM_z 0x7A +#define KEY_SYM_SPACE 0x20 +#endif /* _IBMASM_REMOTE_H_ */ diff --git a/drivers/misc/ibmasm/uart.c b/drivers/misc/ibmasm/uart.c new file mode 100644 index 0000000..93baa35 --- /dev/null +++ b/drivers/misc/ibmasm/uart.c @@ -0,0 +1,72 @@ + +/* + * IBM ASM Service Processor Device Driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2004 + * + * Author: Max Asböck <amax@us.ibm.com> + * + */ + +#include <linux/termios.h> +#include <linux/tty.h> +#include <linux/serial_core.h> +#include <linux/serial_reg.h> +#include <linux/serial_8250.h> +#include "ibmasm.h" +#include "lowlevel.h" + + +void ibmasm_register_uart(struct service_processor *sp) +{ + struct uart_port uport; + void __iomem *iomem_base; + + iomem_base = sp->base_address + SCOUT_COM_B_BASE; + + /* read the uart scratch register to determine if the UART + * is dedicated to the service processor or if the OS can use it + */ + if (0 == readl(iomem_base + UART_SCR)) { + dev_info(sp->dev, "IBM SP UART not registered, owned by service processor\n"); + sp->serial_line = -1; + return; + } + + memset(&uport, 0, sizeof(struct uart_port)); + uport.irq = sp->irq; + uport.uartclk = 3686400; + uport.flags = UPF_SHARE_IRQ; + uport.iotype = UPIO_MEM; + uport.membase = iomem_base; + + sp->serial_line = serial8250_register_port(&uport); + if (sp->serial_line < 0) { + dev_err(sp->dev, "Failed to register serial port\n"); + return; + } + enable_uart_interrupts(sp->base_address); +} + +void ibmasm_unregister_uart(struct service_processor *sp) +{ + if (sp->serial_line < 0) + return; + + disable_uart_interrupts(sp->base_address); + serial8250_unregister_port(sp->serial_line); +} diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c new file mode 100644 index 0000000..6e43ab4 --- /dev/null +++ b/drivers/misc/ics932s401.c @@ -0,0 +1,515 @@ +/* + * A driver for the Integrated Circuits ICS932S401 + * Copyright (C) 2008 IBM + * + * Author: Darrick J. Wong <djwong@us.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/module.h> +#include <linux/jiffies.h> +#include <linux/i2c.h> +#include <linux/err.h> +#include <linux/mutex.h> +#include <linux/delay.h> +#include <linux/log2.h> + +/* Addresses to scan */ +static const unsigned short normal_i2c[] = { 0x69, I2C_CLIENT_END }; + +/* Insmod parameters */ +I2C_CLIENT_INSMOD_1(ics932s401); + +/* ICS932S401 registers */ +#define ICS932S401_REG_CFG2 0x01 +#define ICS932S401_CFG1_SPREAD 0x01 +#define ICS932S401_REG_CFG7 0x06 +#define ICS932S401_FS_MASK 0x07 +#define ICS932S401_REG_VENDOR_REV 0x07 +#define ICS932S401_VENDOR 1 +#define ICS932S401_VENDOR_MASK 0x0F +#define ICS932S401_REV 4 +#define ICS932S401_REV_SHIFT 4 +#define ICS932S401_REG_DEVICE 0x09 +#define ICS932S401_DEVICE 11 +#define ICS932S401_REG_CTRL 0x0A +#define ICS932S401_MN_ENABLED 0x80 +#define ICS932S401_CPU_ALT 0x04 +#define ICS932S401_SRC_ALT 0x08 +#define ICS932S401_REG_CPU_M_CTRL 0x0B +#define ICS932S401_M_MASK 0x3F +#define ICS932S401_REG_CPU_N_CTRL 0x0C +#define ICS932S401_REG_CPU_SPREAD1 0x0D +#define ICS932S401_REG_CPU_SPREAD2 0x0E +#define ICS932S401_SPREAD_MASK 0x7FFF +#define ICS932S401_REG_SRC_M_CTRL 0x0F +#define ICS932S401_REG_SRC_N_CTRL 0x10 +#define ICS932S401_REG_SRC_SPREAD1 0x11 +#define ICS932S401_REG_SRC_SPREAD2 0x12 +#define ICS932S401_REG_CPU_DIVISOR 0x13 +#define ICS932S401_CPU_DIVISOR_SHIFT 4 +#define ICS932S401_REG_PCISRC_DIVISOR 0x14 +#define ICS932S401_SRC_DIVISOR_MASK 0x0F +#define ICS932S401_PCI_DIVISOR_SHIFT 4 + +/* Base clock is 14.318MHz */ +#define BASE_CLOCK 14318 + +#define NUM_REGS 21 +#define NUM_MIRRORED_REGS 15 + +static int regs_to_copy[NUM_MIRRORED_REGS] = { + ICS932S401_REG_CFG2, + ICS932S401_REG_CFG7, + ICS932S401_REG_VENDOR_REV, + ICS932S401_REG_DEVICE, + ICS932S401_REG_CTRL, + ICS932S401_REG_CPU_M_CTRL, + ICS932S401_REG_CPU_N_CTRL, + ICS932S401_REG_CPU_SPREAD1, + ICS932S401_REG_CPU_SPREAD2, + ICS932S401_REG_SRC_M_CTRL, + ICS932S401_REG_SRC_N_CTRL, + ICS932S401_REG_SRC_SPREAD1, + ICS932S401_REG_SRC_SPREAD2, + ICS932S401_REG_CPU_DIVISOR, + ICS932S401_REG_PCISRC_DIVISOR, +}; + +/* How often do we reread sensors values? (In jiffies) */ +#define SENSOR_REFRESH_INTERVAL (2 * HZ) + +/* How often do we reread sensor limit values? 
(In jiffies) */ +#define LIMIT_REFRESH_INTERVAL (60 * HZ) + +struct ics932s401_data { + struct attribute_group attrs; + struct mutex lock; + char sensors_valid; + unsigned long sensors_last_updated; /* In jiffies */ + + u8 regs[NUM_REGS]; +}; + +static int ics932s401_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int ics932s401_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); +static int ics932s401_remove(struct i2c_client *client); + +static const struct i2c_device_id ics932s401_id[] = { + { "ics932s401", ics932s401 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, ics932s401_id); + +static struct i2c_driver ics932s401_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "ics932s401", + }, + .probe = ics932s401_probe, + .remove = ics932s401_remove, + .id_table = ics932s401_id, + .detect = ics932s401_detect, + .address_data = &addr_data, +}; + +static struct ics932s401_data *ics932s401_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ics932s401_data *data = i2c_get_clientdata(client); + unsigned long local_jiffies = jiffies; + int i, temp; + + mutex_lock(&data->lock); + if (time_before(local_jiffies, data->sensors_last_updated + + SENSOR_REFRESH_INTERVAL) + && data->sensors_valid) + goto out; + + /* + * Each register must be read as a word and then right shifted 8 bits. + * Not really sure why this is; setting the "byte count programming" + * register to 1 does not fix this problem. + */ + for (i = 0; i < NUM_MIRRORED_REGS; i++) { + temp = i2c_smbus_read_word_data(client, regs_to_copy[i]); + data->regs[regs_to_copy[i]] = temp >> 8; + } + + data->sensors_last_updated = local_jiffies; + data->sensors_valid = 1; + +out: + mutex_unlock(&data->lock); + return data; +} + +static ssize_t show_spread_enabled(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct ics932s401_data *data = ics932s401_update_device(dev); + + if (data->regs[ICS932S401_REG_CFG2] & ICS932S401_CFG1_SPREAD) + return sprintf(buf, "1\n"); + + return sprintf(buf, "0\n"); +} + +/* bit to cpu khz map */ +static const int fs_speeds[] = { + 266666, + 133333, + 200000, + 166666, + 333333, + 100000, + 400000, + 0, +}; + +/* clock divisor map */ +static const int divisors[] = {2, 3, 5, 15, 4, 6, 10, 30, 8, 12, 20, 60, 16, + 24, 40, 120}; + +/* Calculate CPU frequency from the M/N registers. 
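+ *
+ * N is 10 bits wide; its two high bits are stored in bits 7:6 of the
+ * M register. The resulting frequency in kHz is
+ *
+ *	BASE_CLOCK * (N + 8) / (M + 2) / divisor
+ *
+ * with the divisor picked from divisors[] by the high nibble of the
+ * CPU divisor register. For example (illustrative register values,
+ * not chip defaults): M = 2, N = 24, divisor = 2 gives
+ * 14318 * 32 / 4 / 2 = 57272 kHz.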
*/ +static int calculate_cpu_freq(struct ics932s401_data *data) +{ + int m, n, freq; + + m = data->regs[ICS932S401_REG_CPU_M_CTRL] & ICS932S401_M_MASK; + n = data->regs[ICS932S401_REG_CPU_N_CTRL]; + + /* Pull in bits 8 & 9 from the M register */ + n |= ((int)data->regs[ICS932S401_REG_CPU_M_CTRL] & 0x80) << 1; + n |= ((int)data->regs[ICS932S401_REG_CPU_M_CTRL] & 0x40) << 3; + + freq = BASE_CLOCK * (n + 8) / (m + 2); + freq /= divisors[data->regs[ICS932S401_REG_CPU_DIVISOR] >> + ICS932S401_CPU_DIVISOR_SHIFT]; + + return freq; +} + +static ssize_t show_cpu_clock(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct ics932s401_data *data = ics932s401_update_device(dev); + + return sprintf(buf, "%d\n", calculate_cpu_freq(data)); +} + +static ssize_t show_cpu_clock_sel(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct ics932s401_data *data = ics932s401_update_device(dev); + int freq; + + if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_MN_ENABLED) + freq = calculate_cpu_freq(data); + else { + /* Freq is neatly wrapped up for us */ + int fid = data->regs[ICS932S401_REG_CFG7] & ICS932S401_FS_MASK; + freq = fs_speeds[fid]; + if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_CPU_ALT) { + switch (freq) { + case 166666: + freq = 160000; + break; + case 333333: + freq = 320000; + break; + } + } + } + + return sprintf(buf, "%d\n", freq); +} + +/* Calculate SRC frequency from the M/N registers. */ +static int calculate_src_freq(struct ics932s401_data *data) +{ + int m, n, freq; + + m = data->regs[ICS932S401_REG_SRC_M_CTRL] & ICS932S401_M_MASK; + n = data->regs[ICS932S401_REG_SRC_N_CTRL]; + + /* Pull in bits 8 & 9 from the M register */ + n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x80) << 1; + n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x40) << 3; + + freq = BASE_CLOCK * (n + 8) / (m + 2); + freq /= divisors[data->regs[ICS932S401_REG_PCISRC_DIVISOR] & + ICS932S401_SRC_DIVISOR_MASK]; + + return freq; +} + +static ssize_t show_src_clock(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct ics932s401_data *data = ics932s401_update_device(dev); + + return sprintf(buf, "%d\n", calculate_src_freq(data)); +} + +static ssize_t show_src_clock_sel(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct ics932s401_data *data = ics932s401_update_device(dev); + int freq; + + if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_MN_ENABLED) + freq = calculate_src_freq(data); + else + /* Freq is neatly wrapped up for us */ + if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_CPU_ALT && + data->regs[ICS932S401_REG_CTRL] & ICS932S401_SRC_ALT) + freq = 96000; + else + freq = 100000; + + return sprintf(buf, "%d\n", freq); +} + +/* Calculate PCI frequency from the SRC M/N registers. 
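+ *
+ * This uses the same SRC M/N pair as calculate_src_freq() above, but
+ * divides by the divisor selected in the high nibble of the PCI/SRC
+ * divisor register (the SRC clock uses the low nibble).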
*/ +static int calculate_pci_freq(struct ics932s401_data *data) +{ + int m, n, freq; + + m = data->regs[ICS932S401_REG_SRC_M_CTRL] & ICS932S401_M_MASK; + n = data->regs[ICS932S401_REG_SRC_N_CTRL]; + + /* Pull in bits 8 & 9 from the M register */ + n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x80) << 1; + n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x40) << 3; + + freq = BASE_CLOCK * (n + 8) / (m + 2); + freq /= divisors[data->regs[ICS932S401_REG_PCISRC_DIVISOR] >> + ICS932S401_PCI_DIVISOR_SHIFT]; + + return freq; +} + +static ssize_t show_pci_clock(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct ics932s401_data *data = ics932s401_update_device(dev); + + return sprintf(buf, "%d\n", calculate_pci_freq(data)); +} + +static ssize_t show_pci_clock_sel(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct ics932s401_data *data = ics932s401_update_device(dev); + int freq; + + if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_MN_ENABLED) + freq = calculate_pci_freq(data); + else + freq = 33333; + + return sprintf(buf, "%d\n", freq); +} + +static ssize_t show_value(struct device *dev, + struct device_attribute *devattr, + char *buf); + +static ssize_t show_spread(struct device *dev, + struct device_attribute *devattr, + char *buf); + +static DEVICE_ATTR(spread_enabled, S_IRUGO, show_spread_enabled, NULL); +static DEVICE_ATTR(cpu_clock_selection, S_IRUGO, show_cpu_clock_sel, NULL); +static DEVICE_ATTR(cpu_clock, S_IRUGO, show_cpu_clock, NULL); +static DEVICE_ATTR(src_clock_selection, S_IRUGO, show_src_clock_sel, NULL); +static DEVICE_ATTR(src_clock, S_IRUGO, show_src_clock, NULL); +static DEVICE_ATTR(pci_clock_selection, S_IRUGO, show_pci_clock_sel, NULL); +static DEVICE_ATTR(pci_clock, S_IRUGO, show_pci_clock, NULL); +static DEVICE_ATTR(usb_clock, S_IRUGO, show_value, NULL); +static DEVICE_ATTR(ref_clock, S_IRUGO, show_value, NULL); +static DEVICE_ATTR(cpu_spread, S_IRUGO, show_spread, NULL); +static DEVICE_ATTR(src_spread, S_IRUGO, show_spread, NULL); + +static struct attribute *ics932s401_attr[] = +{ + &dev_attr_spread_enabled.attr, + &dev_attr_cpu_clock_selection.attr, + &dev_attr_cpu_clock.attr, + &dev_attr_src_clock_selection.attr, + &dev_attr_src_clock.attr, + &dev_attr_pci_clock_selection.attr, + &dev_attr_pci_clock.attr, + &dev_attr_usb_clock.attr, + &dev_attr_ref_clock.attr, + &dev_attr_cpu_spread.attr, + &dev_attr_src_spread.attr, + NULL +}; + +static ssize_t show_value(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + int x; + + if (devattr == &dev_attr_usb_clock) + x = 48000; + else if (devattr == &dev_attr_ref_clock) + x = BASE_CLOCK; + else + BUG(); + + return sprintf(buf, "%d\n", x); +} + +static ssize_t show_spread(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct ics932s401_data *data = ics932s401_update_device(dev); + int reg; + unsigned long val; + + if (!(data->regs[ICS932S401_REG_CFG2] & ICS932S401_CFG1_SPREAD)) + return sprintf(buf, "0%%\n"); + + if (devattr == &dev_attr_src_spread) + reg = ICS932S401_REG_SRC_SPREAD1; + else if (devattr == &dev_attr_cpu_spread) + reg = ICS932S401_REG_CPU_SPREAD1; + else + BUG(); + + val = data->regs[reg] | (data->regs[reg + 1] << 8); + val &= ICS932S401_SPREAD_MASK; + + /* Scale 0..2^14 to -0.5. 
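+ * For example, a raw spread field of 8192 (half scale) gives
+ * 500000 * 8192 / 16384 = 250000, printed below as "-0.250000%".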
*/ + val = 500000 * val / 16384; + return sprintf(buf, "-0.%lu%%\n", val); +} + +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int ics932s401_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) +{ + struct i2c_adapter *adapter = client->adapter; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return -ENODEV; + + if (kind <= 0) { + int vendor, device, revision; + + vendor = i2c_smbus_read_word_data(client, + ICS932S401_REG_VENDOR_REV); + vendor >>= 8; + revision = vendor >> ICS932S401_REV_SHIFT; + vendor &= ICS932S401_VENDOR_MASK; + if (vendor != ICS932S401_VENDOR) + return -ENODEV; + + device = i2c_smbus_read_word_data(client, + ICS932S401_REG_DEVICE); + device >>= 8; + if (device != ICS932S401_DEVICE) + return -ENODEV; + + if (revision != ICS932S401_REV) + dev_info(&adapter->dev, "Unknown revision %d\n", + revision); + } else + dev_dbg(&adapter->dev, "detection forced\n"); + + strlcpy(info->type, "ics932s401", I2C_NAME_SIZE); + + return 0; +} + +static int ics932s401_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct ics932s401_data *data; + int err; + + data = kzalloc(sizeof(struct ics932s401_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + mutex_init(&data->lock); + + dev_info(&client->dev, "%s chip found\n", client->name); + + /* Register sysfs hooks */ + data->attrs.attrs = ics932s401_attr; + err = sysfs_create_group(&client->dev.kobj, &data->attrs); + if (err) + goto exit_free; + + return 0; + +exit_free: + kfree(data); +exit: + return err; +} + +static int ics932s401_remove(struct i2c_client *client) +{ + struct ics932s401_data *data = i2c_get_clientdata(client); + + sysfs_remove_group(&client->dev.kobj, &data->attrs); + kfree(data); + return 0; +} + +static int __init ics932s401_init(void) +{ + return i2c_add_driver(&ics932s401_driver); +} + +static void __exit ics932s401_exit(void) +{ + i2c_del_driver(&ics932s401_driver); +} + +MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>"); +MODULE_DESCRIPTION("ICS932S401 driver"); +MODULE_LICENSE("GPL"); + +module_init(ics932s401_init); +module_exit(ics932s401_exit); + +/* IBM IntelliStation Z30 */ +MODULE_ALIAS("dmi:bvnIBM:*:rn9228:*"); +MODULE_ALIAS("dmi:bvnIBM:*:rn9232:*"); + +/* IBM x3650/x3550 */ +MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3650*"); +MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3550*"); diff --git a/drivers/misc/intel_menlow.c b/drivers/misc/intel_menlow.c new file mode 100644 index 0000000..27b7662 --- /dev/null +++ b/drivers/misc/intel_menlow.c @@ -0,0 +1,536 @@ +/* + * intel_menlow.c - Intel menlow Driver for thermal management extension + * + * Copyright (C) 2008 Intel Corp + * Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com> + * Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com> + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This driver creates the sys I/F for programming the sensors. + * It also implements the driver for intel menlow memory controller (hardware + * id is INT0002) which makes use of the platform specific ACPI methods + * to get/set bandwidth. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/pm.h> + +#include <linux/thermal.h> +#include <acpi/acpi_bus.h> +#include <acpi/acpi_drivers.h> + +MODULE_AUTHOR("Thomas Sujith"); +MODULE_AUTHOR("Zhang Rui"); +MODULE_DESCRIPTION("Intel Menlow platform specific driver"); +MODULE_LICENSE("GPL"); + +/* + * Memory controller device control + */ + +#define MEMORY_GET_BANDWIDTH "GTHS" +#define MEMORY_SET_BANDWIDTH "STHS" +#define MEMORY_ARG_CUR_BANDWIDTH 1 +#define MEMORY_ARG_MAX_BANDWIDTH 0 + +/* + * GTHS returning 'n' would mean that [0,n-1] states are supported + * In that case max_cstate would be n-1 + * GTHS returning '0' would mean that no bandwidth control states are supported + */ +static int memory_get_int_max_bandwidth(struct thermal_cooling_device *cdev, + unsigned long *max_state) +{ + struct acpi_device *device = cdev->devdata; + acpi_handle handle = device->handle; + unsigned long long value; + struct acpi_object_list arg_list; + union acpi_object arg; + acpi_status status = AE_OK; + + arg_list.count = 1; + arg_list.pointer = &arg; + arg.type = ACPI_TYPE_INTEGER; + arg.integer.value = MEMORY_ARG_MAX_BANDWIDTH; + status = acpi_evaluate_integer(handle, MEMORY_GET_BANDWIDTH, + &arg_list, &value); + if (ACPI_FAILURE(status)) + return -EFAULT; + + if (!value) + return -EINVAL; + + *max_state = value - 1; + return 0; +} + +static int memory_get_max_bandwidth(struct thermal_cooling_device *cdev, + char *buf) +{ + unsigned long value; + if (memory_get_int_max_bandwidth(cdev, &value)) + return -EINVAL; + + return sprintf(buf, "%ld\n", value); +} + +static int memory_get_cur_bandwidth(struct thermal_cooling_device *cdev, + char *buf) +{ + struct acpi_device *device = cdev->devdata; + acpi_handle handle = device->handle; + unsigned long long value; + struct acpi_object_list arg_list; + union acpi_object arg; + acpi_status status = AE_OK; + + arg_list.count = 1; + arg_list.pointer = &arg; + arg.type = ACPI_TYPE_INTEGER; + arg.integer.value = MEMORY_ARG_CUR_BANDWIDTH; + status = acpi_evaluate_integer(handle, MEMORY_GET_BANDWIDTH, + &arg_list, &value); + if (ACPI_FAILURE(status)) + return -EFAULT; + + return sprintf(buf, "%llu\n", value); +} + +static int memory_set_cur_bandwidth(struct thermal_cooling_device *cdev, + unsigned int state) +{ + struct acpi_device *device = cdev->devdata; + acpi_handle handle = device->handle; + struct acpi_object_list arg_list; + union acpi_object arg; + acpi_status status; + unsigned long long temp; + unsigned long max_state; + + if (memory_get_int_max_bandwidth(cdev, &max_state)) + return -EFAULT; + + if (state > max_state) + return -EINVAL; + + arg_list.count = 1; + arg_list.pointer = &arg; + arg.type = ACPI_TYPE_INTEGER; + arg.integer.value = state; + + status = + acpi_evaluate_integer(handle, MEMORY_SET_BANDWIDTH, &arg_list, + &temp); + + printk(KERN_INFO + "Bandwidth value was %d: status is %d\n", state, status); + if 
(ACPI_FAILURE(status)) + return -EFAULT; + + return 0; +} + +static struct thermal_cooling_device_ops memory_cooling_ops = { + .get_max_state = memory_get_max_bandwidth, + .get_cur_state = memory_get_cur_bandwidth, + .set_cur_state = memory_set_cur_bandwidth, +}; + +/* + * Memory Device Management + */ +static int intel_menlow_memory_add(struct acpi_device *device) +{ + int result = -ENODEV; + acpi_status status = AE_OK; + acpi_handle dummy; + struct thermal_cooling_device *cdev; + + if (!device) + return -EINVAL; + + status = acpi_get_handle(device->handle, MEMORY_GET_BANDWIDTH, &dummy); + if (ACPI_FAILURE(status)) + goto end; + + status = acpi_get_handle(device->handle, MEMORY_SET_BANDWIDTH, &dummy); + if (ACPI_FAILURE(status)) + goto end; + + cdev = thermal_cooling_device_register("Memory controller", device, + &memory_cooling_ops); + if (IS_ERR(cdev)) { + result = PTR_ERR(cdev); + goto end; + } + + device->driver_data = cdev; + result = sysfs_create_link(&device->dev.kobj, + &cdev->device.kobj, "thermal_cooling"); + if (result) + goto unregister; + + result = sysfs_create_link(&cdev->device.kobj, + &device->dev.kobj, "device"); + if (result) { + sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); + goto unregister; + } + + end: + return result; + + unregister: + thermal_cooling_device_unregister(cdev); + return result; + +} + +static int intel_menlow_memory_remove(struct acpi_device *device, int type) +{ + struct thermal_cooling_device *cdev = acpi_driver_data(device); + + if (!device || !cdev) + return -EINVAL; + + sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); + sysfs_remove_link(&cdev->device.kobj, "device"); + thermal_cooling_device_unregister(cdev); + + return 0; +} + +static const struct acpi_device_id intel_menlow_memory_ids[] = { + {"INT0002", 0}, + {"", 0}, +}; + +static struct acpi_driver intel_menlow_memory_driver = { + .name = "intel_menlow_thermal_control", + .ids = intel_menlow_memory_ids, + .ops = { + .add = intel_menlow_memory_add, + .remove = intel_menlow_memory_remove, + }, +}; + +/* + * Sensor control on menlow platform + */ + +#define THERMAL_AUX0 0 +#define THERMAL_AUX1 1 +#define GET_AUX0 "GAX0" +#define GET_AUX1 "GAX1" +#define SET_AUX0 "SAX0" +#define SET_AUX1 "SAX1" + +struct intel_menlow_attribute { + struct device_attribute attr; + struct device *device; + acpi_handle handle; + struct list_head node; +}; + +static LIST_HEAD(intel_menlow_attr_list); +static DEFINE_MUTEX(intel_menlow_attr_lock); + +/* + * sensor_get_auxtrip - get the current auxtrip value from sensor + * @name: Thermalzone name + * @auxtype : AUX0/AUX1 + * @buf: syfs buffer + */ +static int sensor_get_auxtrip(acpi_handle handle, int index, + unsigned long long *value) +{ + acpi_status status; + + if ((index != 0 && index != 1) || !value) + return -EINVAL; + + status = acpi_evaluate_integer(handle, index ? GET_AUX1 : GET_AUX0, + NULL, value); + if (ACPI_FAILURE(status)) + return -EIO; + + return 0; +} + +/* + * sensor_set_auxtrip - set the new auxtrip value to sensor + * @name: Thermalzone name + * @auxtype : AUX0/AUX1 + * @buf: syfs buffer + */ +static int sensor_set_auxtrip(acpi_handle handle, int index, int value) +{ + acpi_status status; + union acpi_object arg = { + ACPI_TYPE_INTEGER + }; + struct acpi_object_list args = { + 1, &arg + }; + unsigned long long temp; + + if (index != 0 && index != 1) + return -EINVAL; + + status = acpi_evaluate_integer(handle, index ? 
GET_AUX0 : GET_AUX1, + NULL, &temp); + if (ACPI_FAILURE(status)) + return -EIO; + if ((index && value < temp) || (!index && value > temp)) + return -EINVAL; + + arg.integer.value = value; + status = acpi_evaluate_integer(handle, index ? SET_AUX1 : SET_AUX0, + &args, &temp); + if (ACPI_FAILURE(status)) + return -EIO; + + /* do we need to check the return value of SAX0/SAX1 ? */ + + return 0; +} + +#define to_intel_menlow_attr(_attr) \ + container_of(_attr, struct intel_menlow_attribute, attr) + +static ssize_t aux0_show(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr); + unsigned long long value; + int result; + + result = sensor_get_auxtrip(attr->handle, 0, &value); + + return result ? result : sprintf(buf, "%lu", KELVIN_TO_CELSIUS(value)); +} + +static ssize_t aux1_show(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr); + unsigned long long value; + int result; + + result = sensor_get_auxtrip(attr->handle, 1, &value); + + return result ? result : sprintf(buf, "%lu", KELVIN_TO_CELSIUS(value)); +} + +static ssize_t aux0_store(struct device *dev, + struct device_attribute *dev_attr, + const char *buf, size_t count) +{ + struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr); + int value; + int result; + + /*Sanity check; should be a positive integer */ + if (!sscanf(buf, "%d", &value)) + return -EINVAL; + + if (value < 0) + return -EINVAL; + + result = sensor_set_auxtrip(attr->handle, 0, CELSIUS_TO_KELVIN(value)); + return result ? result : count; +} + +static ssize_t aux1_store(struct device *dev, + struct device_attribute *dev_attr, + const char *buf, size_t count) +{ + struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr); + int value; + int result; + + /*Sanity check; should be a positive integer */ + if (!sscanf(buf, "%d", &value)) + return -EINVAL; + + if (value < 0) + return -EINVAL; + + result = sensor_set_auxtrip(attr->handle, 1, CELSIUS_TO_KELVIN(value)); + return result ? result : count; +} + +/* BIOS can enable/disable the thermal user application in dabney platform */ +#define BIOS_ENABLED "\\_TZ.GSTS" +static ssize_t bios_enabled_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + acpi_status status; + unsigned long long bios_enabled; + + status = acpi_evaluate_integer(NULL, BIOS_ENABLED, NULL, &bios_enabled); + if (ACPI_FAILURE(status)) + return -ENODEV; + + return sprintf(buf, "%s\n", bios_enabled ? 
"enabled" : "disabled"); +} + +static int intel_menlow_add_one_attribute(char *name, int mode, void *show, + void *store, struct device *dev, + acpi_handle handle) +{ + struct intel_menlow_attribute *attr; + int result; + + attr = kzalloc(sizeof(struct intel_menlow_attribute), GFP_KERNEL); + if (!attr) + return -ENOMEM; + + attr->attr.attr.name = name; + attr->attr.attr.mode = mode; + attr->attr.show = show; + attr->attr.store = store; + attr->device = dev; + attr->handle = handle; + + result = device_create_file(dev, &attr->attr); + if (result) + return result; + + mutex_lock(&intel_menlow_attr_lock); + list_add_tail(&attr->node, &intel_menlow_attr_list); + mutex_unlock(&intel_menlow_attr_lock); + + return 0; +} + +static acpi_status intel_menlow_register_sensor(acpi_handle handle, u32 lvl, + void *context, void **rv) +{ + acpi_status status; + acpi_handle dummy; + struct thermal_zone_device *thermal; + int result; + + result = acpi_bus_get_private_data(handle, (void **)&thermal); + if (result) + return 0; + + /* _TZ must have the AUX0/1 methods */ + status = acpi_get_handle(handle, GET_AUX0, &dummy); + if (ACPI_FAILURE(status)) + goto not_found; + + status = acpi_get_handle(handle, SET_AUX0, &dummy); + if (ACPI_FAILURE(status)) + goto not_found; + + result = intel_menlow_add_one_attribute("aux0", 0644, + aux0_show, aux0_store, + &thermal->device, handle); + if (result) + return AE_ERROR; + + status = acpi_get_handle(handle, GET_AUX1, &dummy); + if (ACPI_FAILURE(status)) + goto not_found; + + status = acpi_get_handle(handle, SET_AUX1, &dummy); + if (ACPI_FAILURE(status)) + goto not_found; + + result = intel_menlow_add_one_attribute("aux1", 0644, + aux1_show, aux1_store, + &thermal->device, handle); + if (result) + return AE_ERROR; + + /* + * create the "dabney_enabled" attribute which means the user app + * should be loaded or not + */ + + result = intel_menlow_add_one_attribute("bios_enabled", 0444, + bios_enabled_show, NULL, + &thermal->device, handle); + if (result) + return AE_ERROR; + + not_found: + if (status == AE_NOT_FOUND) + return AE_OK; + else + return status; +} + +static void intel_menlow_unregister_sensor(void) +{ + struct intel_menlow_attribute *pos, *next; + + mutex_lock(&intel_menlow_attr_lock); + list_for_each_entry_safe(pos, next, &intel_menlow_attr_list, node) { + list_del(&pos->node); + device_remove_file(pos->device, &pos->attr); + kfree(pos); + } + mutex_unlock(&intel_menlow_attr_lock); + + return; +} + +static int __init intel_menlow_module_init(void) +{ + int result = -ENODEV; + acpi_status status; + unsigned long long enable; + + if (acpi_disabled) + return result; + + /* Looking for the \_TZ.GSTS method */ + status = acpi_evaluate_integer(NULL, BIOS_ENABLED, NULL, &enable); + if (ACPI_FAILURE(status) || !enable) + return -ENODEV; + + /* Looking for ACPI device MEM0 with hardware id INT0002 */ + result = acpi_bus_register_driver(&intel_menlow_memory_driver); + if (result) + return result; + + /* Looking for sensors in each ACPI thermal zone */ + status = acpi_walk_namespace(ACPI_TYPE_THERMAL, ACPI_ROOT_OBJECT, + ACPI_UINT32_MAX, + intel_menlow_register_sensor, NULL, NULL); + if (ACPI_FAILURE(status)) + return -ENODEV; + + return 0; +} + +static void __exit intel_menlow_module_exit(void) +{ + acpi_bus_unregister_driver(&intel_menlow_memory_driver); + intel_menlow_unregister_sensor(); +} + +module_init(intel_menlow_module_init); +module_exit(intel_menlow_module_exit); diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c new file mode 100644 index 0000000..6f76573 
--- /dev/null +++ b/drivers/misc/ioc4.c @@ -0,0 +1,476 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2005-2006 Silicon Graphics, Inc. All Rights Reserved. + */ + +/* This file contains the master driver module for use by SGI IOC4 subdrivers. + * + * It allocates any resources shared between multiple subdevices, and + * provides accessor functions (where needed) and the like for those + * resources. It also provides a mechanism for the subdevice modules + * to support loading and unloading. + * + * Non-shared resources (e.g. external interrupt A_INT_OUT register page + * alias, serial port and UART registers) are handled by the subdevice + * modules themselves. + * + * This is all necessary because IOC4 is not implemented as a multi-function + * PCI device, but an amalgamation of disparate registers for several + * types of device (ATA, serial, external interrupts). The normal + * resource management in the kernel doesn't have quite the right interfaces + * to handle this situation (e.g. multiple modules can't claim the same + * PCI ID), thus this IOC4 master module. + */ + +#include <linux/errno.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/ioc4.h> +#include <linux/ktime.h> +#include <linux/mutex.h> +#include <linux/time.h> +#include <asm/io.h> + +/*************** + * Definitions * + ***************/ + +/* Tweakable values */ + +/* PCI bus speed detection/calibration */ +#define IOC4_CALIBRATE_COUNT 63 /* Calibration cycle period */ +#define IOC4_CALIBRATE_CYCLES 256 /* Average over this many cycles */ +#define IOC4_CALIBRATE_DISCARD 2 /* Discard first few cycles */ +#define IOC4_CALIBRATE_LOW_MHZ 25 /* Lower bound on bus speed sanity */ +#define IOC4_CALIBRATE_HIGH_MHZ 75 /* Upper bound on bus speed sanity */ +#define IOC4_CALIBRATE_DEFAULT_MHZ 66 /* Assumed if sanity check fails */ + +/************************ + * Submodule management * + ************************/ + +static DEFINE_MUTEX(ioc4_mutex); + +static LIST_HEAD(ioc4_devices); +static LIST_HEAD(ioc4_submodules); + +/* Register an IOC4 submodule */ +int +ioc4_register_submodule(struct ioc4_submodule *is) +{ + struct ioc4_driver_data *idd; + + mutex_lock(&ioc4_mutex); + list_add(&is->is_list, &ioc4_submodules); + + /* Initialize submodule for each IOC4 */ + if (!is->is_probe) + goto out; + + list_for_each_entry(idd, &ioc4_devices, idd_list) { + if (is->is_probe(idd)) { + printk(KERN_WARNING + "%s: IOC4 submodule %s probe failed " + "for pci_dev %s", + __func__, module_name(is->is_owner), + pci_name(idd->idd_pdev)); + } + } + out: + mutex_unlock(&ioc4_mutex); + return 0; +} + +/* Unregister an IOC4 submodule */ +void +ioc4_unregister_submodule(struct ioc4_submodule *is) +{ + struct ioc4_driver_data *idd; + + mutex_lock(&ioc4_mutex); + list_del(&is->is_list); + + /* Remove submodule for each IOC4 */ + if (!is->is_remove) + goto out; + + list_for_each_entry(idd, &ioc4_devices, idd_list) { + if (is->is_remove(idd)) { + printk(KERN_WARNING + "%s: IOC4 submodule %s remove failed " + "for pci_dev %s.\n", + __func__, module_name(is->is_owner), + pci_name(idd->idd_pdev)); + } + } + out: + mutex_unlock(&ioc4_mutex); +} + +/********************* + * Device management * + *********************/ + +#define IOC4_CALIBRATE_LOW_LIMIT \ + (1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_LOW_MHZ) +#define IOC4_CALIBRATE_HIGH_LIMIT \ + 
(1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_HIGH_MHZ) +#define IOC4_CALIBRATE_DEFAULT \ + (1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_DEFAULT_MHZ) + +#define IOC4_CALIBRATE_END \ + (IOC4_CALIBRATE_CYCLES + IOC4_CALIBRATE_DISCARD) + +#define IOC4_INT_OUT_MODE_TOGGLE 0x7 /* Toggle INT_OUT every COUNT+1 ticks */ + +/* Determines external interrupt output clock period of the PCI bus an + * IOC4 is attached to. This value can be used to determine the PCI + * bus speed. + * + * IOC4 has a design feature that various internal timers are derived from + * the PCI bus clock. This causes IOC4 device drivers to need to take the + * bus speed into account when setting various register values (e.g. INT_OUT + * register COUNT field, UART divisors, etc). Since this information is + * needed by several subdrivers, it is determined by the main IOC4 driver, + * even though the following code utilizes external interrupt registers + * to perform the speed calculation. + */ +static void +ioc4_clock_calibrate(struct ioc4_driver_data *idd) +{ + union ioc4_int_out int_out; + union ioc4_gpcr gpcr; + unsigned int state, last_state = 1; + struct timespec start_ts, end_ts; + uint64_t start, end, period; + unsigned int count = 0; + + /* Enable output */ + gpcr.raw = 0; + gpcr.fields.dir = IOC4_GPCR_DIR_0; + gpcr.fields.int_out_en = 1; + writel(gpcr.raw, &idd->idd_misc_regs->gpcr_s.raw); + + /* Reset to power-on state */ + writel(0, &idd->idd_misc_regs->int_out.raw); + mmiowb(); + + /* Set up square wave */ + int_out.raw = 0; + int_out.fields.count = IOC4_CALIBRATE_COUNT; + int_out.fields.mode = IOC4_INT_OUT_MODE_TOGGLE; + int_out.fields.diag = 0; + writel(int_out.raw, &idd->idd_misc_regs->int_out.raw); + mmiowb(); + + /* Check square wave period averaged over some number of cycles */ + do { + int_out.raw = readl(&idd->idd_misc_regs->int_out.raw); + state = int_out.fields.int_out; + if (!last_state && state) { + count++; + if (count == IOC4_CALIBRATE_END) { + ktime_get_ts(&end_ts); + break; + } else if (count == IOC4_CALIBRATE_DISCARD) + ktime_get_ts(&start_ts); + } + last_state = state; + } while (1); + + /* Calculation rearranged to preserve intermediate precision. + * Logically: + * 1. "end - start" gives us the measurement period over all + * the square wave cycles. + * 2. Divide by number of square wave cycles to get the period + * of a square wave cycle. + * 3. Divide by 2*(int_out.fields.count+1), which is the formula + * by which the IOC4 generates the square wave, to get the + * period of an IOC4 INT_OUT count. + */ + end = end_ts.tv_sec * NSEC_PER_SEC + end_ts.tv_nsec; + start = start_ts.tv_sec * NSEC_PER_SEC + start_ts.tv_nsec; + period = (end - start) / + (IOC4_CALIBRATE_CYCLES * 2 * (IOC4_CALIBRATE_COUNT + 1)); + + /* Bounds check the result. */ + if (period > IOC4_CALIBRATE_LOW_LIMIT || + period < IOC4_CALIBRATE_HIGH_LIMIT) { + printk(KERN_INFO + "IOC4 %s: Clock calibration failed. Assuming" + "PCI clock is %d ns.\n", + pci_name(idd->idd_pdev), + IOC4_CALIBRATE_DEFAULT / IOC4_EXTINT_COUNT_DIVISOR); + period = IOC4_CALIBRATE_DEFAULT; + } else { + u64 ns = period; + + do_div(ns, IOC4_EXTINT_COUNT_DIVISOR); + printk(KERN_DEBUG + "IOC4 %s: PCI clock is %llu ns.\n", + pci_name(idd->idd_pdev), (unsigned long long)ns); + } + + /* Remember results. We store the extint clock period rather + * than the PCI clock period so that greater precision is + * retained. Divide by IOC4_EXTINT_COUNT_DIVISOR to get + * PCI clock period. 
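+ * For example, on a hypothetical 33 MHz bus (30 ns PCI clock) the
+ * value stored below would be 30 * IOC4_EXTINT_COUNT_DIVISOR ns,
+ * and a consumer recovers the 30 ns figure with do_div(), exactly
+ * as the KERN_DEBUG report above does.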
+ */ + idd->count_period = period; +} + +/* There are three variants of IOC4 cards: IO9, IO10, and PCI-RT. + * Each brings out different combinations of IOC4 signals, thus. + * the IOC4 subdrivers need to know to which we're attached. + * + * We look for the presence of a SCSI (IO9) or SATA (IO10) controller + * on the same PCI bus at slot number 3 to differentiate IO9 from IO10. + * If neither is present, it's a PCI-RT. + */ +static unsigned int +ioc4_variant(struct ioc4_driver_data *idd) +{ + struct pci_dev *pdev = NULL; + int found = 0; + + /* IO9: Look for a QLogic ISP 12160 at the same bus and slot 3. */ + do { + pdev = pci_get_device(PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_ISP12160, pdev); + if (pdev && + idd->idd_pdev->bus->number == pdev->bus->number && + 3 == PCI_SLOT(pdev->devfn)) + found = 1; + } while (pdev && !found); + if (NULL != pdev) { + pci_dev_put(pdev); + return IOC4_VARIANT_IO9; + } + + /* IO10: Look for a Vitesse VSC 7174 at the same bus and slot 3. */ + pdev = NULL; + do { + pdev = pci_get_device(PCI_VENDOR_ID_VITESSE, + PCI_DEVICE_ID_VITESSE_VSC7174, pdev); + if (pdev && + idd->idd_pdev->bus->number == pdev->bus->number && + 3 == PCI_SLOT(pdev->devfn)) + found = 1; + } while (pdev && !found); + if (NULL != pdev) { + pci_dev_put(pdev); + return IOC4_VARIANT_IO10; + } + + /* PCI-RT: No SCSI/SATA controller will be present */ + return IOC4_VARIANT_PCI_RT; +} + +/* Adds a new instance of an IOC4 card */ +static int +ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) +{ + struct ioc4_driver_data *idd; + struct ioc4_submodule *is; + uint32_t pcmd; + int ret; + + /* Enable IOC4 and take ownership of it */ + if ((ret = pci_enable_device(pdev))) { + printk(KERN_WARNING + "%s: Failed to enable IOC4 device for pci_dev %s.\n", + __func__, pci_name(pdev)); + goto out; + } + pci_set_master(pdev); + + /* Set up per-IOC4 data */ + idd = kmalloc(sizeof(struct ioc4_driver_data), GFP_KERNEL); + if (!idd) { + printk(KERN_WARNING + "%s: Failed to allocate IOC4 data for pci_dev %s.\n", + __func__, pci_name(pdev)); + ret = -ENODEV; + goto out_idd; + } + idd->idd_pdev = pdev; + idd->idd_pci_id = pci_id; + + /* Map IOC4 misc registers. These are shared between subdevices + * so the main IOC4 module manages them. + */ + idd->idd_bar0 = pci_resource_start(idd->idd_pdev, 0); + if (!idd->idd_bar0) { + printk(KERN_WARNING + "%s: Unable to find IOC4 misc resource " + "for pci_dev %s.\n", + __func__, pci_name(idd->idd_pdev)); + ret = -ENODEV; + goto out_pci; + } + if (!request_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs), + "ioc4_misc")) { + printk(KERN_WARNING + "%s: Unable to request IOC4 misc region " + "for pci_dev %s.\n", + __func__, pci_name(idd->idd_pdev)); + ret = -ENODEV; + goto out_pci; + } + idd->idd_misc_regs = ioremap(idd->idd_bar0, + sizeof(struct ioc4_misc_regs)); + if (!idd->idd_misc_regs) { + printk(KERN_WARNING + "%s: Unable to remap IOC4 misc region " + "for pci_dev %s.\n", + __func__, pci_name(idd->idd_pdev)); + ret = -ENODEV; + goto out_misc_region; + } + + /* Failsafe portion of per-IOC4 initialization */ + + /* Detect card variant */ + idd->idd_variant = ioc4_variant(idd); + printk(KERN_INFO "IOC4 %s: %s card detected.\n", pci_name(pdev), + idd->idd_variant == IOC4_VARIANT_IO9 ? "IO9" : + idd->idd_variant == IOC4_VARIANT_PCI_RT ? "PCI-RT" : + idd->idd_variant == IOC4_VARIANT_IO10 ? 
"IO10" : "unknown"); + + /* Initialize IOC4 */ + pci_read_config_dword(idd->idd_pdev, PCI_COMMAND, &pcmd); + pci_write_config_dword(idd->idd_pdev, PCI_COMMAND, + pcmd | PCI_COMMAND_PARITY | PCI_COMMAND_SERR); + + /* Determine PCI clock */ + ioc4_clock_calibrate(idd); + + /* Disable/clear all interrupts. Need to do this here lest + * one submodule request the shared IOC4 IRQ, but interrupt + * is generated by a different subdevice. + */ + /* Disable */ + writel(~0, &idd->idd_misc_regs->other_iec.raw); + writel(~0, &idd->idd_misc_regs->sio_iec); + /* Clear (i.e. acknowledge) */ + writel(~0, &idd->idd_misc_regs->other_ir.raw); + writel(~0, &idd->idd_misc_regs->sio_ir); + + /* Track PCI-device specific data */ + idd->idd_serial_data = NULL; + pci_set_drvdata(idd->idd_pdev, idd); + + mutex_lock(&ioc4_mutex); + list_add_tail(&idd->idd_list, &ioc4_devices); + + /* Add this IOC4 to all submodules */ + list_for_each_entry(is, &ioc4_submodules, is_list) { + if (is->is_probe && is->is_probe(idd)) { + printk(KERN_WARNING + "%s: IOC4 submodule 0x%s probe failed " + "for pci_dev %s.\n", + __func__, module_name(is->is_owner), + pci_name(idd->idd_pdev)); + } + } + mutex_unlock(&ioc4_mutex); + + return 0; + +out_misc_region: + release_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs)); +out_pci: + kfree(idd); +out_idd: + pci_disable_device(pdev); +out: + return ret; +} + +/* Removes a particular instance of an IOC4 card. */ +static void +ioc4_remove(struct pci_dev *pdev) +{ + struct ioc4_submodule *is; + struct ioc4_driver_data *idd; + + idd = pci_get_drvdata(pdev); + + /* Remove this IOC4 from all submodules */ + mutex_lock(&ioc4_mutex); + list_for_each_entry(is, &ioc4_submodules, is_list) { + if (is->is_remove && is->is_remove(idd)) { + printk(KERN_WARNING + "%s: IOC4 submodule 0x%s remove failed " + "for pci_dev %s.\n", + __func__, module_name(is->is_owner), + pci_name(idd->idd_pdev)); + } + } + mutex_unlock(&ioc4_mutex); + + /* Release resources */ + iounmap(idd->idd_misc_regs); + if (!idd->idd_bar0) { + printk(KERN_WARNING + "%s: Unable to get IOC4 misc mapping for pci_dev %s. " + "Device removal may be incomplete.\n", + __func__, pci_name(idd->idd_pdev)); + } + release_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs)); + + /* Disable IOC4 and relinquish */ + pci_disable_device(pdev); + + /* Remove and free driver data */ + mutex_lock(&ioc4_mutex); + list_del(&idd->idd_list); + mutex_unlock(&ioc4_mutex); + kfree(idd); +} + +static struct pci_device_id ioc4_id_table[] = { + {PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC4, PCI_ANY_ID, + PCI_ANY_ID, 0x0b4000, 0xFFFFFF}, + {0} +}; + +static struct pci_driver ioc4_driver = { + .name = "IOC4", + .id_table = ioc4_id_table, + .probe = ioc4_probe, + .remove = ioc4_remove, +}; + +MODULE_DEVICE_TABLE(pci, ioc4_id_table); + +/********************* + * Module management * + *********************/ + +/* Module load */ +static int __devinit +ioc4_init(void) +{ + return pci_register_driver(&ioc4_driver); +} + +/* Module unload */ +static void __devexit +ioc4_exit(void) +{ + pci_unregister_driver(&ioc4_driver); +} + +module_init(ioc4_init); +module_exit(ioc4_exit); + +MODULE_AUTHOR("Brent Casavant - Silicon Graphics, Inc. 
<bcasavan@sgi.com>"); +MODULE_DESCRIPTION("PCI driver master module for SGI IOC4 Base-IO Card"); +MODULE_LICENSE("GPL"); + +EXPORT_SYMBOL(ioc4_register_submodule); +EXPORT_SYMBOL(ioc4_unregister_submodule); diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c new file mode 100644 index 0000000..e4ff50b --- /dev/null +++ b/drivers/misc/kgdbts.c @@ -0,0 +1,1120 @@ +/* + * kgdbts is a test suite for kgdb for the sole purpose of validating + * that key pieces of the kgdb internals are working properly such as + * HW/SW breakpoints, single stepping, and NMI. + * + * Created by: Jason Wessel <jason.wessel@windriver.com> + * + * Copyright (c) 2008 Wind River Systems, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +/* Information about the kgdb test suite. + * ------------------------------------- + * + * The kgdb test suite is designed as a KGDB I/O module which + * simulates the communications that a debugger would have with kgdb. + * The tests are broken up in to a line by line and referenced here as + * a "get" which is kgdb requesting input and "put" which is kgdb + * sending a response. + * + * The kgdb suite can be invoked from the kernel command line + * arguments system or executed dynamically at run time. The test + * suite uses the variable "kgdbts" to obtain the information about + * which tests to run and to configure the verbosity level. The + * following are the various characters you can use with the kgdbts= + * line: + * + * When using the "kgdbts=" you only choose one of the following core + * test types: + * A = Run all the core tests silently + * V1 = Run all the core tests with minimal output + * V2 = Run all the core tests in debug mode + * + * You can also specify optional tests: + * N## = Go to sleep with interrupts of for ## seconds + * to test the HW NMI watchdog + * F## = Break at do_fork for ## iterations + * S## = Break at sys_open for ## iterations + * I## = Run the single step test ## iterations + * + * NOTE: that the do_fork and sys_open tests are mutually exclusive. + * + * To invoke the kgdb test suite from boot you use a kernel start + * argument as follows: + * kgdbts=V1 kgdbwait + * Or if you wanted to perform the NMI test for 6 seconds and do_fork + * test for 100 forks, you could use: + * kgdbts=V1N6F100 kgdbwait + * + * The test suite can also be invoked at run time with: + * echo kgdbts=V1N6F100 > /sys/module/kgdbts/parameters/kgdbts + * Or as another example: + * echo kgdbts=V2 > /sys/module/kgdbts/parameters/kgdbts + * + * When developing a new kgdb arch specific implementation or + * using these tests for the purpose of regression testing, + * several invocations are required. + * + * 1) Boot with the test suite enabled by using the kernel arguments + * "kgdbts=V1F100 kgdbwait" + * ## If kgdb arch specific implementation has NMI use + * "kgdbts=V1N6F100 + * + * 2) After the system boot run the basic test. 
+ * echo kgdbts=V1 > /sys/module/kgdbts/parameters/kgdbts
+ *
+ * 3) Run the concurrency tests. It is best to use n+1
+ * while loops where n is the number of cpus you have
+ * in your system. The example below uses only two
+ * loops.
+ *
+ * ## This tests break points on sys_open
+ * while [ 1 ] ; do find / > /dev/null 2>&1 ; done &
+ * while [ 1 ] ; do find / > /dev/null 2>&1 ; done &
+ * echo kgdbts=V1S10000 > /sys/module/kgdbts/parameters/kgdbts
+ * fg # and hit control-c
+ * fg # and hit control-c
+ * ## This tests break points on do_fork
+ * while [ 1 ] ; do date > /dev/null ; done &
+ * while [ 1 ] ; do date > /dev/null ; done &
+ * echo kgdbts=V1F1000 > /sys/module/kgdbts/parameters/kgdbts
+ * fg # and hit control-c
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/kgdb.h>
+#include <linux/ctype.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/nmi.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+
+#define v1printk(a...) do { \
+ if (verbose) \
+ printk(KERN_INFO a); \
+ } while (0)
+/* Note: the NMI watchdog is touched unconditionally here, not only
+ * when verbose > 1; the indentation reflects that.
+ */
+#define v2printk(a...) do { \
+ if (verbose > 1) \
+ printk(KERN_INFO a); \
+ touch_nmi_watchdog(); \
+ } while (0)
+#define eprintk(a...) do { \
+ printk(KERN_ERR a); \
+ WARN_ON(1); \
+ } while (0)
+#define MAX_CONFIG_LEN 40
+
+static struct kgdb_io kgdbts_io_ops;
+static char get_buf[BUFMAX];
+static int get_buf_cnt;
+static char put_buf[BUFMAX];
+static int put_buf_cnt;
+static char scratch_buf[BUFMAX];
+static int verbose;
+static int repeat_test;
+static int test_complete;
+static int send_ack;
+static int final_ack;
+static int force_hwbrks;
+static int hwbreaks_ok;
+static int hw_break_val;
+static int hw_break_val2;
+#if defined(CONFIG_ARM) || defined(CONFIG_MIPS) || defined(CONFIG_SPARC)
+static int arch_needs_sstep_emulation = 1;
+#else
+static int arch_needs_sstep_emulation;
+#endif
+static unsigned long sstep_addr;
+static int sstep_state;
+
+/* Storage for the registers, in GDB format. */
+static unsigned long kgdbts_gdb_regs[(NUMREGBYTES +
+ sizeof(unsigned long) - 1) /
+ sizeof(unsigned long)];
+static struct pt_regs kgdbts_regs;
+
+/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
+static int configured = -1;
+
+#ifdef CONFIG_KGDB_TESTS_BOOT_STRING
+static char config[MAX_CONFIG_LEN] = CONFIG_KGDB_TESTS_BOOT_STRING;
+#else
+static char config[MAX_CONFIG_LEN];
+#endif
+static struct kparam_string kps = {
+ .string = config,
+ .maxlen = MAX_CONFIG_LEN,
+};
+
+static void fill_get_buf(char *buf);
+
+struct test_struct {
+ char *get;
+ char *put;
+ void (*get_handler)(char *);
+ int (*put_handler)(char *, char *);
+};
+
+struct test_state {
+ char *name;
+ struct test_struct *tst;
+ int idx;
+ int (*run_test) (int, int);
+ int (*validate_put) (char *);
+};
+
+static struct test_state ts;
+
+static int kgdbts_unreg_thread(void *ptr)
+{
+ /* Wait until the tests are complete and then unregister the I/O
+ * driver.
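+ * Completion is signalled via final_ack, which the final test
+ * row raises through final_ack_set(), so it is safe to poll it
+ * here from a plain kthread.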
+ */ + while (!final_ack) + msleep_interruptible(1500); + + if (configured) + kgdb_unregister_io_module(&kgdbts_io_ops); + configured = 0; + + return 0; +} + +/* This is noinline such that it can be used for a single location to + * place a breakpoint + */ +static noinline void kgdbts_break_test(void) +{ + v2printk("kgdbts: breakpoint complete\n"); +} + +/* Lookup symbol info in the kernel */ +static unsigned long lookup_addr(char *arg) +{ + unsigned long addr = 0; + + if (!strcmp(arg, "kgdbts_break_test")) + addr = (unsigned long)kgdbts_break_test; + else if (!strcmp(arg, "sys_open")) + addr = (unsigned long)sys_open; + else if (!strcmp(arg, "do_fork")) + addr = (unsigned long)do_fork; + else if (!strcmp(arg, "hw_break_val")) + addr = (unsigned long)&hw_break_val; + return addr; +} + +static void break_helper(char *bp_type, char *arg, unsigned long vaddr) +{ + unsigned long addr; + + if (arg) + addr = lookup_addr(arg); + else + addr = vaddr; + + sprintf(scratch_buf, "%s,%lx,%i", bp_type, addr, + BREAK_INSTR_SIZE); + fill_get_buf(scratch_buf); +} + +static void sw_break(char *arg) +{ + break_helper(force_hwbrks ? "Z1" : "Z0", arg, 0); +} + +static void sw_rem_break(char *arg) +{ + break_helper(force_hwbrks ? "z1" : "z0", arg, 0); +} + +static void hw_break(char *arg) +{ + break_helper("Z1", arg, 0); +} + +static void hw_rem_break(char *arg) +{ + break_helper("z1", arg, 0); +} + +static void hw_write_break(char *arg) +{ + break_helper("Z2", arg, 0); +} + +static void hw_rem_write_break(char *arg) +{ + break_helper("z2", arg, 0); +} + +static void hw_access_break(char *arg) +{ + break_helper("Z4", arg, 0); +} + +static void hw_rem_access_break(char *arg) +{ + break_helper("z4", arg, 0); +} + +static void hw_break_val_access(void) +{ + hw_break_val2 = hw_break_val; +} + +static void hw_break_val_write(void) +{ + hw_break_val++; +} + +static int check_and_rewind_pc(char *put_str, char *arg) +{ + unsigned long addr = lookup_addr(arg); + int offset = 0; + + kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs, + NUMREGBYTES); + gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs); + v2printk("Stopped at IP: %lx\n", instruction_pointer(&kgdbts_regs)); +#ifdef CONFIG_X86 + /* On x86 a breakpoint stop requires it to be decremented */ + if (addr + 1 == kgdbts_regs.ip) + offset = -1; +#endif + if (strcmp(arg, "silent") && + instruction_pointer(&kgdbts_regs) + offset != addr) { + eprintk("kgdbts: BP mismatch %lx expected %lx\n", + instruction_pointer(&kgdbts_regs) + offset, addr); + return 1; + } +#ifdef CONFIG_X86 + /* On x86 adjust the instruction pointer if needed */ + kgdbts_regs.ip += offset; +#endif + return 0; +} + +static int check_single_step(char *put_str, char *arg) +{ + unsigned long addr = lookup_addr(arg); + /* + * From an arch indepent point of view the instruction pointer + * should be on a different instruction + */ + kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs, + NUMREGBYTES); + gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs); + v2printk("Singlestep stopped at IP: %lx\n", + instruction_pointer(&kgdbts_regs)); + if (instruction_pointer(&kgdbts_regs) == addr) { + eprintk("kgdbts: SingleStep failed at %lx\n", + instruction_pointer(&kgdbts_regs)); + return 1; + } + + return 0; +} + +static void write_regs(char *arg) +{ + memset(scratch_buf, 0, sizeof(scratch_buf)); + scratch_buf[0] = 'G'; + pt_regs_to_gdb_regs(kgdbts_gdb_regs, &kgdbts_regs); + kgdb_mem2hex((char *)kgdbts_gdb_regs, &scratch_buf[1], NUMREGBYTES); + fill_get_buf(scratch_buf); +} + +static void skip_back_repeat_test(char 
*arg) +{ + int go_back = simple_strtol(arg, NULL, 10); + + repeat_test--; + if (repeat_test <= 0) + ts.idx++; + else + ts.idx -= go_back; + fill_get_buf(ts.tst[ts.idx].get); +} + +static int got_break(char *put_str, char *arg) +{ + test_complete = 1; + if (!strncmp(put_str+1, arg, 2)) { + if (!strncmp(arg, "T0", 2)) + test_complete = 2; + return 0; + } + return 1; +} + +static void emul_sstep_get(char *arg) +{ + if (!arch_needs_sstep_emulation) { + fill_get_buf(arg); + return; + } + switch (sstep_state) { + case 0: + v2printk("Emulate single step\n"); + /* Start by looking at the current PC */ + fill_get_buf("g"); + break; + case 1: + /* set breakpoint */ + break_helper("Z0", NULL, sstep_addr); + break; + case 2: + /* Continue */ + fill_get_buf("c"); + break; + case 3: + /* Clear breakpoint */ + break_helper("z0", NULL, sstep_addr); + break; + default: + eprintk("kgdbts: ERROR failed sstep get emulation\n"); + } + sstep_state++; +} + +static int emul_sstep_put(char *put_str, char *arg) +{ + if (!arch_needs_sstep_emulation) { + if (!strncmp(put_str+1, arg, 2)) + return 0; + return 1; + } + switch (sstep_state) { + case 1: + /* validate the "g" packet to get the IP */ + kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs, + NUMREGBYTES); + gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs); + v2printk("Stopped at IP: %lx\n", + instruction_pointer(&kgdbts_regs)); + /* Want to stop at IP + break instruction size by default */ + sstep_addr = instruction_pointer(&kgdbts_regs) + + BREAK_INSTR_SIZE; + break; + case 2: + if (strncmp(put_str, "$OK", 3)) { + eprintk("kgdbts: failed sstep break set\n"); + return 1; + } + break; + case 3: + if (strncmp(put_str, "$T0", 3)) { + eprintk("kgdbts: failed continue sstep\n"); + return 1; + } + break; + case 4: + if (strncmp(put_str, "$OK", 3)) { + eprintk("kgdbts: failed sstep break unset\n"); + return 1; + } + /* Single step is complete so continue on! */ + sstep_state = 0; + return 0; + default: + eprintk("kgdbts: ERROR failed sstep put emulation\n"); + } + + /* Continue on the same test line until emulation is complete */ + ts.idx--; + return 0; +} + +static int final_ack_set(char *put_str, char *arg) +{ + if (strncmp(put_str+1, arg, 2)) + return 1; + final_ack = 1; + return 0; +} +/* + * Test to plant a breakpoint and detach, which should clear out the + * breakpoint and restore the original instruction. + */ +static struct test_struct plant_and_detach_test[] = { + { "?", "S0*" }, /* Clear break points */ + { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */ + { "D", "OK" }, /* Detach */ + { "", "" }, +}; + +/* + * Simple test to write in a software breakpoint, check for the + * correct stop location and detach. 
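+ *
+ * Each row below is one simulated exchange: the first column is fed
+ * to kgdb as debugger input (a "get", built into a $packet#checksum
+ * by fill_get_buf() or a get_handler), and the second column is the
+ * reply expected back (a "put", where a trailing '*' matches any
+ * remainder, so "T0*" accepts a stop reply such as "$T05...").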
+ */ +static struct test_struct sw_breakpoint_test[] = { + { "?", "S0*" }, /* Clear break points */ + { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */ + { "c", "T0*", }, /* Continue */ + { "g", "kgdbts_break_test", NULL, check_and_rewind_pc }, + { "write", "OK", write_regs }, + { "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */ + { "D", "OK" }, /* Detach */ + { "D", "OK", NULL, got_break }, /* On success we made it here */ + { "", "" }, +}; + +/* + * Test a known bad memory read location to test the fault handler and + * read bytes 1-8 at the bad address + */ +static struct test_struct bad_read_test[] = { + { "?", "S0*" }, /* Clear break points */ + { "m0,1", "E*" }, /* read 1 byte at address 1 */ + { "m0,2", "E*" }, /* read 1 byte at address 2 */ + { "m0,3", "E*" }, /* read 1 byte at address 3 */ + { "m0,4", "E*" }, /* read 1 byte at address 4 */ + { "m0,5", "E*" }, /* read 1 byte at address 5 */ + { "m0,6", "E*" }, /* read 1 byte at address 6 */ + { "m0,7", "E*" }, /* read 1 byte at address 7 */ + { "m0,8", "E*" }, /* read 1 byte at address 8 */ + { "D", "OK" }, /* Detach which removes all breakpoints and continues */ + { "", "" }, +}; + +/* + * Test for hitting a breakpoint, remove it, single step, plant it + * again and detach. + */ +static struct test_struct singlestep_break_test[] = { + { "?", "S0*" }, /* Clear break points */ + { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */ + { "c", "T0*", }, /* Continue */ + { "g", "kgdbts_break_test", NULL, check_and_rewind_pc }, + { "write", "OK", write_regs }, /* Write registers */ + { "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */ + { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */ + { "g", "kgdbts_break_test", NULL, check_single_step }, + { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */ + { "c", "T0*", }, /* Continue */ + { "g", "kgdbts_break_test", NULL, check_and_rewind_pc }, + { "write", "OK", write_regs }, /* Write registers */ + { "D", "OK" }, /* Remove all breakpoints and continues */ + { "", "" }, +}; + +/* + * Test for hitting a breakpoint at do_fork for what ever the number + * of iterations required by the variable repeat_test. + */ +static struct test_struct do_fork_test[] = { + { "?", "S0*" }, /* Clear break points */ + { "do_fork", "OK", sw_break, }, /* set sw breakpoint */ + { "c", "T0*", }, /* Continue */ + { "g", "do_fork", NULL, check_and_rewind_pc }, /* check location */ + { "write", "OK", write_regs }, /* Write registers */ + { "do_fork", "OK", sw_rem_break }, /*remove breakpoint */ + { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */ + { "g", "do_fork", NULL, check_single_step }, + { "do_fork", "OK", sw_break, }, /* set sw breakpoint */ + { "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */ + { "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */ + { "", "" }, +}; + +/* Test for hitting a breakpoint at sys_open for what ever the number + * of iterations required by the variable repeat_test. 
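+ * The "7" row below is not a GDB packet: its get_handler,
+ * skip_back_repeat_test(), rewinds the test index by seven entries
+ * until repeat_test reaches zero, which is how a flat table expresses
+ * N iterations of the breakpoint/continue cycle.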
+ */ +static struct test_struct sys_open_test[] = { + { "?", "S0*" }, /* Clear break points */ + { "sys_open", "OK", sw_break, }, /* set sw breakpoint */ + { "c", "T0*", }, /* Continue */ + { "g", "sys_open", NULL, check_and_rewind_pc }, /* check location */ + { "write", "OK", write_regs }, /* Write registers */ + { "sys_open", "OK", sw_rem_break }, /*remove breakpoint */ + { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */ + { "g", "sys_open", NULL, check_single_step }, + { "sys_open", "OK", sw_break, }, /* set sw breakpoint */ + { "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */ + { "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */ + { "", "" }, +}; + +/* + * Test for hitting a simple hw breakpoint + */ +static struct test_struct hw_breakpoint_test[] = { + { "?", "S0*" }, /* Clear break points */ + { "kgdbts_break_test", "OK", hw_break, }, /* set hw breakpoint */ + { "c", "T0*", }, /* Continue */ + { "g", "kgdbts_break_test", NULL, check_and_rewind_pc }, + { "write", "OK", write_regs }, + { "kgdbts_break_test", "OK", hw_rem_break }, /*remove breakpoint */ + { "D", "OK" }, /* Detach */ + { "D", "OK", NULL, got_break }, /* On success we made it here */ + { "", "" }, +}; + +/* + * Test for hitting a hw write breakpoint + */ +static struct test_struct hw_write_break_test[] = { + { "?", "S0*" }, /* Clear break points */ + { "hw_break_val", "OK", hw_write_break, }, /* set hw breakpoint */ + { "c", "T0*", NULL, got_break }, /* Continue */ + { "g", "silent", NULL, check_and_rewind_pc }, + { "write", "OK", write_regs }, + { "hw_break_val", "OK", hw_rem_write_break }, /*remove breakpoint */ + { "D", "OK" }, /* Detach */ + { "D", "OK", NULL, got_break }, /* On success we made it here */ + { "", "" }, +}; + +/* + * Test for hitting a hw access breakpoint + */ +static struct test_struct hw_access_break_test[] = { + { "?", "S0*" }, /* Clear break points */ + { "hw_break_val", "OK", hw_access_break, }, /* set hw breakpoint */ + { "c", "T0*", NULL, got_break }, /* Continue */ + { "g", "silent", NULL, check_and_rewind_pc }, + { "write", "OK", write_regs }, + { "hw_break_val", "OK", hw_rem_access_break }, /*remove breakpoint */ + { "D", "OK" }, /* Detach */ + { "D", "OK", NULL, got_break }, /* On success we made it here */ + { "", "" }, +}; + +/* + * Test for hitting a hw access breakpoint + */ +static struct test_struct nmi_sleep_test[] = { + { "?", "S0*" }, /* Clear break points */ + { "c", "T0*", NULL, got_break }, /* Continue */ + { "D", "OK" }, /* Detach */ + { "D", "OK", NULL, got_break }, /* On success we made it here */ + { "", "" }, +}; + +static void fill_get_buf(char *buf) +{ + unsigned char checksum = 0; + int count = 0; + char ch; + + strcpy(get_buf, "$"); + strcat(get_buf, buf); + while ((ch = buf[count])) { + checksum += ch; + count++; + } + strcat(get_buf, "#"); + get_buf[count + 2] = hex_asc_hi(checksum); + get_buf[count + 3] = hex_asc_lo(checksum); + get_buf[count + 4] = '\0'; + v2printk("get%i: %s\n", ts.idx, get_buf); +} + +static int validate_simple_test(char *put_str) +{ + char *chk_str; + + if (ts.tst[ts.idx].put_handler) + return ts.tst[ts.idx].put_handler(put_str, + ts.tst[ts.idx].put); + + chk_str = ts.tst[ts.idx].put; + if (*put_str == '$') + put_str++; + + while (*chk_str != '\0' && *put_str != '\0') { + /* If someone does a * to match the rest of the string, allow + * it, or stop if the recieved string is complete. 
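+ * For example, an expected "T0*" accepts "$T05thread" or any other
+ * packet beginning with "T0".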
+ */ + if (*put_str == '#' || *chk_str == '*') + return 0; + if (*put_str != *chk_str) + return 1; + + chk_str++; + put_str++; + } + if (*chk_str == '\0' && (*put_str == '\0' || *put_str == '#')) + return 0; + + return 1; +} + +static int run_simple_test(int is_get_char, int chr) +{ + int ret = 0; + if (is_get_char) { + /* Send an ACK on the get if a prior put completed and set the + * send ack variable + */ + if (send_ack) { + send_ack = 0; + return '+'; + } + /* On the first get char, fill the transmit buffer and then + * take from the get_string. + */ + if (get_buf_cnt == 0) { + if (ts.tst[ts.idx].get_handler) + ts.tst[ts.idx].get_handler(ts.tst[ts.idx].get); + else + fill_get_buf(ts.tst[ts.idx].get); + } + + if (get_buf[get_buf_cnt] == '\0') { + eprintk("kgdbts: ERROR GET: EOB on '%s' at %i\n", + ts.name, ts.idx); + get_buf_cnt = 0; + fill_get_buf("D"); + } + ret = get_buf[get_buf_cnt]; + get_buf_cnt++; + return ret; + } + + /* This callback is a put char which is when kgdb sends data to + * this I/O module. + */ + if (ts.tst[ts.idx].get[0] == '\0' && + ts.tst[ts.idx].put[0] == '\0') { + eprintk("kgdbts: ERROR: beyond end of test on" + " '%s' line %i\n", ts.name, ts.idx); + return 0; + } + + if (put_buf_cnt >= BUFMAX) { + eprintk("kgdbts: ERROR: put buffer overflow on" + " '%s' line %i\n", ts.name, ts.idx); + put_buf_cnt = 0; + return 0; + } + /* Ignore everything until the first valid packet start '$' */ + if (put_buf_cnt == 0 && chr != '$') + return 0; + + put_buf[put_buf_cnt] = chr; + put_buf_cnt++; + + /* End of packet == #XX so look for the '#' */ + if (put_buf_cnt > 3 && put_buf[put_buf_cnt - 3] == '#') { + put_buf[put_buf_cnt] = '\0'; + v2printk("put%i: %s\n", ts.idx, put_buf); + /* Trigger check here */ + if (ts.validate_put && ts.validate_put(put_buf)) { + eprintk("kgdbts: ERROR PUT: end of test " + "buffer on '%s' line %i expected %s got %s\n", + ts.name, ts.idx, ts.tst[ts.idx].put, put_buf); + } + ts.idx++; + put_buf_cnt = 0; + get_buf_cnt = 0; + send_ack = 1; + } + return 0; +} + +static void init_simple_test(void) +{ + memset(&ts, 0, sizeof(ts)); + ts.run_test = run_simple_test; + ts.validate_put = validate_simple_test; +} + +static void run_plant_and_detach_test(int is_early) +{ + char before[BREAK_INSTR_SIZE]; + char after[BREAK_INSTR_SIZE]; + + probe_kernel_read(before, (char *)kgdbts_break_test, + BREAK_INSTR_SIZE); + init_simple_test(); + ts.tst = plant_and_detach_test; + ts.name = "plant_and_detach_test"; + /* Activate test with initial breakpoint */ + if (!is_early) + kgdb_breakpoint(); + probe_kernel_read(after, (char *)kgdbts_break_test, + BREAK_INSTR_SIZE); + if (memcmp(before, after, BREAK_INSTR_SIZE)) { + printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n"); + panic("kgdb memory corruption"); + } + + /* complete the detach test */ + if (!is_early) + kgdbts_break_test(); +} + +static void run_breakpoint_test(int is_hw_breakpoint) +{ + test_complete = 0; + init_simple_test(); + if (is_hw_breakpoint) { + ts.tst = hw_breakpoint_test; + ts.name = "hw_breakpoint_test"; + } else { + ts.tst = sw_breakpoint_test; + ts.name = "sw_breakpoint_test"; + } + /* Activate test with initial breakpoint */ + kgdb_breakpoint(); + /* run code with the break point in it */ + kgdbts_break_test(); + kgdb_breakpoint(); + + if (test_complete) + return; + + eprintk("kgdbts: ERROR %s test failed\n", ts.name); + if (is_hw_breakpoint) + hwbreaks_ok = 0; +} + +static void run_hw_break_test(int is_write_test) +{ + test_complete = 0; + init_simple_test(); + if (is_write_test) { + ts.tst = 
hw_write_break_test; + ts.name = "hw_write_break_test"; + } else { + ts.tst = hw_access_break_test; + ts.name = "hw_access_break_test"; + } + /* Activate test with initial breakpoint */ + kgdb_breakpoint(); + hw_break_val_access(); + if (is_write_test) { + if (test_complete == 2) { + eprintk("kgdbts: ERROR %s broke on access\n", + ts.name); + hwbreaks_ok = 0; + } + hw_break_val_write(); + } + kgdb_breakpoint(); + + if (test_complete == 1) + return; + + eprintk("kgdbts: ERROR %s test failed\n", ts.name); + hwbreaks_ok = 0; +} + +static void run_nmi_sleep_test(int nmi_sleep) +{ + unsigned long flags; + + init_simple_test(); + ts.tst = nmi_sleep_test; + ts.name = "nmi_sleep_test"; + /* Activate test with initial breakpoint */ + kgdb_breakpoint(); + local_irq_save(flags); + mdelay(nmi_sleep*1000); + touch_nmi_watchdog(); + local_irq_restore(flags); + if (test_complete != 2) + eprintk("kgdbts: ERROR nmi_test did not hit nmi\n"); + kgdb_breakpoint(); + if (test_complete == 1) + return; + + eprintk("kgdbts: ERROR %s test failed\n", ts.name); +} + +static void run_bad_read_test(void) +{ + init_simple_test(); + ts.tst = bad_read_test; + ts.name = "bad_read_test"; + /* Activate test with initial breakpoint */ + kgdb_breakpoint(); +} + +static void run_do_fork_test(void) +{ + init_simple_test(); + ts.tst = do_fork_test; + ts.name = "do_fork_test"; + /* Activate test with initial breakpoint */ + kgdb_breakpoint(); +} + +static void run_sys_open_test(void) +{ + init_simple_test(); + ts.tst = sys_open_test; + ts.name = "sys_open_test"; + /* Activate test with initial breakpoint */ + kgdb_breakpoint(); +} + +static void run_singlestep_break_test(void) +{ + init_simple_test(); + ts.tst = singlestep_break_test; + ts.name = "singlestep_breakpoint_test"; + /* Activate test with initial breakpoint */ + kgdb_breakpoint(); + kgdbts_break_test(); + kgdbts_break_test(); +} + +static void kgdbts_run_tests(void) +{ + char *ptr; + int fork_test = 0; + int do_sys_open_test = 0; + int sstep_test = 1000; + int nmi_sleep = 0; + int i; + + ptr = strstr(config, "F"); + if (ptr) + fork_test = simple_strtol(ptr + 1, NULL, 10); + ptr = strstr(config, "S"); + if (ptr) + do_sys_open_test = simple_strtol(ptr + 1, NULL, 10); + ptr = strstr(config, "N"); + if (ptr) + nmi_sleep = simple_strtol(ptr+1, NULL, 10); + ptr = strstr(config, "I"); + if (ptr) + sstep_test = simple_strtol(ptr+1, NULL, 10); + + /* required internal KGDB tests */ + v1printk("kgdbts:RUN plant and detach test\n"); + run_plant_and_detach_test(0); + v1printk("kgdbts:RUN sw breakpoint test\n"); + run_breakpoint_test(0); + v1printk("kgdbts:RUN bad memory access test\n"); + run_bad_read_test(); + v1printk("kgdbts:RUN singlestep test %i iterations\n", sstep_test); + for (i = 0; i < sstep_test; i++) { + run_singlestep_break_test(); + if (i % 100 == 0) + v1printk("kgdbts:RUN singlestep [%i/%i]\n", + i, sstep_test); + } + + /* ===Optional tests=== */ + + /* All HW break point tests */ + if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) { + hwbreaks_ok = 1; + v1printk("kgdbts:RUN hw breakpoint test\n"); + run_breakpoint_test(1); + v1printk("kgdbts:RUN hw write breakpoint test\n"); + run_hw_break_test(1); + v1printk("kgdbts:RUN access write breakpoint test\n"); + run_hw_break_test(0); + } + + if (nmi_sleep) { + v1printk("kgdbts:RUN NMI sleep %i seconds test\n", nmi_sleep); + run_nmi_sleep_test(nmi_sleep); + } + +#ifdef CONFIG_DEBUG_RODATA + /* Until there is an api to write to read-only text segments, use + * HW breakpoints for the remainder of any tests, else print a + * 
failure message if hw breakpoints do not work. + */ + if (!(arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT && hwbreaks_ok)) { + eprintk("kgdbts: HW breakpoints do not work," + "skipping remaining tests\n"); + return; + } + force_hwbrks = 1; +#endif /* CONFIG_DEBUG_RODATA */ + + /* If the do_fork test is run it will be the last test that is + * executed because a kernel thread will be spawned at the very + * end to unregister the debug hooks. + */ + if (fork_test) { + repeat_test = fork_test; + printk(KERN_INFO "kgdbts:RUN do_fork for %i breakpoints\n", + repeat_test); + kthread_run(kgdbts_unreg_thread, NULL, "kgdbts_unreg"); + run_do_fork_test(); + return; + } + + /* If the sys_open test is run it will be the last test that is + * executed because a kernel thread will be spawned at the very + * end to unregister the debug hooks. + */ + if (do_sys_open_test) { + repeat_test = do_sys_open_test; + printk(KERN_INFO "kgdbts:RUN sys_open for %i breakpoints\n", + repeat_test); + kthread_run(kgdbts_unreg_thread, NULL, "kgdbts_unreg"); + run_sys_open_test(); + return; + } + /* Shutdown and unregister */ + kgdb_unregister_io_module(&kgdbts_io_ops); + configured = 0; +} + +static int kgdbts_option_setup(char *opt) +{ + if (strlen(opt) > MAX_CONFIG_LEN) { + printk(KERN_ERR "kgdbts: config string too long\n"); + return -ENOSPC; + } + strcpy(config, opt); + + verbose = 0; + if (strstr(config, "V1")) + verbose = 1; + if (strstr(config, "V2")) + verbose = 2; + + return 0; +} + +__setup("kgdbts=", kgdbts_option_setup); + +static int configure_kgdbts(void) +{ + int err = 0; + + if (!strlen(config) || isspace(config[0])) + goto noconfig; + err = kgdbts_option_setup(config); + if (err) + goto noconfig; + + final_ack = 0; + run_plant_and_detach_test(1); + + err = kgdb_register_io_module(&kgdbts_io_ops); + if (err) { + configured = 0; + return err; + } + configured = 1; + kgdbts_run_tests(); + + return err; + +noconfig: + config[0] = 0; + configured = 0; + + return err; +} + +static int __init init_kgdbts(void) +{ + /* Already configured? */ + if (configured == 1) + return 0; + + return configure_kgdbts(); +} + +static void cleanup_kgdbts(void) +{ + if (configured == 1) + kgdb_unregister_io_module(&kgdbts_io_ops); +} + +static int kgdbts_get_char(void) +{ + int val = 0; + + if (ts.run_test) + val = ts.run_test(1, 0); + + return val; +} + +static void kgdbts_put_char(u8 chr) +{ + if (ts.run_test) + ts.run_test(0, chr); +} + +static int param_set_kgdbts_var(const char *kmessage, struct kernel_param *kp) +{ + int len = strlen(kmessage); + + if (len >= MAX_CONFIG_LEN) { + printk(KERN_ERR "kgdbts: config string too long\n"); + return -ENOSPC; + } + + /* Only copy in the string if the init function has not run yet */ + if (configured < 0) { + strcpy(config, kmessage); + return 0; + } + + if (kgdb_connected) { + printk(KERN_ERR + "kgdbts: Cannot reconfigure while KGDB is connected.\n"); + + return -EBUSY; + } + + strcpy(config, kmessage); + /* Chop out \n char as a result of echo */ + if (config[len - 1] == '\n') + config[len - 1] = '\0'; + + if (configured == 1) + cleanup_kgdbts(); + + /* Go and configure with the new params. 
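+	 * A hypothetical example of such a runtime reconfiguration
+	 * (values invented for illustration):
+	 *
+	 *	echo V1F100 > /sys/module/kgdbts/parameters/kgdbts
+	 *
+	 * selects verbose level 1 and runs the do_fork test for 100
+	 * breakpoints, following the syntax in MODULE_PARM_DESC below.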
*/ + return configure_kgdbts(); +} + +static void kgdbts_pre_exp_handler(void) +{ + /* Increment the module count when the debugger is active */ + if (!kgdb_connected) + try_module_get(THIS_MODULE); +} + +static void kgdbts_post_exp_handler(void) +{ + /* decrement the module count when the debugger detaches */ + if (!kgdb_connected) + module_put(THIS_MODULE); +} + +static struct kgdb_io kgdbts_io_ops = { + .name = "kgdbts", + .read_char = kgdbts_get_char, + .write_char = kgdbts_put_char, + .pre_exception = kgdbts_pre_exp_handler, + .post_exception = kgdbts_post_exp_handler, +}; + +module_init(init_kgdbts); +module_exit(cleanup_kgdbts); +module_param_call(kgdbts, param_set_kgdbts_var, param_get_string, &kps, 0644); +MODULE_PARM_DESC(kgdbts, "<A|V1|V2>[F#|S#][N#]"); +MODULE_DESCRIPTION("KGDB Test Suite"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Wind River Systems, Inc."); + diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c new file mode 100644 index 0000000..1bfe5d1 --- /dev/null +++ b/drivers/misc/lkdtm.c @@ -0,0 +1,345 @@ +/* + * Kprobe module for testing crash dumps + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2006 + * + * Author: Ankita Garg <ankita@in.ibm.com> + * + * This module induces system failures at predefined crashpoints to + * evaluate the reliability of crash dumps obtained using different dumping + * solutions. + * + * It is adapted from the Linux Kernel Dump Test Tool by + * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net> + * + * Usage : insmod lkdtm.ko [recur_count={>0}] cpoint_name=<> cpoint_type=<> + * [cpoint_count={>0}] + * + * recur_count : Recursion level for the stack overflow test. Default is 10. + * + * cpoint_name : Crash point where the kernel is to be crashed. It can be + * one of INT_HARDWARE_ENTRY, INT_HW_IRQ_EN, INT_TASKLET_ENTRY, + * FS_DEVRW, MEM_SWAPOUT, TIMERADD, SCSI_DISPATCH_CMD, + * IDE_CORE_CP + * + * cpoint_type : Indicates the action to be taken on hitting the crash point. + * It can be one of PANIC, BUG, EXCEPTION, LOOP, OVERFLOW + * + * cpoint_count : Indicates the number of times the crash point is to be hit + * to trigger an action. The default is 10. 
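+ *
+ * For example, a hypothetical invocation such as
+ *
+ *	insmod lkdtm.ko cpoint_name=INT_HARDWARE_ENTRY cpoint_type=BUG \
+ *		cpoint_count=5
+ *
+ * arms the interrupt-entry crash point so that its fifth hit calls BUG().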
+ */ + +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/module.h> +#include <linux/buffer_head.h> +#include <linux/kprobes.h> +#include <linux/list.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/hrtimer.h> +#include <scsi/scsi_cmnd.h> + +#ifdef CONFIG_IDE +#include <linux/ide.h> +#endif + +#define NUM_CPOINTS 8 +#define NUM_CPOINT_TYPES 5 +#define DEFAULT_COUNT 10 +#define REC_NUM_DEFAULT 10 + +enum cname { + INVALID, + INT_HARDWARE_ENTRY, + INT_HW_IRQ_EN, + INT_TASKLET_ENTRY, + FS_DEVRW, + MEM_SWAPOUT, + TIMERADD, + SCSI_DISPATCH_CMD, + IDE_CORE_CP +}; + +enum ctype { + NONE, + PANIC, + BUG, + EXCEPTION, + LOOP, + OVERFLOW +}; + +static char* cp_name[] = { + "INT_HARDWARE_ENTRY", + "INT_HW_IRQ_EN", + "INT_TASKLET_ENTRY", + "FS_DEVRW", + "MEM_SWAPOUT", + "TIMERADD", + "SCSI_DISPATCH_CMD", + "IDE_CORE_CP" +}; + +static char* cp_type[] = { + "PANIC", + "BUG", + "EXCEPTION", + "LOOP", + "OVERFLOW" +}; + +static struct jprobe lkdtm; + +static int lkdtm_parse_commandline(void); +static void lkdtm_handler(void); + +static char* cpoint_name; +static char* cpoint_type; +static int cpoint_count = DEFAULT_COUNT; +static int recur_count = REC_NUM_DEFAULT; + +static enum cname cpoint = INVALID; +static enum ctype cptype = NONE; +static int count = DEFAULT_COUNT; + +module_param(recur_count, int, 0644); +MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\ + "default is 10"); +module_param(cpoint_name, charp, 0644); +MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed"); +module_param(cpoint_type, charp, 0644); +MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\ + "hitting the crash point"); +module_param(cpoint_count, int, 0644); +MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\ + "crash point is to be hit to trigger action"); + +static unsigned int jp_do_irq(unsigned int irq) +{ + lkdtm_handler(); + jprobe_return(); + return 0; +} + +static irqreturn_t jp_handle_irq_event(unsigned int irq, + struct irqaction *action) +{ + lkdtm_handler(); + jprobe_return(); + return 0; +} + +static void jp_tasklet_action(struct softirq_action *a) +{ + lkdtm_handler(); + jprobe_return(); +} + +static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[]) +{ + lkdtm_handler(); + jprobe_return(); +} + +struct scan_control; + +static unsigned long jp_shrink_inactive_list(unsigned long max_scan, + struct zone *zone, + struct scan_control *sc) +{ + lkdtm_handler(); + jprobe_return(); + return 0; +} + +static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim, + const enum hrtimer_mode mode) +{ + lkdtm_handler(); + jprobe_return(); + return 0; +} + +static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd) +{ + lkdtm_handler(); + jprobe_return(); + return 0; +} + +#ifdef CONFIG_IDE +int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file, + struct block_device *bdev, unsigned int cmd, + unsigned long arg) +{ + lkdtm_handler(); + jprobe_return(); + return 0; +} +#endif + +static int lkdtm_parse_commandline(void) +{ + int i; + + if (cpoint_name == NULL || cpoint_type == NULL || + cpoint_count < 1 || recur_count < 1) + return -EINVAL; + + for (i = 0; i < NUM_CPOINTS; ++i) { + if (!strcmp(cpoint_name, cp_name[i])) { + cpoint = i + 1; + break; + } + } + + for (i = 0; i < NUM_CPOINT_TYPES; ++i) { + if (!strcmp(cpoint_type, cp_type[i])) { + cptype = i + 1; + break; + } + } + + if (cpoint == INVALID || cptype == NONE) + return -EINVAL; + + count = cpoint_count; + + return 
0; +} + +static int recursive_loop(int a) +{ + char buf[1024]; + + memset(buf,0xFF,1024); + recur_count--; + if (!recur_count) + return 0; + else + return recursive_loop(a); +} + +void lkdtm_handler(void) +{ + printk(KERN_INFO "lkdtm : Crash point %s of type %s hit\n", + cpoint_name, cpoint_type); + --count; + + if (count == 0) { + switch (cptype) { + case NONE: + break; + case PANIC: + printk(KERN_INFO "lkdtm : PANIC\n"); + panic("dumptest"); + break; + case BUG: + printk(KERN_INFO "lkdtm : BUG\n"); + BUG(); + break; + case EXCEPTION: + printk(KERN_INFO "lkdtm : EXCEPTION\n"); + *((int *) 0) = 0; + break; + case LOOP: + printk(KERN_INFO "lkdtm : LOOP\n"); + for (;;); + break; + case OVERFLOW: + printk(KERN_INFO "lkdtm : OVERFLOW\n"); + (void) recursive_loop(0); + break; + default: + break; + } + count = cpoint_count; + } +} + +static int __init lkdtm_module_init(void) +{ + int ret; + + if (lkdtm_parse_commandline() == -EINVAL) { + printk(KERN_INFO "lkdtm : Invalid command\n"); + return -EINVAL; + } + + switch (cpoint) { + case INT_HARDWARE_ENTRY: + lkdtm.kp.symbol_name = "__do_IRQ"; + lkdtm.entry = (kprobe_opcode_t*) jp_do_irq; + break; + case INT_HW_IRQ_EN: + lkdtm.kp.symbol_name = "handle_IRQ_event"; + lkdtm.entry = (kprobe_opcode_t*) jp_handle_irq_event; + break; + case INT_TASKLET_ENTRY: + lkdtm.kp.symbol_name = "tasklet_action"; + lkdtm.entry = (kprobe_opcode_t*) jp_tasklet_action; + break; + case FS_DEVRW: + lkdtm.kp.symbol_name = "ll_rw_block"; + lkdtm.entry = (kprobe_opcode_t*) jp_ll_rw_block; + break; + case MEM_SWAPOUT: + lkdtm.kp.symbol_name = "shrink_inactive_list"; + lkdtm.entry = (kprobe_opcode_t*) jp_shrink_inactive_list; + break; + case TIMERADD: + lkdtm.kp.symbol_name = "hrtimer_start"; + lkdtm.entry = (kprobe_opcode_t*) jp_hrtimer_start; + break; + case SCSI_DISPATCH_CMD: + lkdtm.kp.symbol_name = "scsi_dispatch_cmd"; + lkdtm.entry = (kprobe_opcode_t*) jp_scsi_dispatch_cmd; + break; + case IDE_CORE_CP: +#ifdef CONFIG_IDE + lkdtm.kp.symbol_name = "generic_ide_ioctl"; + lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl; +#else + printk(KERN_INFO "lkdtm : Crash point not available\n"); +#endif + break; + default: + printk(KERN_INFO "lkdtm : Invalid Crash Point\n"); + break; + } + + if ((ret = register_jprobe(&lkdtm)) < 0) { + printk(KERN_INFO "lkdtm : Couldn't register jprobe\n"); + return ret; + } + + printk(KERN_INFO "lkdtm : Crash point %s of type %s registered\n", + cpoint_name, cpoint_type); + return 0; +} + +static void __exit lkdtm_module_exit(void) +{ + unregister_jprobe(&lkdtm); + printk(KERN_INFO "lkdtm : Crash point unregistered\n"); +} + +module_init(lkdtm_module_init); +module_exit(lkdtm_module_exit); + +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/msi-laptop.c b/drivers/misc/msi-laptop.c new file mode 100644 index 0000000..759763d --- /dev/null +++ b/drivers/misc/msi-laptop.c @@ -0,0 +1,437 @@ +/*-*-linux-c-*-*/ + +/* + Copyright (C) 2006 Lennart Poettering <mzxreary (at) 0pointer (dot) de> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + */ + +/* + * msi-laptop.c - MSI S270 laptop support. This laptop is sold under + * various brands, including "Cytron/TCM/Medion/Tchibo MD96100". + * + * Driver also supports S271, S420 models. + * + * This driver exports a few files in /sys/devices/platform/msi-laptop-pf/: + * + * lcd_level - Screen brightness: contains a single integer in the + * range 0..8. (rw) + * + * auto_brightness - Enable automatic brightness control: contains + * either 0 or 1. If set to 1 the hardware adjusts the screen + * brightness automatically when the power cord is + * plugged/unplugged. (rw) + * + * wlan - WLAN subsystem enabled: contains either 0 or 1. (ro) + * + * bluetooth - Bluetooth subsystem enabled: contains either 0 or 1 + * Please note that this file is constantly 0 if no Bluetooth + * hardware is available. (ro) + * + * In addition to these platform device attributes the driver + * registers itself in the Linux backlight control subsystem and is + * available to userspace under /sys/class/backlight/msi-laptop-bl/. + * + * This driver might work on other laptops produced by MSI. If you + * want to try it you can pass force=1 as argument to the module which + * will force it to load even when the DMI data doesn't identify the + * laptop as MSI S270. YMMV. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/acpi.h> +#include <linux/dmi.h> +#include <linux/backlight.h> +#include <linux/platform_device.h> + +#define MSI_DRIVER_VERSION "0.5" + +#define MSI_LCD_LEVEL_MAX 9 + +#define MSI_EC_COMMAND_WIRELESS 0x10 +#define MSI_EC_COMMAND_LCD_LEVEL 0x11 + +static int force; +module_param(force, bool, 0); +MODULE_PARM_DESC(force, "Force driver load, ignore DMI data"); + +static int auto_brightness; +module_param(auto_brightness, int, 0); +MODULE_PARM_DESC(auto_brightness, "Enable automatic brightness control (0: disabled; 1: enabled; 2: don't touch)"); + +/* Hardware access */ + +static int set_lcd_level(int level) +{ + u8 buf[2]; + + if (level < 0 || level >= MSI_LCD_LEVEL_MAX) + return -EINVAL; + + buf[0] = 0x80; + buf[1] = (u8) (level*31); + + return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, buf, sizeof(buf), NULL, 0, 1); +} + +static int get_lcd_level(void) +{ + u8 wdata = 0, rdata; + int result; + + result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1, 1); + if (result < 0) + return result; + + return (int) rdata / 31; +} + +static int get_auto_brightness(void) +{ + u8 wdata = 4, rdata; + int result; + + result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1, 1); + if (result < 0) + return result; + + return !!(rdata & 8); +} + +static int set_auto_brightness(int enable) +{ + u8 wdata[2], rdata; + int result; + + wdata[0] = 4; + + result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 1, &rdata, 1, 1); + if (result < 0) + return result; + + wdata[0] = 0x84; + wdata[1] = (rdata & 0xF7) | (enable ? 
8 : 0); + + return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 2, NULL, 0, 1); +} + +static int get_wireless_state(int *wlan, int *bluetooth) +{ + u8 wdata = 0, rdata; + int result; + + result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1, 1); + if (result < 0) + return -1; + + if (wlan) + *wlan = !!(rdata & 8); + + if (bluetooth) + *bluetooth = !!(rdata & 128); + + return 0; +} + +/* Backlight device stuff */ + +static int bl_get_brightness(struct backlight_device *b) +{ + return get_lcd_level(); +} + + +static int bl_update_status(struct backlight_device *b) +{ + return set_lcd_level(b->props.brightness); +} + +static struct backlight_ops msibl_ops = { + .get_brightness = bl_get_brightness, + .update_status = bl_update_status, +}; + +static struct backlight_device *msibl_device; + +/* Platform device */ + +static ssize_t show_wlan(struct device *dev, + struct device_attribute *attr, char *buf) +{ + + int ret, enabled; + + ret = get_wireless_state(&enabled, NULL); + if (ret < 0) + return ret; + + return sprintf(buf, "%i\n", enabled); +} + +static ssize_t show_bluetooth(struct device *dev, + struct device_attribute *attr, char *buf) +{ + + int ret, enabled; + + ret = get_wireless_state(NULL, &enabled); + if (ret < 0) + return ret; + + return sprintf(buf, "%i\n", enabled); +} + +static ssize_t show_lcd_level(struct device *dev, + struct device_attribute *attr, char *buf) +{ + + int ret; + + ret = get_lcd_level(); + if (ret < 0) + return ret; + + return sprintf(buf, "%i\n", ret); +} + +static ssize_t store_lcd_level(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + + int level, ret; + + if (sscanf(buf, "%i", &level) != 1 || (level < 0 || level >= MSI_LCD_LEVEL_MAX)) + return -EINVAL; + + ret = set_lcd_level(level); + if (ret < 0) + return ret; + + return count; +} + +static ssize_t show_auto_brightness(struct device *dev, + struct device_attribute *attr, char *buf) +{ + + int ret; + + ret = get_auto_brightness(); + if (ret < 0) + return ret; + + return sprintf(buf, "%i\n", ret); +} + +static ssize_t store_auto_brightness(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + + int enable, ret; + + if (sscanf(buf, "%i", &enable) != 1 || (enable != (enable & 1))) + return -EINVAL; + + ret = set_auto_brightness(enable); + if (ret < 0) + return ret; + + return count; +} + +static DEVICE_ATTR(lcd_level, 0644, show_lcd_level, store_lcd_level); +static DEVICE_ATTR(auto_brightness, 0644, show_auto_brightness, store_auto_brightness); +static DEVICE_ATTR(bluetooth, 0444, show_bluetooth, NULL); +static DEVICE_ATTR(wlan, 0444, show_wlan, NULL); + +static struct attribute *msipf_attributes[] = { + &dev_attr_lcd_level.attr, + &dev_attr_auto_brightness.attr, + &dev_attr_bluetooth.attr, + &dev_attr_wlan.attr, + NULL +}; + +static struct attribute_group msipf_attribute_group = { + .attrs = msipf_attributes +}; + +static struct platform_driver msipf_driver = { + .driver = { + .name = "msi-laptop-pf", + .owner = THIS_MODULE, + } +}; + +static struct platform_device *msipf_device; + +/* Initialization */ + +static int dmi_check_cb(const struct dmi_system_id *id) +{ + printk("msi-laptop: Identified laptop model '%s'.\n", id->ident); + return 0; +} + +static struct dmi_system_id __initdata msi_dmi_table[] = { + { + .ident = "MSI S270", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD"), + DMI_MATCH(DMI_PRODUCT_NAME, "MS-1013"), + DMI_MATCH(DMI_PRODUCT_VERSION, "0131"), + DMI_MATCH(DMI_CHASSIS_VENDOR, 
"MICRO-STAR INT'L CO.,LTD") + }, + .callback = dmi_check_cb + }, + { + .ident = "MSI S271", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"), + DMI_MATCH(DMI_PRODUCT_NAME, "MS-1058"), + DMI_MATCH(DMI_PRODUCT_VERSION, "0581"), + DMI_MATCH(DMI_BOARD_NAME, "MS-1058") + }, + .callback = dmi_check_cb + }, + { + .ident = "MSI S420", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"), + DMI_MATCH(DMI_PRODUCT_NAME, "MS-1412"), + DMI_MATCH(DMI_BOARD_VENDOR, "MSI"), + DMI_MATCH(DMI_BOARD_NAME, "MS-1412") + }, + .callback = dmi_check_cb + }, + { + .ident = "Medion MD96100", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "NOTEBOOK"), + DMI_MATCH(DMI_PRODUCT_NAME, "SAM2000"), + DMI_MATCH(DMI_PRODUCT_VERSION, "0131"), + DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR INT'L CO.,LTD") + }, + .callback = dmi_check_cb + }, + { } +}; + +static int __init msi_init(void) +{ + int ret; + + if (acpi_disabled) + return -ENODEV; + + if (!force && !dmi_check_system(msi_dmi_table)) + return -ENODEV; + + if (auto_brightness < 0 || auto_brightness > 2) + return -EINVAL; + + /* Register backlight stuff */ + + if (acpi_video_backlight_support()) { + printk(KERN_INFO "MSI: Brightness ignored, must be controlled " + "by ACPI video driver\n"); + } else { + msibl_device = backlight_device_register("msi-laptop-bl", NULL, + NULL, &msibl_ops); + if (IS_ERR(msibl_device)) + return PTR_ERR(msibl_device); + msibl_device->props.max_brightness = MSI_LCD_LEVEL_MAX-1; + } + + ret = platform_driver_register(&msipf_driver); + if (ret) + goto fail_backlight; + + /* Register platform stuff */ + + msipf_device = platform_device_alloc("msi-laptop-pf", -1); + if (!msipf_device) { + ret = -ENOMEM; + goto fail_platform_driver; + } + + ret = platform_device_add(msipf_device); + if (ret) + goto fail_platform_device1; + + ret = sysfs_create_group(&msipf_device->dev.kobj, &msipf_attribute_group); + if (ret) + goto fail_platform_device2; + + /* Disable automatic brightness control by default because + * this module was probably loaded to do brightness control in + * software. 
 */

+	if (auto_brightness != 2)
+		set_auto_brightness(auto_brightness);
+
+	printk(KERN_INFO "msi-laptop: driver "MSI_DRIVER_VERSION" successfully loaded.\n");
+
+	return 0;
+
+fail_platform_device2:
+
+	platform_device_del(msipf_device);
+
+fail_platform_device1:
+
+	platform_device_put(msipf_device);
+
+fail_platform_driver:
+
+	platform_driver_unregister(&msipf_driver);
+
+fail_backlight:
+
+	backlight_device_unregister(msibl_device);
+
+	return ret;
+}
+
+static void __exit msi_cleanup(void)
+{
+
+	sysfs_remove_group(&msipf_device->dev.kobj, &msipf_attribute_group);
+	platform_device_unregister(msipf_device);
+	platform_driver_unregister(&msipf_driver);
+	backlight_device_unregister(msibl_device);
+
+	/* Enable automatic brightness control again */
+	if (auto_brightness != 2)
+		set_auto_brightness(1);
+
+	printk(KERN_INFO "msi-laptop: driver unloaded.\n");
+}
+
+module_init(msi_init);
+module_exit(msi_cleanup);
+
+MODULE_AUTHOR("Lennart Poettering");
+MODULE_DESCRIPTION("MSI Laptop Support");
+MODULE_VERSION(MSI_DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+
+MODULE_ALIAS("dmi:*:svnMICRO-STARINT'LCO.,LTD:pnMS-1013:pvr0131*:cvnMICRO-STARINT'LCO.,LTD:ct10:*");
+MODULE_ALIAS("dmi:*:svnMicro-StarInternational:pnMS-1058:pvr0581:rvnMSI:rnMS-1058:*:ct10:*");
+MODULE_ALIAS("dmi:*:svnMicro-StarInternational:pnMS-1412:*:rvnMSI:rnMS-1412:*:cvnMICRO-STARINT'LCO.,LTD:ct10:*");
+MODULE_ALIAS("dmi:*:svnNOTEBOOK:pnSAM2000:pvr0131*:cvnMICRO-STARINT'LCO.,LTD:ct10:*");
diff --git a/drivers/misc/panasonic-laptop.c b/drivers/misc/panasonic-laptop.c
new file mode 100644
index 0000000..d82d1cc
--- /dev/null
+++ b/drivers/misc/panasonic-laptop.c
@@ -0,0 +1,766 @@
+/*
+ *  Panasonic HotKey and LCD brightness control driver
+ *  (C) 2004 Hiroshi Miura <miura@da-cha.org>
+ *  (C) 2004 NTT DATA Intellilink Co. http://www.intellilink.co.jp/
+ *  (C) YOKOTA Hiroshi <yokota (at) netlab. is. tsukuba. ac. jp>
+ *  (C) 2004 David Bronaugh <dbronaugh>
+ *  (C) 2006-2008 Harald Welte <laforge@gnumonks.org>
+ *
+ *  derived from toshiba_acpi.c, Copyright (C) 2002-2004 John Belmonte
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ *---------------------------------------------------------------------------
+ *
+ * ChangeLog:
+ *	Sep.23, 2008	Harald Welte <laforge@gnumonks.org>
+ *		-v0.95	rename driver from drivers/acpi/pcc_acpi.c to
+ *			drivers/misc/panasonic-laptop.c
+ *
+ *	Jul.04, 2008	Harald Welte <laforge@gnumonks.org>
+ *		-v0.94	replace /proc interface with device attributes
+ *			support {set,get}keycode on the input device
+ *
+ *	Jun.27, 2008	Harald Welte <laforge@gnumonks.org>
+ *		-v0.92	merge with 2.6.26-rc6 input API changes
+ *			remove broken <= 2.6.15 kernel support
+ *			resolve all compiler warnings
+ *			various coding style fixes (checkpatch.pl)
+ *			add support for backlight api
+ *			major code restructuring
+ *
+ *	Dec.28, 2007	Harald Welte <laforge@gnumonks.org>
+ *		-v0.91	merge with 2.6.24-rc6 ACPI changes
+ *
+ *	Nov.04, 2006	Hiroshi Miura <miura@da-cha.org>
+ *		-v0.9	remove warning about section reference.
+ *			remove acpi_os_free
+ *			add /proc/acpi/pcc/brightness interface for HAL access
+ *			merge dbronaugh's enhancement
+ *	Aug.17, 2004	David Bronaugh (dbronaugh)
+ *		- Added screen brightness setting interface
+ *		  Thanks to FreeBSD crew (acpi_panasonic.c)
+ *		  for the ideas I needed to accomplish it
+ *
+ *	May.29, 2006	Hiroshi Miura <miura@da-cha.org>
+ *		-v0.8.4	follow changes to the keyinput structure
+ *			thanks Fabian Yamaguchi <fabs@cs.tu-berlin.de>,
+ *			Jacob Bower <jacob.bower@ic.ac.uk> and
+ *			Hiroshi Yokota for providing solutions.
+ *
+ *	Oct.02, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		-v0.8.2	merge code of YOKOTA Hiroshi
+ *			<yokota@netlab.is.tsukuba.ac.jp>.
+ *			Add sticky key mode interface.
+ *			Refactoring acpi_pcc_generate_keyinput().
+ *
+ *	Sep.15, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		-v0.8	Generate key input event on input subsystem.
+ *			This is based on yet another driver written by
+ *			Ryuta Nakanishi.
+ *
+ *	Sep.10, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		-v0.7	Change proc interface functions using seq_file
+ *			facility, the same as other ACPI drivers.
+ *
+ *	Aug.28, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		-v0.6.4	Fix a silly error with status checking
+ *
+ *	Aug.25, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		-v0.6.3	replace read_acpi_int by standard function
+ *			acpi_evaluate_integer
+ *			some clean up and make smart copyright notice.
+ *			fix return value of pcc_acpi_get_key()
+ *			fix checking return value of acpi_bus_register_driver()
+ *
+ *	Aug.22, 2004	David Bronaugh <dbronaugh@linuxboxen.org>
+ *		-v0.6.2	Add check on ACPI data (num_sifr)
+ *			Coding style cleanups, better error messages/handling
+ *			Fixed an off-by-one error in memory allocation
+ *
+ *	Aug.21, 2004	David Bronaugh <dbronaugh@linuxboxen.org>
+ *		-v0.6.1	Fix a silly error with status checking
+ *
+ *	Aug.20, 2004	David Bronaugh <dbronaugh@linuxboxen.org>
+ *		- v0.6	Correct brightness controls to reflect reality
+ *			based on information gleaned by Hiroshi Miura
+ *			and discussions with Hiroshi Miura
+ *
+ *	Aug.10, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		- v0.5	support LCD brightness control
+ *			based on the disclosed information by MEI.
+ * + * Jul.25, 2004 Hiroshi Miura <miura@da-cha.org> + * - v0.4 first post version + * add function to retrive SIFR + * + * Jul.24, 2004 Hiroshi Miura <miura@da-cha.org> + * - v0.3 get proper status of hotkey + * + * Jul.22, 2004 Hiroshi Miura <miura@da-cha.org> + * - v0.2 add HotKey handler + * + * Jul.17, 2004 Hiroshi Miura <miura@da-cha.org> + * - v0.1 start from toshiba_acpi driver written by John Belmonte + * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/backlight.h> +#include <linux/ctype.h> +#include <linux/seq_file.h> +#include <linux/uaccess.h> +#include <acpi/acpi_bus.h> +#include <acpi/acpi_drivers.h> +#include <linux/input.h> + + +#ifndef ACPI_HOTKEY_COMPONENT +#define ACPI_HOTKEY_COMPONENT 0x10000000 +#endif + +#define _COMPONENT ACPI_HOTKEY_COMPONENT + +MODULE_AUTHOR("Hiroshi Miura, David Bronaugh and Harald Welte"); +MODULE_DESCRIPTION("ACPI HotKey driver for Panasonic Let's Note laptops"); +MODULE_LICENSE("GPL"); + +#define LOGPREFIX "pcc_acpi: " + +/* Define ACPI PATHs */ +/* Lets note hotkeys */ +#define METHOD_HKEY_QUERY "HINF" +#define METHOD_HKEY_SQTY "SQTY" +#define METHOD_HKEY_SINF "SINF" +#define METHOD_HKEY_SSET "SSET" +#define HKEY_NOTIFY 0x80 + +#define ACPI_PCC_DRIVER_NAME "Panasonic Laptop Support" +#define ACPI_PCC_DEVICE_NAME "Hotkey" +#define ACPI_PCC_CLASS "pcc" + +#define ACPI_PCC_INPUT_PHYS "panasonic/hkey0" + +/* LCD_TYPEs: 0 = Normal, 1 = Semi-transparent + ENV_STATEs: Normal temp=0x01, High temp=0x81, N/A=0x00 +*/ +enum SINF_BITS { SINF_NUM_BATTERIES = 0, + SINF_LCD_TYPE, + SINF_AC_MAX_BRIGHT, + SINF_AC_MIN_BRIGHT, + SINF_AC_CUR_BRIGHT, + SINF_DC_MAX_BRIGHT, + SINF_DC_MIN_BRIGHT, + SINF_DC_CUR_BRIGHT, + SINF_MUTE, + SINF_RESERVED, + SINF_ENV_STATE, + SINF_STICKY_KEY = 0x80, + }; +/* R1 handles SINF_AC_CUR_BRIGHT as SINF_CUR_BRIGHT, doesn't know AC state */ + +static int acpi_pcc_hotkey_add(struct acpi_device *device); +static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type); +static int acpi_pcc_hotkey_resume(struct acpi_device *device); + +static const struct acpi_device_id pcc_device_ids[] = { + { "MAT0012", 0}, + { "MAT0013", 0}, + { "MAT0018", 0}, + { "MAT0019", 0}, + { "", 0}, +}; + +static struct acpi_driver acpi_pcc_driver = { + .name = ACPI_PCC_DRIVER_NAME, + .class = ACPI_PCC_CLASS, + .ids = pcc_device_ids, + .ops = { + .add = acpi_pcc_hotkey_add, + .remove = acpi_pcc_hotkey_remove, + .resume = acpi_pcc_hotkey_resume, + }, +}; + +#define KEYMAP_SIZE 11 +static const int initial_keymap[KEYMAP_SIZE] = { + /* 0 */ KEY_RESERVED, + /* 1 */ KEY_BRIGHTNESSDOWN, + /* 2 */ KEY_BRIGHTNESSUP, + /* 3 */ KEY_DISPLAYTOGGLE, + /* 4 */ KEY_MUTE, + /* 5 */ KEY_VOLUMEDOWN, + /* 6 */ KEY_VOLUMEUP, + /* 7 */ KEY_SLEEP, + /* 8 */ KEY_PROG1, /* Change CPU boost */ + /* 9 */ KEY_BATTERY, + /* 10 */ KEY_SUSPEND, +}; + +struct pcc_acpi { + acpi_handle handle; + unsigned long num_sifr; + int sticky_mode; + u32 *sinf; + struct acpi_device *device; + struct input_dev *input_dev; + struct backlight_device *backlight; + int keymap[KEYMAP_SIZE]; +}; + +struct pcc_keyinput { + struct acpi_hotkey *hotkey; +}; + +/* method access functions */ +static int acpi_pcc_write_sset(struct pcc_acpi *pcc, int func, int val) +{ + union acpi_object in_objs[] = { + { .integer.type = ACPI_TYPE_INTEGER, + .integer.value = func, }, + { .integer.type = ACPI_TYPE_INTEGER, + .integer.value = val, }, + }; + struct acpi_object_list params = { + .count = ARRAY_SIZE(in_objs), + .pointer = in_objs, + 
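+		/* two integers: the SINF function number and the value
+		 * to set it to */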
};
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE("acpi_pcc_write_sset");
+
+	status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SSET,
+				      &params, NULL);
+
+	return status == AE_OK;
+}
+
+static inline int acpi_pcc_get_sqty(struct acpi_device *device)
+{
+	unsigned long long s;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE("acpi_pcc_get_sqty");
+
+	status = acpi_evaluate_integer(device->handle, METHOD_HKEY_SQTY,
+				       NULL, &s);
+	if (ACPI_SUCCESS(status))
+		return s;
+	else {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				  "evaluation error HKEY.SQTY\n"));
+		return -EINVAL;
+	}
+}
+
+static int acpi_pcc_retrieve_biosdata(struct pcc_acpi *pcc, u32 *sinf)
+{
+	acpi_status status;
+	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+	union acpi_object *hkey = NULL;
+	int i;
+
+	ACPI_FUNCTION_TRACE("acpi_pcc_retrieve_biosdata");
+
+	status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SINF, 0,
+				      &buffer);
+	if (ACPI_FAILURE(status)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				  "evaluation error HKEY.SINF\n"));
+		return 0;
+	}
+
+	hkey = buffer.pointer;
+	if (!hkey || (hkey->type != ACPI_TYPE_PACKAGE)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid HKEY.SINF\n"));
+		goto end;
+	}
+
+	if (pcc->num_sifr < hkey->package.count) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				  "SQTY reports bad SINF length\n"));
+		status = AE_ERROR;
+		goto end;
+	}
+
+	for (i = 0; i < hkey->package.count; i++) {
+		union acpi_object *element = &(hkey->package.elements[i]);
+		if (likely(element->type == ACPI_TYPE_INTEGER)) {
+			sinf[i] = element->integer.value;
+		} else
+			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+					  "Invalid HKEY.SINF data\n"));
+	}
+	sinf[hkey->package.count] = -1;
+
+end:
+	kfree(buffer.pointer);
+	return status == AE_OK;
+}
+
+/* backlight API interface functions */
+
+/* This driver currently treats AC and DC brightness identically,
+ * since we don't need to invent an interface to the core ACPI
+ * logic to receive events in case a power supply is plugged in
+ * or removed */
+
+static int bl_get(struct backlight_device *bd)
+{
+	struct pcc_acpi *pcc = bl_get_data(bd);
+
+	if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
+		return -EIO;
+
+	return pcc->sinf[SINF_AC_CUR_BRIGHT];
+}
+
+static int bl_set_status(struct backlight_device *bd)
+{
+	struct pcc_acpi *pcc = bl_get_data(bd);
+	int bright = bd->props.brightness;
+	int rc;
+
+	if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
+		return -EIO;
+
+	if (bright < pcc->sinf[SINF_AC_MIN_BRIGHT])
+		bright = pcc->sinf[SINF_AC_MIN_BRIGHT];
+
+	if (bright < pcc->sinf[SINF_DC_MIN_BRIGHT])
+		bright = pcc->sinf[SINF_DC_MIN_BRIGHT];
+
+	if (bright < pcc->sinf[SINF_AC_MIN_BRIGHT] ||
+	    bright > pcc->sinf[SINF_AC_MAX_BRIGHT])
+		return -EINVAL;
+
+	rc = acpi_pcc_write_sset(pcc, SINF_AC_CUR_BRIGHT, bright);
+	if (rc < 0)
+		return rc;
+
+	return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
+}
+
+static struct backlight_ops pcc_backlight_ops = {
+	.get_brightness	= bl_get,
+	.update_status	= bl_set_status,
+};
+
+
+/* sysfs user interface functions */
+
+static ssize_t show_numbatt(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct acpi_device *acpi = to_acpi_device(dev);
+	struct pcc_acpi *pcc = acpi_driver_data(acpi);
+
+	if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
+		return -EIO;
+
+	return sprintf(buf, "%u\n", pcc->sinf[SINF_NUM_BATTERIES]);
+}
+
+static ssize_t show_lcdtype(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct acpi_device *acpi = to_acpi_device(dev);
+	struct pcc_acpi *pcc = acpi_driver_data(acpi);
+
+	if 
(!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf)) + return -EIO; + + return sprintf(buf, "%u\n", pcc->sinf[SINF_LCD_TYPE]); +} + +static ssize_t show_mute(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct acpi_device *acpi = to_acpi_device(dev); + struct pcc_acpi *pcc = acpi_driver_data(acpi); + + if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf)) + return -EIO; + + return sprintf(buf, "%u\n", pcc->sinf[SINF_MUTE]); +} + +static ssize_t show_sticky(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct acpi_device *acpi = to_acpi_device(dev); + struct pcc_acpi *pcc = acpi_driver_data(acpi); + + if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf)) + return -EIO; + + return sprintf(buf, "%u\n", pcc->sinf[SINF_STICKY_KEY]); +} + +static ssize_t set_sticky(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct acpi_device *acpi = to_acpi_device(dev); + struct pcc_acpi *pcc = acpi_driver_data(acpi); + int val; + + if (count && sscanf(buf, "%i", &val) == 1 && + (val == 0 || val == 1)) { + acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, val); + pcc->sticky_mode = val; + } + + return count; +} + +static DEVICE_ATTR(numbatt, S_IRUGO, show_numbatt, NULL); +static DEVICE_ATTR(lcdtype, S_IRUGO, show_lcdtype, NULL); +static DEVICE_ATTR(mute, S_IRUGO, show_mute, NULL); +static DEVICE_ATTR(sticky_key, S_IRUGO | S_IWUSR, show_sticky, set_sticky); + +static struct attribute *pcc_sysfs_entries[] = { + &dev_attr_numbatt.attr, + &dev_attr_lcdtype.attr, + &dev_attr_mute.attr, + &dev_attr_sticky_key.attr, + NULL, +}; + +static struct attribute_group pcc_attr_group = { + .name = NULL, /* put in device directory */ + .attrs = pcc_sysfs_entries, +}; + + +/* hotkey input device driver */ + +static int pcc_getkeycode(struct input_dev *dev, int scancode, int *keycode) +{ + struct pcc_acpi *pcc = input_get_drvdata(dev); + + if (scancode >= ARRAY_SIZE(pcc->keymap)) + return -EINVAL; + + *keycode = pcc->keymap[scancode]; + + return 0; +} + +static int keymap_get_by_keycode(struct pcc_acpi *pcc, int keycode) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(pcc->keymap); i++) { + if (pcc->keymap[i] == keycode) + return i+1; + } + + return 0; +} + +static int pcc_setkeycode(struct input_dev *dev, int scancode, int keycode) +{ + struct pcc_acpi *pcc = input_get_drvdata(dev); + int oldkeycode; + + if (scancode >= ARRAY_SIZE(pcc->keymap)) + return -EINVAL; + + if (keycode < 0 || keycode > KEY_MAX) + return -EINVAL; + + oldkeycode = pcc->keymap[scancode]; + pcc->keymap[scancode] = keycode; + + set_bit(keycode, dev->keybit); + + if (!keymap_get_by_keycode(pcc, oldkeycode)) + clear_bit(oldkeycode, dev->keybit); + + return 0; +} + +static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc) +{ + struct input_dev *hotk_input_dev = pcc->input_dev; + int rc; + int key_code, hkey_num; + unsigned long long result; + + ACPI_FUNCTION_TRACE("acpi_pcc_generate_keyinput"); + + rc = acpi_evaluate_integer(pcc->handle, METHOD_HKEY_QUERY, + NULL, &result); + if (!ACPI_SUCCESS(rc)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "error getting hotkey status\n")); + return; + } + + acpi_bus_generate_proc_event(pcc->device, HKEY_NOTIFY, result); + + hkey_num = result & 0xf; + + if (hkey_num < 0 || hkey_num >= ARRAY_SIZE(pcc->keymap)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "hotkey number out of range: %d\n", + hkey_num)); + return; + } + + key_code = pcc->keymap[hkey_num]; + + if (key_code != KEY_RESERVED) { + int pushed = (result & 0x80) ? 
TRUE : FALSE; + + input_report_key(hotk_input_dev, key_code, pushed); + input_sync(hotk_input_dev); + } + + return; +} + +static void acpi_pcc_hotkey_notify(acpi_handle handle, u32 event, void *data) +{ + struct pcc_acpi *pcc = (struct pcc_acpi *) data; + + ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_notify"); + + switch (event) { + case HKEY_NOTIFY: + acpi_pcc_generate_keyinput(pcc); + break; + default: + /* nothing to do */ + break; + } +} + +static int acpi_pcc_init_input(struct pcc_acpi *pcc) +{ + int i, rc; + + ACPI_FUNCTION_TRACE("acpi_pcc_init_input"); + + pcc->input_dev = input_allocate_device(); + if (!pcc->input_dev) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Couldn't allocate input device for hotkey")); + return -ENOMEM; + } + + pcc->input_dev->evbit[0] = BIT(EV_KEY); + + pcc->input_dev->name = ACPI_PCC_DRIVER_NAME; + pcc->input_dev->phys = ACPI_PCC_INPUT_PHYS; + pcc->input_dev->id.bustype = BUS_HOST; + pcc->input_dev->id.vendor = 0x0001; + pcc->input_dev->id.product = 0x0001; + pcc->input_dev->id.version = 0x0100; + pcc->input_dev->getkeycode = pcc_getkeycode; + pcc->input_dev->setkeycode = pcc_setkeycode; + + /* load initial keymap */ + memcpy(pcc->keymap, initial_keymap, sizeof(pcc->keymap)); + + for (i = 0; i < ARRAY_SIZE(pcc->keymap); i++) + __set_bit(pcc->keymap[i], pcc->input_dev->keybit); + __clear_bit(KEY_RESERVED, pcc->input_dev->keybit); + + input_set_drvdata(pcc->input_dev, pcc); + + rc = input_register_device(pcc->input_dev); + if (rc < 0) + input_free_device(pcc->input_dev); + + return rc; +} + +/* kernel module interface */ + +static int acpi_pcc_hotkey_resume(struct acpi_device *device) +{ + struct pcc_acpi *pcc = acpi_driver_data(device); + acpi_status status = AE_OK; + + ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_resume"); + + if (device == NULL || pcc == NULL) + return -EINVAL; + + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Sticky mode restore: %d\n", + pcc->sticky_mode)); + + status = acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, pcc->sticky_mode); + + return status == AE_OK ? 
0 : -EINVAL; +} + +static int acpi_pcc_hotkey_add(struct acpi_device *device) +{ + acpi_status status; + struct pcc_acpi *pcc; + int num_sifr, result; + + ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_add"); + + if (!device) + return -EINVAL; + + num_sifr = acpi_pcc_get_sqty(device); + + if (num_sifr > 255) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "num_sifr too large")); + return -ENODEV; + } + + pcc = kzalloc(sizeof(struct pcc_acpi), GFP_KERNEL); + if (!pcc) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Couldn't allocate mem for pcc")); + return -ENOMEM; + } + + pcc->sinf = kzalloc(sizeof(u32) * (num_sifr + 1), GFP_KERNEL); + if (!pcc->sinf) { + result = -ENOMEM; + goto out_hotkey; + } + + pcc->device = device; + pcc->handle = device->handle; + pcc->num_sifr = num_sifr; + device->driver_data = pcc; + strcpy(acpi_device_name(device), ACPI_PCC_DEVICE_NAME); + strcpy(acpi_device_class(device), ACPI_PCC_CLASS); + + result = acpi_pcc_init_input(pcc); + if (result) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Error installing keyinput handler\n")); + goto out_sinf; + } + + /* initialize hotkey input device */ + status = acpi_install_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY, + acpi_pcc_hotkey_notify, pcc); + + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Error installing notify handler\n")); + result = -ENODEV; + goto out_input; + } + + /* initialize backlight */ + pcc->backlight = backlight_device_register("panasonic", NULL, pcc, + &pcc_backlight_ops); + if (IS_ERR(pcc->backlight)) + goto out_notify; + + if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Couldn't retrieve BIOS data\n")); + goto out_backlight; + } + + /* read the initial brightness setting from the hardware */ + pcc->backlight->props.max_brightness = + pcc->sinf[SINF_AC_MAX_BRIGHT]; + pcc->backlight->props.brightness = pcc->sinf[SINF_AC_CUR_BRIGHT]; + + /* read the initial sticky key mode from the hardware */ + pcc->sticky_mode = pcc->sinf[SINF_STICKY_KEY]; + + /* add sysfs attributes */ + result = sysfs_create_group(&device->dev.kobj, &pcc_attr_group); + if (result) + goto out_backlight; + + return 0; + +out_backlight: + backlight_device_unregister(pcc->backlight); +out_notify: + acpi_remove_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY, + acpi_pcc_hotkey_notify); +out_input: + input_unregister_device(pcc->input_dev); + /* no need to input_free_device() since core input API refcount and + * free()s the device */ +out_sinf: + kfree(pcc->sinf); +out_hotkey: + kfree(pcc); + + return result; +} + +static int __init acpi_pcc_init(void) +{ + int result = 0; + + ACPI_FUNCTION_TRACE("acpi_pcc_init"); + + if (acpi_disabled) + return -ENODEV; + + result = acpi_bus_register_driver(&acpi_pcc_driver); + if (result < 0) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Error registering hotkey driver\n")); + return -ENODEV; + } + + return 0; +} + +static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type) +{ + struct pcc_acpi *pcc = acpi_driver_data(device); + + ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_remove"); + + if (!device || !pcc) + return -EINVAL; + + sysfs_remove_group(&device->dev.kobj, &pcc_attr_group); + + backlight_device_unregister(pcc->backlight); + + acpi_remove_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY, + acpi_pcc_hotkey_notify); + + input_unregister_device(pcc->input_dev); + /* no need to input_free_device() since core input API refcount and + * free()s the device */ + + kfree(pcc->sinf); + kfree(pcc); + + return 0; +} + +static void __exit acpi_pcc_exit(void) +{ + 
ACPI_FUNCTION_TRACE("acpi_pcc_exit"); + + acpi_bus_unregister_driver(&acpi_pcc_driver); +} + +module_init(acpi_pcc_init); +module_exit(acpi_pcc_exit); diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c new file mode 100644 index 0000000..abdebe3 --- /dev/null +++ b/drivers/misc/phantom.c @@ -0,0 +1,568 @@ +/* + * Copyright (C) 2005-2007 Jiri Slaby <jirislaby@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * You need an userspace library to cooperate with this driver. It (and other + * info) may be obtained here: + * http://www.fi.muni.cz/~xslaby/phantom.html + * or alternatively, you might use OpenHaptics provided by Sensable. + */ + +#include <linux/compat.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/fs.h> +#include <linux/poll.h> +#include <linux/interrupt.h> +#include <linux/cdev.h> +#include <linux/phantom.h> +#include <linux/smp_lock.h> + +#include <asm/atomic.h> +#include <asm/io.h> + +#define PHANTOM_VERSION "n0.9.8" + +#define PHANTOM_MAX_MINORS 8 + +#define PHN_IRQCTL 0x4c /* irq control in caddr space */ + +#define PHB_RUNNING 1 +#define PHB_NOT_OH 2 + +static struct class *phantom_class; +static int phantom_major; + +struct phantom_device { + unsigned int opened; + void __iomem *caddr; + u32 __iomem *iaddr; + u32 __iomem *oaddr; + unsigned long status; + atomic_t counter; + + wait_queue_head_t wait; + struct cdev cdev; + + struct mutex open_lock; + spinlock_t regs_lock; + + /* used in NOT_OH mode */ + struct phm_regs oregs; + u32 ctl_reg; +}; + +static unsigned char phantom_devices[PHANTOM_MAX_MINORS]; + +static int phantom_status(struct phantom_device *dev, unsigned long newstat) +{ + pr_debug("phantom_status %lx %lx\n", dev->status, newstat); + + if (!(dev->status & PHB_RUNNING) && (newstat & PHB_RUNNING)) { + atomic_set(&dev->counter, 0); + iowrite32(PHN_CTL_IRQ, dev->iaddr + PHN_CONTROL); + iowrite32(0x43, dev->caddr + PHN_IRQCTL); + ioread32(dev->caddr + PHN_IRQCTL); /* PCI posting */ + } else if ((dev->status & PHB_RUNNING) && !(newstat & PHB_RUNNING)) { + iowrite32(0, dev->caddr + PHN_IRQCTL); + ioread32(dev->caddr + PHN_IRQCTL); /* PCI posting */ + } + + dev->status = newstat; + + return 0; +} + +/* + * File ops + */ + +static long phantom_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct phantom_device *dev = file->private_data; + struct phm_regs rs; + struct phm_reg r; + void __user *argp = (void __user *)arg; + unsigned long flags; + unsigned int i; + + switch (cmd) { + case PHN_SETREG: + case PHN_SET_REG: + if (copy_from_user(&r, argp, sizeof(r))) + return -EFAULT; + + if (r.reg > 7) + return -EINVAL; + + spin_lock_irqsave(&dev->regs_lock, flags); + if (r.reg == PHN_CONTROL && (r.value & PHN_CTL_IRQ) && + phantom_status(dev, dev->status | PHB_RUNNING)){ + spin_unlock_irqrestore(&dev->regs_lock, flags); + return -ENODEV; + } + + pr_debug("phantom: writing %x to %u\n", r.value, r.reg); + + /* preserve amp bit (don't allow to change it when in NOT_OH) */ + if (r.reg == PHN_CONTROL && (dev->status & PHB_NOT_OH)) { + r.value &= ~PHN_CTL_AMP; + r.value |= dev->ctl_reg & PHN_CTL_AMP; + dev->ctl_reg = r.value; + } + + iowrite32(r.value, dev->iaddr + r.reg); + ioread32(dev->iaddr); /* PCI posting */ + + if (r.reg == PHN_CONTROL && 
!(r.value & PHN_CTL_IRQ)) + phantom_status(dev, dev->status & ~PHB_RUNNING); + spin_unlock_irqrestore(&dev->regs_lock, flags); + break; + case PHN_SETREGS: + case PHN_SET_REGS: + if (copy_from_user(&rs, argp, sizeof(rs))) + return -EFAULT; + + pr_debug("phantom: SRS %u regs %x\n", rs.count, rs.mask); + spin_lock_irqsave(&dev->regs_lock, flags); + if (dev->status & PHB_NOT_OH) + memcpy(&dev->oregs, &rs, sizeof(rs)); + else { + u32 m = min(rs.count, 8U); + for (i = 0; i < m; i++) + if (rs.mask & BIT(i)) + iowrite32(rs.values[i], dev->oaddr + i); + ioread32(dev->iaddr); /* PCI posting */ + } + spin_unlock_irqrestore(&dev->regs_lock, flags); + break; + case PHN_GETREG: + case PHN_GET_REG: + if (copy_from_user(&r, argp, sizeof(r))) + return -EFAULT; + + if (r.reg > 7) + return -EINVAL; + + r.value = ioread32(dev->iaddr + r.reg); + + if (copy_to_user(argp, &r, sizeof(r))) + return -EFAULT; + break; + case PHN_GETREGS: + case PHN_GET_REGS: { + u32 m; + + if (copy_from_user(&rs, argp, sizeof(rs))) + return -EFAULT; + + m = min(rs.count, 8U); + + pr_debug("phantom: GRS %u regs %x\n", rs.count, rs.mask); + spin_lock_irqsave(&dev->regs_lock, flags); + for (i = 0; i < m; i++) + if (rs.mask & BIT(i)) + rs.values[i] = ioread32(dev->iaddr + i); + atomic_set(&dev->counter, 0); + spin_unlock_irqrestore(&dev->regs_lock, flags); + + if (copy_to_user(argp, &rs, sizeof(rs))) + return -EFAULT; + break; + } case PHN_NOT_OH: + spin_lock_irqsave(&dev->regs_lock, flags); + if (dev->status & PHB_RUNNING) { + printk(KERN_ERR "phantom: you need to set NOT_OH " + "before you start the device!\n"); + spin_unlock_irqrestore(&dev->regs_lock, flags); + return -EINVAL; + } + dev->status |= PHB_NOT_OH; + spin_unlock_irqrestore(&dev->regs_lock, flags); + break; + default: + return -ENOTTY; + } + + return 0; +} + +#ifdef CONFIG_COMPAT +static long phantom_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + if (_IOC_NR(cmd) <= 3 && _IOC_SIZE(cmd) == sizeof(compat_uptr_t)) { + cmd &= ~(_IOC_SIZEMASK << _IOC_SIZESHIFT); + cmd |= sizeof(void *) << _IOC_SIZESHIFT; + } + return phantom_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); +} +#else +#define phantom_compat_ioctl NULL +#endif + +static int phantom_open(struct inode *inode, struct file *file) +{ + struct phantom_device *dev = container_of(inode->i_cdev, + struct phantom_device, cdev); + + lock_kernel(); + nonseekable_open(inode, file); + + if (mutex_lock_interruptible(&dev->open_lock)) { + unlock_kernel(); + return -ERESTARTSYS; + } + + if (dev->opened) { + mutex_unlock(&dev->open_lock); + unlock_kernel(); + return -EINVAL; + } + + WARN_ON(dev->status & PHB_NOT_OH); + + file->private_data = dev; + + atomic_set(&dev->counter, 0); + dev->opened++; + mutex_unlock(&dev->open_lock); + unlock_kernel(); + return 0; +} + +static int phantom_release(struct inode *inode, struct file *file) +{ + struct phantom_device *dev = file->private_data; + + mutex_lock(&dev->open_lock); + + dev->opened = 0; + phantom_status(dev, dev->status & ~PHB_RUNNING); + dev->status &= ~PHB_NOT_OH; + + mutex_unlock(&dev->open_lock); + + return 0; +} + +static unsigned int phantom_poll(struct file *file, poll_table *wait) +{ + struct phantom_device *dev = file->private_data; + unsigned int mask = 0; + + pr_debug("phantom_poll: %d\n", atomic_read(&dev->counter)); + poll_wait(file, &dev->wait, wait); + + if (!(dev->status & PHB_RUNNING)) + mask = POLLERR; + else if (atomic_read(&dev->counter)) + mask = POLLIN | POLLRDNORM; + + pr_debug("phantom_poll end: %x/%d\n", mask, 
atomic_read(&dev->counter)); + + return mask; +} + +static struct file_operations phantom_file_ops = { + .open = phantom_open, + .release = phantom_release, + .unlocked_ioctl = phantom_ioctl, + .compat_ioctl = phantom_compat_ioctl, + .poll = phantom_poll, +}; + +static irqreturn_t phantom_isr(int irq, void *data) +{ + struct phantom_device *dev = data; + unsigned int i; + u32 ctl; + + spin_lock(&dev->regs_lock); + ctl = ioread32(dev->iaddr + PHN_CONTROL); + if (!(ctl & PHN_CTL_IRQ)) { + spin_unlock(&dev->regs_lock); + return IRQ_NONE; + } + + iowrite32(0, dev->iaddr); + iowrite32(0xc0, dev->iaddr); + + if (dev->status & PHB_NOT_OH) { + struct phm_regs *r = &dev->oregs; + u32 m = min(r->count, 8U); + + for (i = 0; i < m; i++) + if (r->mask & BIT(i)) + iowrite32(r->values[i], dev->oaddr + i); + + dev->ctl_reg ^= PHN_CTL_AMP; + iowrite32(dev->ctl_reg, dev->iaddr + PHN_CONTROL); + } + spin_unlock(&dev->regs_lock); + + ioread32(dev->iaddr); /* PCI posting */ + + atomic_inc(&dev->counter); + wake_up_interruptible(&dev->wait); + + return IRQ_HANDLED; +} + +/* + * Init and deinit driver + */ + +static unsigned int __devinit phantom_get_free(void) +{ + unsigned int i; + + for (i = 0; i < PHANTOM_MAX_MINORS; i++) + if (phantom_devices[i] == 0) + break; + + return i; +} + +static int __devinit phantom_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_id) +{ + struct phantom_device *pht; + unsigned int minor; + int retval; + + retval = pci_enable_device(pdev); + if (retval) + goto err; + + minor = phantom_get_free(); + if (minor == PHANTOM_MAX_MINORS) { + dev_err(&pdev->dev, "too many devices found!\n"); + retval = -EIO; + goto err_dis; + } + + phantom_devices[minor] = 1; + + retval = pci_request_regions(pdev, "phantom"); + if (retval) + goto err_null; + + retval = -ENOMEM; + pht = kzalloc(sizeof(*pht), GFP_KERNEL); + if (pht == NULL) { + dev_err(&pdev->dev, "unable to allocate device\n"); + goto err_reg; + } + + pht->caddr = pci_iomap(pdev, 0, 0); + if (pht->caddr == NULL) { + dev_err(&pdev->dev, "can't remap conf space\n"); + goto err_fr; + } + pht->iaddr = pci_iomap(pdev, 2, 0); + if (pht->iaddr == NULL) { + dev_err(&pdev->dev, "can't remap input space\n"); + goto err_unmc; + } + pht->oaddr = pci_iomap(pdev, 3, 0); + if (pht->oaddr == NULL) { + dev_err(&pdev->dev, "can't remap output space\n"); + goto err_unmi; + } + + mutex_init(&pht->open_lock); + spin_lock_init(&pht->regs_lock); + init_waitqueue_head(&pht->wait); + cdev_init(&pht->cdev, &phantom_file_ops); + pht->cdev.owner = THIS_MODULE; + + iowrite32(0, pht->caddr + PHN_IRQCTL); + ioread32(pht->caddr + PHN_IRQCTL); /* PCI posting */ + retval = request_irq(pdev->irq, phantom_isr, + IRQF_SHARED | IRQF_DISABLED, "phantom", pht); + if (retval) { + dev_err(&pdev->dev, "can't establish ISR\n"); + goto err_unmo; + } + + retval = cdev_add(&pht->cdev, MKDEV(phantom_major, minor), 1); + if (retval) { + dev_err(&pdev->dev, "chardev registration failed\n"); + goto err_irq; + } + + if (IS_ERR(device_create(phantom_class, &pdev->dev, + MKDEV(phantom_major, minor), NULL, + "phantom%u", minor))) + dev_err(&pdev->dev, "can't create device\n"); + + pci_set_drvdata(pdev, pht); + + return 0; +err_irq: + free_irq(pdev->irq, pht); +err_unmo: + pci_iounmap(pdev, pht->oaddr); +err_unmi: + pci_iounmap(pdev, pht->iaddr); +err_unmc: + pci_iounmap(pdev, pht->caddr); +err_fr: + kfree(pht); +err_reg: + pci_release_regions(pdev); +err_null: + phantom_devices[minor] = 0; +err_dis: + pci_disable_device(pdev); +err: + return retval; +} + +static void __devexit 
phantom_remove(struct pci_dev *pdev) +{ + struct phantom_device *pht = pci_get_drvdata(pdev); + unsigned int minor = MINOR(pht->cdev.dev); + + device_destroy(phantom_class, MKDEV(phantom_major, minor)); + + cdev_del(&pht->cdev); + + iowrite32(0, pht->caddr + PHN_IRQCTL); + ioread32(pht->caddr + PHN_IRQCTL); /* PCI posting */ + free_irq(pdev->irq, pht); + + pci_iounmap(pdev, pht->oaddr); + pci_iounmap(pdev, pht->iaddr); + pci_iounmap(pdev, pht->caddr); + + kfree(pht); + + pci_release_regions(pdev); + + phantom_devices[minor] = 0; + + pci_disable_device(pdev); +} + +#ifdef CONFIG_PM +static int phantom_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct phantom_device *dev = pci_get_drvdata(pdev); + + iowrite32(0, dev->caddr + PHN_IRQCTL); + ioread32(dev->caddr + PHN_IRQCTL); /* PCI posting */ + + synchronize_irq(pdev->irq); + + return 0; +} + +static int phantom_resume(struct pci_dev *pdev) +{ + struct phantom_device *dev = pci_get_drvdata(pdev); + + iowrite32(0, dev->caddr + PHN_IRQCTL); + + return 0; +} +#else +#define phantom_suspend NULL +#define phantom_resume NULL +#endif + +static struct pci_device_id phantom_pci_tbl[] __devinitdata = { + { .vendor = PCI_VENDOR_ID_PLX, .device = PCI_DEVICE_ID_PLX_9050, + .subvendor = PCI_VENDOR_ID_PLX, .subdevice = PCI_DEVICE_ID_PLX_9050, + .class = PCI_CLASS_BRIDGE_OTHER << 8, .class_mask = 0xffff00 }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, phantom_pci_tbl); + +static struct pci_driver phantom_pci_driver = { + .name = "phantom", + .id_table = phantom_pci_tbl, + .probe = phantom_probe, + .remove = __devexit_p(phantom_remove), + .suspend = phantom_suspend, + .resume = phantom_resume +}; + +static ssize_t phantom_show_version(struct class *cls, char *buf) +{ + return sprintf(buf, PHANTOM_VERSION "\n"); +} + +static CLASS_ATTR(version, 0444, phantom_show_version, NULL); + +static int __init phantom_init(void) +{ + int retval; + dev_t dev; + + phantom_class = class_create(THIS_MODULE, "phantom"); + if (IS_ERR(phantom_class)) { + retval = PTR_ERR(phantom_class); + printk(KERN_ERR "phantom: can't register phantom class\n"); + goto err; + } + retval = class_create_file(phantom_class, &class_attr_version); + if (retval) { + printk(KERN_ERR "phantom: can't create sysfs version file\n"); + goto err_class; + } + + retval = alloc_chrdev_region(&dev, 0, PHANTOM_MAX_MINORS, "phantom"); + if (retval) { + printk(KERN_ERR "phantom: can't register character device\n"); + goto err_attr; + } + phantom_major = MAJOR(dev); + + retval = pci_register_driver(&phantom_pci_driver); + if (retval) { + printk(KERN_ERR "phantom: can't register pci driver\n"); + goto err_unchr; + } + + printk(KERN_INFO "Phantom Linux Driver, version " PHANTOM_VERSION ", " + "init OK\n"); + + return 0; +err_unchr: + unregister_chrdev_region(dev, PHANTOM_MAX_MINORS); +err_attr: + class_remove_file(phantom_class, &class_attr_version); +err_class: + class_destroy(phantom_class); +err: + return retval; +} + +static void __exit phantom_exit(void) +{ + pci_unregister_driver(&phantom_pci_driver); + + unregister_chrdev_region(MKDEV(phantom_major, 0), PHANTOM_MAX_MINORS); + + class_remove_file(phantom_class, &class_attr_version); + class_destroy(phantom_class); + + pr_debug("phantom: module successfully removed\n"); +} + +module_init(phantom_init); +module_exit(phantom_exit); + +MODULE_AUTHOR("Jiri Slaby <jirislaby@gmail.com>"); +MODULE_DESCRIPTION("Sensable Phantom driver (PCI devices)"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(PHANTOM_VERSION); diff --git a/drivers/misc/sgi-gru/Makefile 
b/drivers/misc/sgi-gru/Makefile new file mode 100644 index 0000000..9e9170b --- /dev/null +++ b/drivers/misc/sgi-gru/Makefile @@ -0,0 +1,7 @@ +ifdef CONFIG_SGI_GRU_DEBUG + EXTRA_CFLAGS += -DDEBUG +endif + +obj-$(CONFIG_SGI_GRU) := gru.o +gru-y := grufile.o grumain.o grufault.o grutlbpurge.o gruprocfs.o grukservices.o + diff --git a/drivers/misc/sgi-gru/gru.h b/drivers/misc/sgi-gru/gru.h new file mode 100644 index 0000000..f93f03a --- /dev/null +++ b/drivers/misc/sgi-gru/gru.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation; either version 2.1 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __GRU_H__ +#define __GRU_H__ + +/* + * GRU architectural definitions + */ +#define GRU_CACHE_LINE_BYTES 64 +#define GRU_HANDLE_STRIDE 256 +#define GRU_CB_BASE 0 +#define GRU_DS_BASE 0x20000 + +/* + * Size used to map GRU GSeg + */ +#if defined(CONFIG_IA64) +#define GRU_GSEG_PAGESIZE (256 * 1024UL) +#elif defined(CONFIG_X86_64) +#define GRU_GSEG_PAGESIZE (256 * 1024UL) /* ZZZ 2MB ??? */ +#else +#error "Unsupported architecture" +#endif + +/* + * Structure for obtaining GRU resource information + */ +struct gru_chiplet_info { + int node; + int chiplet; + int blade; + int total_dsr_bytes; + int total_cbr; + int total_user_dsr_bytes; + int total_user_cbr; + int free_user_dsr_bytes; + int free_user_cbr; +}; + +/* Flags for GRU options on the gru_create_context() call */ +/* Select one of the follow 4 options to specify how TLB misses are handled */ +#define GRU_OPT_MISS_DEFAULT 0x0000 /* Use default mode */ +#define GRU_OPT_MISS_USER_POLL 0x0001 /* User will poll CB for faults */ +#define GRU_OPT_MISS_FMM_INTR 0x0002 /* Send interrupt to cpu to + handle fault */ +#define GRU_OPT_MISS_FMM_POLL 0x0003 /* Use system polling thread */ +#define GRU_OPT_MISS_MASK 0x0003 /* Mask for TLB MISS option */ + + + +#endif /* __GRU_H__ */ diff --git a/drivers/misc/sgi-gru/gru_instructions.h b/drivers/misc/sgi-gru/gru_instructions.h new file mode 100644 index 0000000..48762e7 --- /dev/null +++ b/drivers/misc/sgi-gru/gru_instructions.h @@ -0,0 +1,671 @@ +/* + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation; either version 2.1 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __GRU_INSTRUCTIONS_H__ +#define __GRU_INSTRUCTIONS_H__ + +#define gru_flush_cache_hook(p) +#define gru_emulator_wait_hook(p, w) + +/* + * Architecture dependent functions + */ + +#if defined(CONFIG_IA64) +#include <linux/compiler.h> +#include <asm/intrinsics.h> +#define __flush_cache(p) ia64_fc(p) +/* Use volatile on IA64 to ensure ordering via st4.rel */ +#define gru_ordered_store_int(p,v) \ + do { \ + barrier(); \ + *((volatile int *)(p)) = v; /* force st.rel */ \ + } while (0) +#elif defined(CONFIG_X86_64) +#define __flush_cache(p) clflush(p) +#define gru_ordered_store_int(p,v) \ + do { \ + barrier(); \ + *(int *)p = v; \ + } while (0) +#else +#error "Unsupported architecture" +#endif + +/* + * Control block status and exception codes + */ +#define CBS_IDLE 0 +#define CBS_EXCEPTION 1 +#define CBS_ACTIVE 2 +#define CBS_CALL_OS 3 + +/* CB substatus bitmasks */ +#define CBSS_MSG_QUEUE_MASK 7 +#define CBSS_IMPLICIT_ABORT_ACTIVE_MASK 8 + +/* CB substatus message queue values (low 3 bits of substatus) */ +#define CBSS_NO_ERROR 0 +#define CBSS_LB_OVERFLOWED 1 +#define CBSS_QLIMIT_REACHED 2 +#define CBSS_PAGE_OVERFLOW 3 +#define CBSS_AMO_NACKED 4 +#define CBSS_PUT_NACKED 5 + +/* + * Structure used to fetch exception detail for CBs that terminate with + * CBS_EXCEPTION + */ +struct control_block_extended_exc_detail { + unsigned long cb; + int opc; + int ecause; + int exopc; + long exceptdet0; + int exceptdet1; +}; + +/* + * Instruction formats + */ + +/* + * Generic instruction format. + * This definition has precise bit field definitions. + */ +struct gru_instruction_bits { + /* DW 0 - low */ + unsigned int icmd: 1; + unsigned char ima: 3; /* CB_DelRep, unmapped mode */ + unsigned char reserved0: 4; + unsigned int xtype: 3; + unsigned int iaa0: 2; + unsigned int iaa1: 2; + unsigned char reserved1: 1; + unsigned char opc: 8; /* opcode */ + unsigned char exopc: 8; /* extended opcode */ + /* DW 0 - high */ + unsigned int idef2: 22; /* TRi0 */ + unsigned char reserved2: 2; + unsigned char istatus: 2; + unsigned char isubstatus:4; + unsigned char reserved3: 2; + /* DW 1 */ + unsigned long idef4; /* 42 bits: TRi1, BufSize */ + /* DW 2-6 */ + unsigned long idef1; /* BAddr0 */ + unsigned long idef5; /* Nelem */ + unsigned long idef6; /* Stride, Operand1 */ + unsigned long idef3; /* BAddr1, Value, Operand2 */ + unsigned long reserved4; + /* DW 7 */ + unsigned long avalue; /* AValue */ +}; + +/* + * Generic instruction with friendlier names. This format is used + * for inline instructions. 
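+ * Both layouts alias the same 64-byte control block: op32 packs the
+ * icmd/ima/xtype/iaa0/iaa1/opc/exopc bits defined above, so storing op32
+ * (done last, by gru_start_instruction() below) both completes the
+ * command word and sets the "start" bit.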
+ */ +struct gru_instruction { + /* DW 0 */ + unsigned int op32; /* icmd,xtype,iaa0,ima,opc */ + unsigned int tri0; + unsigned long tri1_bufsize; /* DW 1 */ + unsigned long baddr0; /* DW 2 */ + unsigned long nelem; /* DW 3 */ + unsigned long op1_stride; /* DW 4 */ + unsigned long op2_value_baddr1; /* DW 5 */ + unsigned long reserved0; /* DW 6 */ + unsigned long avalue; /* DW 7 */ +}; + +/* Some shifts and masks for the low 32 bits of a GRU command */ +#define GRU_CB_ICMD_SHFT 0 +#define GRU_CB_ICMD_MASK 0x1 +#define GRU_CB_XTYPE_SHFT 8 +#define GRU_CB_XTYPE_MASK 0x7 +#define GRU_CB_IAA0_SHFT 11 +#define GRU_CB_IAA0_MASK 0x3 +#define GRU_CB_IAA1_SHFT 13 +#define GRU_CB_IAA1_MASK 0x3 +#define GRU_CB_IMA_SHFT 1 +#define GRU_CB_IMA_MASK 0x3 +#define GRU_CB_OPC_SHFT 16 +#define GRU_CB_OPC_MASK 0xff +#define GRU_CB_EXOPC_SHFT 24 +#define GRU_CB_EXOPC_MASK 0xff + +/* GRU instruction opcodes (opc field) */ +#define OP_NOP 0x00 +#define OP_BCOPY 0x01 +#define OP_VLOAD 0x02 +#define OP_IVLOAD 0x03 +#define OP_VSTORE 0x04 +#define OP_IVSTORE 0x05 +#define OP_VSET 0x06 +#define OP_IVSET 0x07 +#define OP_MESQ 0x08 +#define OP_GAMXR 0x09 +#define OP_GAMIR 0x0a +#define OP_GAMIRR 0x0b +#define OP_GAMER 0x0c +#define OP_GAMERR 0x0d +#define OP_BSTORE 0x0e +#define OP_VFLUSH 0x0f + + +/* Extended opcodes values (exopc field) */ + +/* GAMIR - AMOs with implicit operands */ +#define EOP_IR_FETCH 0x01 /* Plain fetch of memory */ +#define EOP_IR_CLR 0x02 /* Fetch and clear */ +#define EOP_IR_INC 0x05 /* Fetch and increment */ +#define EOP_IR_DEC 0x07 /* Fetch and decrement */ +#define EOP_IR_QCHK1 0x0d /* Queue check, 64 byte msg */ +#define EOP_IR_QCHK2 0x0e /* Queue check, 128 byte msg */ + +/* GAMIRR - Registered AMOs with implicit operands */ +#define EOP_IRR_FETCH 0x01 /* Registered fetch of memory */ +#define EOP_IRR_CLR 0x02 /* Registered fetch and clear */ +#define EOP_IRR_INC 0x05 /* Registered fetch and increment */ +#define EOP_IRR_DEC 0x07 /* Registered fetch and decrement */ +#define EOP_IRR_DECZ 0x0f /* Registered fetch and decrement, update on zero*/ + +/* GAMER - AMOs with explicit operands */ +#define EOP_ER_SWAP 0x00 /* Exchange argument and memory */ +#define EOP_ER_OR 0x01 /* Logical OR with memory */ +#define EOP_ER_AND 0x02 /* Logical AND with memory */ +#define EOP_ER_XOR 0x03 /* Logical XOR with memory */ +#define EOP_ER_ADD 0x04 /* Add value to memory */ +#define EOP_ER_CSWAP 0x08 /* Compare with operand2, write operand1 if match*/ +#define EOP_ER_CADD 0x0c /* Queue check, operand1*64 byte msg */ + +/* GAMERR - Registered AMOs with explicit operands */ +#define EOP_ERR_SWAP 0x00 /* Exchange argument and memory */ +#define EOP_ERR_OR 0x01 /* Logical OR with memory */ +#define EOP_ERR_AND 0x02 /* Logical AND with memory */ +#define EOP_ERR_XOR 0x03 /* Logical XOR with memory */ +#define EOP_ERR_ADD 0x04 /* Add value to memory */ +#define EOP_ERR_CSWAP 0x08 /* Compare with operand2, write operand1 if match*/ +#define EOP_ERR_EPOLL 0x09 /* Poll for equality */ +#define EOP_ERR_NPOLL 0x0a /* Poll for inequality */ + +/* GAMXR - SGI Arithmetic unit */ +#define EOP_XR_CSWAP 0x0b /* Masked compare exchange */ + + +/* Transfer types (xtype field) */ +#define XTYPE_B 0x0 /* byte */ +#define XTYPE_S 0x1 /* short (2-byte) */ +#define XTYPE_W 0x2 /* word (4-byte) */ +#define XTYPE_DW 0x3 /* doubleword (8-byte) */ +#define XTYPE_CL 0x6 /* cacheline (64-byte) */ + + +/* Instruction access attributes (iaa0, iaa1 fields) */ +#define IAA_RAM 0x0 /* normal cached RAM access */ +#define IAA_NCRAM 0x2 /* 
noncoherent RAM access */ +#define IAA_MMIO 0x1 /* noncoherent memory-mapped I/O space */ +#define IAA_REGISTER 0x3 /* memory-mapped registers, etc. */ + + +/* Instruction mode attributes (ima field) */ +#define IMA_MAPPED 0x0 /* Virtual mode */ +#define IMA_CB_DELAY 0x1 /* hold read responses until status changes */ +#define IMA_UNMAPPED 0x2 /* bypass the TLBs (OS only) */ +#define IMA_INTERRUPT 0x4 /* Interrupt when instruction completes */ + +/* CBE ecause bits */ +#define CBE_CAUSE_RI (1 << 0) +#define CBE_CAUSE_INVALID_INSTRUCTION (1 << 1) +#define CBE_CAUSE_UNMAPPED_MODE_FORBIDDEN (1 << 2) +#define CBE_CAUSE_PE_CHECK_DATA_ERROR (1 << 3) +#define CBE_CAUSE_IAA_GAA_MISMATCH (1 << 4) +#define CBE_CAUSE_DATA_SEGMENT_LIMIT_EXCEPTION (1 << 5) +#define CBE_CAUSE_OS_FATAL_TLB_FAULT (1 << 6) +#define CBE_CAUSE_EXECUTION_HW_ERROR (1 << 7) +#define CBE_CAUSE_TLBHW_ERROR (1 << 8) +#define CBE_CAUSE_RA_REQUEST_TIMEOUT (1 << 9) +#define CBE_CAUSE_HA_REQUEST_TIMEOUT (1 << 10) +#define CBE_CAUSE_RA_RESPONSE_FATAL (1 << 11) +#define CBE_CAUSE_RA_RESPONSE_NON_FATAL (1 << 12) +#define CBE_CAUSE_HA_RESPONSE_FATAL (1 << 13) +#define CBE_CAUSE_HA_RESPONSE_NON_FATAL (1 << 14) +#define CBE_CAUSE_ADDRESS_SPACE_DECODE_ERROR (1 << 15) +#define CBE_CAUSE_RESPONSE_DATA_ERROR (1 << 16) +#define CBE_CAUSE_PROTOCOL_STATE_DATA_ERROR (1 << 17) + +/* + * Exceptions are retried for the following cases. If any OTHER bits are set + * in ecause, the exception is not retryable. + */ +#define EXCEPTION_RETRY_BITS (CBE_CAUSE_RESPONSE_DATA_ERROR | \ + CBE_CAUSE_RA_REQUEST_TIMEOUT | \ + CBE_CAUSE_TLBHW_ERROR | \ + CBE_CAUSE_HA_REQUEST_TIMEOUT) + +/* Message queue head structure */ +union gru_mesqhead { + unsigned long val; + struct { + unsigned int head; + unsigned int limit; + }; +}; + + +/* Generate the low word of a GRU instruction */ +static inline unsigned int +__opword(unsigned char opcode, unsigned char exopc, unsigned char xtype, + unsigned char iaa0, unsigned char iaa1, + unsigned char ima) +{ + return (1 << GRU_CB_ICMD_SHFT) | + (iaa0 << GRU_CB_IAA0_SHFT) | + (iaa1 << GRU_CB_IAA1_SHFT) | + (ima << GRU_CB_IMA_SHFT) | + (xtype << GRU_CB_XTYPE_SHFT) | + (opcode << GRU_CB_OPC_SHFT) | + (exopc << GRU_CB_EXOPC_SHFT); +} + +/* + * Architecture specific intrinsics + */ +static inline void gru_flush_cache(void *p) +{ + __flush_cache(p); +} + +/* + * Store the lower 32 bits of the command including the "start" bit. Then + * start the instruction executing. + */ +static inline void gru_start_instruction(struct gru_instruction *ins, int op32) +{ + gru_ordered_store_int(ins, op32); + gru_flush_cache(ins); +} + + +/* Convert "hints" to IMA */ +#define CB_IMA(h) ((h) | IMA_UNMAPPED) + +/* Convert data segment cache line index into TRI0 / TRI1 value */ +#define GRU_DINDEX(i) ((i) * GRU_CACHE_LINE_BYTES) + +/* Inline functions for GRU instructions. + * Note: + * - nelem and stride are in elements + * - tri0/tri1 is in bytes for the beginning of the data segment. 
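+ *
+ * Illustrative use (hypothetical cb and buf, not from the original source):
+ *	gru_vload(cb, (unsigned long)buf, GRU_DINDEX(0), XTYPE_DW, 4, 1, 0);
+ *	if (gru_wait(cb) != CBS_IDLE)
+ *		handle_error(cb);	(hypothetical recovery path)
+ * would load four doublewords from buf into data segment line 0.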
+ */ +static inline void gru_vload(void *cb, unsigned long mem_addr, + unsigned int tri0, unsigned char xtype, unsigned long nelem, + unsigned long stride, unsigned long hints) +{ + struct gru_instruction *ins = (struct gru_instruction *)cb; + + ins->baddr0 = (long)mem_addr; + ins->nelem = nelem; + ins->tri0 = tri0; + ins->op1_stride = stride; + gru_start_instruction(ins, __opword(OP_VLOAD, 0, xtype, IAA_RAM, 0, + CB_IMA(hints))); +} + +static inline void gru_vstore(void *cb, unsigned long mem_addr, + unsigned int tri0, unsigned char xtype, unsigned long nelem, + unsigned long stride, unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)mem_addr; + ins->nelem = nelem; + ins->tri0 = tri0; + ins->op1_stride = stride; + gru_start_instruction(ins, __opword(OP_VSTORE, 0, xtype, IAA_RAM, 0, + CB_IMA(hints))); +} + +static inline void gru_ivload(void *cb, unsigned long mem_addr, + unsigned int tri0, unsigned int tri1, unsigned char xtype, + unsigned long nelem, unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)mem_addr; + ins->nelem = nelem; + ins->tri0 = tri0; + ins->tri1_bufsize = tri1; + gru_start_instruction(ins, __opword(OP_IVLOAD, 0, xtype, IAA_RAM, 0, + CB_IMA(hints))); +} + +static inline void gru_ivstore(void *cb, unsigned long mem_addr, + unsigned int tri0, unsigned int tri1, + unsigned char xtype, unsigned long nelem, unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)mem_addr; + ins->nelem = nelem; + ins->tri0 = tri0; + ins->tri1_bufsize = tri1; + gru_start_instruction(ins, __opword(OP_IVSTORE, 0, xtype, IAA_RAM, 0, + CB_IMA(hints))); +} + +static inline void gru_vset(void *cb, unsigned long mem_addr, + unsigned long value, unsigned char xtype, unsigned long nelem, + unsigned long stride, unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)mem_addr; + ins->op2_value_baddr1 = value; + ins->nelem = nelem; + ins->op1_stride = stride; + gru_start_instruction(ins, __opword(OP_VSET, 0, xtype, IAA_RAM, 0, + CB_IMA(hints))); +} + +static inline void gru_ivset(void *cb, unsigned long mem_addr, + unsigned int tri1, unsigned long value, unsigned char xtype, + unsigned long nelem, unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)mem_addr; + ins->op2_value_baddr1 = value; + ins->nelem = nelem; + ins->tri1_bufsize = tri1; + gru_start_instruction(ins, __opword(OP_IVSET, 0, xtype, IAA_RAM, 0, + CB_IMA(hints))); +} + +static inline void gru_vflush(void *cb, unsigned long mem_addr, + unsigned long nelem, unsigned char xtype, unsigned long stride, + unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)mem_addr; + ins->op1_stride = stride; + ins->nelem = nelem; + gru_start_instruction(ins, __opword(OP_VFLUSH, 0, xtype, IAA_RAM, 0, + CB_IMA(hints))); +} + +static inline void gru_nop(void *cb, int hints) +{ + struct gru_instruction *ins = (void *)cb; + + gru_start_instruction(ins, __opword(OP_NOP, 0, 0, 0, 0, CB_IMA(hints))); +} + + +static inline void gru_bcopy(void *cb, const unsigned long src, + unsigned long dest, + unsigned int tri0, unsigned int xtype, unsigned long nelem, + unsigned int bufsize, unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)src; + ins->op2_value_baddr1 = (long)dest; + ins->nelem = nelem; + ins->tri0 = tri0; + ins->tri1_bufsize = bufsize; + gru_start_instruction(ins, __opword(OP_BCOPY, 0, xtype, 
IAA_RAM, + IAA_RAM, CB_IMA(hints))); +} + +static inline void gru_bstore(void *cb, const unsigned long src, + unsigned long dest, unsigned int tri0, unsigned int xtype, + unsigned long nelem, unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)src; + ins->op2_value_baddr1 = (long)dest; + ins->nelem = nelem; + ins->tri0 = tri0; + gru_start_instruction(ins, __opword(OP_BSTORE, 0, xtype, 0, IAA_RAM, + CB_IMA(hints))); +} + +static inline void gru_gamir(void *cb, int exopc, unsigned long src, + unsigned int xtype, unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)src; + gru_start_instruction(ins, __opword(OP_GAMIR, exopc, xtype, IAA_RAM, 0, + CB_IMA(hints))); +} + +static inline void gru_gamirr(void *cb, int exopc, unsigned long src, + unsigned int xtype, unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)src; + gru_start_instruction(ins, __opword(OP_GAMIRR, exopc, xtype, IAA_RAM, 0, + CB_IMA(hints))); +} + +static inline void gru_gamer(void *cb, int exopc, unsigned long src, + unsigned int xtype, + unsigned long operand1, unsigned long operand2, + unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)src; + ins->op1_stride = operand1; + ins->op2_value_baddr1 = operand2; + gru_start_instruction(ins, __opword(OP_GAMER, exopc, xtype, IAA_RAM, 0, + CB_IMA(hints))); +} + +static inline void gru_gamerr(void *cb, int exopc, unsigned long src, + unsigned int xtype, unsigned long operand1, + unsigned long operand2, unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)src; + ins->op1_stride = operand1; + ins->op2_value_baddr1 = operand2; + gru_start_instruction(ins, __opword(OP_GAMERR, exopc, xtype, IAA_RAM, 0, + CB_IMA(hints))); +} + +static inline void gru_gamxr(void *cb, unsigned long src, + unsigned int tri0, unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)src; + ins->nelem = 4; + gru_start_instruction(ins, __opword(OP_GAMXR, EOP_XR_CSWAP, XTYPE_DW, + IAA_RAM, 0, CB_IMA(hints))); +} + +static inline void gru_mesq(void *cb, unsigned long queue, + unsigned long tri0, unsigned long nelem, + unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)queue; + ins->nelem = nelem; + ins->tri0 = tri0; + gru_start_instruction(ins, __opword(OP_MESQ, 0, XTYPE_CL, IAA_RAM, 0, + CB_IMA(hints))); +} + +static inline unsigned long gru_get_amo_value(void *cb) +{ + struct gru_instruction *ins = (void *)cb; + + return ins->avalue; +} + +static inline int gru_get_amo_value_head(void *cb) +{ + struct gru_instruction *ins = (void *)cb; + + return ins->avalue & 0xffffffff; +} + +static inline int gru_get_amo_value_limit(void *cb) +{ + struct gru_instruction *ins = (void *)cb; + + return ins->avalue >> 32; +} + +static inline union gru_mesqhead gru_mesq_head(int head, int limit) +{ + union gru_mesqhead mqh; + + mqh.head = head; + mqh.limit = limit; + return mqh; +} + +/* + * Get struct control_block_extended_exc_detail for CB. 
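+ * Typically called after gru_check_status()/gru_wait() reports
+ * CBS_EXCEPTION; it fills *excdet from the CB's CBE and returns 0 on
+ * success.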
+ */ +extern int gru_get_cb_exception_detail(void *cb, + struct control_block_extended_exc_detail *excdet); + +#define GRU_EXC_STR_SIZE 256 + +extern int gru_check_status_proc(void *cb); +extern int gru_wait_proc(void *cb); +extern void gru_wait_abort_proc(void *cb); + +/* + * Control block definition for checking status + */ +struct gru_control_block_status { + unsigned int icmd :1; + unsigned int unused1 :31; + unsigned int unused2 :24; + unsigned int istatus :2; + unsigned int isubstatus :4; + unsigned int unused3 :2; +}; + +/* Get CB status */ +static inline int gru_get_cb_status(void *cb) +{ + struct gru_control_block_status *cbs = (void *)cb; + + return cbs->istatus; +} + +/* Get CB message queue substatus */ +static inline int gru_get_cb_message_queue_substatus(void *cb) +{ + struct gru_control_block_status *cbs = (void *)cb; + + return cbs->isubstatus & CBSS_MSG_QUEUE_MASK; +} + +/* Get CB substatus */ +static inline int gru_get_cb_substatus(void *cb) +{ + struct gru_control_block_status *cbs = (void *)cb; + + return cbs->isubstatus; +} + +/* Check the status of a CB. If the CB is in UPM mode, call the + * OS to handle the UPM status. + * Returns the CB status field value (0 for normal completion) + */ +static inline int gru_check_status(void *cb) +{ + struct gru_control_block_status *cbs = (void *)cb; + int ret; + + ret = cbs->istatus; + if (ret == CBS_CALL_OS) + ret = gru_check_status_proc(cb); + return ret; +} + +/* Wait for CB to complete. + * Returns the CB status field value (0 for normal completion) + */ +static inline int gru_wait(void *cb) +{ + struct gru_control_block_status *cbs = (void *)cb; + int ret = cbs->istatus; + + if (ret != CBS_IDLE) + ret = gru_wait_proc(cb); + return ret; +} + +/* Wait for CB to complete. Aborts program if error. (Note: error does NOT + * mean TLB miss; only fatal errors such as a memory parity error or user + * bugs will cause termination.) + */ +static inline void gru_wait_abort(void *cb) +{ + struct gru_control_block_status *cbs = (void *)cb; + + if (cbs->istatus != CBS_IDLE) + gru_wait_abort_proc(cb); +} + + +/* + * Get a pointer to a control block + * gseg - GSeg address returned from gru_get_thread_gru_segment() + * index - index of desired CB + */ +static inline void *gru_get_cb_pointer(void *gseg, + int index) +{ + return gseg + GRU_CB_BASE + index * GRU_HANDLE_STRIDE; +} + +/* + * Get a pointer to a cacheline in the data segment portion of a GSeg + * gseg - GSeg address returned from gru_get_thread_gru_segment() + * index - index of desired cache line + */ +static inline void *gru_get_data_pointer(void *gseg, int index) +{ + return gseg + GRU_DS_BASE + index * GRU_CACHE_LINE_BYTES; +} + +/* + * Convert a vaddr into the tri index within the GSEG + * vaddr - virtual address within the gseg + */ +static inline int gru_get_tri(void *vaddr) +{ + return ((unsigned long)vaddr & (GRU_GSEG_PAGESIZE - 1)) - GRU_DS_BASE; +} +#endif /* __GRU_INSTRUCTIONS_H__ */ diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c new file mode 100644 index 0000000..3ee698a --- /dev/null +++ b/drivers/misc/sgi-gru/grufault.c @@ -0,0 +1,636 @@ +/* + * SN Platform GRU Driver + * + * FAULT HANDLER FOR GRU DETECTED TLB MISSES + * + * This file contains code that handles TLB misses within the GRU. + * These misses are reported either via interrupts or user polling of + * the user CB. + * + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/spinlock.h> +#include <linux/mm.h> +#include <linux/hugetlb.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/uaccess.h> +#include <asm/pgtable.h> +#include "gru.h" +#include "grutables.h" +#include "grulib.h" +#include "gru_instructions.h" +#include <asm/uv/uv_hub.h> + +/* + * Test if a physical address is a valid GRU GSEG address + */ +static inline int is_gru_paddr(unsigned long paddr) +{ + return paddr >= gru_start_paddr && paddr < gru_end_paddr; +} + +/* + * Find the vma of a GRU segment. Caller must hold mmap_sem. + */ +struct vm_area_struct *gru_find_vma(unsigned long vaddr) +{ + struct vm_area_struct *vma; + + vma = find_vma(current->mm, vaddr); + if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops) + return vma; + return NULL; +} + +/* + * Find and lock the gts that contains the specified user vaddr. + * + * Returns: + * - *gts with the mmap_sem locked for read and the GTS locked. + * - NULL if the vaddr is invalid or is not a valid GSEG vaddr. + */ + +static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + struct gru_thread_state *gts = NULL; + + down_read(&mm->mmap_sem); + vma = gru_find_vma(vaddr); + if (vma) + gts = gru_find_thread_state(vma, TSID(vaddr, vma)); + if (gts) + mutex_lock(&gts->ts_ctxlock); + else + up_read(&mm->mmap_sem); + return gts; +} + +static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + struct gru_thread_state *gts = NULL; + + down_write(&mm->mmap_sem); + vma = gru_find_vma(vaddr); + if (vma) + gts = gru_alloc_thread_state(vma, TSID(vaddr, vma)); + if (gts) { + mutex_lock(&gts->ts_ctxlock); + downgrade_write(&mm->mmap_sem); + } else { + up_write(&mm->mmap_sem); + } + + return gts; +} + +/* + * Unlock a GTS that was previously locked with gru_find_lock_gts(). + */ +static void gru_unlock_gts(struct gru_thread_state *gts) +{ + mutex_unlock(&gts->ts_ctxlock); + up_read(&current->mm->mmap_sem); +} + +/* + * Set a CB.istatus to active using a user virtual address. This must be done + * just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY. + * If the line is evicted, the status may be lost. The in-cache update + * is necessary to prevent the user from seeing a stale cb.istatus that will + * change as soon as the TFH restart is complete. Races may cause an + * occasional failure to clear the cb.istatus, but that is ok. + * + * If the cb address is not valid (should not happen, but...), nothing + * bad will happen. The get_user()/put_user() will fail but there + * are no bad side-effects.
+ */ +static void gru_cb_set_istatus_active(unsigned long __user *cb) +{ + union { + struct gru_instruction_bits bits; + unsigned long dw; + } u; + + if (cb) { + get_user(u.dw, cb); + u.bits.istatus = CBS_ACTIVE; + put_user(u.dw, cb); + } +} + +/* + * Convert an interrupt IRQ to a pointer to the GRU GTS that caused the + * interrupt. Interrupts are always sent to a cpu on the blade that contains the + * GRU (except for headless blades which are not currently supported). A blade + * has N grus; a block of N consecutive IRQs is assigned to the GRUs. The IRQ + * number uniquely identifies the GRU chiplet on the local blade that caused the + * interrupt. Always called in interrupt context. + */ +static inline struct gru_state *irq_to_gru(int irq) +{ + return &gru_base[uv_numa_blade_id()]->bs_grus[irq - IRQ_GRU]; +} + +/* + * Read & clear a TFM + * + * The GRU has an array of fault maps. A map is private to a cpu; + * only one cpu will ever access a given cpu's fault map. + * + * This function scans the cpu-private fault map & clears all bits that + * are set. The function returns a bitmap that indicates the bits that + * were cleared. Note that since the maps may be updated asynchronously by + * the GRU, atomic operations must be used to clear bits. + */ +static void get_clear_fault_map(struct gru_state *gru, + struct gru_tlb_fault_map *map) +{ + unsigned long i, k; + struct gru_tlb_fault_map *tfm; + + tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id()); + prefetchw(tfm); /* Helps on hardware, required for emulator */ + for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) { + k = tfm->fault_bits[i]; + if (k) + k = xchg(&tfm->fault_bits[i], 0UL); + map->fault_bits[i] = k; + } + + /* + * Not functionally required but helps performance. (Required + * on emulator) + */ + gru_flush_cache(tfm); +} + +/* + * Atomic (interrupt context) & non-atomic (user context) functions to + * convert a vaddr into a physical address. The size of the page + * is returned in pageshift. + * returns: + * 0 - successful + * < 0 - error code + * 1 - (atomic only) try again in non-atomic context + */ +static int non_atomic_pte_lookup(struct vm_area_struct *vma, + unsigned long vaddr, int write, + unsigned long *paddr, int *pageshift) +{ + struct page *page; + + /* ZZZ Need to handle HUGE pages */ + if (is_vm_hugetlb_page(vma)) + return -EFAULT; + *pageshift = PAGE_SHIFT; + if (get_user_pages + (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0) + return -EFAULT; + *paddr = page_to_phys(page); + put_page(page); + return 0; +} + +/* + * atomic_pte_lookup + * + * Convert a user virtual address to a physical address + * Only supports Intel large pages (2MB only) on x86_64. + * ZZZ - hugepage support is incomplete + * + * NOTE: mmap_sem is already held on entry to this function. This + * guarantees existence of the page tables.
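+ *
+ * Returns 0 with *paddr and *pageshift filled in, or 1 when the caller
+ * should fall back to the sleeping non_atomic_pte_lookup() path.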
+ */ +static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr, + int write, unsigned long *paddr, int *pageshift) +{ + pgd_t *pgdp; + pmd_t *pmdp; + pud_t *pudp; + pte_t pte; + + pgdp = pgd_offset(vma->vm_mm, vaddr); + if (unlikely(pgd_none(*pgdp))) + goto err; + + pudp = pud_offset(pgdp, vaddr); + if (unlikely(pud_none(*pudp))) + goto err; + + pmdp = pmd_offset(pudp, vaddr); + if (unlikely(pmd_none(*pmdp))) + goto err; +#ifdef CONFIG_X86_64 + if (unlikely(pmd_large(*pmdp))) + pte = *(pte_t *) pmdp; + else +#endif + pte = *pte_offset_kernel(pmdp, vaddr); + + if (unlikely(!pte_present(pte) || + (write && (!pte_write(pte) || !pte_dirty(pte))))) + return 1; + + *paddr = pte_pfn(pte) << PAGE_SHIFT; +#ifdef CONFIG_HUGETLB_PAGE + *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT; +#else + *pageshift = PAGE_SHIFT; +#endif + return 0; + +err: + local_irq_enable(); + return 1; +} + +/* + * Drop a TLB entry into the GRU. The fault is described by info in a TFH. + * Input: + * cb Address of user CBR. Null if not running in user context + * Return: + * 0 = dropin, exception, or switch to UPM successful + * 1 = range invalidate active + * < 0 = error code + * + */ +static int gru_try_dropin(struct gru_thread_state *gts, + struct gru_tlb_fault_handle *tfh, + unsigned long __user *cb) +{ + struct mm_struct *mm = gts->ts_mm; + struct vm_area_struct *vma; + int pageshift, asid, write, ret; + unsigned long paddr, gpa, vaddr; + + /* + * NOTE: The GRU contains magic hardware that eliminates races between + * TLB invalidates and TLB dropins. If an invalidate occurs + * in the window between reading the TFH and the subsequent TLB dropin, + * the dropin is ignored. This eliminates the need for additional locks. + */ + + /* + * Error if TFH state is IDLE or FMM mode & the user is issuing a UPM + * call. Might be a hardware race OR a stupid user. Ignore FMM because + * FMM is a transient state. + */ + if (tfh->state == TFHSTATE_IDLE) + goto failidle; + if (tfh->state == TFHSTATE_MISS_FMM && cb) + goto failfmm; + + write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0; + vaddr = tfh->missvaddr; + asid = tfh->missasid; + if (asid == 0) + goto failnoasid; + + rmb(); /* TFH must be cache resident before reading ms_range_active */ + + /* + * TFH is cache resident - at least briefly. Fail the dropin + * if a range invalidate is active. + */ + if (atomic_read(&gts->ts_gms->ms_range_active)) + goto failactive; + + vma = find_vma(mm, vaddr); + if (!vma) + goto failinval; + + /* + * Atomic lookup is faster & usually works even if called in non-atomic + * context. + */ + rmb(); /* Must check ms_range_active before loading PTEs */ + ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &pageshift); + if (ret) { + if (!cb) + goto failupm; + if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, + &pageshift)) + goto failinval; + } + if (is_gru_paddr(paddr)) + goto failinval; + + paddr = paddr & ~((1UL << pageshift) - 1); + gpa = uv_soc_phys_ram_to_gpa(paddr); + gru_cb_set_istatus_active(cb); + tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write, + GRU_PAGESIZE(pageshift)); + STAT(tlb_dropin); + gru_dbg(grudev, + "%s: tfh 0x%p, vaddr 0x%lx, asid 0x%x, ps %d, gpa 0x%lx\n", + ret ? "non-atomic" : "atomic", tfh, vaddr, asid, + pageshift, gpa); + return 0; + +failnoasid: + /* No asid (delayed unload).
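+ The context's ASID was invalidated while the miss was pending, so
+ -EAGAIN is returned and the dropin can be retried.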
*/ + STAT(tlb_dropin_fail_no_asid); + gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr); + if (!cb) + tfh_user_polling_mode(tfh); + else + gru_flush_cache(tfh); + return -EAGAIN; + +failupm: + /* Atomic failure switch CBR to UPM */ + tfh_user_polling_mode(tfh); + STAT(tlb_dropin_fail_upm); + gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr); + return 1; + +failfmm: + /* FMM state on UPM call */ + STAT(tlb_dropin_fail_fmm); + gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state); + return 0; + +failidle: + /* TFH was idle - no miss pending */ + gru_flush_cache(tfh); + if (cb) + gru_flush_cache(cb); + STAT(tlb_dropin_fail_idle); + gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state); + return 0; + +failinval: + /* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */ + tfh_exception(tfh); + STAT(tlb_dropin_fail_invalid); + gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr); + return -EFAULT; + +failactive: + /* Range invalidate active. Switch to UPM iff atomic */ + if (!cb) + tfh_user_polling_mode(tfh); + else + gru_flush_cache(tfh); + STAT(tlb_dropin_fail_range_active); + gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n", + tfh, vaddr); + return 1; +} + +/* + * Process an external interrupt from the GRU. This interrupt is + * caused by a TLB miss. + * Note that this is the interrupt handler that is registered with the + * Linux interrupt subsystem. + */ +irqreturn_t gru_intr(int irq, void *dev_id) +{ + struct gru_state *gru; + struct gru_tlb_fault_map map; + struct gru_thread_state *gts; + struct gru_tlb_fault_handle *tfh = NULL; + int cbrnum, ctxnum; + + STAT(intr); + + gru = irq_to_gru(irq); + if (!gru) { + dev_err(grudev, "GRU: invalid interrupt: cpu %d, irq %d\n", + raw_smp_processor_id(), irq); + return IRQ_NONE; + } + get_clear_fault_map(gru, &map); + gru_dbg(grudev, "irq %d, gru %x, map 0x%lx\n", irq, gru->gs_gid, + map.fault_bits[0]); + + for_each_cbr_in_tfm(cbrnum, map.fault_bits) { + tfh = get_tfh_by_index(gru, cbrnum); + prefetchw(tfh); /* Helps on hdw, required for emulator */ + + /* + * When hardware sets a bit in the faultmap, it implicitly + * locks the GRU context so that it cannot be unloaded. + * The gts cannot change until a TFH start/writestart command + * is issued. + */ + ctxnum = tfh->ctxnum; + gts = gru->gs_gts[ctxnum]; + + /* + * This is running in interrupt context. Trylock the mmap_sem. + * If it fails, retry the fault in user context. + */ + if (down_read_trylock(&gts->ts_mm->mmap_sem)) { + gru_try_dropin(gts, tfh, NULL); + up_read(&gts->ts_mm->mmap_sem); + } else { + tfh_user_polling_mode(tfh); + } + } + return IRQ_HANDLED; +} + + +static int gru_user_dropin(struct gru_thread_state *gts, + struct gru_tlb_fault_handle *tfh, + unsigned long __user *cb) +{ + struct gru_mm_struct *gms = gts->ts_gms; + int ret; + + while (1) { + wait_event(gms->ms_wait_queue, + atomic_read(&gms->ms_range_active) == 0); + prefetchw(tfh); /* Helps on hdw, required for emulator */ + ret = gru_try_dropin(gts, tfh, cb); + if (ret <= 0) + return ret; + STAT(call_os_wait_queue); + } +} + +/* + * This interface is called as a result of a user detecting a "call OS" bit + * in a user CB. Normally means that a TLB fault has occurred.
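+ * If the dropin cannot be completed, gru_try_dropin() above leaves the
+ * CB in UPM or EXCEPTION state.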
+ * cb - user virtual address of the CB + */ +int gru_handle_user_call_os(unsigned long cb) +{ + struct gru_tlb_fault_handle *tfh; + struct gru_thread_state *gts; + unsigned long __user *cbp; + int ucbnum, cbrnum, ret = -EINVAL; + + STAT(call_os); + gru_dbg(grudev, "address 0x%lx\n", cb); + + /* sanity check the cb pointer */ + ucbnum = get_cb_number((void *)cb); + if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB) + return -EINVAL; + cbp = (unsigned long *)cb; + + gts = gru_find_lock_gts(cb); + if (!gts) + return -EINVAL; + + if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) { + ret = -EINVAL; + goto exit; + } + + /* + * If force_unload is set, the UPM TLB fault is phony. The task + * has migrated to another node and the GSEG must be moved. Just + * unload the context. The task will page fault and assign a new + * context. + */ + ret = -EAGAIN; + cbrnum = thread_cbr_number(gts, ucbnum); + if (gts->ts_force_unload) { + gru_unload_context(gts, 1); + } else if (gts->ts_gru) { + tfh = get_tfh_by_index(gts->ts_gru, cbrnum); + ret = gru_user_dropin(gts, tfh, cbp); + } +exit: + gru_unlock_gts(gts); + return ret; +} + +/* + * Fetch the exception detail information for a CB that terminated with + * an exception. + */ +int gru_get_exception_detail(unsigned long arg) +{ + struct control_block_extended_exc_detail excdet; + struct gru_control_block_extended *cbe; + struct gru_thread_state *gts; + int ucbnum, cbrnum, ret; + + STAT(user_exception); + if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet))) + return -EFAULT; + + gru_dbg(grudev, "address 0x%lx\n", excdet.cb); + gts = gru_find_lock_gts(excdet.cb); + if (!gts) + return -EINVAL; + + if (gts->ts_gru) { + ucbnum = get_cb_number((void *)excdet.cb); + cbrnum = thread_cbr_number(gts, ucbnum); + cbe = get_cbe_by_index(gts->ts_gru, cbrnum); + prefetchw(cbe); /* Harmless on hardware, required for emulator */ + excdet.opc = cbe->opccpy; + excdet.exopc = cbe->exopccpy; + excdet.ecause = cbe->ecause; + excdet.exceptdet0 = cbe->idef1upd; + excdet.exceptdet1 = cbe->idef3upd; + ret = 0; + } else { + ret = -EAGAIN; + } + gru_unlock_gts(gts); + + gru_dbg(grudev, "address 0x%lx, ecause 0x%x\n", excdet.cb, + excdet.ecause); + if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet))) + ret = -EFAULT; + return ret; +} + +/* + * User request to unload a context. Content is saved for possible reload. + */ +int gru_user_unload_context(unsigned long arg) +{ + struct gru_thread_state *gts; + struct gru_unload_context_req req; + + STAT(user_unload_context); + if (copy_from_user(&req, (void __user *)arg, sizeof(req))) + return -EFAULT; + + gru_dbg(grudev, "gseg 0x%lx\n", req.gseg); + + gts = gru_find_lock_gts(req.gseg); + if (!gts) + return -EINVAL; + + if (gts->ts_gru) + gru_unload_context(gts, 1); + gru_unlock_gts(gts); + + return 0; +} + +/* + * User request to flush a range of virtual addresses from the GRU TLB + * (Mainly for testing). + */ +int gru_user_flush_tlb(unsigned long arg) +{ + struct gru_thread_state *gts; + struct gru_flush_tlb_req req; + + STAT(user_flush_tlb); + if (copy_from_user(&req, (void __user *)arg, sizeof(req))) + return -EFAULT; + + gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg, + req.vaddr, req.len); + + gts = gru_find_lock_gts(req.gseg); + if (!gts) + return -EINVAL; + + gru_flush_tlb_range(gts->ts_gms, req.vaddr, req.vaddr + req.len); + gru_unlock_gts(gts); + + return 0; +} + +/* + * Register the current task as the user of the GSEG slice. + * Needed for TLB fault interrupt targeting. 
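+ * Reached via the GRU_SET_TASK_SLICE ioctl; allocates (and locks) the
+ * GTS for the address if one does not yet exist.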
+ */ +int gru_set_task_slice(long address) +{ + struct gru_thread_state *gts; + + STAT(set_task_slice); + gru_dbg(grudev, "address 0x%lx\n", address); + gts = gru_alloc_locked_gts(address); + if (!gts) + return -EINVAL; + + gts->ts_tgid_owner = current->tgid; + gru_unlock_gts(gts); + + return 0; +} diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c new file mode 100644 index 0000000..6509838 --- /dev/null +++ b/drivers/misc/sgi-gru/grufile.c @@ -0,0 +1,494 @@ +/* + * SN Platform GRU Driver + * + * FILE OPERATIONS & DRIVER INITIALIZATION + * + * This file supports the user system call for file open, close, mmap, etc. + * This also includes the driver initialization code. + * + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/io.h> +#include <linux/smp_lock.h> +#include <linux/spinlock.h> +#include <linux/device.h> +#include <linux/miscdevice.h> +#include <linux/interrupt.h> +#include <linux/proc_fs.h> +#include <linux/uaccess.h> +#include "gru.h" +#include "grulib.h" +#include "grutables.h" + +#if defined CONFIG_X86_64 +#include <asm/genapic.h> +#include <asm/irq.h> +#define IS_UV() is_uv_system() +#elif defined CONFIG_IA64 +#include <asm/system.h> +#include <asm/sn/simulator.h> +/* temp support for running on hardware simulator */ +#define IS_UV() IS_MEDUSA() || ia64_platform_is("uv") +#else +#define IS_UV() 0 +#endif + +#include <asm/uv/uv_hub.h> +#include <asm/uv/uv_mmrs.h> + +struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly; +unsigned long gru_start_paddr, gru_end_paddr __read_mostly; +struct gru_stats_s gru_stats; + +/* Guaranteed user available resources on each node */ +static int max_user_cbrs, max_user_dsr_bytes; + +static struct file_operations gru_fops; +static struct miscdevice gru_miscdev; + + +/* + * gru_vma_close + * + * Called when unmapping a device mapping. Frees all gru resources + * and tables belonging to the vma. + */ +static void gru_vma_close(struct vm_area_struct *vma) +{ + struct gru_vma_data *vdata; + struct gru_thread_state *gts; + struct list_head *entry, *next; + + if (!vma->vm_private_data) + return; + + vdata = vma->vm_private_data; + vma->vm_private_data = NULL; + gru_dbg(grudev, "vma %p, file %p, vdata %p\n", vma, vma->vm_file, + vdata); + list_for_each_safe(entry, next, &vdata->vd_head) { + gts = list_entry(entry, struct gru_thread_state, ts_next); + list_del(&gts->ts_next); + mutex_lock(&gts->ts_ctxlock); + if (gts->ts_gru) + gru_unload_context(gts, 0); + mutex_unlock(&gts->ts_ctxlock); + gts_drop(gts); + } + kfree(vdata); + STAT(vdata_free); +} + +/* + * gru_file_mmap + * + * Called when mmapping the device.
Initializes the vma with a fault handler + * and private data structure necessary to allocate, track, and free the + * underlying pages. + */ +static int gru_file_mmap(struct file *file, struct vm_area_struct *vma) +{ + if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) != (VM_SHARED | VM_WRITE)) + return -EPERM; + + if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) || + vma->vm_end & (GRU_GSEG_PAGESIZE - 1)) + return -EINVAL; + + vma->vm_flags |= + (VM_IO | VM_DONTCOPY | VM_LOCKED | VM_DONTEXPAND | VM_PFNMAP | + VM_RESERVED); + vma->vm_page_prot = PAGE_SHARED; + vma->vm_ops = &gru_vm_ops; + + vma->vm_private_data = gru_alloc_vma_data(vma, 0); + if (!vma->vm_private_data) + return -ENOMEM; + + gru_dbg(grudev, "file %p, vaddr 0x%lx, vma %p, vdata %p\n", + file, vma->vm_start, vma, vma->vm_private_data); + return 0; +} + +/* + * Create a new GRU context + */ +static int gru_create_new_context(unsigned long arg) +{ + struct gru_create_context_req req; + struct vm_area_struct *vma; + struct gru_vma_data *vdata; + int ret = -EINVAL; + + + if (copy_from_user(&req, (void __user *)arg, sizeof(req))) + return -EFAULT; + + if (req.data_segment_bytes == 0 || + req.data_segment_bytes > max_user_dsr_bytes) + return -EINVAL; + if (!req.control_blocks || !req.maximum_thread_count || + req.control_blocks > max_user_cbrs) + return -EINVAL; + + if (!(req.options & GRU_OPT_MISS_MASK)) + req.options |= GRU_OPT_MISS_FMM_INTR; + + down_write(&current->mm->mmap_sem); + vma = gru_find_vma(req.gseg); + if (vma) { + vdata = vma->vm_private_data; + vdata->vd_user_options = req.options; + vdata->vd_dsr_au_count = + GRU_DS_BYTES_TO_AU(req.data_segment_bytes); + vdata->vd_cbr_au_count = GRU_CB_COUNT_TO_AU(req.control_blocks); + ret = 0; + } + up_write(&current->mm->mmap_sem); + + return ret; +} + +/* + * Get GRU configuration info (temp - for emulator testing) + */ +static long gru_get_config_info(unsigned long arg) +{ + struct gru_config_info info; + int nodesperblade; + + if (num_online_nodes() > 1 && + (uv_node_to_blade_id(1) == uv_node_to_blade_id(0))) + nodesperblade = 2; + else + nodesperblade = 1; + info.cpus = num_online_cpus(); + info.nodes = num_online_nodes(); + info.blades = info.nodes / nodesperblade; + info.chiplets = GRU_CHIPLETS_PER_BLADE * info.blades; + + if (copy_to_user((void __user *)arg, &info, sizeof(info))) + return -EFAULT; + return 0; +} + +/* + * Get GRU chiplet status + */ +static long gru_get_chiplet_status(unsigned long arg) +{ + struct gru_state *gru; + struct gru_chiplet_info info; + + if (copy_from_user(&info, (void __user *)arg, sizeof(info))) + return -EFAULT; + + if (info.node == -1) + info.node = numa_node_id(); + if (info.node >= num_possible_nodes() || + info.chiplet >= GRU_CHIPLETS_PER_HUB || + info.node < 0 || info.chiplet < 0) + return -EINVAL; + + info.blade = uv_node_to_blade_id(info.node); + gru = get_gru(info.blade, info.chiplet); + + info.total_dsr_bytes = GRU_NUM_DSR_BYTES; + info.total_cbr = GRU_NUM_CB; + info.total_user_dsr_bytes = GRU_NUM_DSR_BYTES - + gru->gs_reserved_dsr_bytes; + info.total_user_cbr = GRU_NUM_CB - gru->gs_reserved_cbrs; + info.free_user_dsr_bytes = hweight64(gru->gs_dsr_map) * + GRU_DSR_AU_BYTES; + info.free_user_cbr = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE; + + if (copy_to_user((void __user *)arg, &info, sizeof(info))) + return -EFAULT; + return 0; +} + +/* + * gru_file_unlocked_ioctl + * + * Called to update file attributes via IOCTL calls.
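+ * Dispatches each GRU_* request code to the matching handler above;
+ * unrecognized requests return -EBADRQC.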
+ */ +static long gru_file_unlocked_ioctl(struct file *file, unsigned int req, + unsigned long arg) +{ + int err = -EBADRQC; + + gru_dbg(grudev, "file %p\n", file); + + switch (req) { + case GRU_CREATE_CONTEXT: + err = gru_create_new_context(arg); + break; + case GRU_SET_TASK_SLICE: + err = gru_set_task_slice(arg); + break; + case GRU_USER_GET_EXCEPTION_DETAIL: + err = gru_get_exception_detail(arg); + break; + case GRU_USER_UNLOAD_CONTEXT: + err = gru_user_unload_context(arg); + break; + case GRU_GET_CHIPLET_STATUS: + err = gru_get_chiplet_status(arg); + break; + case GRU_USER_FLUSH_TLB: + err = gru_user_flush_tlb(arg); + break; + case GRU_USER_CALL_OS: + err = gru_handle_user_call_os(arg); + break; + case GRU_GET_CONFIG_INFO: + err = gru_get_config_info(arg); + break; + } + return err; +} + +/* + * Called at init time to build tables for all GRUs that are present in the + * system. + */ +static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr, + void *vaddr, int nid, int bid, int grunum) +{ + spin_lock_init(&gru->gs_lock); + spin_lock_init(&gru->gs_asid_lock); + gru->gs_gru_base_paddr = paddr; + gru->gs_gru_base_vaddr = vaddr; + gru->gs_gid = bid * GRU_CHIPLETS_PER_BLADE + grunum; + gru->gs_blade = gru_base[bid]; + gru->gs_blade_id = bid; + gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1; + gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1; + gru_tgh_flush_init(gru); + gru_dbg(grudev, "bid %d, nid %d, gru %x, vaddr %p (0x%lx)\n", + bid, nid, gru->gs_gid, gru->gs_gru_base_vaddr, + gru->gs_gru_base_paddr); + gru_kservices_init(gru); +} + +static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr) +{ + int pnode, nid, bid, chip; + int cbrs, dsrbytes, n; + int order = get_order(sizeof(struct gru_blade_state)); + struct page *page; + struct gru_state *gru; + unsigned long paddr; + void *vaddr; + + max_user_cbrs = GRU_NUM_CB; + max_user_dsr_bytes = GRU_NUM_DSR_BYTES; + for_each_online_node(nid) { + bid = uv_node_to_blade_id(nid); + pnode = uv_node_to_pnode(nid); + if (gru_base[bid]) + continue; + page = alloc_pages_node(nid, GFP_KERNEL, order); + if (!page) + goto fail; + gru_base[bid] = page_address(page); + memset(gru_base[bid], 0, sizeof(struct gru_blade_state)); + gru_base[bid]->bs_lru_gru = &gru_base[bid]->bs_grus[0]; + spin_lock_init(&gru_base[bid]->bs_lock); + + dsrbytes = 0; + cbrs = 0; + for (gru = gru_base[bid]->bs_grus, chip = 0; + chip < GRU_CHIPLETS_PER_BLADE; + chip++, gru++) { + paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip); + vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip); + gru_init_chiplet(gru, paddr, vaddr, bid, nid, chip); + n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE; + cbrs = max(cbrs, n); + n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES; + dsrbytes = max(dsrbytes, n); + } + max_user_cbrs = min(max_user_cbrs, cbrs); + max_user_dsr_bytes = min(max_user_dsr_bytes, dsrbytes); + } + + return 0; + +fail: + for (nid--; nid >= 0; nid--) + free_pages((unsigned long)gru_base[nid], order); + return -ENOMEM; +} + +#ifdef CONFIG_IA64 + +static int get_base_irq(void) +{ + return IRQ_GRU; +} + +#elif defined CONFIG_X86_64 + +static void noop(unsigned int irq) +{ +} + +static struct irq_chip gru_chip = { + .name = "gru", + .mask = noop, + .unmask = noop, + .ack = noop, +}; + +static int get_base_irq(void) +{ + set_irq_chip(IRQ_GRU, &gru_chip); + set_irq_chip(IRQ_GRU + 1, &gru_chip); + return IRQ_GRU; +} +#endif + +/* + * gru_init + * + * Called at boot or module load time to initialize the GRUs. 
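+ * On UV systems this requests the GRU interrupts, registers the misc
+ * device and /proc entries, and builds the per-blade GRU tables; on
+ * non-UV systems it simply returns 0.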
+ */ +static int __init gru_init(void) +{ + int ret, irq, chip; + char id[10]; + void *gru_start_vaddr; + + if (!IS_UV()) + return 0; + +#if defined CONFIG_IA64 + gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */ +#else + gru_start_paddr = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR) & + 0x7fffffffffffUL; + +#endif + gru_start_vaddr = __va(gru_start_paddr); + gru_end_paddr = gru_start_paddr + MAX_NUMNODES * GRU_SIZE; + printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n", + gru_start_paddr, gru_end_paddr); + irq = get_base_irq(); + for (chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++) { + ret = request_irq(irq + chip, gru_intr, 0, id, NULL); + /* TODO: fix irq handling on x86. For now ignore failures because + * interrupts are not required & not yet fully supported */ + if (ret) { + printk("!!!WARNING: GRU ignoring request failure!!!\n"); + ret = 0; + } + if (ret) { + printk(KERN_ERR "%s: request_irq failed\n", + GRU_DRIVER_ID_STR); + goto exit1; + } + } + + ret = misc_register(&gru_miscdev); + if (ret) { + printk(KERN_ERR "%s: misc_register failed\n", + GRU_DRIVER_ID_STR); + goto exit1; + } + + ret = gru_proc_init(); + if (ret) { + printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR); + goto exit2; + } + + ret = gru_init_tables(gru_start_paddr, gru_start_vaddr); + if (ret) { + printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR); + goto exit3; + } + + printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR, + GRU_DRIVER_VERSION_STR); + return 0; + +exit3: + gru_proc_exit(); +exit2: + misc_deregister(&gru_miscdev); +exit1: + for (--chip; chip >= 0; chip--) + free_irq(irq + chip, NULL); + return ret; + +} + +static void __exit gru_exit(void) +{ + int i, bid; + int order = get_order(sizeof(struct gru_state) * + GRU_CHIPLETS_PER_BLADE); + + if (!IS_UV()) + return; + + for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++) + free_irq(IRQ_GRU + i, NULL); + + for (bid = 0; bid < GRU_MAX_BLADES; bid++) + free_pages((unsigned long)gru_base[bid], order); + + misc_deregister(&gru_miscdev); + gru_proc_exit(); +} + +static struct file_operations gru_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = gru_file_unlocked_ioctl, + .mmap = gru_file_mmap, +}; + +static struct miscdevice gru_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "gru", + .fops = &gru_fops, +}; + +struct vm_operations_struct gru_vm_ops = { + .close = gru_vma_close, + .fault = gru_fault, +}; + +fs_initcall(gru_init); +module_exit(gru_exit); + +module_param(gru_options, ulong, 0644); +MODULE_PARM_DESC(gru_options, "Various debug options"); + +MODULE_AUTHOR("Silicon Graphics, Inc."); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION(GRU_DRIVER_ID_STR GRU_DRIVER_VERSION_STR); +MODULE_VERSION(GRU_DRIVER_VERSION_STR); + diff --git a/drivers/misc/sgi-gru/gruhandles.h b/drivers/misc/sgi-gru/gruhandles.h new file mode 100644 index 0000000..b63018d --- /dev/null +++ b/drivers/misc/sgi-gru/gruhandles.h @@ -0,0 +1,658 @@ +/* + * SN Platform GRU Driver + * + * GRU HANDLE DEFINITION + * + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __GRUHANDLES_H__ +#define __GRUHANDLES_H__ +#include "gru_instructions.h" + +/* + * Manifest constants for GRU Memory Map + */ +#define GRU_GSEG0_BASE 0 +#define GRU_MCS_BASE (64 * 1024 * 1024) +#define GRU_SIZE (128UL * 1024 * 1024) + +/* Handle & resource counts */ +#define GRU_NUM_CB 128 +#define GRU_NUM_DSR_BYTES (32 * 1024) +#define GRU_NUM_TFM 16 +#define GRU_NUM_TGH 24 +#define GRU_NUM_CBE 128 +#define GRU_NUM_TFH 128 +#define GRU_NUM_CCH 16 +#define GRU_NUM_GSH 1 + +/* Maximum resource counts that can be reserved by user programs */ +#define GRU_NUM_USER_CBR GRU_NUM_CBE +#define GRU_NUM_USER_DSR_BYTES GRU_NUM_DSR_BYTES + +/* Bytes per handle & handle stride. Code assumes all cb, tfh, cbe handles + * are the same */ +#define GRU_HANDLE_BYTES 64 +#define GRU_HANDLE_STRIDE 256 + +/* Base addresses of handles */ +#define GRU_TFM_BASE (GRU_MCS_BASE + 0x00000) +#define GRU_TGH_BASE (GRU_MCS_BASE + 0x08000) +#define GRU_CBE_BASE (GRU_MCS_BASE + 0x10000) +#define GRU_TFH_BASE (GRU_MCS_BASE + 0x18000) +#define GRU_CCH_BASE (GRU_MCS_BASE + 0x20000) +#define GRU_GSH_BASE (GRU_MCS_BASE + 0x30000) + +/* User gseg constants */ +#define GRU_GSEG_STRIDE (4 * 1024 * 1024) +#define GSEG_BASE(a) ((a) & ~(GRU_GSEG_PAGESIZE - 1)) + +/* Data segment constants */ +#define GRU_DSR_AU_BYTES 1024 +#define GRU_DSR_CL (GRU_NUM_DSR_BYTES / GRU_CACHE_LINE_BYTES) +#define GRU_DSR_AU_CL (GRU_DSR_AU_BYTES / GRU_CACHE_LINE_BYTES) +#define GRU_DSR_AU (GRU_NUM_DSR_BYTES / GRU_DSR_AU_BYTES) + +/* Control block constants */ +#define GRU_CBR_AU_SIZE 2 +#define GRU_CBR_AU (GRU_NUM_CBE / GRU_CBR_AU_SIZE) + +/* Convert resource counts to the number of AU */ +#define GRU_DS_BYTES_TO_AU(n) DIV_ROUND_UP(n, GRU_DSR_AU_BYTES) +#define GRU_CB_COUNT_TO_AU(n) DIV_ROUND_UP(n, GRU_CBR_AU_SIZE) + +/* UV limits */ +#define GRU_CHIPLETS_PER_HUB 2 +#define GRU_HUBS_PER_BLADE 1 +#define GRU_CHIPLETS_PER_BLADE (GRU_HUBS_PER_BLADE * GRU_CHIPLETS_PER_HUB) + +/* User GRU Gseg offsets */ +#define GRU_CB_BASE 0 +#define GRU_CB_LIMIT (GRU_CB_BASE + GRU_HANDLE_STRIDE * GRU_NUM_CBE) +#define GRU_DS_BASE 0x20000 +#define GRU_DS_LIMIT (GRU_DS_BASE + GRU_NUM_DSR_BYTES) + +/* Convert a GRU physical address to the chiplet offset */ +#define GSEGPOFF(h) ((h) & (GRU_SIZE - 1)) + +/* Convert an arbitrary handle address to the beginning of the GRU segment */ +#define GRUBASE(h) ((void *)((unsigned long)(h) & ~(GRU_SIZE - 1))) + +/* General addressing macros. 
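+ * The MCS handle helpers below add a fixed per-type base (GRU_TFM_BASE,
+ * GRU_TGH_BASE, ...) plus index * GRU_HANDLE_STRIDE to a chiplet base
+ * address; the gseg helpers instead step by GRU_GSEG_STRIDE per context.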
*/ +static inline void *get_gseg_base_address(void *base, int ctxnum) +{ + return (void *)(base + GRU_GSEG0_BASE + GRU_GSEG_STRIDE * ctxnum); +} + +static inline void *get_gseg_base_address_cb(void *base, int ctxnum, int line) +{ + return (void *)(get_gseg_base_address(base, ctxnum) + + GRU_CB_BASE + GRU_HANDLE_STRIDE * line); +} + +static inline void *get_gseg_base_address_ds(void *base, int ctxnum, int line) +{ + return (void *)(get_gseg_base_address(base, ctxnum) + GRU_DS_BASE + + GRU_CACHE_LINE_BYTES * line); +} + +static inline struct gru_tlb_fault_map *get_tfm(void *base, int ctxnum) +{ + return (struct gru_tlb_fault_map *)(base + GRU_TFM_BASE + + ctxnum * GRU_HANDLE_STRIDE); +} + +static inline struct gru_tlb_global_handle *get_tgh(void *base, int ctxnum) +{ + return (struct gru_tlb_global_handle *)(base + GRU_TGH_BASE + + ctxnum * GRU_HANDLE_STRIDE); +} + +static inline struct gru_control_block_extended *get_cbe(void *base, int ctxnum) +{ + return (struct gru_control_block_extended *)(base + GRU_CBE_BASE + + ctxnum * GRU_HANDLE_STRIDE); +} + +static inline struct gru_tlb_fault_handle *get_tfh(void *base, int ctxnum) +{ + return (struct gru_tlb_fault_handle *)(base + GRU_TFH_BASE + + ctxnum * GRU_HANDLE_STRIDE); +} + +static inline struct gru_context_configuration_handle *get_cch(void *base, + int ctxnum) +{ + return (struct gru_context_configuration_handle *)(base + + GRU_CCH_BASE + ctxnum * GRU_HANDLE_STRIDE); +} + +static inline unsigned long get_cb_number(void *cb) +{ + return (((unsigned long)cb - GRU_CB_BASE) % GRU_GSEG_PAGESIZE) / + GRU_HANDLE_STRIDE; +} + +/* byte offset to a specific GRU chiplet. (p=pnode, c=chiplet (0 or 1)) */ +static inline unsigned long gru_chiplet_paddr(unsigned long paddr, int pnode, + int chiplet) +{ + return paddr + GRU_SIZE * (2 * pnode + chiplet); +} + +static inline void *gru_chiplet_vaddr(void *vaddr, int pnode, int chiplet) +{ + return vaddr + GRU_SIZE * (2 * pnode + chiplet); +} + + + +/* + * Global TLB Fault Map + * Bitmap of outstanding TLB misses needing interrupt/polling service. + * + */ +struct gru_tlb_fault_map { + unsigned long fault_bits[BITS_TO_LONGS(GRU_NUM_CBE)]; + unsigned long fill0[2]; + unsigned long done_bits[BITS_TO_LONGS(GRU_NUM_CBE)]; + unsigned long fill1[2]; +}; + +/* + * TGH - TLB Global Handle + * Used for TLB flushing. + * + */ +struct gru_tlb_global_handle { + unsigned int cmd:1; /* DW 0 */ + unsigned int delresp:1; + unsigned int opc:1; + unsigned int fill1:5; + + unsigned int fill2:8; + + unsigned int status:2; + unsigned long fill3:2; + unsigned int state:3; + unsigned long fill4:1; + + unsigned int cause:3; + unsigned long fill5:37; + + unsigned long vaddr:64; /* DW 1 */ + + unsigned int asid:24; /* DW 2 */ + unsigned int fill6:8; + + unsigned int pagesize:5; + unsigned int fill7:11; + + unsigned int global:1; + unsigned int fill8:15; + + unsigned long vaddrmask:39; /* DW 3 */ + unsigned int fill9:9; + unsigned int n:10; + unsigned int fill10:6; + + unsigned int ctxbitmap:16; /* DW4 */ + unsigned long fill11[3]; +}; + +enum gru_tgh_cmd { + TGHCMD_START +}; + +enum gru_tgh_opc { + TGHOP_TLBNOP, + TGHOP_TLBINV +}; + +enum gru_tgh_status { + TGHSTATUS_IDLE, + TGHSTATUS_EXCEPTION, + TGHSTATUS_ACTIVE +}; + +enum gru_tgh_state { + TGHSTATE_IDLE, + TGHSTATE_PE_INVAL, + TGHSTATE_INTERRUPT_INVAL, + TGHSTATE_WAITDONE, + TGHSTATE_RESTART_CTX, +}; + +/* + * TFH - TLB Fault Handle + * Used for TLB dropins into the GRU TLB.
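+ * The fault handler in grufault.c reads missvaddr/missasid from the TFH
+ * and completes the dropin via tfh_write_restart().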
+ * + */ +struct gru_tlb_fault_handle { + unsigned int cmd:1; /* DW 0 - low 32*/ + unsigned int delresp:1; + unsigned int fill0:2; + unsigned int opc:3; + unsigned int fill1:9; + + unsigned int status:2; + unsigned int fill2:1; + unsigned int color:1; + unsigned int state:3; + unsigned int fill3:1; + + unsigned int cause:7; /* DW 0 - high 32 */ + unsigned int fill4:1; + + unsigned int indexway:12; + unsigned int fill5:4; + + unsigned int ctxnum:4; + unsigned int fill6:12; + + unsigned long missvaddr:64; /* DW 1 */ + + unsigned int missasid:24; /* DW 2 */ + unsigned int fill7:8; + unsigned int fillasid:24; + unsigned int dirty:1; + unsigned int gaa:2; + unsigned long fill8:5; + + unsigned long pfn:41; /* DW 3 */ + unsigned int fill9:7; + unsigned int pagesize:5; + unsigned int fill10:11; + + unsigned long fillvaddr:64; /* DW 4 */ + + unsigned long fill11[3]; +}; + +enum gru_tfh_opc { + TFHOP_NOOP, + TFHOP_RESTART, + TFHOP_WRITE_ONLY, + TFHOP_WRITE_RESTART, + TFHOP_EXCEPTION, + TFHOP_USER_POLLING_MODE = 7, +}; + +enum tfh_status { + TFHSTATUS_IDLE, + TFHSTATUS_EXCEPTION, + TFHSTATUS_ACTIVE, +}; + +enum tfh_state { + TFHSTATE_INACTIVE, + TFHSTATE_IDLE, + TFHSTATE_MISS_UPM, + TFHSTATE_MISS_FMM, + TFHSTATE_HW_ERR, + TFHSTATE_WRITE_TLB, + TFHSTATE_RESTART_CBR, +}; + +/* TFH cause bits */ +enum tfh_cause { + TFHCAUSE_NONE, + TFHCAUSE_TLB_MISS, + TFHCAUSE_TLB_MOD, + TFHCAUSE_HW_ERROR_RR, + TFHCAUSE_HW_ERROR_MAIN_ARRAY, + TFHCAUSE_HW_ERROR_VALID, + TFHCAUSE_HW_ERROR_PAGESIZE, + TFHCAUSE_INSTRUCTION_EXCEPTION, + TFHCAUSE_UNCORRECTIBLE_ERROR, +}; + +/* GAA values */ +#define GAA_RAM 0x0 +#define GAA_NCRAM 0x2 +#define GAA_MMIO 0x1 +#define GAA_REGISTER 0x3 + +/* GRU paddr shift for pfn. (NOTE: shift is NOT by actual pagesize) */ +#define GRU_PADDR_SHIFT 12 + +/* + * Context Configuration handle + * Used to allocate resources to a GSEG context. + * + */ +struct gru_context_configuration_handle { + unsigned int cmd:1; /* DW0 */ + unsigned int delresp:1; + unsigned int opc:3; + unsigned int unmap_enable:1; + unsigned int req_slice_set_enable:1; + unsigned int req_slice:2; + unsigned int cb_int_enable:1; + unsigned int tlb_int_enable:1; + unsigned int tfm_fault_bit_enable:1; + unsigned int tlb_int_select:4; + + unsigned int status:2; + unsigned int state:2; + unsigned int reserved2:4; + + unsigned int cause:4; + unsigned int tfm_done_bit_enable:1; + unsigned int unused:3; + + unsigned int dsr_allocation_map; + + unsigned long cbr_allocation_map; /* DW1 */ + + unsigned int asid[8]; /* DW 2 - 5 */ + unsigned short sizeavail[8]; /* DW 6 - 7 */ +} __attribute__ ((packed)); + +enum gru_cch_opc { + CCHOP_START = 1, + CCHOP_ALLOCATE, + CCHOP_INTERRUPT, + CCHOP_DEALLOCATE, + CCHOP_INTERRUPT_SYNC, +}; + +enum gru_cch_status { + CCHSTATUS_IDLE, + CCHSTATUS_EXCEPTION, + CCHSTATUS_ACTIVE, +}; + +enum gru_cch_state { + CCHSTATE_INACTIVE, + CCHSTATE_MAPPED, + CCHSTATE_ACTIVE, + CCHSTATE_INTERRUPTED, +}; + +/* CCH Exception cause */ +enum gru_cch_cause { + CCHCAUSE_REGION_REGISTER_WRITE_ERROR = 1, + CCHCAUSE_ILLEGAL_OPCODE = 2, + CCHCAUSE_INVALID_START_REQUEST = 3, + CCHCAUSE_INVALID_ALLOCATION_REQUEST = 4, + CCHCAUSE_INVALID_DEALLOCATION_REQUEST = 5, + CCHCAUSE_INVALID_INTERRUPT_REQUEST = 6, + CCHCAUSE_CCH_BUSY = 7, + CCHCAUSE_NO_CBRS_TO_ALLOCATE = 8, + CCHCAUSE_BAD_TFM_CONFIG = 9, + CCHCAUSE_CBR_RESOURCES_OVERSUBSCRIPED = 10, + CCHCAUSE_DSR_RESOURCES_OVERSUBSCRIPED = 11, + CCHCAUSE_CBR_DEALLOCATION_ERROR = 12, +}; +/* + * CBE - Control Block Extended + * Maintains internal GRU state for active CBs. 
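+ *
+ * The *cpy fields mirror fields of the associated user CB and the
+ * *upd fields are updated by the hardware; gru_get_cb_exception_detail()
+ * in grukservices.c, for example, reads opccpy/exopccpy/ecause and
+ * idef1upd/idef3upd from here to build its exception report.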
+ * + */ +struct gru_control_block_extended { + unsigned int reserved0:1; /* DW 0 - low */ + unsigned int imacpy:3; + unsigned int reserved1:4; + unsigned int xtypecpy:3; + unsigned int iaa0cpy:2; + unsigned int iaa1cpy:2; + unsigned int reserved2:1; + unsigned int opccpy:8; + unsigned int exopccpy:8; + + unsigned int idef2cpy:22; /* DW 0 - high */ + unsigned int reserved3:10; + + unsigned int idef4cpy:22; /* DW 1 */ + unsigned int reserved4:10; + unsigned int idef4upd:22; + unsigned int reserved5:10; + + unsigned long idef1upd:64; /* DW 2 */ + + unsigned long idef5cpy:64; /* DW 3 */ + + unsigned long idef6cpy:64; /* DW 4 */ + + unsigned long idef3upd:64; /* DW 5 */ + + unsigned long idef5upd:64; /* DW 6 */ + + unsigned int idef2upd:22; /* DW 7 */ + unsigned int reserved6:10; + + unsigned int ecause:20; + unsigned int cbrstate:4; + unsigned int cbrexecstatus:8; +}; + +enum gru_cbr_state { + CBRSTATE_INACTIVE, + CBRSTATE_IDLE, + CBRSTATE_PE_CHECK, + CBRSTATE_QUEUED, + CBRSTATE_WAIT_RESPONSE, + CBRSTATE_INTERRUPTED, + CBRSTATE_INTERRUPTED_MISS_FMM, + CBRSTATE_BUSY_INTERRUPT_MISS_FMM, + CBRSTATE_INTERRUPTED_MISS_UPM, + CBRSTATE_BUSY_INTERRUPTED_MISS_UPM, + CBRSTATE_REQUEST_ISSUE, + CBRSTATE_BUSY_INTERRUPT, +}; + +/* CBE cbrexecstatus bits */ +#define CBR_EXS_ABORT_OCC_BIT 0 +#define CBR_EXS_INT_OCC_BIT 1 +#define CBR_EXS_PENDING_BIT 2 +#define CBR_EXS_QUEUED_BIT 3 +#define CBR_EXS_TLBHW_BIT 4 +#define CBR_EXS_EXCEPTION_BIT 5 + +#define CBR_EXS_ABORT_OCC (1 << CBR_EXS_ABORT_OCC_BIT) +#define CBR_EXS_INT_OCC (1 << CBR_EXS_INT_OCC_BIT) +#define CBR_EXS_PENDING (1 << CBR_EXS_PENDING_BIT) +#define CBR_EXS_QUEUED (1 << CBR_EXS_QUEUED_BIT) +#define CBR_EXS_TLBHW (1 << CBR_EXS_TLBHW_BIT) +#define CBR_EXS_EXCEPTION (1 << CBR_EXS_EXCEPTION_BIT) + +/* CBE ecause bits - defined in gru_instructions.h */ + +/* + * Convert a processor pagesize into the strange encoded pagesize used by the + * GRU. Processor pagesize is encoded as log of bytes per page. (or PAGE_SHIFT) + * pagesize log pagesize grupagesize + * 4k 12 0 + * 16k 14 1 + * 64k 16 2 + * 256k 18 3 + * 1m 20 4 + * 2m 21 5 + * 4m 22 6 + * 16m 24 7 + * 64m 26 8 + * ... + */ +#define GRU_PAGESIZE(sh) ((((sh) > 20 ? 
(sh) + 2: (sh)) >> 1) - 6) +#define GRU_SIZEAVAIL(sh) (1UL << GRU_PAGESIZE(sh)) + +/* minimum TLB purge count to ensure a full purge */ +#define GRUMAXINVAL 1024UL + + +/* Extract the status field from a kernel handle */ +#define GET_MSEG_HANDLE_STATUS(h) (((*(unsigned long *)(h)) >> 16) & 3) + +static inline void start_instruction(void *h) +{ + unsigned long *w0 = h; + + wmb(); /* setting CMD bit must be last */ + *w0 = *w0 | 1; + gru_flush_cache(h); +} + +static inline int wait_instruction_complete(void *h) +{ + int status; + + do { + cpu_relax(); + barrier(); + status = GET_MSEG_HANDLE_STATUS(h); + } while (status == CCHSTATUS_ACTIVE); + return status; +} + +#if defined CONFIG_IA64 +static inline void cch_allocate_set_asids( + struct gru_context_configuration_handle *cch, int asidval) +{ + int i; + + for (i = 0; i <= RGN_HPAGE; i++) { /* assume HPAGE is last region */ + cch->asid[i] = (asidval++); +#if 0 + /* ZZZ hugepages not supported yet */ + if (i == RGN_HPAGE) + cch->sizeavail[i] = GRU_SIZEAVAIL(hpage_shift); + else +#endif + cch->sizeavail[i] = GRU_SIZEAVAIL(PAGE_SHIFT); + } +} +#elif defined CONFIG_X86_64 +static inline void cch_allocate_set_asids( + struct gru_context_configuration_handle *cch, int asidval) +{ + int i; + + for (i = 0; i < 8; i++) { + cch->asid[i] = asidval++; + cch->sizeavail[i] = GRU_SIZEAVAIL(PAGE_SHIFT) | + GRU_SIZEAVAIL(21); + } +} +#endif + +static inline int cch_allocate(struct gru_context_configuration_handle *cch, + int asidval, unsigned long cbrmap, + unsigned long dsrmap) +{ + cch_allocate_set_asids(cch, asidval); + cch->dsr_allocation_map = dsrmap; + cch->cbr_allocation_map = cbrmap; + cch->opc = CCHOP_ALLOCATE; + start_instruction(cch); + return wait_instruction_complete(cch); +} + +static inline int cch_start(struct gru_context_configuration_handle *cch) +{ + cch->opc = CCHOP_START; + start_instruction(cch); + return wait_instruction_complete(cch); +} + +static inline int cch_interrupt(struct gru_context_configuration_handle *cch) +{ + cch->opc = CCHOP_INTERRUPT; + start_instruction(cch); + return wait_instruction_complete(cch); +} + +static inline int cch_deallocate(struct gru_context_configuration_handle *cch) +{ + cch->opc = CCHOP_DEALLOCATE; + start_instruction(cch); + return wait_instruction_complete(cch); +} + +static inline int cch_interrupt_sync(struct gru_context_configuration_handle + *cch) +{ + cch->opc = CCHOP_INTERRUPT_SYNC; + start_instruction(cch); + return wait_instruction_complete(cch); +} + +static inline int tgh_invalidate(struct gru_tlb_global_handle *tgh, + unsigned long vaddr, unsigned long vaddrmask, + int asid, int pagesize, int global, int n, + unsigned short ctxbitmap) +{ + tgh->vaddr = vaddr; + tgh->asid = asid; + tgh->pagesize = pagesize; + tgh->n = n; + tgh->global = global; + tgh->vaddrmask = vaddrmask; + tgh->ctxbitmap = ctxbitmap; + tgh->opc = TGHOP_TLBINV; + start_instruction(tgh); + return wait_instruction_complete(tgh); +} + +static inline void tfh_write_only(struct gru_tlb_fault_handle *tfh, + unsigned long pfn, unsigned long vaddr, + int asid, int dirty, int pagesize) +{ + tfh->fillasid = asid; + tfh->fillvaddr = vaddr; + tfh->pfn = pfn; + tfh->dirty = dirty; + tfh->pagesize = pagesize; + tfh->opc = TFHOP_WRITE_ONLY; + start_instruction(tfh); +} + +static inline void tfh_write_restart(struct gru_tlb_fault_handle *tfh, + unsigned long paddr, int gaa, + unsigned long vaddr, int asid, int dirty, + int pagesize) +{ + tfh->fillasid = asid; + tfh->fillvaddr = vaddr; + tfh->pfn = paddr >> GRU_PADDR_SHIFT; + tfh->gaa = 
gaa; + tfh->dirty = dirty; + tfh->pagesize = pagesize; + tfh->opc = TFHOP_WRITE_RESTART; + start_instruction(tfh); +} + +static inline void tfh_restart(struct gru_tlb_fault_handle *tfh) +{ + tfh->opc = TFHOP_RESTART; + start_instruction(tfh); +} + +static inline void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh) +{ + tfh->opc = TFHOP_USER_POLLING_MODE; + start_instruction(tfh); +} + +static inline void tfh_exception(struct gru_tlb_fault_handle *tfh) +{ + tfh->opc = TFHOP_EXCEPTION; + start_instruction(tfh); +} + +#endif /* __GRUHANDLES_H__ */ diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c new file mode 100644 index 0000000..880c55d --- /dev/null +++ b/drivers/misc/sgi-gru/grukservices.c @@ -0,0 +1,680 @@ +/* + * SN Platform GRU Driver + * + * KERNEL SERVICES THAT USE THE GRU + * + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/smp_lock.h> +#include <linux/spinlock.h> +#include <linux/device.h> +#include <linux/miscdevice.h> +#include <linux/proc_fs.h> +#include <linux/interrupt.h> +#include <linux/uaccess.h> +#include "gru.h" +#include "grulib.h" +#include "grutables.h" +#include "grukservices.h" +#include "gru_instructions.h" +#include <asm/uv/uv_hub.h> + +/* + * Kernel GRU Usage + * + * The following is an interim algorithm for management of kernel GRU + * resources. This will likely be replaced when we better understand the + * kernel/user requirements. + * + * At boot time, the kernel permanently reserves a fixed number of + * CBRs/DSRs for each cpu to use. The resources are all taken from + * the GRU chiplet 1 on the blade. This leaves the full set of resources + * of chiplet 0 available to be allocated to a single user. 
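+ *
+ * Concretely (see gru_kservices_init() below): each possible cpu on
+ * the blade gets GRU_NUM_KERNEL_CBR control blocks and
+ * GRU_NUM_KERNEL_DSR_BYTES bytes of data segment, all carved out of
+ * context KERNEL_CTXNUM on chiplet 1.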
+ */ + +/* Blade percpu resources PERMANENTLY reserved for kernel use */ +#define GRU_NUM_KERNEL_CBR 1 +#define GRU_NUM_KERNEL_DSR_BYTES 256 +#define KERNEL_CTXNUM 15 + +/* GRU instruction attributes for all instructions */ +#define IMA IMA_CB_DELAY + +/* GRU cacheline size is always 64 bytes - even on arches with 128 byte lines */ +#define __gru_cacheline_aligned__ \ + __attribute__((__aligned__(GRU_CACHE_LINE_BYTES))) + +#define MAGIC 0x1234567887654321UL + +/* Default retry count for GRU errors on kernel instructions */ +#define EXCEPTION_RETRY_LIMIT 3 + +/* Status of message queue sections */ +#define MQS_EMPTY 0 +#define MQS_FULL 1 +#define MQS_NOOP 2 + +/*----------------- RESOURCE MANAGEMENT -------------------------------------*/ +/* optimized for x86_64 */ +struct message_queue { + union gru_mesqhead head __gru_cacheline_aligned__; /* CL 0 */ + int qlines; /* DW 1 */ + long hstatus[2]; + void *next __gru_cacheline_aligned__;/* CL 1 */ + void *limit; + void *start; + void *start2; + char data ____cacheline_aligned; /* CL 2 */ +}; + +/* First word in every message - used by mesq interface */ +struct message_header { + char present; + char present2; + char lines; + char fill; +}; + +#define QLINES(mq) ((mq) + offsetof(struct message_queue, qlines)) +#define HSTATUS(mq, h) ((mq) + offsetof(struct message_queue, hstatus[h])) + +static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr) +{ + struct gru_blade_state *bs; + int lcpu; + + BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES); + preempt_disable(); + bs = gru_base[uv_numa_blade_id()]; + lcpu = uv_blade_processor_id(); + *cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE; + *dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES; + return 0; +} + +static void gru_free_cpu_resources(void *cb, void *dsr) +{ + preempt_enable(); +} + +int gru_get_cb_exception_detail(void *cb, + struct control_block_extended_exc_detail *excdet) +{ + struct gru_control_block_extended *cbe; + + cbe = get_cbe(GRUBASE(cb), get_cb_number(cb)); + prefetchw(cbe); /* Harmless on hardware, required for emulator */ + excdet->opc = cbe->opccpy; + excdet->exopc = cbe->exopccpy; + excdet->ecause = cbe->ecause; + excdet->exceptdet0 = cbe->idef1upd; + excdet->exceptdet1 = cbe->idef3upd; + return 0; +} + +char *gru_get_cb_exception_detail_str(int ret, void *cb, + char *buf, int size) +{ + struct gru_control_block_status *gen = (void *)cb; + struct control_block_extended_exc_detail excdet; + + if (ret > 0 && gen->istatus == CBS_EXCEPTION) { + gru_get_cb_exception_detail(cb, &excdet); + snprintf(buf, size, + "GRU exception: cb %p, opc %d, exopc %d, ecause 0x%x," + "excdet0 0x%lx, excdet1 0x%x", + gen, excdet.opc, excdet.exopc, excdet.ecause, + excdet.exceptdet0, excdet.exceptdet1); + } else { + snprintf(buf, size, "No exception"); + } + return buf; +} + +static int gru_wait_idle_or_exception(struct gru_control_block_status *gen) +{ + while (gen->istatus >= CBS_ACTIVE) { + cpu_relax(); + barrier(); + } + return gen->istatus; +} + +static int gru_retry_exception(void *cb) +{ + struct gru_control_block_status *gen = (void *)cb; + struct control_block_extended_exc_detail excdet; + int retry = EXCEPTION_RETRY_LIMIT; + + while (1) { + if (gru_get_cb_message_queue_substatus(cb)) + break; + if (gru_wait_idle_or_exception(gen) == CBS_IDLE) + return CBS_IDLE; + + gru_get_cb_exception_detail(cb, &excdet); + if (excdet.ecause & ~EXCEPTION_RETRY_BITS) + break; + if (retry-- == 0) + break; + gen->icmd = 1; + gru_flush_cache(gen); + } + return CBS_EXCEPTION; +} + +int 
gru_check_status_proc(void *cb) +{ + struct gru_control_block_status *gen = (void *)cb; + int ret; + + ret = gen->istatus; + if (ret != CBS_EXCEPTION) + return ret; + return gru_retry_exception(cb); + +} + +int gru_wait_proc(void *cb) +{ + struct gru_control_block_status *gen = (void *)cb; + int ret; + + ret = gru_wait_idle_or_exception(gen); + if (ret == CBS_EXCEPTION) + ret = gru_retry_exception(cb); + + return ret; +} + +void gru_abort(int ret, void *cb, char *str) +{ + char buf[GRU_EXC_STR_SIZE]; + + panic("GRU FATAL ERROR: %s - %s\n", str, + gru_get_cb_exception_detail_str(ret, cb, buf, sizeof(buf))); +} + +void gru_wait_abort_proc(void *cb) +{ + int ret; + + ret = gru_wait_proc(cb); + if (ret) + gru_abort(ret, cb, "gru_wait_abort"); +} + + +/*------------------------------ MESSAGE QUEUES -----------------------------*/ + +/* Internal status . These are NOT returned to the user. */ +#define MQIE_AGAIN -1 /* try again */ + + +/* + * Save/restore the "present" flag that is in the second line of 2-line + * messages + */ +static inline int get_present2(void *p) +{ + struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES; + return mhdr->present; +} + +static inline void restore_present2(void *p, int val) +{ + struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES; + mhdr->present = val; +} + +/* + * Create a message queue. + * qlines - message queue size in cache lines. Includes 2-line header. + */ +int gru_create_message_queue(void *p, unsigned int bytes) +{ + struct message_queue *mq = p; + unsigned int qlines; + + qlines = bytes / GRU_CACHE_LINE_BYTES - 2; + memset(mq, 0, bytes); + mq->start = &mq->data; + mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES; + mq->next = &mq->data; + mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES; + mq->qlines = qlines; + mq->hstatus[0] = 0; + mq->hstatus[1] = 1; + mq->head = gru_mesq_head(2, qlines / 2 + 1); + return 0; +} +EXPORT_SYMBOL_GPL(gru_create_message_queue); + +/* + * Send a NOOP message to a message queue + * Returns: + * 0 - if queue is full after the send. This is the normal case + * but various races can change this. + * -1 - if mesq sent successfully but queue not full + * >0 - unexpected error. MQE_xxx returned + */ +static int send_noop_message(void *cb, + unsigned long mq, void *mesg) +{ + const struct message_header noop_header = { + .present = MQS_NOOP, .lines = 1}; + unsigned long m; + int substatus, ret; + struct message_header save_mhdr, *mhdr = mesg; + + STAT(mesq_noop); + save_mhdr = *mhdr; + *mhdr = noop_header; + gru_mesq(cb, mq, gru_get_tri(mhdr), 1, IMA); + ret = gru_wait(cb); + + if (ret) { + substatus = gru_get_cb_message_queue_substatus(cb); + switch (substatus) { + case CBSS_NO_ERROR: + STAT(mesq_noop_unexpected_error); + ret = MQE_UNEXPECTED_CB_ERR; + break; + case CBSS_LB_OVERFLOWED: + STAT(mesq_noop_lb_overflow); + ret = MQE_CONGESTION; + break; + case CBSS_QLIMIT_REACHED: + STAT(mesq_noop_qlimit_reached); + ret = 0; + break; + case CBSS_AMO_NACKED: + STAT(mesq_noop_amo_nacked); + ret = MQE_CONGESTION; + break; + case CBSS_PUT_NACKED: + STAT(mesq_noop_put_nacked); + m = mq + (gru_get_amo_value_head(cb) << 6); + gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1, + IMA); + if (gru_wait(cb) == CBS_IDLE) + ret = MQIE_AGAIN; + else + ret = MQE_UNEXPECTED_CB_ERR; + break; + case CBSS_PAGE_OVERFLOW: + default: + BUG(); + } + } + *mhdr = save_mhdr; + return ret; +} + +/* + * Handle a gru_mesq full. 
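+ *
+ * Sketch of the recovery below: the queue runs as two halves. When
+ * a send hits the limit of the current half, the sender takes the
+ * hstatus lock for that half and, if the receiver has already
+ * drained it, swaps the queue head to the other half with a CSWAP;
+ * otherwise the send fails with MQE_QUEUE_FULL.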
+ */ +static int send_message_queue_full(void *cb, + unsigned long mq, void *mesg, int lines) +{ + union gru_mesqhead mqh; + unsigned int limit, head; + unsigned long avalue; + int half, qlines, save; + + /* Determine if switching to first/second half of q */ + avalue = gru_get_amo_value(cb); + head = gru_get_amo_value_head(cb); + limit = gru_get_amo_value_limit(cb); + + /* + * Fetch "qlines" from the queue header. Since the queue may be + * in memory that can't be accessed using socket addresses, use + * the GRU to access the data. Use DSR space from the message. + */ + save = *(int *)mesg; + gru_vload(cb, QLINES(mq), gru_get_tri(mesg), XTYPE_W, 1, 1, IMA); + if (gru_wait(cb) != CBS_IDLE) + goto cberr; + qlines = *(int *)mesg; + *(int *)mesg = save; + half = (limit != qlines); + + if (half) + mqh = gru_mesq_head(qlines / 2 + 1, qlines); + else + mqh = gru_mesq_head(2, qlines / 2 + 1); + + /* Try to get lock for switching head pointer */ + gru_gamir(cb, EOP_IR_CLR, HSTATUS(mq, half), XTYPE_DW, IMA); + if (gru_wait(cb) != CBS_IDLE) + goto cberr; + if (!gru_get_amo_value(cb)) { + STAT(mesq_qf_locked); + return MQE_QUEUE_FULL; + } + + /* Got the lock. Send optional NOP if queue not full, */ + if (head != limit) { + if (send_noop_message(cb, mq, mesg)) { + gru_gamir(cb, EOP_IR_INC, HSTATUS(mq, half), + XTYPE_DW, IMA); + if (gru_wait(cb) != CBS_IDLE) + goto cberr; + STAT(mesq_qf_noop_not_full); + return MQIE_AGAIN; + } + avalue++; + } + + /* Then flip queuehead to other half of queue. */ + gru_gamer(cb, EOP_ERR_CSWAP, mq, XTYPE_DW, mqh.val, avalue, IMA); + if (gru_wait(cb) != CBS_IDLE) + goto cberr; + + /* If not successfully in swapping queue head, clear the hstatus lock */ + if (gru_get_amo_value(cb) != avalue) { + STAT(mesq_qf_switch_head_failed); + gru_gamir(cb, EOP_IR_INC, HSTATUS(mq, half), XTYPE_DW, IMA); + if (gru_wait(cb) != CBS_IDLE) + goto cberr; + } + return MQIE_AGAIN; +cberr: + STAT(mesq_qf_unexpected_error); + return MQE_UNEXPECTED_CB_ERR; +} + + +/* + * Handle a gru_mesq failure. Some of these failures are software recoverable + * or retryable. + */ +static int send_message_failure(void *cb, + unsigned long mq, + void *mesg, + int lines) +{ + int substatus, ret = 0; + unsigned long m; + + substatus = gru_get_cb_message_queue_substatus(cb); + switch (substatus) { + case CBSS_NO_ERROR: + STAT(mesq_send_unexpected_error); + ret = MQE_UNEXPECTED_CB_ERR; + break; + case CBSS_LB_OVERFLOWED: + STAT(mesq_send_lb_overflow); + ret = MQE_CONGESTION; + break; + case CBSS_QLIMIT_REACHED: + STAT(mesq_send_qlimit_reached); + ret = send_message_queue_full(cb, mq, mesg, lines); + break; + case CBSS_AMO_NACKED: + STAT(mesq_send_amo_nacked); + ret = MQE_CONGESTION; + break; + case CBSS_PUT_NACKED: + STAT(mesq_send_put_nacked); + m =mq + (gru_get_amo_value_head(cb) << 6); + gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA); + if (gru_wait(cb) == CBS_IDLE) + ret = MQE_OK; + else + ret = MQE_UNEXPECTED_CB_ERR; + break; + default: + BUG(); + } + return ret; +} + +/* + * Send a message to a message queue + * cb GRU control block to use to send message + * mq message queue + * mesg message. 
Must be vaddr within a GSEG + * bytes message size (<= 2 CL) + */ +int gru_send_message_gpa(unsigned long mq, void *mesg, unsigned int bytes) +{ + struct message_header *mhdr; + void *cb; + void *dsr; + int istatus, clines, ret; + + STAT(mesq_send); + BUG_ON(bytes < sizeof(int) || bytes > 2 * GRU_CACHE_LINE_BYTES); + + clines = DIV_ROUND_UP(bytes, GRU_CACHE_LINE_BYTES); + if (gru_get_cpu_resources(bytes, &cb, &dsr)) + return MQE_BUG_NO_RESOURCES; + memcpy(dsr, mesg, bytes); + mhdr = dsr; + mhdr->present = MQS_FULL; + mhdr->lines = clines; + if (clines == 2) { + mhdr->present2 = get_present2(mhdr); + restore_present2(mhdr, MQS_FULL); + } + + do { + ret = MQE_OK; + gru_mesq(cb, mq, gru_get_tri(mhdr), clines, IMA); + istatus = gru_wait(cb); + if (istatus != CBS_IDLE) + ret = send_message_failure(cb, mq, dsr, clines); + } while (ret == MQIE_AGAIN); + gru_free_cpu_resources(cb, dsr); + + if (ret) + STAT(mesq_send_failed); + return ret; +} +EXPORT_SYMBOL_GPL(gru_send_message_gpa); + +/* + * Advance the receive pointer for the queue to the next message. + */ +void gru_free_message(void *rmq, void *mesg) +{ + struct message_queue *mq = rmq; + struct message_header *mhdr = mq->next; + void *next, *pnext; + int half = -1; + int lines = mhdr->lines; + + if (lines == 2) + restore_present2(mhdr, MQS_EMPTY); + mhdr->present = MQS_EMPTY; + + pnext = mq->next; + next = pnext + GRU_CACHE_LINE_BYTES * lines; + if (next == mq->limit) { + next = mq->start; + half = 1; + } else if (pnext < mq->start2 && next >= mq->start2) { + half = 0; + } + + if (half >= 0) + mq->hstatus[half] = 1; + mq->next = next; +} +EXPORT_SYMBOL_GPL(gru_free_message); + +/* + * Get next message from message queue. Return NULL if no message + * present. User must call gru_free_message() to move to next message.
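+ * NOOP messages left behind by queue-full recovery are freed and
+ * skipped transparently, and a 2-line message is not returned until
+ * both of its cache lines are marked present.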
+ * rmq message queue + */ +void *gru_get_next_message(void *rmq) +{ + struct message_queue *mq = rmq; + struct message_header *mhdr = mq->next; + int present = mhdr->present; + + /* skip NOOP messages */ + STAT(mesq_receive); + while (present == MQS_NOOP) { + gru_free_message(rmq, mhdr); + mhdr = mq->next; + present = mhdr->present; + } + + /* Wait for both halves of 2 line messages */ + if (present == MQS_FULL && mhdr->lines == 2 && + get_present2(mhdr) == MQS_EMPTY) + present = MQS_EMPTY; + + if (!present) { + STAT(mesq_receive_none); + return NULL; + } + + if (mhdr->lines == 2) + restore_present2(mhdr, mhdr->present2); + + return mhdr; +} +EXPORT_SYMBOL_GPL(gru_get_next_message); + +/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/ + +/* + * Copy a block of data using the GRU resources + */ +int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa, + unsigned int bytes) +{ + void *cb; + void *dsr; + int ret; + + STAT(copy_gpa); + if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr)) + return MQE_BUG_NO_RESOURCES; + gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr), + XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_BYTES, IMA); + ret = gru_wait(cb); + gru_free_cpu_resources(cb, dsr); + return ret; +} +EXPORT_SYMBOL_GPL(gru_copy_gpa); + +/* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/ +/* Temp - will delete after we gain confidence in the GRU */ +static __cacheline_aligned unsigned long word0; +static __cacheline_aligned unsigned long word1; + +static int quicktest(struct gru_state *gru) +{ + void *cb; + void *ds; + unsigned long *p; + + cb = get_gseg_base_address_cb(gru->gs_gru_base_vaddr, KERNEL_CTXNUM, 0); + ds = get_gseg_base_address_ds(gru->gs_gru_base_vaddr, KERNEL_CTXNUM, 0); + p = ds; + word0 = MAGIC; + + gru_vload(cb, uv_gpa(&word0), 0, XTYPE_DW, 1, 1, IMA); + if (gru_wait(cb) != CBS_IDLE) + BUG(); + + if (*(unsigned long *)ds != MAGIC) + BUG(); + gru_vstore(cb, uv_gpa(&word1), 0, XTYPE_DW, 1, 1, IMA); + if (gru_wait(cb) != CBS_IDLE) + BUG(); + + if (word0 != word1 || word0 != MAGIC) { + printk + ("GRU quicktest err: gru %d, found 0x%lx, expected 0x%lx\n", + gru->gs_gid, word1, MAGIC); + BUG(); /* ZZZ should not be fatal */ + } + + return 0; +} + + +int gru_kservices_init(struct gru_state *gru) +{ + struct gru_blade_state *bs; + struct gru_context_configuration_handle *cch; + unsigned long cbr_map, dsr_map; + int err, num, cpus_possible; + + /* + * Currently, resources are reserved ONLY on the second chiplet + * on each blade. This leaves ALL resources on chiplet 0 available + * for user code. 
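+ * For example, on a blade with 16 possible cpus this reserves
+ * 16 CBRs (GRU_CB_COUNT_TO_AU(16) = 8 allocation units) and
+ * 16 * 256 = 4096 DSR bytes (4 allocation units) on chiplet 1.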
+ */ + bs = gru->gs_blade; + if (gru != &bs->bs_grus[1]) + return 0; + + cpus_possible = uv_blade_nr_possible_cpus(gru->gs_blade_id); + + num = GRU_NUM_KERNEL_CBR * cpus_possible; + cbr_map = gru_reserve_cb_resources(gru, GRU_CB_COUNT_TO_AU(num), NULL); + gru->gs_reserved_cbrs += num; + + num = GRU_NUM_KERNEL_DSR_BYTES * cpus_possible; + dsr_map = gru_reserve_ds_resources(gru, GRU_DS_BYTES_TO_AU(num), NULL); + gru->gs_reserved_dsr_bytes += num; + + gru->gs_active_contexts++; + __set_bit(KERNEL_CTXNUM, &gru->gs_context_map); + cch = get_cch(gru->gs_gru_base_vaddr, KERNEL_CTXNUM); + + bs->kernel_cb = get_gseg_base_address_cb(gru->gs_gru_base_vaddr, + KERNEL_CTXNUM, 0); + bs->kernel_dsr = get_gseg_base_address_ds(gru->gs_gru_base_vaddr, + KERNEL_CTXNUM, 0); + + lock_cch_handle(cch); + cch->tfm_fault_bit_enable = 0; + cch->tlb_int_enable = 0; + cch->tfm_done_bit_enable = 0; + cch->unmap_enable = 1; + err = cch_allocate(cch, 0, cbr_map, dsr_map); + if (err) { + gru_dbg(grudev, + "Unable to allocate kernel CCH: gru %d, err %d\n", + gru->gs_gid, err); + BUG(); + } + if (cch_start(cch)) { + gru_dbg(grudev, "Unable to start kernel CCH: gru %d, err %d\n", + gru->gs_gid, err); + BUG(); + } + unlock_cch_handle(cch); + + if (gru_options & GRU_QUICKLOOK) + quicktest(gru); + return 0; +} diff --git a/drivers/misc/sgi-gru/grukservices.h b/drivers/misc/sgi-gru/grukservices.h new file mode 100644 index 0000000..eb17e0a --- /dev/null +++ b/drivers/misc/sgi-gru/grukservices.h @@ -0,0 +1,134 @@ + +/* + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef __GRU_KSERVICES_H_ +#define __GRU_KSERVICES_H_ + + +/* + * Message queues using the GRU to send/receive messages. + * + * These function allow the user to create a message queue for + * sending/receiving 1 or 2 cacheline messages using the GRU. + * + * Processes SENDING messages will use a kernel CBR/DSR to send + * the message. This is transparent to the caller. + * + * The receiver does not use any GRU resources. + * + * The functions support: + * - single receiver + * - multiple senders + * - cross partition message + * + * Missing features ZZZ: + * - user options for dealing with timeouts, queue full, etc. + * - gru_create_message_queue() needs interrupt vector info + */ + +/* + * Initialize a user allocated chunk of memory to be used as + * a message queue. The caller must ensure that the queue is + * in contiguous physical memory and is cacheline aligned. + * + * Message queue size is the total number of bytes allocated + * to the queue including a 2 cacheline header that is used + * to manage the queue. + * + * Input: + * p pointer to user allocated memory. 
+ * bytes size of message queue in bytes + * + * Errors: + * 0 OK + * >0 error + */ +extern int gru_create_message_queue(void *p, unsigned int bytes); + +/* + * Send a message to a message queue. + * + * Note: The message queue transport mechanism uses the first 32 + * bits of the message. Users should avoid using these bits. + * + * + * Input: + * xmq message queue - must be a UV global physical address + * mesg pointer to message. Must be 64-bit aligned + * bytes size of message in bytes + * + * Output: + * 0 message sent + * >0 Send failure - see error codes below + * + */ +extern int gru_send_message_gpa(unsigned long mq_gpa, void *mesg, + unsigned int bytes); + +/* Status values for gru_send_message() */ +#define MQE_OK 0 /* message sent successfully */ +#define MQE_CONGESTION 1 /* temporary congestion, try again */ +#define MQE_QUEUE_FULL 2 /* queue is full */ +#define MQE_UNEXPECTED_CB_ERR 3 /* unexpected CB error */ +#define MQE_PAGE_OVERFLOW 10 /* BUG - queue overflowed a page */ +#define MQE_BUG_NO_RESOURCES 11 /* BUG - could not alloc GRU cb/dsr */ + +/* + * Advance the receive pointer for the message queue to the next message. + * Note: current API requires messages to be gotten & freed in order. Future + * API extensions may allow for out-of-order freeing. + * + * Input + * mq message queue + * mesq message being freed + */ +extern void gru_free_message(void *mq, void *mesq); + +/* + * Get next message from message queue. Returns pointer to + * message OR NULL if no message present. + * User must call gru_free_message() after message is processed + * in order to move the queue pointers to next message. + * + * Input + * mq message queue + * + * Output: + * p pointer to message + * NULL no message available + */ +extern void *gru_get_next_message(void *mq); + + +/* + * Copy data using the GRU. Source or destination can be located in a remote + * partition. + * + * Input: + * dest_gpa destination global physical address + * src_gpa source global physical address + * bytes number of bytes to copy + * + * Output: + * 0 OK + * >0 error + */ +extern int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa, + unsigned int bytes); + +#endif /* __GRU_KSERVICES_H_ */ diff --git a/drivers/misc/sgi-gru/grulib.h b/drivers/misc/sgi-gru/grulib.h new file mode 100644 index 0000000..e56e196 --- /dev/null +++ b/drivers/misc/sgi-gru/grulib.h @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation; either version 2.1 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __GRULIB_H__ +#define __GRULIB_H__ + +#define GRU_BASENAME "gru" +#define GRU_FULLNAME "/dev/gru" +#define GRU_IOCTL_NUM 'G' + +/* + * Maximum number of GRU segments that a user can have open + * ZZZ temp - set high for testing. Revisit. 
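+ * Note that this is larger than GRU_NUM_CCH (16), so open contexts
+ * are time-multiplexed onto the hardware contexts; see
+ * gru_steal_context() in grumain.c.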
+ */ +#define GRU_MAX_OPEN_CONTEXTS 32 + +/* Set Number of Request Blocks */ +#define GRU_CREATE_CONTEXT _IOWR(GRU_IOCTL_NUM, 1, void *) + +/* Register task as using the slice */ +#define GRU_SET_TASK_SLICE _IOWR(GRU_IOCTL_NUM, 5, void *) + +/* Fetch exception detail */ +#define GRU_USER_GET_EXCEPTION_DETAIL _IOWR(GRU_IOCTL_NUM, 6, void *) + +/* For user call_os handling - normally a TLB fault */ +#define GRU_USER_CALL_OS _IOWR(GRU_IOCTL_NUM, 8, void *) + +/* For user unload context */ +#define GRU_USER_UNLOAD_CONTEXT _IOWR(GRU_IOCTL_NUM, 9, void *) + +/* For fetching GRU chiplet status */ +#define GRU_GET_CHIPLET_STATUS _IOWR(GRU_IOCTL_NUM, 10, void *) + +/* For user TLB flushing (primarily for tests) */ +#define GRU_USER_FLUSH_TLB _IOWR(GRU_IOCTL_NUM, 50, void *) + +/* Get some config options (primarily for tests & emulator) */ +#define GRU_GET_CONFIG_INFO _IOWR(GRU_IOCTL_NUM, 51, void *) + +#define CONTEXT_WINDOW_BYTES(th) (GRU_GSEG_PAGESIZE * (th)) +#define THREAD_POINTER(p, th) (p + GRU_GSEG_PAGESIZE * (th)) + +/* + * Structure used to pass TLB flush parameters to the driver + */ +struct gru_create_context_req { + unsigned long gseg; + unsigned int data_segment_bytes; + unsigned int control_blocks; + unsigned int maximum_thread_count; + unsigned int options; +}; + +/* + * Structure used to pass unload context parameters to the driver + */ +struct gru_unload_context_req { + unsigned long gseg; +}; + +/* + * Structure used to pass TLB flush parameters to the driver + */ +struct gru_flush_tlb_req { + unsigned long gseg; + unsigned long vaddr; + size_t len; +}; + +/* + * GRU configuration info (temp - for testing) + */ +struct gru_config_info { + int cpus; + int blades; + int nodes; + int chiplets; + int fill[16]; +}; + +#endif /* __GRULIB_H__ */ diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c new file mode 100644 index 0000000..e11e1ac --- /dev/null +++ b/drivers/misc/sgi-gru/grumain.c @@ -0,0 +1,809 @@ +/* + * SN Platform GRU Driver + * + * DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/spinlock.h> +#include <linux/sched.h> +#include <linux/device.h> +#include <linux/list.h> +#include <asm/uv/uv_hub.h> +#include "gru.h" +#include "grutables.h" +#include "gruhandles.h" + +unsigned long gru_options __read_mostly; + +static struct device_driver gru_driver = { + .name = "gru" +}; + +static struct device gru_device = { + .bus_id = {0}, + .driver = &gru_driver, +}; + +struct device *grudev = &gru_device; + +/* + * Select a gru fault map to be used by the current cpu. Note that + * multiple cpus may be using the same map. + * ZZZ should "shift" be used?? Depends on HT cpu numbering + * ZZZ should be inline but did not work on emulator + */ +int gru_cpu_fault_map_id(void) +{ + return uv_blade_processor_id() % GRU_NUM_TFM; +} + +/*--------- ASID Management ------------------------------------------- + * + * Initially, assign asids sequentially from MIN_ASID .. MAX_ASID. + * Once MAX is reached, flush the TLB & start over. However, + * some asids may still be in use. There won't be many (percentage wise) still + * in use. Search active contexts & determine the value of the first + * asid in use ("x"s below). 
Set "limit" to this value. + * This defines a block of assignable asids. + * + * When "limit" is reached, search forward from limit+1 and determine the + * next block of assignable asids. + * + * Repeat until MAX_ASID is reached, then start over again. + * + * Each time MAX_ASID is reached, increment the asid generation. Since + * the search for in-use asids only checks contexts with GRUs currently + * assigned, asids in some contexts will be missed. Prior to loading + * a context, the asid generation of the GTS asid is rechecked. If it + * doesn't match the current generation, a new asid will be assigned. + * + * 0---------------x------------x---------------------x----| + * ^-next ^-limit ^-MAX_ASID + * + * All asid manipulation & context loading/unloading is protected by the + * gs_lock. + */ + +/* Hit the asid limit. Start over */ +static int gru_wrap_asid(struct gru_state *gru) +{ + gru_dbg(grudev, "gru %p\n", gru); + STAT(asid_wrap); + gru->gs_asid_gen++; + gru_flush_all_tlb(gru); + return MIN_ASID; +} + +/* Find the next chunk of unused asids */ +static int gru_reset_asid_limit(struct gru_state *gru, int asid) +{ + int i, gid, inuse_asid, limit; + + gru_dbg(grudev, "gru %p, asid 0x%x\n", gru, asid); + STAT(asid_next); + limit = MAX_ASID; + if (asid >= limit) + asid = gru_wrap_asid(gru); + gid = gru->gs_gid; +again: + for (i = 0; i < GRU_NUM_CCH; i++) { + if (!gru->gs_gts[i]) + continue; + inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid; + gru_dbg(grudev, "gru %p, inuse_asid 0x%x, cxtnum %d, gts %p\n", + gru, inuse_asid, i, gru->gs_gts[i]); + if (inuse_asid == asid) { + asid += ASID_INC; + if (asid >= limit) { + /* + * empty range: reset the range limit and + * start over + */ + limit = MAX_ASID; + if (asid >= MAX_ASID) + asid = gru_wrap_asid(gru); + goto again; + } + } + + if ((inuse_asid > asid) && (inuse_asid < limit)) + limit = inuse_asid; + } + gru->gs_asid_limit = limit; + gru->gs_asid = asid; + gru_dbg(grudev, "gru %p, new asid 0x%x, new_limit 0x%x\n", gru, asid, + limit); + return asid; +} + +/* Assign a new ASID to a thread context. */ +static int gru_assign_asid(struct gru_state *gru) +{ + int asid; + + spin_lock(&gru->gs_asid_lock); + gru->gs_asid += ASID_INC; + asid = gru->gs_asid; + if (asid >= gru->gs_asid_limit) + asid = gru_reset_asid_limit(gru, asid); + spin_unlock(&gru->gs_asid_lock); + + gru_dbg(grudev, "gru %p, asid 0x%x\n", gru, asid); + return asid; +} + +/* + * Clear n bits in a word. Return a word indicating the bits that were cleared. + * Optionally, build an array of chars that contain the bit numbers allocated. 
+ */ +static unsigned long reserve_resources(unsigned long *p, int n, int mmax, + char *idx) +{ + unsigned long bits = 0; + int i; + + do { + i = find_first_bit(p, mmax); + if (i == mmax) + BUG(); + __clear_bit(i, p); + __set_bit(i, &bits); + if (idx) + *idx++ = i; + } while (--n); + return bits; +} + +unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count, + char *cbmap) +{ + return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU, + cbmap); +} + +unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count, + char *dsmap) +{ + return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU, + dsmap); +} + +static void reserve_gru_resources(struct gru_state *gru, + struct gru_thread_state *gts) +{ + gru->gs_active_contexts++; + gts->ts_cbr_map = + gru_reserve_cb_resources(gru, gts->ts_cbr_au_count, + gts->ts_cbr_idx); + gts->ts_dsr_map = + gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL); +} + +static void free_gru_resources(struct gru_state *gru, + struct gru_thread_state *gts) +{ + gru->gs_active_contexts--; + gru->gs_cbr_map |= gts->ts_cbr_map; + gru->gs_dsr_map |= gts->ts_dsr_map; +} + +/* + * Check if a GRU has sufficient free resources to satisfy an allocation + * request. Note: GRU locks may or may not be held when this is called. If + * not held, recheck after acquiring the appropriate locks. + * + * Returns 1 if sufficient resources, 0 if not + */ +static int check_gru_resources(struct gru_state *gru, int cbr_au_count, + int dsr_au_count, int max_active_contexts) +{ + return hweight64(gru->gs_cbr_map) >= cbr_au_count + && hweight64(gru->gs_dsr_map) >= dsr_au_count + && gru->gs_active_contexts < max_active_contexts; +} + +/* + * TLB manangment requires tracking all GRU chiplets that have loaded a GSEG + * context. + */ +static int gru_load_mm_tracker(struct gru_state *gru, struct gru_mm_struct *gms, + int ctxnum) +{ + struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid]; + unsigned short ctxbitmap = (1 << ctxnum); + int asid; + + spin_lock(&gms->ms_asid_lock); + asid = asids->mt_asid; + + if (asid == 0 || asids->mt_asid_gen != gru->gs_asid_gen) { + asid = gru_assign_asid(gru); + asids->mt_asid = asid; + asids->mt_asid_gen = gru->gs_asid_gen; + STAT(asid_new); + } else { + STAT(asid_reuse); + } + + BUG_ON(asids->mt_ctxbitmap & ctxbitmap); + asids->mt_ctxbitmap |= ctxbitmap; + if (!test_bit(gru->gs_gid, gms->ms_asidmap)) + __set_bit(gru->gs_gid, gms->ms_asidmap); + spin_unlock(&gms->ms_asid_lock); + + gru_dbg(grudev, + "gru %x, gms %p, ctxnum 0x%d, asid 0x%x, asidmap 0x%lx\n", + gru->gs_gid, gms, ctxnum, asid, gms->ms_asidmap[0]); + return asid; +} + +static void gru_unload_mm_tracker(struct gru_state *gru, + struct gru_mm_struct *gms, int ctxnum) +{ + struct gru_mm_tracker *asids; + unsigned short ctxbitmap; + + asids = &gms->ms_asids[gru->gs_gid]; + ctxbitmap = (1 << ctxnum); + spin_lock(&gms->ms_asid_lock); + BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap); + asids->mt_ctxbitmap ^= ctxbitmap; + gru_dbg(grudev, "gru %x, gms %p, ctxnum 0x%d, asidmap 0x%lx\n", + gru->gs_gid, gms, ctxnum, gms->ms_asidmap[0]); + spin_unlock(&gms->ms_asid_lock); +} + +/* + * Decrement the reference count on a GTS structure. Free the structure + * if the reference count goes to zero. + */ +void gts_drop(struct gru_thread_state *gts) +{ + if (gts && atomic_dec_return(>s->ts_refcnt) == 0) { + gru_drop_mmu_notifier(gts->ts_gms); + kfree(gts); + STAT(gts_free); + } +} + +/* + * Locate the GTS structure for the current thread. 
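+ * Caller must hold the vma's vd_lock (hence the _nolock suffix);
+ * see gru_find_thread_state() below.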
+ */ +static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data + *vdata, int tsid) +{ + struct gru_thread_state *gts; + + list_for_each_entry(gts, &vdata->vd_head, ts_next) + if (gts->ts_tsid == tsid) + return gts; + return NULL; +} + +/* + * Allocate a thread state structure. + */ +static struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma, + struct gru_vma_data *vdata, + int tsid) +{ + struct gru_thread_state *gts; + int bytes; + + bytes = DSR_BYTES(vdata->vd_dsr_au_count) + + CBR_BYTES(vdata->vd_cbr_au_count); + bytes += sizeof(struct gru_thread_state); + gts = kzalloc(bytes, GFP_KERNEL); + if (!gts) + return NULL; + + STAT(gts_alloc); + atomic_set(>s->ts_refcnt, 1); + mutex_init(>s->ts_ctxlock); + gts->ts_cbr_au_count = vdata->vd_cbr_au_count; + gts->ts_dsr_au_count = vdata->vd_dsr_au_count; + gts->ts_user_options = vdata->vd_user_options; + gts->ts_tsid = tsid; + gts->ts_user_options = vdata->vd_user_options; + gts->ts_ctxnum = NULLCTX; + gts->ts_mm = current->mm; + gts->ts_vma = vma; + gts->ts_tlb_int_select = -1; + gts->ts_gms = gru_register_mmu_notifier(); + if (!gts->ts_gms) + goto err; + + gru_dbg(grudev, "alloc vdata %p, new gts %p\n", vdata, gts); + return gts; + +err: + gts_drop(gts); + return NULL; +} + +/* + * Allocate a vma private data structure. + */ +struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid) +{ + struct gru_vma_data *vdata = NULL; + + vdata = kmalloc(sizeof(*vdata), GFP_KERNEL); + if (!vdata) + return NULL; + + INIT_LIST_HEAD(&vdata->vd_head); + spin_lock_init(&vdata->vd_lock); + gru_dbg(grudev, "alloc vdata %p\n", vdata); + return vdata; +} + +/* + * Find the thread state structure for the current thread. + */ +struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma, + int tsid) +{ + struct gru_vma_data *vdata = vma->vm_private_data; + struct gru_thread_state *gts; + + spin_lock(&vdata->vd_lock); + gts = gru_find_current_gts_nolock(vdata, tsid); + spin_unlock(&vdata->vd_lock); + gru_dbg(grudev, "vma %p, gts %p\n", vma, gts); + return gts; +} + +/* + * Allocate a new thread state for a GSEG. Note that races may allow + * another thread to race to create a gts. + */ +struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma, + int tsid) +{ + struct gru_vma_data *vdata = vma->vm_private_data; + struct gru_thread_state *gts, *ngts; + + gts = gru_alloc_gts(vma, vdata, tsid); + if (!gts) + return NULL; + + spin_lock(&vdata->vd_lock); + ngts = gru_find_current_gts_nolock(vdata, tsid); + if (ngts) { + gts_drop(gts); + gts = ngts; + STAT(gts_double_allocate); + } else { + list_add(>s->ts_next, &vdata->vd_head); + } + spin_unlock(&vdata->vd_lock); + gru_dbg(grudev, "vma %p, gts %p\n", vma, gts); + return gts; +} + +/* + * Free the GRU context assigned to the thread state. + */ +static void gru_free_gru_context(struct gru_thread_state *gts) +{ + struct gru_state *gru; + + gru = gts->ts_gru; + gru_dbg(grudev, "gts %p, gru %p\n", gts, gru); + + spin_lock(&gru->gs_lock); + gru->gs_gts[gts->ts_ctxnum] = NULL; + free_gru_resources(gru, gts); + BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0); + __clear_bit(gts->ts_ctxnum, &gru->gs_context_map); + gts->ts_ctxnum = NULLCTX; + gts->ts_gru = NULL; + spin_unlock(&gru->gs_lock); + + gts_drop(gts); + STAT(free_context); +} + +/* + * Prefetching cachelines help hardware performance. + * (Strictly a performance enhancement. Not functionally required). 
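+ * gru_prefetch_context() below touches the context's DSR lines and
+ * each allocated CB/CBE handle so that the copy loops in the
+ * load/unload paths hit warm cachelines.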
+ */ +static void prefetch_data(void *p, int num, int stride) +{ + while (num-- > 0) { + prefetchw(p); + p += stride; + } +} + +static inline long gru_copy_handle(void *d, void *s) +{ + memcpy(d, s, GRU_HANDLE_BYTES); + return GRU_HANDLE_BYTES; +} + +static void gru_prefetch_context(void *gseg, void *cb, void *cbe, unsigned long cbrmap, + unsigned long length) +{ + int i, scr; + + prefetch_data(gseg + GRU_DS_BASE, length / GRU_CACHE_LINE_BYTES, + GRU_CACHE_LINE_BYTES); + + for_each_cbr_in_allocation_map(i, &cbrmap, scr) { + prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES); + prefetch_data(cbe + i * GRU_HANDLE_STRIDE, 1, + GRU_CACHE_LINE_BYTES); + cb += GRU_HANDLE_STRIDE; + } +} + +static void gru_load_context_data(void *save, void *grubase, int ctxnum, + unsigned long cbrmap, unsigned long dsrmap) +{ + void *gseg, *cb, *cbe; + unsigned long length; + int i, scr; + + gseg = grubase + ctxnum * GRU_GSEG_STRIDE; + cb = gseg + GRU_CB_BASE; + cbe = grubase + GRU_CBE_BASE; + length = hweight64(dsrmap) * GRU_DSR_AU_BYTES; + gru_prefetch_context(gseg, cb, cbe, cbrmap, length); + + for_each_cbr_in_allocation_map(i, &cbrmap, scr) { + save += gru_copy_handle(cb, save); + save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE, save); + cb += GRU_HANDLE_STRIDE; + } + + memcpy(gseg + GRU_DS_BASE, save, length); +} + +static void gru_unload_context_data(void *save, void *grubase, int ctxnum, + unsigned long cbrmap, unsigned long dsrmap) +{ + void *gseg, *cb, *cbe; + unsigned long length; + int i, scr; + + gseg = grubase + ctxnum * GRU_GSEG_STRIDE; + cb = gseg + GRU_CB_BASE; + cbe = grubase + GRU_CBE_BASE; + length = hweight64(dsrmap) * GRU_DSR_AU_BYTES; + gru_prefetch_context(gseg, cb, cbe, cbrmap, length); + + for_each_cbr_in_allocation_map(i, &cbrmap, scr) { + save += gru_copy_handle(save, cb); + save += gru_copy_handle(save, cbe + i * GRU_HANDLE_STRIDE); + cb += GRU_HANDLE_STRIDE; + } + memcpy(save, gseg + GRU_DS_BASE, length); +} + +void gru_unload_context(struct gru_thread_state *gts, int savestate) +{ + struct gru_state *gru = gts->ts_gru; + struct gru_context_configuration_handle *cch; + int ctxnum = gts->ts_ctxnum; + + zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE); + cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); + + lock_cch_handle(cch); + if (cch_interrupt_sync(cch)) + BUG(); + gru_dbg(grudev, "gts %p\n", gts); + + gru_unload_mm_tracker(gru, gts->ts_gms, gts->ts_ctxnum); + if (savestate) + gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, + ctxnum, gts->ts_cbr_map, + gts->ts_dsr_map); + + if (cch_deallocate(cch)) + BUG(); + gts->ts_force_unload = 0; /* ts_force_unload locked by CCH lock */ + unlock_cch_handle(cch); + + gru_free_gru_context(gts); + STAT(unload_context); +} + +/* + * Load a GRU context by copying it from the thread data structure in memory + * to the GRU. 
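+ *
+ * Sequence, as implemented below: lock the CCH, assign an ASID via
+ * gru_load_mm_tracker(), program TLB fault/interrupt delivery,
+ * cch_allocate() the CBR/DSR maps, copy the saved context data in,
+ * then cch_start().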
+ */ +static void gru_load_context(struct gru_thread_state *gts) +{ + struct gru_state *gru = gts->ts_gru; + struct gru_context_configuration_handle *cch; + int err, asid, ctxnum = gts->ts_ctxnum; + + gru_dbg(grudev, "gts %p\n", gts); + cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); + + lock_cch_handle(cch); + asid = gru_load_mm_tracker(gru, gts->ts_gms, gts->ts_ctxnum); + cch->tfm_fault_bit_enable = + (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL + || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR); + cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR); + if (cch->tlb_int_enable) { + gts->ts_tlb_int_select = gru_cpu_fault_map_id(); + cch->tlb_int_select = gts->ts_tlb_int_select; + } + cch->tfm_done_bit_enable = 0; + err = cch_allocate(cch, asid, gts->ts_cbr_map, gts->ts_dsr_map); + if (err) { + gru_dbg(grudev, + "err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n", + err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map); + BUG(); + } + + gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum, + gts->ts_cbr_map, gts->ts_dsr_map); + + if (cch_start(cch)) + BUG(); + unlock_cch_handle(cch); + + STAT(load_context); +} + +/* + * Update fields in an active CCH: + * - retarget interrupts on local blade + * - force a delayed context unload by clearing the CCH asids. This + * forces TLB misses for new GRU instructions. The context is unloaded + * when the next TLB miss occurs. + */ +static int gru_update_cch(struct gru_thread_state *gts, int int_select) +{ + struct gru_context_configuration_handle *cch; + struct gru_state *gru = gts->ts_gru; + int i, ctxnum = gts->ts_ctxnum, ret = 0; + + cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); + + lock_cch_handle(cch); + if (cch->state == CCHSTATE_ACTIVE) { + if (gru->gs_gts[gts->ts_ctxnum] != gts) + goto exit; + if (cch_interrupt(cch)) + BUG(); + if (int_select >= 0) { + gts->ts_tlb_int_select = int_select; + cch->tlb_int_select = int_select; + } else { + for (i = 0; i < 8; i++) + cch->asid[i] = 0; + cch->tfm_fault_bit_enable = 0; + cch->tlb_int_enable = 0; + gts->ts_force_unload = 1; + } + if (cch_start(cch)) + BUG(); + ret = 1; + } +exit: + unlock_cch_handle(cch); + return ret; +} + +/* + * Update CCH tlb interrupt select. Required when all the following is true: + * - task's GRU context is loaded into a GRU + * - task is using interrupt notification for TLB faults + * - task has migrated to a different cpu on the same blade where + * it was previously running. + */ +static int gru_retarget_intr(struct gru_thread_state *gts) +{ + if (gts->ts_tlb_int_select < 0 + || gts->ts_tlb_int_select == gru_cpu_fault_map_id()) + return 0; + + gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select, + gru_cpu_fault_map_id()); + return gru_update_cch(gts, gru_cpu_fault_map_id()); +} + + +/* + * Insufficient GRU resources available on the local blade. Steal a context from + * a process. This is a hack until a _real_ resource scheduler is written.... + */ +#define next_ctxnum(n) ((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0) +#define next_gru(b, g) (((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ? 
\ + ((g)+1) : &(b)->bs_grus[0]) + +static void gru_steal_context(struct gru_thread_state *gts) +{ + struct gru_blade_state *blade; + struct gru_state *gru, *gru0; + struct gru_thread_state *ngts = NULL; + int ctxnum, ctxnum0, flag = 0, cbr, dsr; + + cbr = gts->ts_cbr_au_count; + dsr = gts->ts_dsr_au_count; + + preempt_disable(); + blade = gru_base[uv_numa_blade_id()]; + spin_lock(&blade->bs_lock); + + ctxnum = next_ctxnum(blade->bs_lru_ctxnum); + gru = blade->bs_lru_gru; + if (ctxnum == 0) + gru = next_gru(blade, gru); + ctxnum0 = ctxnum; + gru0 = gru; + while (1) { + if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH)) + break; + spin_lock(&gru->gs_lock); + for (; ctxnum < GRU_NUM_CCH; ctxnum++) { + if (flag && gru == gru0 && ctxnum == ctxnum0) + break; + ngts = gru->gs_gts[ctxnum]; + /* + * We are grabbing locks out of order, so trylock is + * needed. GTSs are usually not locked, so the odds of + * success are high. If trylock fails, try to steal a + * different GSEG. + */ + if (ngts && mutex_trylock(&ngts->ts_ctxlock)) + break; + ngts = NULL; + flag = 1; + } + spin_unlock(&gru->gs_lock); + if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0)) + break; + ctxnum = 0; + gru = next_gru(blade, gru); + } + blade->bs_lru_gru = gru; + blade->bs_lru_ctxnum = ctxnum; + spin_unlock(&blade->bs_lock); + preempt_enable(); + + if (ngts) { + STAT(steal_context); + ngts->ts_steal_jiffies = jiffies; + gru_unload_context(ngts, 1); + mutex_unlock(&ngts->ts_ctxlock); + } else { + STAT(steal_context_failed); + } + gru_dbg(grudev, + "stole gru %x, ctxnum %d from gts %p. Need cb %d, ds %d;" + " avail cb %ld, ds %ld\n", + gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map), + hweight64(gru->gs_dsr_map)); +} + +/* + * Scan the GRUs on the local blade & assign a GRU context. + */ +static struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts) +{ + struct gru_state *gru, *grux; + int i, max_active_contexts; + + preempt_disable(); + +again: + gru = NULL; + max_active_contexts = GRU_NUM_CCH; + for_each_gru_on_blade(grux, uv_numa_blade_id(), i) { + if (check_gru_resources(grux, gts->ts_cbr_au_count, + gts->ts_dsr_au_count, + max_active_contexts)) { + gru = grux; + max_active_contexts = grux->gs_active_contexts; + if (max_active_contexts == 0) + break; + } + } + + if (gru) { + spin_lock(&gru->gs_lock); + if (!check_gru_resources(gru, gts->ts_cbr_au_count, + gts->ts_dsr_au_count, GRU_NUM_CCH)) { + spin_unlock(&gru->gs_lock); + goto again; + } + reserve_gru_resources(gru, gts); + gts->ts_gru = gru; + gts->ts_ctxnum = + find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH); + BUG_ON(gts->ts_ctxnum == GRU_NUM_CCH); + atomic_inc(>s->ts_refcnt); + gru->gs_gts[gts->ts_ctxnum] = gts; + __set_bit(gts->ts_ctxnum, &gru->gs_context_map); + spin_unlock(&gru->gs_lock); + + STAT(assign_context); + gru_dbg(grudev, + "gseg %p, gts %p, gru %x, ctx %d, cbr %d, dsr %d\n", + gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts, + gts->ts_gru->gs_gid, gts->ts_ctxnum, + gts->ts_cbr_au_count, gts->ts_dsr_au_count); + } else { + gru_dbg(grudev, "failed to allocate a GTS %s\n", ""); + STAT(assign_context_failed); + } + + preempt_enable(); + return gru; +} + +/* + * gru_nopage + * + * Map the user's GRU segment + * + * Note: gru segments alway mmaped on GRU_GSEG_PAGESIZE boundaries. 
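+ *
+ * On a fault the driver locates the thread's GTS, assigns and loads
+ * a GRU context if one is not already resident, and maps the gseg's
+ * physical address into the vma with remap_pfn_range(). If no
+ * chiplet has free resources it sleeps briefly and may steal a
+ * context from another process (gru_steal_context()).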
+ */ +int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct gru_thread_state *gts; + unsigned long paddr, vaddr; + + vaddr = (unsigned long)vmf->virtual_address; + gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n", + vma, vaddr, GSEG_BASE(vaddr)); + STAT(nopfn); + + /* The following check ensures vaddr is a valid address in the VMA */ + gts = gru_find_thread_state(vma, TSID(vaddr, vma)); + if (!gts) + return VM_FAULT_SIGBUS; + +again: + preempt_disable(); + mutex_lock(>s->ts_ctxlock); + if (gts->ts_gru) { + if (gts->ts_gru->gs_blade_id != uv_numa_blade_id()) { + STAT(migrated_nopfn_unload); + gru_unload_context(gts, 1); + } else { + if (gru_retarget_intr(gts)) + STAT(migrated_nopfn_retarget); + } + } + + if (!gts->ts_gru) { + if (!gru_assign_gru_context(gts)) { + mutex_unlock(>s->ts_ctxlock); + preempt_enable(); + schedule_timeout(GRU_ASSIGN_DELAY); /* true hack ZZZ */ + if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies) + gru_steal_context(gts); + goto again; + } + gru_load_context(gts); + paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum); + remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1), + paddr >> PAGE_SHIFT, GRU_GSEG_PAGESIZE, + vma->vm_page_prot); + } + + mutex_unlock(>s->ts_ctxlock); + preempt_enable(); + + return VM_FAULT_NOPAGE; +} + diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c new file mode 100644 index 0000000..533923f --- /dev/null +++ b/drivers/misc/sgi-gru/gruprocfs.c @@ -0,0 +1,336 @@ +/* + * SN Platform GRU Driver + * + * PROC INTERFACES + * + * This file supports the /proc interfaces for the GRU driver + * + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/proc_fs.h> +#include <linux/device.h> +#include <linux/seq_file.h> +#include <linux/uaccess.h> +#include "gru.h" +#include "grulib.h" +#include "grutables.h" + +#define printstat(s, f) printstat_val(s, &gru_stats.f, #f) + +static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id) +{ + unsigned long val = atomic_long_read(v); + + if (val) + seq_printf(s, "%16lu %s\n", val, id); +} + +static int statistics_show(struct seq_file *s, void *p) +{ + printstat(s, vdata_alloc); + printstat(s, vdata_free); + printstat(s, gts_alloc); + printstat(s, gts_free); + printstat(s, vdata_double_alloc); + printstat(s, gts_double_allocate); + printstat(s, assign_context); + printstat(s, assign_context_failed); + printstat(s, free_context); + printstat(s, load_context); + printstat(s, unload_context); + printstat(s, steal_context); + printstat(s, steal_context_failed); + printstat(s, nopfn); + printstat(s, break_cow); + printstat(s, asid_new); + printstat(s, asid_next); + printstat(s, asid_wrap); + printstat(s, asid_reuse); + printstat(s, intr); + printstat(s, call_os); + printstat(s, call_os_check_for_bug); + printstat(s, call_os_wait_queue); + printstat(s, user_flush_tlb); + printstat(s, user_unload_context); + printstat(s, user_exception); + printstat(s, set_task_slice); + printstat(s, migrate_check); + printstat(s, migrated_retarget); + printstat(s, migrated_unload); + printstat(s, migrated_unload_delay); + printstat(s, migrated_nopfn_retarget); + printstat(s, migrated_nopfn_unload); + printstat(s, tlb_dropin); + printstat(s, tlb_dropin_fail_no_asid); + printstat(s, tlb_dropin_fail_upm); + printstat(s, tlb_dropin_fail_invalid); + printstat(s, tlb_dropin_fail_range_active); + printstat(s, tlb_dropin_fail_idle); + printstat(s, tlb_dropin_fail_fmm); + printstat(s, mmu_invalidate_range); + printstat(s, mmu_invalidate_page); + printstat(s, mmu_clear_flush_young); + printstat(s, flush_tlb); + printstat(s, flush_tlb_gru); + printstat(s, flush_tlb_gru_tgh); + printstat(s, flush_tlb_gru_zero_asid); + printstat(s, copy_gpa); + printstat(s, mesq_receive); + printstat(s, mesq_receive_none); + printstat(s, mesq_send); + printstat(s, mesq_send_failed); + printstat(s, mesq_noop); + printstat(s, mesq_send_unexpected_error); + printstat(s, mesq_send_lb_overflow); + printstat(s, mesq_send_qlimit_reached); + printstat(s, mesq_send_amo_nacked); + printstat(s, mesq_send_put_nacked); + printstat(s, mesq_qf_not_full); + printstat(s, mesq_qf_locked); + printstat(s, mesq_qf_noop_not_full); + printstat(s, mesq_qf_switch_head_failed); + printstat(s, mesq_qf_unexpected_error); + printstat(s, mesq_noop_unexpected_error); + printstat(s, mesq_noop_lb_overflow); + printstat(s, mesq_noop_qlimit_reached); + printstat(s, mesq_noop_amo_nacked); + printstat(s, mesq_noop_put_nacked); + return 0; +} + +static ssize_t statistics_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *data) +{ + memset(&gru_stats, 0, sizeof(gru_stats)); + return count; +} + +static int options_show(struct seq_file *s, void *p) +{ + seq_printf(s, "0x%lx\n", gru_options); + return 0; +} + +static ssize_t options_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *data) +{ + unsigned long val; + char buf[80]; + + if (copy_from_user + (buf, userbuf, count < sizeof(buf) ? 
count : sizeof(buf))) + return -EFAULT; + if (!strict_strtoul(buf, 10, &val)) + gru_options = val; + + return count; +} + +static int cch_seq_show(struct seq_file *file, void *data) +{ + long gid = *(long *)data; + int i; + struct gru_state *gru = GID_TO_GRU(gid); + struct gru_thread_state *ts; + const char *mode[] = { "??", "UPM", "INTR", "OS_POLL" }; + + if (gid == 0) + seq_printf(file, "#%5s%5s%6s%9s%6s%8s%8s\n", "gid", "bid", + "ctx#", "pid", "cbrs", "dsbytes", "mode"); + if (gru) + for (i = 0; i < GRU_NUM_CCH; i++) { + ts = gru->gs_gts[i]; + if (!ts) + continue; + seq_printf(file, " %5d%5d%6d%9d%6d%8d%8s\n", + gru->gs_gid, gru->gs_blade_id, i, + ts->ts_tgid_owner, + ts->ts_cbr_au_count * GRU_CBR_AU_SIZE, + ts->ts_cbr_au_count * GRU_DSR_AU_BYTES, + mode[ts->ts_user_options & + GRU_OPT_MISS_MASK]); + } + + return 0; +} + +static int gru_seq_show(struct seq_file *file, void *data) +{ + long gid = *(long *)data, ctxfree, cbrfree, dsrfree; + struct gru_state *gru = GID_TO_GRU(gid); + + if (gid == 0) { + seq_printf(file, "#%5s%5s%7s%6s%6s%8s%6s%6s\n", "gid", "nid", + "ctx", "cbr", "dsr", "ctx", "cbr", "dsr"); + seq_printf(file, "#%5s%5s%7s%6s%6s%8s%6s%6s\n", "", "", "busy", + "busy", "busy", "free", "free", "free"); + } + if (gru) { + ctxfree = GRU_NUM_CCH - gru->gs_active_contexts; + cbrfree = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE; + dsrfree = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES; + seq_printf(file, " %5d%5d%7ld%6ld%6ld%8ld%6ld%6ld\n", + gru->gs_gid, gru->gs_blade_id, GRU_NUM_CCH - ctxfree, + GRU_NUM_CBE - cbrfree, GRU_NUM_DSR_BYTES - dsrfree, + ctxfree, cbrfree, dsrfree); + } + + return 0; +} + +static void seq_stop(struct seq_file *file, void *data) +{ +} + +static void *seq_start(struct seq_file *file, loff_t *gid) +{ + if (*gid < GRU_MAX_GRUS) + return gid; + return NULL; +} + +static void *seq_next(struct seq_file *file, void *data, loff_t *gid) +{ + (*gid)++; + if (*gid < GRU_MAX_GRUS) + return gid; + return NULL; +} + +static const struct seq_operations cch_seq_ops = { + .start = seq_start, + .next = seq_next, + .stop = seq_stop, + .show = cch_seq_show +}; + +static const struct seq_operations gru_seq_ops = { + .start = seq_start, + .next = seq_next, + .stop = seq_stop, + .show = gru_seq_show +}; + +static int statistics_open(struct inode *inode, struct file *file) +{ + return single_open(file, statistics_show, NULL); +} + +static int options_open(struct inode *inode, struct file *file) +{ + return single_open(file, options_show, NULL); +} + +static int cch_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &cch_seq_ops); +} + +static int gru_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &gru_seq_ops); +} + +/* *INDENT-OFF* */ +static const struct file_operations statistics_fops = { + .open = statistics_open, + .read = seq_read, + .write = statistics_write, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations options_fops = { + .open = options_open, + .read = seq_read, + .write = options_write, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations cch_fops = { + .open = cch_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; +static const struct file_operations gru_fops = { + .open = gru_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static struct proc_entry { + char *name; + int mode; + const struct file_operations *fops; + struct proc_dir_entry *entry; +} proc_files[] = { + 
{"statistics", 0644, &statistics_fops}, + {"debug_options", 0644, &options_fops}, + {"cch_status", 0444, &cch_fops}, + {"gru_status", 0444, &gru_fops}, + {NULL} +}; +/* *INDENT-ON* */ + +static struct proc_dir_entry *proc_gru __read_mostly; + +static int create_proc_file(struct proc_entry *p) +{ + p->entry = create_proc_entry(p->name, p->mode, proc_gru); + if (!p->entry) + return -1; + p->entry->proc_fops = p->fops; + return 0; +} + +static void delete_proc_files(void) +{ + struct proc_entry *p; + + if (proc_gru) { + for (p = proc_files; p->name; p++) + if (p->entry) + remove_proc_entry(p->name, proc_gru); + remove_proc_entry("gru", NULL); + } +} + +int gru_proc_init(void) +{ + struct proc_entry *p; + + proc_mkdir("sgi_uv", NULL); + proc_gru = proc_mkdir("sgi_uv/gru", NULL); + + for (p = proc_files; p->name; p++) + if (create_proc_file(p)) + goto err; + return 0; + +err: + delete_proc_files(); + return -1; +} + +void gru_proc_exit(void) +{ + delete_proc_files(); +} diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h new file mode 100644 index 0000000..a78f70d --- /dev/null +++ b/drivers/misc/sgi-gru/grutables.h @@ -0,0 +1,609 @@ +/* + * SN Platform GRU Driver + * + * GRU DRIVER TABLES, MACROS, externs, etc + * + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __GRUTABLES_H__ +#define __GRUTABLES_H__ + +/* + * GRU Chiplet: + * The GRU is a user addressible memory accelerator. It provides + * several forms of load, store, memset, bcopy instructions. In addition, it + * contains special instructions for AMOs, sending messages to message + * queues, etc. + * + * The GRU is an integral part of the node controller. It connects + * directly to the cpu socket. In its current implementation, there are 2 + * GRU chiplets in the node controller on each blade (~node). + * + * The entire GRU memory space is fully coherent and cacheable by the cpus. + * + * Each GRU chiplet has a physical memory map that looks like the following: + * + * +-----------------+ + * |/////////////////| + * |/////////////////| + * |/////////////////| + * |/////////////////| + * |/////////////////| + * |/////////////////| + * |/////////////////| + * |/////////////////| + * +-----------------+ + * | system control | + * +-----------------+ _______ +-------------+ + * |/////////////////| / | | + * |/////////////////| / | | + * |/////////////////| / | instructions| + * |/////////////////| / | | + * |/////////////////| / | | + * |/////////////////| / |-------------| + * |/////////////////| / | | + * +-----------------+ | | + * | context 15 | | data | + * +-----------------+ | | + * | ...... 
| \ | | + * +-----------------+ \____________ +-------------+ + * | context 1 | + * +-----------------+ + * | context 0 | + * +-----------------+ + * + * Each of the "contexts" is a chunk of memory that can be mmaped into user + * space. The context consists of 2 parts: + * + * - an instruction space that can be directly accessed by the user + * to issue GRU instructions and to check instruction status. + * + * - a data area that acts as normal RAM. + * + * User instructions contain virtual addresses of data to be accessed by the + * GRU. The GRU contains a TLB that is used to convert these user virtual + * addresses to physical addresses. + * + * The "system control" area of the GRU chiplet is used by the kernel driver + * to manage user contexts and to perform functions such as TLB dropin and + * purging. + * + * One context may be reserved for the kernel and used for cross-partition + * communication. The GRU will also be used to asynchronously zero out + * large blocks of memory (not currently implemented). + * + * + * Tables: + * + * VDATA-VMA Data - Holds a few parameters. Head of linked list of + * GTS tables for threads using the GSEG + * GTS - Gru Thread State - contains info for managing a GSEG context. A + * GTS is allocated for each thread accessing a + * GSEG. + * GTD - GRU Thread Data - contains shadow copy of GRU data when GSEG is + * not loaded into a GRU + * GMS - GRU Memory Struct - Used to manage TLB shootdowns. Tracks GRUs + * where a GSEG has been loaded. Similar to + * an mm_struct but for GRU. + * + * GS - GRU State - Used to manage the state of a GRU chiplet + * BS - Blade State - Used to manage state of all GRU chiplets + * on a blade + * + * + * Normal task tables for task using GRU. + * - 2 threads in process + * - 2 GSEGs open in process + * - GSEG1 is being used by both threads + * - GSEG2 is used only by thread 2 + * + * task -->| + * task ---+---> mm ->------ (notifier) -------+-> gms + * | | + * |--> vma -> vdata ---> gts--->| GSEG1 (thread1) + * | | | + * | +-> gts--->| GSEG1 (thread2) + * | | + * |--> vma -> vdata ---> gts--->| GSEG2 (thread2) + * . + * . + * + * GSEGs are marked DONTCOPY on fork + * + * At open + * file.private_data -> NULL + * + * At mmap, + * vma -> vdata + * + * After gseg reference + * vma -> vdata ->gts + * + * After fork + * parent + * vma -> vdata -> gts + * child + * (vma is not copied) + * + */ + +#include <linux/rmap.h> +#include <linux/interrupt.h> +#include <linux/mutex.h> +#include <linux/wait.h> +#include <linux/mmu_notifier.h> +#include "gru.h" +#include "gruhandles.h" + +extern struct gru_stats_s gru_stats; +extern struct gru_blade_state *gru_base[]; +extern unsigned long gru_start_paddr, gru_end_paddr; + +#define GRU_MAX_BLADES MAX_NUMNODES +#define GRU_MAX_GRUS (GRU_MAX_BLADES * GRU_CHIPLETS_PER_BLADE) + +#define GRU_DRIVER_ID_STR "SGI GRU Device Driver" +#define GRU_DRIVER_VERSION_STR "0.80" + +/* + * GRU statistics. 
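+ *
+ * Each counter below is bumped through the STAT() macro (defined later in
+ * this header); the increment is skipped at runtime unless OPT_STATS is set
+ * in gru_options, and the values are exported via /proc (see gruprocfs.c).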
+ */
+struct gru_stats_s {
+	atomic_long_t vdata_alloc;
+	atomic_long_t vdata_free;
+	atomic_long_t gts_alloc;
+	atomic_long_t gts_free;
+	atomic_long_t vdata_double_alloc;
+	atomic_long_t gts_double_allocate;
+	atomic_long_t assign_context;
+	atomic_long_t assign_context_failed;
+	atomic_long_t free_context;
+	atomic_long_t load_context;
+	atomic_long_t unload_context;
+	atomic_long_t steal_context;
+	atomic_long_t steal_context_failed;
+	atomic_long_t nopfn;
+	atomic_long_t break_cow;
+	atomic_long_t asid_new;
+	atomic_long_t asid_next;
+	atomic_long_t asid_wrap;
+	atomic_long_t asid_reuse;
+	atomic_long_t intr;
+	atomic_long_t call_os;
+	atomic_long_t call_os_check_for_bug;
+	atomic_long_t call_os_wait_queue;
+	atomic_long_t user_flush_tlb;
+	atomic_long_t user_unload_context;
+	atomic_long_t user_exception;
+	atomic_long_t set_task_slice;
+	atomic_long_t migrate_check;
+	atomic_long_t migrated_retarget;
+	atomic_long_t migrated_unload;
+	atomic_long_t migrated_unload_delay;
+	atomic_long_t migrated_nopfn_retarget;
+	atomic_long_t migrated_nopfn_unload;
+	atomic_long_t tlb_dropin;
+	atomic_long_t tlb_dropin_fail_no_asid;
+	atomic_long_t tlb_dropin_fail_upm;
+	atomic_long_t tlb_dropin_fail_invalid;
+	atomic_long_t tlb_dropin_fail_range_active;
+	atomic_long_t tlb_dropin_fail_idle;
+	atomic_long_t tlb_dropin_fail_fmm;
+	atomic_long_t mmu_invalidate_range;
+	atomic_long_t mmu_invalidate_page;
+	atomic_long_t mmu_clear_flush_young;
+	atomic_long_t flush_tlb;
+	atomic_long_t flush_tlb_gru;
+	atomic_long_t flush_tlb_gru_tgh;
+	atomic_long_t flush_tlb_gru_zero_asid;
+
+	atomic_long_t copy_gpa;
+
+	atomic_long_t mesq_receive;
+	atomic_long_t mesq_receive_none;
+	atomic_long_t mesq_send;
+	atomic_long_t mesq_send_failed;
+	atomic_long_t mesq_noop;
+	atomic_long_t mesq_send_unexpected_error;
+	atomic_long_t mesq_send_lb_overflow;
+	atomic_long_t mesq_send_qlimit_reached;
+	atomic_long_t mesq_send_amo_nacked;
+	atomic_long_t mesq_send_put_nacked;
+	atomic_long_t mesq_qf_not_full;
+	atomic_long_t mesq_qf_locked;
+	atomic_long_t mesq_qf_noop_not_full;
+	atomic_long_t mesq_qf_switch_head_failed;
+	atomic_long_t mesq_qf_unexpected_error;
+	atomic_long_t mesq_noop_unexpected_error;
+	atomic_long_t mesq_noop_lb_overflow;
+	atomic_long_t mesq_noop_qlimit_reached;
+	atomic_long_t mesq_noop_amo_nacked;
+	atomic_long_t mesq_noop_put_nacked;
+
+};
+
+#define OPT_DPRINT	1
+#define OPT_STATS	2
+#define GRU_QUICKLOOK	4
+
+
+#define IRQ_GRU			110	/* Starting IRQ number for interrupts */
+
+/* Delay in jiffies between attempts to assign a GRU context */
+#define GRU_ASSIGN_DELAY	((HZ * 20) / 1000)
+
+/*
+ * If a process has its context stolen, min delay in jiffies before trying to
+ * steal a context from another process.
+ */
+#define GRU_STEAL_DELAY		((HZ * 200) / 1000)
+
+#define STAT(id)	do {						\
+				if (gru_options & OPT_STATS)		\
+					atomic_long_inc(&gru_stats.id);	\
+			} while (0)
+
+#ifdef CONFIG_SGI_GRU_DEBUG
+#define gru_dbg(dev, fmt, x...)						\
+	do {								\
+		if (gru_options & OPT_DPRINT)				\
+			dev_dbg(dev, "%s: " fmt, __func__, x);		\
+	} while (0)
+#else
+#define gru_dbg(x...)
+#endif
+
+/*-----------------------------------------------------------------------------
+ * ASID management
+ */
+#define MAX_ASID	0xfffff0
+#define MIN_ASID	8
+#define ASID_INC	8	/* number of regions */
+
+/* Generate a GRU asid value from a GRU base asid & a virtual address.
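+ * For illustration (hypothetical values): base asids advance in steps of
+ * ASID_INC (8) and GRUREGION() below yields a region index of 0..3, so a
+ * base asid of 16 combined with an address in region 2 gives GRUASID() 18.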
 */
+#if defined CONFIG_IA64
+#define VADDR_HI_BIT		64
+#define GRUREGION(addr)		((addr) >> (VADDR_HI_BIT - 3) & 3)
+#elif defined CONFIG_X86_64
+#define VADDR_HI_BIT		48
+#define GRUREGION(addr)		(0)		/* ZZZ could do better */
+#else
+#error "Unsupported architecture"
+#endif
+#define GRUASID(asid, addr)	((asid) + GRUREGION(addr))
+
+/*------------------------------------------------------------------------------
+ *  File & VMS Tables
+ */
+
+struct gru_state;
+
+/*
+ * This structure is pointed to from the mmstruct via the notifier pointer.
+ * There is one of these per address space.
+ */
+struct gru_mm_tracker {
+	unsigned int		mt_asid_gen;	/* ASID wrap count */
+	int			mt_asid;	/* current base ASID for gru */
+	unsigned short		mt_ctxbitmap;	/* bitmap of contexts using
+						   asid */
+};
+
+struct gru_mm_struct {
+	struct mmu_notifier	ms_notifier;
+	atomic_t		ms_refcnt;
+	spinlock_t		ms_asid_lock;	/* protects ASID assignment */
+	atomic_t		ms_range_active;/* num range_invals active */
+	char			ms_released;
+	wait_queue_head_t	ms_wait_queue;
+	DECLARE_BITMAP(ms_asidmap, GRU_MAX_GRUS);
+	struct gru_mm_tracker	ms_asids[GRU_MAX_GRUS];
+};
+
+/*
+ * One of these structures is allocated when a GSEG is mmapped. The
+ * structure is pointed to by the vma->vm_private_data field in the vma struct.
+ */
+struct gru_vma_data {
+	spinlock_t		vd_lock;	/* Serialize access to vma */
+	struct list_head	vd_head;	/* head of linked list of gts */
+	long			vd_user_options;/* misc user option flags */
+	int			vd_cbr_au_count;
+	int			vd_dsr_au_count;
+};
+
+/*
+ * One of these is allocated for each thread accessing an mmapped GRU. A linked
+ * list of these structures is hung off the struct gru_vma_data in the mm_struct.
+ */
+struct gru_thread_state {
+	struct list_head	ts_next;	/* list - head at vma-private */
+	struct mutex		ts_ctxlock;	/* load/unload CTX lock */
+	struct mm_struct	*ts_mm;		/* mm currently mapped to
+						   context */
+	struct vm_area_struct	*ts_vma;	/* vma of GRU context */
+	struct gru_state	*ts_gru;	/* GRU where the context is
+						   loaded */
+	struct gru_mm_struct	*ts_gms;	/* asid & ioproc struct */
+	unsigned long		ts_cbr_map;	/* map of allocated CBRs */
+	unsigned long		ts_dsr_map;	/* map of allocated DATA
+						   resources */
+	unsigned long		ts_steal_jiffies;/* jiffies when context last
+						    stolen */
+	long			ts_user_options;/* misc user option flags */
+	pid_t			ts_tgid_owner;	/* task that is using the
+						   context - for migration */
+	int			ts_tsid;	/* thread that owns the
+						   structure */
+	int			ts_tlb_int_select;/* target cpu if interrupts
+						     enabled */
+	int			ts_ctxnum;	/* context number where the
+						   context is loaded */
+	atomic_t		ts_refcnt;	/* reference count GTS */
+	unsigned char		ts_dsr_au_count;/* Number of DSR resources
+						   required for context */
+	unsigned char		ts_cbr_au_count;/* Number of CBR resources
+						   required for context */
+	char			ts_force_unload;/* force context to be unloaded
+						   after migration */
+	char			ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each
+							  allocated CB */
+	unsigned long		ts_gdata[0];	/* save area for GRU data (CB,
+						   DS, CBE) */
+};
+
+/*
+ * Threaded programs actually allocate an array of GSEGs when a context is
+ * created. Each thread uses a separate GSEG. TSID is the index into the GSEG
+ * array.
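+ *
+ * For illustration (hypothetical size): if GRU_GSEG_PAGESIZE were 2 MB, a
+ * reference at vma->vm_start + 6 MB would fall in the fourth GSEG, and
+ * TSID() below would return 3 for it.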
+ */
+#define TSID(a, v)		(((a) - (v)->vm_start) / GRU_GSEG_PAGESIZE)
+#define UGRUADDR(gts)		((gts)->ts_vma->vm_start +		\
+					(gts)->ts_tsid * GRU_GSEG_PAGESIZE)
+
+#define NULLCTX			(-1)	/* if context not loaded into GRU */
+
+/*-----------------------------------------------------------------------------
+ *  GRU State Tables
+ */
+
+/*
+ * One of these exists for each GRU chiplet.
+ */
+struct gru_state {
+	struct gru_blade_state	*gs_blade;		/* GRU state for entire
+							   blade */
+	unsigned long		gs_gru_base_paddr;	/* Physical address of
+							   gru segments (64) */
+	void			*gs_gru_base_vaddr;	/* Virtual address of
+							   gru segments (64) */
+	unsigned char		gs_gid;			/* unique GRU number */
+	unsigned char		gs_tgh_local_shift;	/* used to pick TGH for
+							   local flush */
+	unsigned char		gs_tgh_first_remote;	/* starting TGH# for
+							   remote flush */
+	unsigned short		gs_blade_id;		/* blade of GRU */
+	spinlock_t		gs_asid_lock;		/* lock used for
+							   assigning asids */
+	spinlock_t		gs_lock;		/* lock used for
+							   assigning contexts */
+
+	/* -- the following are protected by the gs_asid_lock spinlock ---- */
+	unsigned int		gs_asid;		/* Next available ASID */
+	unsigned int		gs_asid_limit;		/* Limit of available
+							   ASIDs */
+	unsigned int		gs_asid_gen;		/* asid generation.
+							   Inc on wrap */
+
+	/* --- the following fields are protected by the gs_lock spinlock --- */
+	unsigned long		gs_context_map;		/* bitmap to manage
+							   contexts in use */
+	unsigned long		gs_cbr_map;		/* bitmap to manage CB
+							   resources */
+	unsigned long		gs_dsr_map;		/* bitmap used to manage
+							   DATA resources */
+	unsigned int		gs_reserved_cbrs;	/* Number of kernel-
+							   reserved cbrs */
+	unsigned int		gs_reserved_dsr_bytes;	/* Bytes of kernel-
+							   reserved dsrs */
+	unsigned short		gs_active_contexts;	/* number of contexts
+							   in use */
+	struct gru_thread_state	*gs_gts[GRU_NUM_CCH];	/* GTS currently using
+							   the context */
+};
+
+/*
+ * This structure contains the GRU state for all the GRUs on a blade.
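+ * One gru_blade_state exists per blade; bs_grus[] holds the per-chiplet
+ * state, so a global gid decomposes as
+ * gid == blade_id * GRU_CHIPLETS_PER_BLADE + chiplet (see GID_TO_GRU()).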
+ */ +struct gru_blade_state { + void *kernel_cb; /* First kernel + reserved cb */ + void *kernel_dsr; /* First kernel + reserved DSR */ + /* ---- the following are protected by the bs_lock spinlock ---- */ + spinlock_t bs_lock; /* lock used for + stealing contexts */ + int bs_lru_ctxnum; /* STEAL - last context + stolen */ + struct gru_state *bs_lru_gru; /* STEAL - last gru + stolen */ + + struct gru_state bs_grus[GRU_CHIPLETS_PER_BLADE]; +}; + +/*----------------------------------------------------------------------------- + * Address Primitives + */ +#define get_tfm_for_cpu(g, c) \ + ((struct gru_tlb_fault_map *)get_tfm((g)->gs_gru_base_vaddr, (c))) +#define get_tfh_by_index(g, i) \ + ((struct gru_tlb_fault_handle *)get_tfh((g)->gs_gru_base_vaddr, (i))) +#define get_tgh_by_index(g, i) \ + ((struct gru_tlb_global_handle *)get_tgh((g)->gs_gru_base_vaddr, (i))) +#define get_cbe_by_index(g, i) \ + ((struct gru_control_block_extended *)get_cbe((g)->gs_gru_base_vaddr,\ + (i))) + +/*----------------------------------------------------------------------------- + * Useful Macros + */ + +/* Given a blade# & chiplet#, get a pointer to the GRU */ +#define get_gru(b, c) (&gru_base[b]->bs_grus[c]) + +/* Number of bytes to save/restore when unloading/loading GRU contexts */ +#define DSR_BYTES(dsr) ((dsr) * GRU_DSR_AU_BYTES) +#define CBR_BYTES(cbr) ((cbr) * GRU_HANDLE_BYTES * GRU_CBR_AU_SIZE * 2) + +/* Convert a user CB number to the actual CBRNUM */ +#define thread_cbr_number(gts, n) ((gts)->ts_cbr_idx[(n) / GRU_CBR_AU_SIZE] \ + * GRU_CBR_AU_SIZE + (n) % GRU_CBR_AU_SIZE) + +/* Convert a gid to a pointer to the GRU */ +#define GID_TO_GRU(gid) \ + (gru_base[(gid) / GRU_CHIPLETS_PER_BLADE] ? \ + (&gru_base[(gid) / GRU_CHIPLETS_PER_BLADE]-> \ + bs_grus[(gid) % GRU_CHIPLETS_PER_BLADE]) : \ + NULL) + +/* Scan all active GRUs in a GRU bitmap */ +#define for_each_gru_in_bitmap(gid, map) \ + for ((gid) = find_first_bit((map), GRU_MAX_GRUS); (gid) < GRU_MAX_GRUS;\ + (gid)++, (gid) = find_next_bit((map), GRU_MAX_GRUS, (gid))) + +/* Scan all active GRUs on a specific blade */ +#define for_each_gru_on_blade(gru, nid, i) \ + for ((gru) = gru_base[nid]->bs_grus, (i) = 0; \ + (i) < GRU_CHIPLETS_PER_BLADE; \ + (i)++, (gru)++) + +/* Scan all active GTSs on a gru. Note: must hold ss_lock to use this macro. */ +#define for_each_gts_on_gru(gts, gru, ctxnum) \ + for ((ctxnum) = 0; (ctxnum) < GRU_NUM_CCH; (ctxnum)++) \ + if (((gts) = (gru)->gs_gts[ctxnum])) + +/* Scan each CBR whose bit is set in a TFM (or copy of) */ +#define for_each_cbr_in_tfm(i, map) \ + for ((i) = find_first_bit(map, GRU_NUM_CBE); \ + (i) < GRU_NUM_CBE; \ + (i)++, (i) = find_next_bit(map, GRU_NUM_CBE, i)) + +/* Scan each CBR in a CBR bitmap. Note: multiple CBRs in an allocation unit */ +#define for_each_cbr_in_allocation_map(i, map, k) \ + for ((k) = find_first_bit(map, GRU_CBR_AU); (k) < GRU_CBR_AU; \ + (k) = find_next_bit(map, GRU_CBR_AU, (k) + 1)) \ + for ((i) = (k)*GRU_CBR_AU_SIZE; \ + (i) < ((k) + 1) * GRU_CBR_AU_SIZE; (i)++) + +/* Scan each DSR in a DSR bitmap. 
Note: multiple DSRs in an allocation unit */ +#define for_each_dsr_in_allocation_map(i, map, k) \ + for ((k) = find_first_bit((const unsigned long *)map, GRU_DSR_AU);\ + (k) < GRU_DSR_AU; \ + (k) = find_next_bit((const unsigned long *)map, \ + GRU_DSR_AU, (k) + 1)) \ + for ((i) = (k) * GRU_DSR_AU_CL; \ + (i) < ((k) + 1) * GRU_DSR_AU_CL; (i)++) + +#define gseg_physical_address(gru, ctxnum) \ + ((gru)->gs_gru_base_paddr + ctxnum * GRU_GSEG_STRIDE) +#define gseg_virtual_address(gru, ctxnum) \ + ((gru)->gs_gru_base_vaddr + ctxnum * GRU_GSEG_STRIDE) + +/*----------------------------------------------------------------------------- + * Lock / Unlock GRU handles + * Use the "delresp" bit in the handle as a "lock" bit. + */ + +/* Lock hierarchy checking enabled only in emulator */ + +static inline void __lock_handle(void *h) +{ + while (test_and_set_bit(1, h)) + cpu_relax(); +} + +static inline void __unlock_handle(void *h) +{ + clear_bit(1, h); +} + +static inline void lock_cch_handle(struct gru_context_configuration_handle *cch) +{ + __lock_handle(cch); +} + +static inline void unlock_cch_handle(struct gru_context_configuration_handle + *cch) +{ + __unlock_handle(cch); +} + +static inline void lock_tgh_handle(struct gru_tlb_global_handle *tgh) +{ + __lock_handle(tgh); +} + +static inline void unlock_tgh_handle(struct gru_tlb_global_handle *tgh) +{ + __unlock_handle(tgh); +} + +/*----------------------------------------------------------------------------- + * Function prototypes & externs + */ +struct gru_unload_context_req; + +extern struct vm_operations_struct gru_vm_ops; +extern struct device *grudev; + +extern struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, + int tsid); +extern struct gru_thread_state *gru_find_thread_state(struct vm_area_struct + *vma, int tsid); +extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct + *vma, int tsid); +extern void gru_unload_context(struct gru_thread_state *gts, int savestate); +extern void gts_drop(struct gru_thread_state *gts); +extern void gru_tgh_flush_init(struct gru_state *gru); +extern int gru_kservices_init(struct gru_state *gru); +extern irqreturn_t gru_intr(int irq, void *dev_id); +extern int gru_handle_user_call_os(unsigned long address); +extern int gru_user_flush_tlb(unsigned long arg); +extern int gru_user_unload_context(unsigned long arg); +extern int gru_get_exception_detail(unsigned long arg); +extern int gru_set_task_slice(long address); +extern int gru_cpu_fault_map_id(void); +extern struct vm_area_struct *gru_find_vma(unsigned long vaddr); +extern void gru_flush_all_tlb(struct gru_state *gru); +extern int gru_proc_init(void); +extern void gru_proc_exit(void); + +extern unsigned long gru_reserve_cb_resources(struct gru_state *gru, + int cbr_au_count, char *cbmap); +extern unsigned long gru_reserve_ds_resources(struct gru_state *gru, + int dsr_au_count, char *dsmap); +extern int gru_fault(struct vm_area_struct *, struct vm_fault *vmf); +extern struct gru_mm_struct *gru_register_mmu_notifier(void); +extern void gru_drop_mmu_notifier(struct gru_mm_struct *gms); + +extern void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start, + unsigned long len); + +extern unsigned long gru_options; + +#endif /* __GRUTABLES_H__ */ diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c new file mode 100644 index 0000000..c84496a --- /dev/null +++ b/drivers/misc/sgi-gru/grutlbpurge.c @@ -0,0 +1,371 @@ +/* + * SN Platform GRU Driver + * + * MMUOPS callbacks + TLB 
flushing
+ *
+ * This file handles mmu notifier callbacks from the core kernel. The callbacks
+ * are used to update the TLB in the GRU as a result of changes in the
+ * state of a process address space. This file also handles TLB invalidates
+ * from the GRU driver.
+ *
+ *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/hugetlb.h>
+#include <linux/delay.h>
+#include <linux/timex.h>
+#include <linux/srcu.h>
+#include <asm/processor.h>
+#include "gru.h"
+#include "grutables.h"
+#include <asm/uv/uv_hub.h>
+
+#define gru_random()	get_cycles()
+
+/* ---------------------------------- TLB Invalidation functions --------
+ * get_tgh_handle
+ *
+ * Find a TGH to use for issuing a TLB invalidate. For GRUs that are on the
+ * local blade, use a fixed TGH that is a function of the blade-local cpu
+ * number. Normally, this TGH is private to the cpu & no contention occurs for
+ * the TGH. For offblade GRUs, select a random TGH in the range above the
+ * private TGHs. A spinlock is required to access this TGH & the lock must be
+ * released when the invalidate completes. This sucks, but it is the best we
+ * can do.
+ *
+ * Note that the spinlock is IN the TGH handle so locking does not involve
+ * additional cache lines.
+ *
+ */
+static inline int get_off_blade_tgh(struct gru_state *gru)
+{
+	int n;
+
+	n = GRU_NUM_TGH - gru->gs_tgh_first_remote;
+	n = gru_random() % n;
+	n += gru->gs_tgh_first_remote;
+	return n;
+}
+
+static inline int get_on_blade_tgh(struct gru_state *gru)
+{
+	return uv_blade_processor_id() >> gru->gs_tgh_local_shift;
+}
+
+static struct gru_tlb_global_handle *get_lock_tgh_handle(struct gru_state
+							 *gru)
+{
+	struct gru_tlb_global_handle *tgh;
+	int n;
+
+	preempt_disable();
+	if (uv_numa_blade_id() == gru->gs_blade_id)
+		n = get_on_blade_tgh(gru);
+	else
+		n = get_off_blade_tgh(gru);
+	tgh = get_tgh_by_index(gru, n);
+	lock_tgh_handle(tgh);
+
+	return tgh;
+}
+
+static void get_unlock_tgh_handle(struct gru_tlb_global_handle *tgh)
+{
+	unlock_tgh_handle(tgh);
+	preempt_enable();
+}
+
+/*
+ * gru_flush_tlb_range
+ *
+ * General purpose TLB invalidation function. This function scans every GRU in
+ * the ENTIRE system (partition) looking for GRUs where the specified MM has
+ * been accessed by the GRU. For each GRU found, the TLB must be invalidated OR
+ * the ASID invalidated. Invalidating an ASID causes a new ASID to be assigned
+ * on the next fault. This effectively flushes the ENTIRE TLB for the MM at the
+ * cost of (possibly) a large number of future TLBmisses.
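+ *
+ * For illustration of the TGH selection above (hypothetical cpu counts):
+ * with MAX_LOCAL_TGH == 16, a blade with 8 cpus gets a gs_tgh_local_shift
+ * of 0, so each cpu owns a private TGH; with 32 cpus the shift is 1 and
+ * pairs of cpus share one. Off-blade flushes pick a random TGH at or above
+ * gs_tgh_first_remote and must take the handle lock (see
+ * gru_tgh_flush_init() in this file).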
+ *
+ * The current algorithm is optimized based on the following (somewhat true)
+ * assumptions:
+ *	- GRU contexts are not loaded into a GRU unless a reference is made to
+ *	  the data segment or control block (this is true, not an assumption).
+ *	  If a DS/CB is referenced, the user will also issue instructions that
+ *	  cause TLBmisses. It is not necessary to optimize for the case where
+ *	  contexts are loaded but no instructions cause TLB misses. (I know
+ *	  this will happen but I'm not optimizing for it).
+ *	- GRU instructions to invalidate TLB entries are SLOOOOWWW - normally
+ *	  a few usec but in unusual cases, it could be longer. Avoid if
+ *	  possible.
+ *	- intrablade process migration between cpus is not frequent but is
+ *	  common.
+ *	- a GRU context is not typically migrated to a different GRU on the
+ *	  blade because of intrablade migration
+ *	- interblade migration is rare. Processes migrate their GRU context to
+ *	  the new blade.
+ *	- if interblade migration occurs, migration back to the original blade
+ *	  is very very rare (i.e., no optimization for this case)
+ *	- most GRU instructions operate on a subset of the user REGIONS. Code
+ *	  & shared library regions are not likely targets of GRU instructions.
+ *
+ * To help improve the efficiency of TLB invalidation, the GMS data
+ * structure is maintained for EACH address space (MM struct). The GMS is
+ * also the structure that contains the pointer to the mmu callout
+ * functions. This structure is linked to the mm_struct for the address space
+ * using the mmu "register" function. The mmu interfaces are used to
+ * provide the callbacks for TLB invalidation. The GMS contains:
+ *
+ *	- asid[maxgrus] array. ASIDs are assigned to a GRU when a context is
+ *	  loaded into the GRU.
+ *	- asidmap[maxgrus]. bitmap to make it easier to find non-zero asids in
+ *	  the above array
+ *	- ctxbitmap[maxgrus]. Indicates the contexts that are currently active
+ *	  in the GRU for the address space. This bitmap must be passed to the
+ *	  GRU to do an invalidate.
+ *
+ * The current algorithm for invalidating TLBs is:
+ *	- scan the asidmap for GRUs where the context has been loaded, i.e.,
+ *	  asid is non-zero.
+ *	- for each gru found:
+ *		- if the ctxtmap is non-zero, there are active contexts in the
+ *		  GRU. TLB invalidate instructions must be issued to the GRU.
+ *		- if the ctxtmap is zero, no context is active. Set the ASID to
+ *		  zero to force a full TLB invalidation.
This is fast but will + * cause a lot of TLB misses if the context is reloaded onto the + * GRU + * + */ + +void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start, + unsigned long len) +{ + struct gru_state *gru; + struct gru_mm_tracker *asids; + struct gru_tlb_global_handle *tgh; + unsigned long num; + int grupagesize, pagesize, pageshift, gid, asid; + + /* ZZZ TODO - handle huge pages */ + pageshift = PAGE_SHIFT; + pagesize = (1UL << pageshift); + grupagesize = GRU_PAGESIZE(pageshift); + num = min(((len + pagesize - 1) >> pageshift), GRUMAXINVAL); + + STAT(flush_tlb); + gru_dbg(grudev, "gms %p, start 0x%lx, len 0x%lx, asidmap 0x%lx\n", gms, + start, len, gms->ms_asidmap[0]); + + spin_lock(&gms->ms_asid_lock); + for_each_gru_in_bitmap(gid, gms->ms_asidmap) { + STAT(flush_tlb_gru); + gru = GID_TO_GRU(gid); + asids = gms->ms_asids + gid; + asid = asids->mt_asid; + if (asids->mt_ctxbitmap && asid) { + STAT(flush_tlb_gru_tgh); + asid = GRUASID(asid, start); + gru_dbg(grudev, + " FLUSH gruid %d, asid 0x%x, num %ld, cbmap 0x%x\n", + gid, asid, num, asids->mt_ctxbitmap); + tgh = get_lock_tgh_handle(gru); + tgh_invalidate(tgh, start, 0, asid, grupagesize, 0, + num - 1, asids->mt_ctxbitmap); + get_unlock_tgh_handle(tgh); + } else { + STAT(flush_tlb_gru_zero_asid); + asids->mt_asid = 0; + __clear_bit(gru->gs_gid, gms->ms_asidmap); + gru_dbg(grudev, + " CLEARASID gruid %d, asid 0x%x, cbtmap 0x%x, asidmap 0x%lx\n", + gid, asid, asids->mt_ctxbitmap, + gms->ms_asidmap[0]); + } + } + spin_unlock(&gms->ms_asid_lock); +} + +/* + * Flush the entire TLB on a chiplet. + */ +void gru_flush_all_tlb(struct gru_state *gru) +{ + struct gru_tlb_global_handle *tgh; + + gru_dbg(grudev, "gru %p, gid %d\n", gru, gru->gs_gid); + tgh = get_lock_tgh_handle(gru); + tgh_invalidate(tgh, 0, ~0, 0, 1, 1, GRUMAXINVAL - 1, 0); + get_unlock_tgh_handle(tgh); + preempt_enable(); +} + +/* + * MMUOPS notifier callout functions + */ +static void gru_invalidate_range_start(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct, + ms_notifier); + + STAT(mmu_invalidate_range); + atomic_inc(&gms->ms_range_active); + gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx, act %d\n", gms, + start, end, atomic_read(&gms->ms_range_active)); + gru_flush_tlb_range(gms, start, end - start); +} + +static void gru_invalidate_range_end(struct mmu_notifier *mn, + struct mm_struct *mm, unsigned long start, + unsigned long end) +{ + struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct, + ms_notifier); + + /* ..._and_test() provides needed barrier */ + (void)atomic_dec_and_test(&gms->ms_range_active); + + wake_up_all(&gms->ms_wait_queue); + gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx\n", gms, start, end); +} + +static void gru_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm, + unsigned long address) +{ + struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct, + ms_notifier); + + STAT(mmu_invalidate_page); + gru_flush_tlb_range(gms, address, PAGE_SIZE); + gru_dbg(grudev, "gms %p, address 0x%lx\n", gms, address); +} + +static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm) +{ + struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct, + ms_notifier); + + gms->ms_released = 1; + gru_dbg(grudev, "gms %p\n", gms); +} + + +static const struct mmu_notifier_ops gru_mmuops = { + .invalidate_page = gru_invalidate_page, + .invalidate_range_start = gru_invalidate_range_start, + 
.invalidate_range_end	= gru_invalidate_range_end,
+	.release		= gru_release,
+};
+
+/* Move this to the basic mmu_notifier file. But for now... */
+static struct mmu_notifier *mmu_find_ops(struct mm_struct *mm,
+			const struct mmu_notifier_ops *ops)
+{
+	struct mmu_notifier *mn, *gru_mn = NULL;
+	struct hlist_node *n;
+
+	if (mm->mmu_notifier_mm) {
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list,
+					 hlist)
+			if (mn->ops == ops) {
+				gru_mn = mn;
+				break;
+			}
+		rcu_read_unlock();
+	}
+	return gru_mn;
+}
+
+struct gru_mm_struct *gru_register_mmu_notifier(void)
+{
+	struct gru_mm_struct *gms;
+	struct mmu_notifier *mn;
+
+	mn = mmu_find_ops(current->mm, &gru_mmuops);
+	if (mn) {
+		gms = container_of(mn, struct gru_mm_struct, ms_notifier);
+		atomic_inc(&gms->ms_refcnt);
+	} else {
+		gms = kzalloc(sizeof(*gms), GFP_KERNEL);
+		if (gms) {
+			spin_lock_init(&gms->ms_asid_lock);
+			gms->ms_notifier.ops = &gru_mmuops;
+			atomic_set(&gms->ms_refcnt, 1);
+			init_waitqueue_head(&gms->ms_wait_queue);
+			__mmu_notifier_register(&gms->ms_notifier, current->mm);
+		}
+	}
+	gru_dbg(grudev, "gms %p, refcnt %d\n", gms,
+		atomic_read(&gms->ms_refcnt));
+	return gms;
+}
+
+void gru_drop_mmu_notifier(struct gru_mm_struct *gms)
+{
+	gru_dbg(grudev, "gms %p, refcnt %d, released %d\n", gms,
+		atomic_read(&gms->ms_refcnt), gms->ms_released);
+	if (atomic_dec_return(&gms->ms_refcnt) == 0) {
+		if (!gms->ms_released)
+			mmu_notifier_unregister(&gms->ms_notifier, current->mm);
+		kfree(gms);
+	}
+}
+
+/*
+ * Setup TGH parameters. There are:
+ *	- 24 TGH handles per GRU chiplet
+ *	- a portion (MAX_LOCAL_TGH) of the handles is reserved for
+ *	  use by blade-local cpus
+ *	- the rest are used by off-blade cpus. This usage is
+ *	  less frequent than blade-local usage.
+ *
+ * For now, use 16 handles for local flushes, 8 for remote flushes. If the
+ * blade has less than or equal to 16 cpus, each cpu has a unique handle
+ * that it can use.
+ */
+#define MAX_LOCAL_TGH	16
+
+void gru_tgh_flush_init(struct gru_state *gru)
+{
+	int cpus, shift = 0, n;
+
+	cpus = uv_blade_nr_possible_cpus(gru->gs_blade_id);
+
+	/* n = cpus rounded up to next power of 2 */
+	if (cpus) {
+		n = 1 << fls(cpus - 1);
+
+		/*
+		 * shift count for converting local cpu# to TGH index
+		 *	0 if cpus <= MAX_LOCAL_TGH,
+		 *	1 if cpus <= 2*MAX_LOCAL_TGH,
+		 *	etc
+		 */
+		shift = max(0, fls(n - 1) - fls(MAX_LOCAL_TGH - 1));
+	}
+	gru->gs_tgh_local_shift = shift;
+
+	/* first starting TGH index to use for remote purges */
+	gru->gs_tgh_first_remote = (cpus + (1 << shift) - 1) >> shift;
+
+}
diff --git a/drivers/misc/sgi-xp/Makefile b/drivers/misc/sgi-xp/Makefile
new file mode 100644
index 0000000..4fc40d8
--- /dev/null
+++ b/drivers/misc/sgi-xp/Makefile
@@ -0,0 +1,19 @@
+#
+# Makefile for SGI's XP devices.
+# + +obj-$(CONFIG_SGI_XP) += xp.o +xp-y := xp_main.o +xp-$(CONFIG_IA64_SGI_SN2) += xp_sn2.o xp_nofault.o +xp-$(CONFIG_IA64_GENERIC) += xp_sn2.o xp_nofault.o +xp-$(CONFIG_IA64_SGI_UV) += xp_uv.o +xp-$(CONFIG_X86_64) += xp_uv.o + +obj-$(CONFIG_SGI_XP) += xpc.o +xpc-y := xpc_main.o xpc_channel.o xpc_partition.o +xpc-$(CONFIG_IA64_SGI_SN2) += xpc_sn2.o +xpc-$(CONFIG_IA64_GENERIC) += xpc_sn2.o +xpc-$(CONFIG_IA64_SGI_UV) += xpc_uv.o +xpc-$(CONFIG_X86_64) += xpc_uv.o + +obj-$(CONFIG_SGI_XP) += xpnet.o diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h new file mode 100644 index 0000000..ed1722e --- /dev/null +++ b/drivers/misc/sgi-xp/xp.h @@ -0,0 +1,359 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2004-2008 Silicon Graphics, Inc. All rights reserved. + */ + +/* + * External Cross Partition (XP) structures and defines. + */ + +#ifndef _DRIVERS_MISC_SGIXP_XP_H +#define _DRIVERS_MISC_SGIXP_XP_H + +#include <linux/mutex.h> + +#ifdef CONFIG_IA64 +#include <asm/system.h> +#include <asm/sn/arch.h> /* defines is_shub1() and is_shub2() */ +#define is_shub() ia64_platform_is("sn2") +#ifdef CONFIG_IA64_SGI_UV +#define is_uv() ia64_platform_is("uv") +#else +#define is_uv() 0 +#endif +#endif +#ifdef CONFIG_X86_64 +#include <asm/genapic.h> +#define is_uv() is_uv_system() +#endif + +#ifndef is_shub1 +#define is_shub1() 0 +#endif + +#ifndef is_shub2 +#define is_shub2() 0 +#endif + +#ifndef is_shub +#define is_shub() 0 +#endif + +#ifndef is_uv +#define is_uv() 0 +#endif + +#ifdef USE_DBUG_ON +#define DBUG_ON(condition) BUG_ON(condition) +#else +#define DBUG_ON(condition) +#endif + +/* + * Define the maximum number of partitions the system can possibly support. + * It is based on the maximum number of hardware partitionable regions. The + * term 'region' in this context refers to the minimum number of nodes that + * can comprise an access protection grouping. The access protection is in + * regards to memory, IPI and IOI. + * + * The maximum number of hardware partitionable regions is equal to the + * maximum number of nodes in the entire system divided by the minimum number + * of nodes that comprise an access protection grouping. + */ +#define XP_MAX_NPARTITIONS_SN2 64 +#define XP_MAX_NPARTITIONS_UV 256 + +/* + * XPC establishes channel connections between the local partition and any + * other partition that is currently up. Over these channels, kernel-level + * `users' can communicate with their counterparts on the other partitions. + * + * If the need for additional channels arises, one can simply increase + * XPC_MAX_NCHANNELS accordingly. If the day should come where that number + * exceeds the absolute MAXIMUM number of channels possible (eight), then one + * will need to make changes to the XPC code to accommodate for this. + * + * The absolute maximum number of channels possible is limited to eight for + * performance reasons on sn2 hardware. The internal cross partition structures + * require sixteen bytes per channel, and eight allows all of this + * interface-shared info to fit in one 128-byte cacheline. + */ +#define XPC_MEM_CHANNEL 0 /* memory channel number */ +#define XPC_NET_CHANNEL 1 /* network channel number */ + +#define XPC_MAX_NCHANNELS 2 /* max #of channels allowed */ + +#if XPC_MAX_NCHANNELS > 8 +#error XPC_MAX_NCHANNELS exceeds absolute MAXIMUM possible. 
+#endif
+
+/*
+ * The XPC_MSG_SIZE() macro is provided for the user
+ * who wants to fit as many msg entries as possible in a given memory size
+ * (e.g. a memory page).
+ */
+#define XPC_MSG_MAX_SIZE	128
+#define XPC_MSG_HDR_MAX_SIZE	16
+#define XPC_MSG_PAYLOAD_MAX_SIZE (XPC_MSG_MAX_SIZE - XPC_MSG_HDR_MAX_SIZE)
+
+#define XPC_MSG_SIZE(_payload_size) \
+				ALIGN(XPC_MSG_HDR_MAX_SIZE + (_payload_size), \
+				      is_uv() ? 64 : 128)
+
+
+/*
+ * Define the return values and values passed to user's callout functions.
+ * (It is important to add new value codes at the end just preceding
+ * xpUnknownReason, which must have the highest numerical value.)
+ */
+enum xp_retval {
+	xpSuccess = 0,
+
+	xpNotConnected,		/*  1: channel is not connected */
+	xpConnected,		/*  2: channel connected (opened) */
+	xpRETIRED1,		/*  3: (formerly xpDisconnected) */
+
+	xpMsgReceived,		/*  4: message received */
+	xpMsgDelivered,		/*  5: message delivered and acknowledged */
+
+	xpRETIRED2,		/*  6: (formerly xpTransferFailed) */
+
+	xpNoWait,		/*  7: operation would require wait */
+	xpRetry,		/*  8: retry operation */
+	xpTimeout,		/*  9: timeout in xpc_allocate_msg_wait() */
+	xpInterrupted,		/* 10: interrupted wait */
+
+	xpUnequalMsgSizes,	/* 11: message size disparity between sides */
+	xpInvalidAddress,	/* 12: invalid address */
+
+	xpNoMemory,		/* 13: no memory available for XPC structures */
+	xpLackOfResources,	/* 14: insufficient resources for operation */
+	xpUnregistered,		/* 15: channel is not registered */
+	xpAlreadyRegistered,	/* 16: channel is already registered */
+
+	xpPartitionDown,	/* 17: remote partition is down */
+	xpNotLoaded,		/* 18: XPC module is not loaded */
+	xpUnloading,		/* 19: this side is unloading XPC module */
+
+	xpBadMagic,		/* 20: XPC MAGIC string not found */
+
+	xpReactivating,		/* 21: remote partition was reactivated */
+
+	xpUnregistering,	/* 22: this side is unregistering channel */
+	xpOtherUnregistering,	/* 23: other side is unregistering channel */
+
+	xpCloneKThread,		/* 24: cloning kernel thread */
+	xpCloneKThreadFailed,	/* 25: cloning kernel thread failed */
+
+	xpNoHeartbeat,		/* 26: remote partition has no heartbeat */
+
+	xpPioReadError,		/* 27: PIO read error */
+	xpPhysAddrRegFailed,	/* 28: registration of phys addr range failed */
+
+	xpRETIRED3,		/* 29: (formerly xpBteDirectoryError) */
+	xpRETIRED4,		/* 30: (formerly xpBtePoisonError) */
+	xpRETIRED5,		/* 31: (formerly xpBteWriteError) */
+	xpRETIRED6,		/* 32: (formerly xpBteAccessError) */
+	xpRETIRED7,		/* 33: (formerly xpBtePWriteError) */
+	xpRETIRED8,		/* 34: (formerly xpBtePReadError) */
+	xpRETIRED9,		/* 35: (formerly xpBteTimeOutError) */
+	xpRETIRED10,		/* 36: (formerly xpBteXtalkError) */
+	xpRETIRED11,		/* 37: (formerly xpBteNotAvailable) */
+	xpRETIRED12,		/* 38: (formerly xpBteUnmappedError) */
+
+	xpBadVersion,		/* 39: bad version number */
+	xpVarsNotSet,		/* 40: the XPC variables are not set up */
+	xpNoRsvdPageAddr,	/* 41: unable to get rsvd page's phys addr */
+	xpInvalidPartid,	/* 42: invalid partition ID */
+	xpLocalPartid,		/* 43: local partition ID */
+
+	xpOtherGoingDown,	/* 44: other side going down, reason unknown */
+	xpSystemGoingDown,	/* 45: system is going down, reason unknown */
+	xpSystemHalt,		/* 46: system is being halted */
+	xpSystemReboot,		/* 47: system is being rebooted */
+	xpSystemPoweroff,	/* 48: system is being powered off */
+
+	xpDisconnecting,	/* 49: channel disconnecting (closing) */
+
+	xpOpenCloseError,	/* 50: channel open/close protocol error */
+
+	xpDisconnected,		/* 51: channel disconnected
(closed) */
+
+	xpBteCopyError,		/* 52: bte_copy() returned error */
+	xpSalError,		/* 53: sn SAL error */
+	xpRsvdPageNotSet,	/* 54: the reserved page is not set up */
+	xpPayloadTooBig,	/* 55: payload too large for message slot */
+
+	xpUnsupported,		/* 56: unsupported functionality or resource */
+	xpNeedMoreInfo,		/* 57: more info is needed by SAL */
+
+	xpGruCopyError,		/* 58: gru_copy_gru() returned error */
+	xpGruSendMqError,	/* 59: gru send message queue related error */
+
+	xpBadChannelNumber,	/* 60: invalid channel number */
+	xpBadMsgType,		/* 61: invalid message type */
+
+	xpUnknownReason		/* 62: unknown reason - must be last in enum */
+};
+
+/*
+ * Define the callout function type used by XPC to update the user on
+ * connection activity and state changes via the user function registered
+ * by xpc_connect().
+ *
+ * Arguments:
+ *
+ *	reason - reason code.
+ *	partid - partition ID associated with condition.
+ *	ch_number - channel # associated with condition.
+ *	data - pointer to optional data.
+ *	key - pointer to optional user-defined value provided as the "key"
+ *	      argument to xpc_connect().
+ *
+ * A reason code of xpConnected indicates that a connection has been
+ * established to the specified partition on the specified channel. The data
+ * argument indicates the max number of entries allowed in the message queue.
+ *
+ * A reason code of xpMsgReceived indicates that an XPC message arrived from
+ * the specified partition on the specified channel. The data argument
+ * specifies the address of the message's payload. The user must call
+ * xpc_received() when finished with the payload.
+ *
+ * All other reason codes indicate failure. The data argument is NULL.
+ * When a failure reason code is received, one can assume that the channel
+ * is not connected.
+ */
+typedef void (*xpc_channel_func) (enum xp_retval reason, short partid,
+				  int ch_number, void *data, void *key);
+
+/*
+ * Define the callout function type used by XPC to notify the user of
+ * messages received and delivered via the user function registered by
+ * xpc_send_notify().
+ *
+ * Arguments:
+ *
+ *	reason - reason code.
+ *	partid - partition ID associated with condition.
+ *	ch_number - channel # associated with condition.
+ *	key - pointer to optional user-defined value provided as the "key"
+ *	      argument to xpc_send_notify().
+ *
+ * A reason code of xpMsgDelivered indicates that the message was delivered
+ * to the intended recipient and that they have acknowledged its receipt by
+ * calling xpc_received().
+ *
+ * All other reason codes indicate failure.
+ *
+ * NOTE: The user defined function must be callable by an interrupt handler
+ *       and thus cannot block.
+ */
+typedef void (*xpc_notify_func) (enum xp_retval reason, short partid,
+				 int ch_number, void *key);
+
+/*
+ * The following is a registration entry. There is a global array of these,
+ * one per channel. It is used to record the connection registration made
+ * by the users of XPC. As long as a registration entry exists, for any
+ * partition that comes up, XPC will attempt to establish a connection on
+ * that channel. Notification that a connection has been made will occur via
+ * the xpc_channel_func function.
+ *
+ * The 'func' field points to the function to call when asynchronous
+ * notification is required for such events as: a connection established/lost,
+ * or an incoming message received, or an error condition encountered. A
+ * non-NULL 'func' field indicates that there is an active registration for
+ * the channel.
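+ *
+ * A minimal (hypothetical) registration for the network channel might be:
+ *
+ *	ret = xpc_connect(XPC_NET_CHANNEL, my_channel_func, my_key,
+ *			  payload_size, nentries, assigned_limit, idle_limit);
+ *
+ * where my_channel_func and my_key are supplied by the caller; see
+ * xpc_connect() in xp_main.c for the meaning of each argument.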
+ */ +struct xpc_registration { + struct mutex mutex; + xpc_channel_func func; /* function to call */ + void *key; /* pointer to user's key */ + u16 nentries; /* #of msg entries in local msg queue */ + u16 entry_size; /* message queue's message entry size */ + u32 assigned_limit; /* limit on #of assigned kthreads */ + u32 idle_limit; /* limit on #of idle kthreads */ +} ____cacheline_aligned; + +#define XPC_CHANNEL_REGISTERED(_c) (xpc_registrations[_c].func != NULL) + +/* the following are valid xpc_send() or xpc_send_notify() flags */ +#define XPC_WAIT 0 /* wait flag */ +#define XPC_NOWAIT 1 /* no wait flag */ + +struct xpc_interface { + void (*connect) (int); + void (*disconnect) (int); + enum xp_retval (*send) (short, int, u32, void *, u16); + enum xp_retval (*send_notify) (short, int, u32, void *, u16, + xpc_notify_func, void *); + void (*received) (short, int, void *); + enum xp_retval (*partid_to_nasids) (short, void *); +}; + +extern struct xpc_interface xpc_interface; + +extern void xpc_set_interface(void (*)(int), + void (*)(int), + enum xp_retval (*)(short, int, u32, void *, u16), + enum xp_retval (*)(short, int, u32, void *, u16, + xpc_notify_func, void *), + void (*)(short, int, void *), + enum xp_retval (*)(short, void *)); +extern void xpc_clear_interface(void); + +extern enum xp_retval xpc_connect(int, xpc_channel_func, void *, u16, + u16, u32, u32); +extern void xpc_disconnect(int); + +static inline enum xp_retval +xpc_send(short partid, int ch_number, u32 flags, void *payload, + u16 payload_size) +{ + return xpc_interface.send(partid, ch_number, flags, payload, + payload_size); +} + +static inline enum xp_retval +xpc_send_notify(short partid, int ch_number, u32 flags, void *payload, + u16 payload_size, xpc_notify_func func, void *key) +{ + return xpc_interface.send_notify(partid, ch_number, flags, payload, + payload_size, func, key); +} + +static inline void +xpc_received(short partid, int ch_number, void *payload) +{ + return xpc_interface.received(partid, ch_number, payload); +} + +static inline enum xp_retval +xpc_partid_to_nasids(short partid, void *nasids) +{ + return xpc_interface.partid_to_nasids(partid, nasids); +} + +extern short xp_max_npartitions; +extern short xp_partition_id; +extern u8 xp_region_size; + +extern unsigned long (*xp_pa) (void *); +extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long, + size_t); +extern int (*xp_cpu_to_nasid) (int); + +extern u64 xp_nofault_PIOR_target; +extern int xp_nofault_PIOR(void *); +extern int xp_error_PIOR(void); + +extern struct device *xp; +extern enum xp_retval xp_init_sn2(void); +extern enum xp_retval xp_init_uv(void); +extern void xp_exit_sn2(void); +extern void xp_exit_uv(void); + +#endif /* _DRIVERS_MISC_SGIXP_XP_H */ diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c new file mode 100644 index 0000000..66a1d19 --- /dev/null +++ b/drivers/misc/sgi-xp/xp_main.c @@ -0,0 +1,276 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. + */ + +/* + * Cross Partition (XP) base. + * + * XP provides a base from which its users can interact + * with XPC, yet not be dependent on XPC. 
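+ *
+ * The decoupling is done with a table of function pointers (struct
+ * xpc_interface): each entry initially points at an xpc_notloaded() stub,
+ * and the XPC module installs its real handlers with xpc_set_interface()
+ * at load time and restores the stubs with xpc_clear_interface() on unload.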
+ * + */ + +#include <linux/module.h> +#include <linux/device.h> +#include "xp.h" + +/* define the XP debug device structures to be used with dev_dbg() et al */ + +struct device_driver xp_dbg_name = { + .name = "xp" +}; + +struct device xp_dbg_subname = { + .bus_id = {0}, /* set to "" */ + .driver = &xp_dbg_name +}; + +struct device *xp = &xp_dbg_subname; + +/* max #of partitions possible */ +short xp_max_npartitions; +EXPORT_SYMBOL_GPL(xp_max_npartitions); + +short xp_partition_id; +EXPORT_SYMBOL_GPL(xp_partition_id); + +u8 xp_region_size; +EXPORT_SYMBOL_GPL(xp_region_size); + +unsigned long (*xp_pa) (void *addr); +EXPORT_SYMBOL_GPL(xp_pa); + +enum xp_retval (*xp_remote_memcpy) (unsigned long dst_gpa, + const unsigned long src_gpa, size_t len); +EXPORT_SYMBOL_GPL(xp_remote_memcpy); + +int (*xp_cpu_to_nasid) (int cpuid); +EXPORT_SYMBOL_GPL(xp_cpu_to_nasid); + +/* + * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level + * users of XPC. + */ +struct xpc_registration xpc_registrations[XPC_MAX_NCHANNELS]; +EXPORT_SYMBOL_GPL(xpc_registrations); + +/* + * Initialize the XPC interface to indicate that XPC isn't loaded. + */ +static enum xp_retval +xpc_notloaded(void) +{ + return xpNotLoaded; +} + +struct xpc_interface xpc_interface = { + (void (*)(int))xpc_notloaded, + (void (*)(int))xpc_notloaded, + (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded, + (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func, + void *))xpc_notloaded, + (void (*)(short, int, void *))xpc_notloaded, + (enum xp_retval(*)(short, void *))xpc_notloaded +}; +EXPORT_SYMBOL_GPL(xpc_interface); + +/* + * XPC calls this when it (the XPC module) has been loaded. + */ +void +xpc_set_interface(void (*connect) (int), + void (*disconnect) (int), + enum xp_retval (*send) (short, int, u32, void *, u16), + enum xp_retval (*send_notify) (short, int, u32, void *, u16, + xpc_notify_func, void *), + void (*received) (short, int, void *), + enum xp_retval (*partid_to_nasids) (short, void *)) +{ + xpc_interface.connect = connect; + xpc_interface.disconnect = disconnect; + xpc_interface.send = send; + xpc_interface.send_notify = send_notify; + xpc_interface.received = received; + xpc_interface.partid_to_nasids = partid_to_nasids; +} +EXPORT_SYMBOL_GPL(xpc_set_interface); + +/* + * XPC calls this when it (the XPC module) is being unloaded. + */ +void +xpc_clear_interface(void) +{ + xpc_interface.connect = (void (*)(int))xpc_notloaded; + xpc_interface.disconnect = (void (*)(int))xpc_notloaded; + xpc_interface.send = (enum xp_retval(*)(short, int, u32, void *, u16)) + xpc_notloaded; + xpc_interface.send_notify = (enum xp_retval(*)(short, int, u32, void *, + u16, xpc_notify_func, + void *))xpc_notloaded; + xpc_interface.received = (void (*)(short, int, void *)) + xpc_notloaded; + xpc_interface.partid_to_nasids = (enum xp_retval(*)(short, void *)) + xpc_notloaded; +} +EXPORT_SYMBOL_GPL(xpc_clear_interface); + +/* + * Register for automatic establishment of a channel connection whenever + * a partition comes up. + * + * Arguments: + * + * ch_number - channel # to register for connection. + * func - function to call for asynchronous notification of channel + * state changes (i.e., connection, disconnection, error) and + * the arrival of incoming messages. + * key - pointer to optional user-defined value that gets passed back + * to the user on any callouts made to func. + * payload_size - size in bytes of the XPC message's payload area which + * contains a user-defined message. 
The user should make
+ * this large enough to hold their largest message.
+ * nentries - max #of XPC message entries a message queue can contain.
+ * The actual number, which is determined when a connection
+ * is established and may be less than requested, will be
+ * passed to the user via the xpConnected callout.
+ * assigned_limit - max number of kthreads allowed to be processing
+ * messages (per connection) at any given instant.
+ * idle_limit - max number of kthreads allowed to be idle at any given
+ * instant.
+ */
+enum xp_retval
+xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
+ u16 nentries, u32 assigned_limit, u32 idle_limit)
+{
+ struct xpc_registration *registration;
+
+ DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
+ DBUG_ON(payload_size == 0 || nentries == 0);
+ DBUG_ON(func == NULL);
+ DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit);
+
+ if (XPC_MSG_SIZE(payload_size) > XPC_MSG_MAX_SIZE)
+ return xpPayloadTooBig;
+
+ registration = &xpc_registrations[ch_number];
+
+ if (mutex_lock_interruptible(&registration->mutex) != 0)
+ return xpInterrupted;
+
+ /* if XPC_CHANNEL_REGISTERED(ch_number) */
+ if (registration->func != NULL) {
+ mutex_unlock(&registration->mutex);
+ return xpAlreadyRegistered;
+ }
+
+ /* register the channel for connection */
+ registration->entry_size = XPC_MSG_SIZE(payload_size);
+ registration->nentries = nentries;
+ registration->assigned_limit = assigned_limit;
+ registration->idle_limit = idle_limit;
+ registration->key = key;
+ registration->func = func;
+
+ mutex_unlock(&registration->mutex);
+
+ xpc_interface.connect(ch_number);
+
+ return xpSuccess;
+}
+EXPORT_SYMBOL_GPL(xpc_connect);
+
+/*
+ * Remove the registration for automatic connection of the specified channel
+ * when a partition comes up.
+ *
+ * Before returning, xpc_disconnect() will wait until all connections on the
+ * specified channel have been closed/torn down. So the caller can be assured
+ * that they will not be receiving any more callouts from XPC to their
+ * function registered via xpc_connect().
+ *
+ * Arguments:
+ *
+ * ch_number - channel # to unregister.
+ */
+void
+xpc_disconnect(int ch_number)
+{
+ struct xpc_registration *registration;
+
+ DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
+
+ registration = &xpc_registrations[ch_number];
+
+ /*
+ * We've decided not to make this a down_interruptible(), since we
+ * figured XPC's users will just turn around and call xpc_disconnect()
+ * again anyway, so we might as well wait, if need be.
+ */
+ mutex_lock(&registration->mutex);
+
+ /* if !XPC_CHANNEL_REGISTERED(ch_number) */
+ if (registration->func == NULL) {
+ mutex_unlock(&registration->mutex);
+ return;
+ }
+
+ /* remove the connection registration for the specified channel */
+ registration->func = NULL;
+ registration->key = NULL;
+ registration->nentries = 0;
+ registration->entry_size = 0;
+ registration->assigned_limit = 0;
+ registration->idle_limit = 0;
+
+ xpc_interface.disconnect(ch_number);
+
+ mutex_unlock(&registration->mutex);
+
+ return;
+}
+EXPORT_SYMBOL_GPL(xpc_disconnect);
+
+int __init
+xp_init(void)
+{
+ enum xp_retval ret;
+ int ch_number;
+
+ if (is_shub())
+ ret = xp_init_sn2();
+ else if (is_uv())
+ ret = xp_init_uv();
+ else
+ ret = xpUnsupported;
+
+ if (ret != xpSuccess)
+ return -ENODEV;
+
+ /* initialize the connection registration mutex */
+ for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++)
+ mutex_init(&xpc_registrations[ch_number].mutex);
+
+ return 0;
+}
+
+module_init(xp_init);
+
+void __exit
+xp_exit(void)
+{
+ if (is_shub())
+ xp_exit_sn2();
+ else if (is_uv())
+ xp_exit_uv();
+}
+
+module_exit(xp_exit);
+
+MODULE_AUTHOR("Silicon Graphics, Inc.");
+MODULE_DESCRIPTION("Cross Partition (XP) base");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/sgi-xp/xp_nofault.S b/drivers/misc/sgi-xp/xp_nofault.S
new file mode 100644
index 0000000..e38d433
--- /dev/null
+++ b/drivers/misc/sgi-xp/xp_nofault.S
@@ -0,0 +1,35 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+/*
+ * The xp_nofault_PIOR function takes a pointer to a remote PIO register
+ * and attempts to load and consume a value from it. This function
+ * will be registered as a nofault code block. In the event that the
+ * PIO read fails, the MCA handler will force the error to look
+ * corrected and vector to the xp_error_PIOR which will return an error.
+ *
+ * The definition of "consumption" and the time it takes for an MCA
+ * to surface is processor implementation specific. This code
+ * is sufficient on Itanium through the Montvale processor family.
+ * It may need to be adjusted for future processor implementations.
+ *
+ * extern int xp_nofault_PIOR(void *remote_register);
+ */
+
+ .global xp_nofault_PIOR
+xp_nofault_PIOR:
+ mov r8=r0 // Stage a success return value
+ ld8.acq r9=[r32];; // PIO Read the specified register
+ adds r9=1,r9;; // Add to force consumption
+ srlz.i;; // Allow time for MCA to surface
+ br.ret.sptk.many b0;; // Return success
+
+ .global xp_error_PIOR
+xp_error_PIOR:
+ mov r8=1 // Return value of 1
+ br.ret.sptk.many b0;; // Return failure
diff --git a/drivers/misc/sgi-xp/xp_sn2.c b/drivers/misc/sgi-xp/xp_sn2.c
new file mode 100644
index 0000000..1440134
--- /dev/null
+++ b/drivers/misc/sgi-xp/xp_sn2.c
@@ -0,0 +1,146 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+/*
+ * Cross Partition (XP) sn2-based functions.
+ *
+ * Architecture specific implementation of common functions.
+ */ + +#include <linux/module.h> +#include <linux/device.h> +#include <asm/sn/bte.h> +#include <asm/sn/sn_sal.h> +#include "xp.h" + +/* + * The export of xp_nofault_PIOR needs to happen here since it is defined + * in drivers/misc/sgi-xp/xp_nofault.S. The target of the nofault read is + * defined here. + */ +EXPORT_SYMBOL_GPL(xp_nofault_PIOR); + +u64 xp_nofault_PIOR_target; +EXPORT_SYMBOL_GPL(xp_nofault_PIOR_target); + +/* + * Register a nofault code region which performs a cross-partition PIO read. + * If the PIO read times out, the MCA handler will consume the error and + * return to a kernel-provided instruction to indicate an error. This PIO read + * exists because it is guaranteed to timeout if the destination is down + * (amo operations do not timeout on at least some CPUs on Shubs <= v1.2, + * which unfortunately we have to work around). + */ +static enum xp_retval +xp_register_nofault_code_sn2(void) +{ + int ret; + u64 func_addr; + u64 err_func_addr; + + func_addr = *(u64 *)xp_nofault_PIOR; + err_func_addr = *(u64 *)xp_error_PIOR; + ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr, + 1, 1); + if (ret != 0) { + dev_err(xp, "can't register nofault code, error=%d\n", ret); + return xpSalError; + } + /* + * Setup the nofault PIO read target. (There is no special reason why + * SH_IPI_ACCESS was selected.) + */ + if (is_shub1()) + xp_nofault_PIOR_target = SH1_IPI_ACCESS; + else if (is_shub2()) + xp_nofault_PIOR_target = SH2_IPI_ACCESS0; + + return xpSuccess; +} + +static void +xp_unregister_nofault_code_sn2(void) +{ + u64 func_addr = *(u64 *)xp_nofault_PIOR; + u64 err_func_addr = *(u64 *)xp_error_PIOR; + + /* unregister the PIO read nofault code region */ + (void)sn_register_nofault_code(func_addr, err_func_addr, + err_func_addr, 1, 0); +} + +/* + * Convert a virtual memory address to a physical memory address. + */ +static unsigned long +xp_pa_sn2(void *addr) +{ + return __pa(addr); +} + +/* + * Wrapper for bte_copy(). + * + * dst_pa - physical address of the destination of the transfer. + * src_pa - physical address of the source of the transfer. + * len - number of bytes to transfer from source to destination. + * + * Note: xp_remote_memcpy_sn2() should never be called while holding a spinlock. 
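+ *
+ * A hedged caller sketch (dst_pa and src_pa are hypothetical physical
+ * addresses, e.g. obtained via xp_pa()):
+ *
+ * ret = xp_remote_memcpy(dst_pa, src_pa, len);
+ * if (ret != xpSuccess)
+ * return ret;
+ *
+ * Callers go through the xp_remote_memcpy function pointer, which
+ * xp_init_sn2() below points at this routine.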
+ */
+static enum xp_retval
+xp_remote_memcpy_sn2(unsigned long dst_pa, const unsigned long src_pa,
+ size_t len)
+{
+ bte_result_t ret;
+
+ ret = bte_copy(src_pa, dst_pa, len, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
+ if (ret == BTE_SUCCESS)
+ return xpSuccess;
+
+ if (is_shub2()) {
+ dev_err(xp, "bte_copy() on shub2 failed, error=0x%x dst_pa="
+ "0x%016lx src_pa=0x%016lx len=%ld\n", ret, dst_pa,
+ src_pa, len);
+ } else {
+ dev_err(xp, "bte_copy() failed, error=%d dst_pa=0x%016lx "
+ "src_pa=0x%016lx len=%ld\n", ret, dst_pa, src_pa, len);
+ }
+
+ return xpBteCopyError;
+}
+
+static int
+xp_cpu_to_nasid_sn2(int cpuid)
+{
+ return cpuid_to_nasid(cpuid);
+}
+
+enum xp_retval
+xp_init_sn2(void)
+{
+ BUG_ON(!is_shub());
+
+ xp_max_npartitions = XP_MAX_NPARTITIONS_SN2;
+ xp_partition_id = sn_partition_id;
+ xp_region_size = sn_region_size;
+
+ xp_pa = xp_pa_sn2;
+ xp_remote_memcpy = xp_remote_memcpy_sn2;
+ xp_cpu_to_nasid = xp_cpu_to_nasid_sn2;
+
+ return xp_register_nofault_code_sn2();
+}
+
+void
+xp_exit_sn2(void)
+{
+ BUG_ON(!is_shub());
+
+ xp_unregister_nofault_code_sn2();
+}
+
diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c
new file mode 100644
index 0000000..d9f7ce2
--- /dev/null
+++ b/drivers/misc/sgi-xp/xp_uv.c
@@ -0,0 +1,72 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+/*
+ * Cross Partition (XP) uv-based functions.
+ *
+ * Architecture specific implementation of common functions.
+ *
+ */
+
+#include <linux/device.h>
+#include <asm/uv/uv_hub.h>
+#include "../sgi-gru/grukservices.h"
+#include "xp.h"
+
+/*
+ * Convert a virtual memory address to a physical memory address.
+ */
+static unsigned long
+xp_pa_uv(void *addr)
+{
+ return uv_gpa(addr);
+}
+
+static enum xp_retval
+xp_remote_memcpy_uv(unsigned long dst_gpa, const unsigned long src_gpa,
+ size_t len)
+{
+ int ret;
+
+ ret = gru_copy_gpa(dst_gpa, src_gpa, len);
+ if (ret == 0)
+ return xpSuccess;
+
+ dev_err(xp, "gru_copy_gpa() failed, dst_gpa=0x%016lx src_gpa=0x%016lx "
+ "len=%ld\n", dst_gpa, src_gpa, len);
+ return xpGruCopyError;
+}
+
+static int
+xp_cpu_to_nasid_uv(int cpuid)
+{
+ /* ??? Is this same as sn2 nasid in mach/part bitmaps set up by SAL? */
+ return UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpuid));
+}
+
+enum xp_retval
+xp_init_uv(void)
+{
+ BUG_ON(!is_uv());
+
+ xp_max_npartitions = XP_MAX_NPARTITIONS_UV;
+ xp_partition_id = 0; /* !!! not correct value */
+ xp_region_size = 0; /* !!! not correct value */
+
+ xp_pa = xp_pa_uv;
+ xp_remote_memcpy = xp_remote_memcpy_uv;
+ xp_cpu_to_nasid = xp_cpu_to_nasid_uv;
+
+ return xpSuccess;
+}
+
+void
+xp_exit_uv(void)
+{
+ BUG_ON(!is_uv());
+}
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
new file mode 100644
index 0000000..bcf3782
--- /dev/null
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -0,0 +1,991 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2004-2009 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+/*
+ * Cross Partition Communication (XPC) structures and macros.
+ */
+
+#ifndef _DRIVERS_MISC_SGIXP_XPC_H
+#define _DRIVERS_MISC_SGIXP_XPC_H
+
+#include <linux/wait.h>
+#include <linux/completion.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include "xp.h"
+
+/*
+ * XPC Version numbers consist of a major and minor number. XPC can always
+ * talk to versions with same major #, and never talk to versions with a
+ * different major #.
+ */
+#define _XPC_VERSION(_maj, _min) (((_maj) << 4) | ((_min) & 0xf))
+#define XPC_VERSION_MAJOR(_v) ((_v) >> 4)
+#define XPC_VERSION_MINOR(_v) ((_v) & 0xf)
+
+/* define the frequency of the heartbeat and how often it's checked */
+#define XPC_HB_DEFAULT_INTERVAL 5 /* incr HB every x secs */
+#define XPC_HB_CHECK_DEFAULT_INTERVAL 20 /* check HB every x secs */
+
+/* define the process name of HB checker and the CPU it is pinned to */
+#define XPC_HB_CHECK_THREAD_NAME "xpc_hb"
+#define XPC_HB_CHECK_CPU 0
+
+/* define the process name of the discovery thread */
+#define XPC_DISCOVERY_THREAD_NAME "xpc_discovery"
+
+/*
+ * the reserved page
+ *
+ * SAL reserves one page of memory per partition for XPC. Though a full page
+ * in length (16384 bytes), its starting address is not page aligned, but it
+ * is cacheline aligned. The reserved page consists of the following:
+ *
+ * reserved page header
+ *
+ * The first two 64-byte cachelines of the reserved page contain the
+ * header (struct xpc_rsvd_page). Before SAL initialization has completed,
+ * SAL has set up the following fields of the reserved page header:
+ * SAL_signature, SAL_version, SAL_partid, and SAL_nasids_size. The
+ * other fields are set up by XPC. (xpc_rsvd_page points to the local
+ * partition's reserved page.)
+ *
+ * part_nasids mask
+ * mach_nasids mask
+ *
+ * SAL also sets up two bitmaps (or masks), one that reflects the actual
+ * nasids in this partition (part_nasids), and the other that reflects
+ * the actual nasids in the entire machine (mach_nasids). We're only
+ * interested in the even numbered nasids (which contain the processors
+ * and/or memory), so we only need half as many bits to represent the
+ * nasids. When mapping nasid to bit in a mask (or bit to nasid) be sure
+ * to either divide or multiply by 2. The part_nasids mask is located
+ * starting at the first cacheline following the reserved page header. The
+ * mach_nasids mask follows right after the part_nasids mask. The size in
+ * bytes of each mask is reflected by the reserved page header field
+ * 'SAL_nasids_size'. (Local partition's mask pointers are xpc_part_nasids
+ * and xpc_mach_nasids.)
+ *
+ * vars (ia64-sn2 only)
+ * vars part (ia64-sn2 only)
+ *
+ * Immediately following the mach_nasids mask are the XPC variables
+ * required by other partitions. First are those that are generic to all
+ * partitions (vars), followed on the next available cacheline by those
+ * which are partition specific (vars part). These are setup by XPC.
+ * (Local partition's vars pointers are xpc_vars and xpc_vars_part.)
+ *
+ * Note: Until 'ts_jiffies' is set non-zero, the partition XPC code has not been
+ * initialized.
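+ *
+ * A worked example of the nasid-to-bit mapping described above: since
+ * only the even numbered nasids are represented, nasid 10 corresponds
+ * to bit 5 (10 / 2) of part_nasids/mach_nasids, and bit 5 maps back to
+ * nasid 10 (5 * 2).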
+ */
+struct xpc_rsvd_page {
+ u64 SAL_signature; /* SAL: unique signature */
+ u64 SAL_version; /* SAL: version */
+ short SAL_partid; /* SAL: partition ID */
+ short max_npartitions; /* value of XPC_MAX_PARTITIONS */
+ u8 version;
+ u8 pad1[3]; /* align to next u64 in 1st 64-byte cacheline */
+ union {
+ unsigned long vars_pa; /* phys address of struct xpc_vars */
+ unsigned long activate_mq_gpa; /* gru phy addr of activate_mq */
+ } sn;
+ unsigned long ts_jiffies; /* timestamp when rsvd pg was setup by XPC */
+ u64 pad2[10]; /* align to last u64 in 2nd 64-byte cacheline */
+ u64 SAL_nasids_size; /* SAL: size of each nasid mask in bytes */
+};
+
+#define XPC_RP_VERSION _XPC_VERSION(2, 0) /* version 2.0 of the reserved page */
+
+/*
+ * Define the structures by which XPC variables can be exported to other
+ * partitions. (There are two: struct xpc_vars and struct xpc_vars_part)
+ */
+
+/*
+ * The following structure describes the partition generic variables
+ * needed by other partitions in order to properly initialize.
+ *
+ * struct xpc_vars version number also applies to struct xpc_vars_part.
+ * Changes to either structure and/or related functionality should be
+ * reflected by incrementing either the major or minor version numbers
+ * of struct xpc_vars.
+ */
+struct xpc_vars_sn2 {
+ u8 version;
+ u64 heartbeat;
+ DECLARE_BITMAP(heartbeating_to_mask, XP_MAX_NPARTITIONS_SN2);
+ u64 heartbeat_offline; /* if 0, heartbeat should be changing */
+ int activate_IRQ_nasid;
+ int activate_IRQ_phys_cpuid;
+ unsigned long vars_part_pa;
+ unsigned long amos_page_pa;/* paddr of page of amos from MSPEC driver */
+ struct amo *amos_page; /* vaddr of page of amos from MSPEC driver */
+};
+
+#define XPC_V_VERSION _XPC_VERSION(3, 1) /* version 3.1 of the cross vars */
+
+/*
+ * The following structure describes the per partition specific variables.
+ *
+ * An array of these structures, one per partition, will be defined. As a
+ * partition becomes active XPC will copy the array entry corresponding to
+ * itself from that partition. It is desirable that the size of this structure
+ * evenly divides into a 128-byte cacheline, such that none of the entries in
+ * this array crosses a 128-byte cacheline boundary. As it is now, each entry
+ * occupies 64-bytes.
+ */
+struct xpc_vars_part_sn2 {
+ u64 magic;
+
+ unsigned long openclose_args_pa; /* phys addr of open and close args */
+ unsigned long GPs_pa; /* physical address of Get/Put values */
+
+ unsigned long chctl_amo_pa; /* physical address of chctl flags' amo */
+
+ int notify_IRQ_nasid; /* nasid of where to send notify IRQs */
+ int notify_IRQ_phys_cpuid; /* CPUID of where to send notify IRQs */
+
+ u8 nchannels; /* #of defined channels supported */
+
+ u8 reserved[23]; /* pad to a full 64 bytes */
+};
+
+/*
+ * The vars_part MAGIC numbers play a part in the first contact protocol.
+ *
+ * MAGIC1 indicates that the per partition specific variables for a remote
+ * partition have been initialized by this partition.
+ *
+ * MAGIC2 indicates that this partition has pulled the remote partition's
+ * per partition variables that pertain to this partition.
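+ *
+ * Conceptually, the pulling side validates first contact with a check
+ * along the lines of (a sketch, not the literal XPC code):
+ *
+ * if (pulled_entry->magic != XPC_VP_MAGIC1_SN2 &&
+ * pulled_entry->magic != XPC_VP_MAGIC2_SN2)
+ * return xpBadMagic;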
+ */ +#define XPC_VP_MAGIC1_SN2 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */ +#define XPC_VP_MAGIC2_SN2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */ + +/* the reserved page sizes and offsets */ + +#define XPC_RP_HEADER_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page)) +#define XPC_RP_VARS_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_vars_sn2)) + +#define XPC_RP_PART_NASIDS(_rp) ((unsigned long *)((u8 *)(_rp) + \ + XPC_RP_HEADER_SIZE)) +#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + \ + xpc_nasid_mask_nlongs) +#define XPC_RP_VARS(_rp) ((struct xpc_vars_sn2 *) \ + (XPC_RP_MACH_NASIDS(_rp) + \ + xpc_nasid_mask_nlongs)) + +/* + * The activate_mq is used to send/receive GRU messages that affect XPC's + * heartbeat, partition active state, and channel state. This is UV only. + */ +struct xpc_activate_mq_msghdr_uv { + short partid; /* sender's partid */ + u8 act_state; /* sender's act_state at time msg sent */ + u8 type; /* message's type */ + unsigned long rp_ts_jiffies; /* timestamp of sender's rp setup by XPC */ +}; + +/* activate_mq defined message types */ +#define XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV 0 +#define XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV 1 +#define XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV 2 +#define XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV 3 + +#define XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV 4 +#define XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV 5 + +#define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV 6 +#define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV 7 +#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV 8 +#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV 9 + +#define XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV 10 +#define XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV 11 + +struct xpc_activate_mq_msg_uv { + struct xpc_activate_mq_msghdr_uv hdr; +}; + +struct xpc_activate_mq_msg_heartbeat_req_uv { + struct xpc_activate_mq_msghdr_uv hdr; + u64 heartbeat; +}; + +struct xpc_activate_mq_msg_activate_req_uv { + struct xpc_activate_mq_msghdr_uv hdr; + unsigned long rp_gpa; + unsigned long activate_mq_gpa; +}; + +struct xpc_activate_mq_msg_deactivate_req_uv { + struct xpc_activate_mq_msghdr_uv hdr; + enum xp_retval reason; +}; + +struct xpc_activate_mq_msg_chctl_closerequest_uv { + struct xpc_activate_mq_msghdr_uv hdr; + short ch_number; + enum xp_retval reason; +}; + +struct xpc_activate_mq_msg_chctl_closereply_uv { + struct xpc_activate_mq_msghdr_uv hdr; + short ch_number; +}; + +struct xpc_activate_mq_msg_chctl_openrequest_uv { + struct xpc_activate_mq_msghdr_uv hdr; + short ch_number; + short entry_size; /* size of notify_mq's GRU messages */ + short local_nentries; /* ??? Is this needed? What is? */ +}; + +struct xpc_activate_mq_msg_chctl_openreply_uv { + struct xpc_activate_mq_msghdr_uv hdr; + short ch_number; + short remote_nentries; /* ??? Is this needed? What is? */ + short local_nentries; /* ??? Is this needed? What is? */ + unsigned long local_notify_mq_gpa; +}; + +/* + * Functions registered by add_timer() or called by kernel_thread() only + * allow for a single 64-bit argument. The following macros can be used to + * pack and unpack two (32-bit, 16-bit or 8-bit) arguments into or out from + * the passed argument. + */ +#define XPC_PACK_ARGS(_arg1, _arg2) \ + ((((u64)_arg1) & 0xffffffff) | \ + ((((u64)_arg2) & 0xffffffff) << 32)) + +#define XPC_UNPACK_ARG1(_args) (((u64)_args) & 0xffffffff) +#define XPC_UNPACK_ARG2(_args) ((((u64)_args) >> 32) & 0xffffffff) + +/* + * Define a Get/Put value pair (pointers) used with a message queue. 
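+ *
+ * Both values are free-running counters rather than slot indexes;
+ * conceptually a counter maps to a message queue slot as
+ * (value % nentries), so with nentries = 8 a put value of 13 refers to
+ * slot 5, and put - get is the number of entries currently in flight.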
+ */ +struct xpc_gp_sn2 { + s64 get; /* Get value */ + s64 put; /* Put value */ +}; + +#define XPC_GP_SIZE \ + L1_CACHE_ALIGN(sizeof(struct xpc_gp_sn2) * XPC_MAX_NCHANNELS) + +/* + * Define a structure that contains arguments associated with opening and + * closing a channel. + */ +struct xpc_openclose_args { + u16 reason; /* reason why channel is closing */ + u16 entry_size; /* sizeof each message entry */ + u16 remote_nentries; /* #of message entries in remote msg queue */ + u16 local_nentries; /* #of message entries in local msg queue */ + unsigned long local_msgqueue_pa; /* phys addr of local message queue */ +}; + +#define XPC_OPENCLOSE_ARGS_SIZE \ + L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * \ + XPC_MAX_NCHANNELS) + + +/* + * Structures to define a fifo singly-linked list. + */ + +struct xpc_fifo_entry_uv { + struct xpc_fifo_entry_uv *next; +}; + +struct xpc_fifo_head_uv { + struct xpc_fifo_entry_uv *first; + struct xpc_fifo_entry_uv *last; + spinlock_t lock; + int n_entries; +}; + +/* + * Define a sn2 styled message. + * + * A user-defined message resides in the payload area. The max size of the + * payload is defined by the user via xpc_connect(). + * + * The size of a message entry (within a message queue) must be a 128-byte + * cacheline sized multiple in order to facilitate the BTE transfer of messages + * from one message queue to another. + */ +struct xpc_msg_sn2 { + u8 flags; /* FOR XPC INTERNAL USE ONLY */ + u8 reserved[7]; /* FOR XPC INTERNAL USE ONLY */ + s64 number; /* FOR XPC INTERNAL USE ONLY */ + + u64 payload; /* user defined portion of message */ +}; + +/* struct xpc_msg_sn2 flags */ + +#define XPC_M_SN2_DONE 0x01 /* msg has been received/consumed */ +#define XPC_M_SN2_READY 0x02 /* msg is ready to be sent */ +#define XPC_M_SN2_INTERRUPT 0x04 /* send interrupt when msg consumed */ + +/* + * The format of a uv XPC notify_mq GRU message is as follows: + * + * A user-defined message resides in the payload area. The max size of the + * payload is defined by the user via xpc_connect(). + * + * The size of a message (payload and header) sent via the GRU must be either 1 + * or 2 GRU_CACHE_LINE_BYTES in length. + */ + +struct xpc_notify_mq_msghdr_uv { + union { + unsigned int gru_msg_hdr; /* FOR GRU INTERNAL USE ONLY */ + struct xpc_fifo_entry_uv next; /* FOR XPC INTERNAL USE ONLY */ + } u; + short partid; /* FOR XPC INTERNAL USE ONLY */ + u8 ch_number; /* FOR XPC INTERNAL USE ONLY */ + u8 size; /* FOR XPC INTERNAL USE ONLY */ + unsigned int msg_slot_number; /* FOR XPC INTERNAL USE ONLY */ +}; + +struct xpc_notify_mq_msg_uv { + struct xpc_notify_mq_msghdr_uv hdr; + unsigned long payload; +}; + +/* + * Define sn2's notify entry. + * + * This is used to notify a message's sender that their message was received + * and consumed by the intended recipient. + */ +struct xpc_notify_sn2 { + u8 type; /* type of notification */ + + /* the following two fields are only used if type == XPC_N_CALL */ + xpc_notify_func func; /* user's notify function */ + void *key; /* pointer to user's key */ +}; + +/* struct xpc_notify_sn2 type of notification */ + +#define XPC_N_CALL 0x01 /* notify function provided by user */ + +/* + * Define uv's version of the notify entry. It additionally is used to allocate + * a msg slot on the remote partition into which is copied a sent message. 
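+ *
+ * Conceptually, the send side pulls an entry from the channel's
+ * msg_slot_free_list, records the caller's notify func/key in it, and
+ * returns the entry to the free list once the receiver acknowledges
+ * that the slot's message has been consumed.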
+ */ +struct xpc_send_msg_slot_uv { + struct xpc_fifo_entry_uv next; + unsigned int msg_slot_number; + xpc_notify_func func; /* user's notify function */ + void *key; /* pointer to user's key */ +}; + +/* + * Define the structure that manages all the stuff required by a channel. In + * particular, they are used to manage the messages sent across the channel. + * + * This structure is private to a partition, and is NOT shared across the + * partition boundary. + * + * There is an array of these structures for each remote partition. It is + * allocated at the time a partition becomes active. The array contains one + * of these structures for each potential channel connection to that partition. + */ + +/* + * The following is sn2 only. + * + * Each channel structure manages two message queues (circular buffers). + * They are allocated at the time a channel connection is made. One of + * these message queues (local_msgqueue) holds the locally created messages + * that are destined for the remote partition. The other of these message + * queues (remote_msgqueue) is a locally cached copy of the remote partition's + * own local_msgqueue. + * + * The following is a description of the Get/Put pointers used to manage these + * two message queues. Consider the local_msgqueue to be on one partition + * and the remote_msgqueue to be its cached copy on another partition. A + * description of what each of the lettered areas contains is included. + * + * + * local_msgqueue remote_msgqueue + * + * |/////////| |/////////| + * w_remote_GP.get --> +---------+ |/////////| + * | F | |/////////| + * remote_GP.get --> +---------+ +---------+ <-- local_GP->get + * | | | | + * | | | E | + * | | | | + * | | +---------+ <-- w_local_GP.get + * | B | |/////////| + * | | |////D////| + * | | |/////////| + * | | +---------+ <-- w_remote_GP.put + * | | |////C////| + * local_GP->put --> +---------+ +---------+ <-- remote_GP.put + * | | |/////////| + * | A | |/////////| + * | | |/////////| + * w_local_GP.put --> +---------+ |/////////| + * |/////////| |/////////| + * + * + * ( remote_GP.[get|put] are cached copies of the remote + * partition's local_GP->[get|put], and thus their values can + * lag behind their counterparts on the remote partition. ) + * + * + * A - Messages that have been allocated, but have not yet been sent to the + * remote partition. + * + * B - Messages that have been sent, but have not yet been acknowledged by the + * remote partition as having been received. + * + * C - Area that needs to be prepared for the copying of sent messages, by + * the clearing of the message flags of any previously received messages. + * + * D - Area into which sent messages are to be copied from the remote + * partition's local_msgqueue and then delivered to their intended + * recipients. [ To allow for a multi-message copy, another pointer + * (next_msg_to_pull) has been added to keep track of the next message + * number needing to be copied (pulled). It chases after w_remote_GP.put. + * Any messages lying between w_local_GP.get and next_msg_to_pull have + * been copied and are ready to be delivered. ] + * + * E - Messages that have been copied and delivered, but have not yet been + * acknowledged by the recipient as having been received. + * + * F - Messages that have been acknowledged, but XPC has not yet notified the + * sender that the message was received by its intended recipient. 
+ * This is also an area that needs to be prepared for the allocating of + * new messages, by the clearing of the message flags of the acknowledged + * messages. + */ + +struct xpc_channel_sn2 { + struct xpc_openclose_args *local_openclose_args; /* args passed on */ + /* opening or closing of channel */ + + void *local_msgqueue_base; /* base address of kmalloc'd space */ + struct xpc_msg_sn2 *local_msgqueue; /* local message queue */ + void *remote_msgqueue_base; /* base address of kmalloc'd space */ + struct xpc_msg_sn2 *remote_msgqueue; /* cached copy of remote */ + /* partition's local message queue */ + unsigned long remote_msgqueue_pa; /* phys addr of remote partition's */ + /* local message queue */ + + struct xpc_notify_sn2 *notify_queue;/* notify queue for messages sent */ + + /* various flavors of local and remote Get/Put values */ + + struct xpc_gp_sn2 *local_GP; /* local Get/Put values */ + struct xpc_gp_sn2 remote_GP; /* remote Get/Put values */ + struct xpc_gp_sn2 w_local_GP; /* working local Get/Put values */ + struct xpc_gp_sn2 w_remote_GP; /* working remote Get/Put values */ + s64 next_msg_to_pull; /* Put value of next msg to pull */ + + struct mutex msg_to_pull_mutex; /* next msg to pull serialization */ +}; + +struct xpc_channel_uv { + unsigned long remote_notify_mq_gpa; /* gru phys address of remote */ + /* partition's notify mq */ + + struct xpc_send_msg_slot_uv *send_msg_slots; + void *recv_msg_slots; /* each slot will hold a xpc_notify_mq_msg_uv */ + /* structure plus the user's payload */ + + struct xpc_fifo_head_uv msg_slot_free_list; + struct xpc_fifo_head_uv recv_msg_list; /* deliverable payloads */ +}; + +struct xpc_channel { + short partid; /* ID of remote partition connected */ + spinlock_t lock; /* lock for updating this structure */ + unsigned int flags; /* general flags */ + + enum xp_retval reason; /* reason why channel is disconnect'g */ + int reason_line; /* line# disconnect initiated from */ + + u16 number; /* channel # */ + + u16 entry_size; /* sizeof each msg entry */ + u16 local_nentries; /* #of msg entries in local msg queue */ + u16 remote_nentries; /* #of msg entries in remote msg queue */ + + atomic_t references; /* #of external references to queues */ + + atomic_t n_on_msg_allocate_wq; /* #on msg allocation wait queue */ + wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */ + + u8 delayed_chctl_flags; /* chctl flags received, but delayed */ + /* action until channel disconnected */ + + atomic_t n_to_notify; /* #of msg senders to notify */ + + xpc_channel_func func; /* user's channel function */ + void *key; /* pointer to user's key */ + + struct completion wdisconnect_wait; /* wait for channel disconnect */ + + /* kthread management related fields */ + + atomic_t kthreads_assigned; /* #of kthreads assigned to channel */ + u32 kthreads_assigned_limit; /* limit on #of kthreads assigned */ + atomic_t kthreads_idle; /* #of kthreads idle waiting for work */ + u32 kthreads_idle_limit; /* limit on #of kthreads idle */ + atomic_t kthreads_active; /* #of kthreads actively working */ + + wait_queue_head_t idle_wq; /* idle kthread wait queue */ + + union { + struct xpc_channel_sn2 sn2; + struct xpc_channel_uv uv; + } sn; + +} ____cacheline_aligned; + +/* struct xpc_channel flags */ + +#define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */ + +#define XPC_C_ROPENREPLY 0x00000002 /* remote open channel reply */ +#define XPC_C_OPENREPLY 0x00000004 /* local open channel reply */ +#define XPC_C_ROPENREQUEST 0x00000008 /* remote open 
channel request */ +#define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */ + +#define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */ +#define XPC_C_CONNECTEDCALLOUT 0x00000040 /* connected callout initiated */ +#define XPC_C_CONNECTEDCALLOUT_MADE \ + 0x00000080 /* connected callout completed */ +#define XPC_C_CONNECTED 0x00000100 /* local channel is connected */ +#define XPC_C_CONNECTING 0x00000200 /* channel is being connected */ + +#define XPC_C_RCLOSEREPLY 0x00000400 /* remote close channel reply */ +#define XPC_C_CLOSEREPLY 0x00000800 /* local close channel reply */ +#define XPC_C_RCLOSEREQUEST 0x00001000 /* remote close channel request */ +#define XPC_C_CLOSEREQUEST 0x00002000 /* local close channel request */ + +#define XPC_C_DISCONNECTED 0x00004000 /* channel is disconnected */ +#define XPC_C_DISCONNECTING 0x00008000 /* channel is being disconnected */ +#define XPC_C_DISCONNECTINGCALLOUT \ + 0x00010000 /* disconnecting callout initiated */ +#define XPC_C_DISCONNECTINGCALLOUT_MADE \ + 0x00020000 /* disconnecting callout completed */ +#define XPC_C_WDISCONNECT 0x00040000 /* waiting for channel disconnect */ + +/* + * The channel control flags (chctl) union consists of a 64-bit variable which + * is divided up into eight bytes, ordered from right to left. Byte zero + * pertains to channel 0, byte one to channel 1, and so on. Each channel's byte + * can have one or more of the chctl flags set in it. + */ + +union xpc_channel_ctl_flags { + u64 all_flags; + u8 flags[XPC_MAX_NCHANNELS]; +}; + +/* chctl flags */ +#define XPC_CHCTL_CLOSEREQUEST 0x01 +#define XPC_CHCTL_CLOSEREPLY 0x02 +#define XPC_CHCTL_OPENREQUEST 0x04 +#define XPC_CHCTL_OPENREPLY 0x08 +#define XPC_CHCTL_MSGREQUEST 0x10 + +#define XPC_OPENCLOSE_CHCTL_FLAGS \ + (XPC_CHCTL_CLOSEREQUEST | XPC_CHCTL_CLOSEREPLY | \ + XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY) +#define XPC_MSG_CHCTL_FLAGS XPC_CHCTL_MSGREQUEST + +static inline int +xpc_any_openclose_chctl_flags_set(union xpc_channel_ctl_flags *chctl) +{ + int ch_number; + + for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++) { + if (chctl->flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS) + return 1; + } + return 0; +} + +static inline int +xpc_any_msg_chctl_flags_set(union xpc_channel_ctl_flags *chctl) +{ + int ch_number; + + for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++) { + if (chctl->flags[ch_number] & XPC_MSG_CHCTL_FLAGS) + return 1; + } + return 0; +} + +/* + * Manage channels on a partition basis. There is one of these structures + * for each partition (a partition will never utilize the structure that + * represents itself). 
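+ *
+ * The array index is the partition ID, so the entry for partition
+ * 'partid' is xpc_partitions[partid]; the XPC_PARTID() macro defined
+ * later in this header recovers the ID from a pointer to an entry.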
+ */
+
+struct xpc_partition_sn2 {
+ unsigned long remote_amos_page_pa; /* paddr of partition's amos page */
+ int activate_IRQ_nasid; /* active partition's act/deact nasid */
+ int activate_IRQ_phys_cpuid; /* active part's act/deact phys cpuid */
+
+ unsigned long remote_vars_pa; /* phys addr of partition's vars */
+ unsigned long remote_vars_part_pa; /* paddr of partition's vars part */
+ u8 remote_vars_version; /* version# of partition's vars */
+
+ void *local_GPs_base; /* base address of kmalloc'd space */
+ struct xpc_gp_sn2 *local_GPs; /* local Get/Put values */
+ void *remote_GPs_base; /* base address of kmalloc'd space */
+ struct xpc_gp_sn2 *remote_GPs; /* copy of remote partition's local */
+ /* Get/Put values */
+ unsigned long remote_GPs_pa; /* phys addr of remote partition's local */
+ /* Get/Put values */
+
+ void *local_openclose_args_base; /* base address of kmalloc'd space */
+ struct xpc_openclose_args *local_openclose_args; /* local's args */
+ unsigned long remote_openclose_args_pa; /* phys addr of remote's args */
+
+ int notify_IRQ_nasid; /* nasid of where to send notify IRQs */
+ int notify_IRQ_phys_cpuid; /* CPUID of where to send notify IRQs */
+ char notify_IRQ_owner[8]; /* notify IRQ's owner's name */
+
+ struct amo *remote_chctl_amo_va; /* addr of remote chctl flags' amo */
+ struct amo *local_chctl_amo_va; /* address of chctl flags' amo */
+
+ struct timer_list dropped_notify_IRQ_timer; /* dropped IRQ timer */
+};
+
+struct xpc_partition_uv {
+ unsigned long remote_activate_mq_gpa; /* gru phys address of remote */
+ /* partition's activate mq */
+ spinlock_t flags_lock; /* protect updating of flags */
+ unsigned int flags; /* general flags */
+ u8 remote_act_state; /* remote partition's act_state */
+ u8 act_state_req; /* act_state request from remote partition */
+ enum xp_retval reason; /* reason for deactivate act_state request */
+ u64 heartbeat; /* incremented by remote partition */
+};
+
+/* struct xpc_partition_uv flags */
+
+#define XPC_P_HEARTBEAT_OFFLINE_UV 0x00000001
+#define XPC_P_ENGAGED_UV 0x00000002
+
+/* struct xpc_partition_uv act_state change requests */
+
+#define XPC_P_ASR_ACTIVATE_UV 0x01
+#define XPC_P_ASR_REACTIVATE_UV 0x02
+#define XPC_P_ASR_DEACTIVATE_UV 0x03
+
+struct xpc_partition {
+
+ /* XPC HB infrastructure */
+
+ u8 remote_rp_version; /* version# of partition's rsvd pg */
+ unsigned long remote_rp_ts_jiffies; /* timestamp when rsvd pg setup */
+ unsigned long remote_rp_pa; /* phys addr of partition's rsvd pg */
+ u64 last_heartbeat; /* HB at last read */
+ u32 activate_IRQ_rcvd; /* IRQs since activation */
+ spinlock_t act_lock; /* protect updating of act_state */
+ u8 act_state; /* from XPC HB viewpoint */
+ enum xp_retval reason; /* reason partition is deactivating */
+ int reason_line; /* line# deactivation initiated from */
+
+ unsigned long disengage_timeout; /* timeout in jiffies */
+ struct timer_list disengage_timer;
+
+ /* XPC infrastructure referencing and teardown control */
+
+ u8 setup_state; /* infrastructure setup state */
+ wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */
+ atomic_t references; /* #of references to infrastructure */
+
+ u8 nchannels; /* #of defined channels supported */
+ atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
+ atomic_t nchannels_engaged; /* #of channels engaged with remote part */
+ struct xpc_channel *channels; /* array of channel structures */
+
+ /* fields used for managing channel availability and activity */
+
+ union
xpc_channel_ctl_flags chctl; /* chctl flags yet to be processed */ + spinlock_t chctl_lock; /* chctl flags lock */ + + void *remote_openclose_args_base; /* base address of kmalloc'd space */ + struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */ + /* args */ + + /* channel manager related fields */ + + atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */ + wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */ + + union { + struct xpc_partition_sn2 sn2; + struct xpc_partition_uv uv; + } sn; + +} ____cacheline_aligned; + +/* struct xpc_partition act_state values (for XPC HB) */ + +#define XPC_P_AS_INACTIVE 0x00 /* partition is not active */ +#define XPC_P_AS_ACTIVATION_REQ 0x01 /* created thread to activate */ +#define XPC_P_AS_ACTIVATING 0x02 /* activation thread started */ +#define XPC_P_AS_ACTIVE 0x03 /* xpc_partition_up() was called */ +#define XPC_P_AS_DEACTIVATING 0x04 /* partition deactivation initiated */ + +#define XPC_DEACTIVATE_PARTITION(_p, _reason) \ + xpc_deactivate_partition(__LINE__, (_p), (_reason)) + +/* struct xpc_partition setup_state values */ + +#define XPC_P_SS_UNSET 0x00 /* infrastructure was never setup */ +#define XPC_P_SS_SETUP 0x01 /* infrastructure is setup */ +#define XPC_P_SS_WTEARDOWN 0x02 /* waiting to teardown infrastructure */ +#define XPC_P_SS_TORNDOWN 0x03 /* infrastructure is torndown */ + +/* + * struct xpc_partition_sn2's dropped notify IRQ timer is set to wait the + * following interval #of seconds before checking for dropped notify IRQs. + * These can occur whenever an IRQ's associated amo write doesn't complete + * until after the IRQ was received. + */ +#define XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL (0.25 * HZ) + +/* number of seconds to wait for other partitions to disengage */ +#define XPC_DISENGAGE_DEFAULT_TIMELIMIT 90 + +/* interval in seconds to print 'waiting deactivation' messages */ +#define XPC_DEACTIVATE_PRINTMSG_INTERVAL 10 + +#define XPC_PARTID(_p) ((short)((_p) - &xpc_partitions[0])) + +/* found in xp_main.c */ +extern struct xpc_registration xpc_registrations[]; + +/* found in xpc_main.c */ +extern struct device *xpc_part; +extern struct device *xpc_chan; +extern int xpc_disengage_timelimit; +extern int xpc_disengage_timedout; +extern int xpc_activate_IRQ_rcvd; +extern spinlock_t xpc_activate_IRQ_rcvd_lock; +extern wait_queue_head_t xpc_activate_IRQ_wq; +extern void *xpc_heartbeating_to_mask; +extern void *xpc_kzalloc_cacheline_aligned(size_t, gfp_t, void **); +extern void xpc_activate_partition(struct xpc_partition *); +extern void xpc_activate_kthreads(struct xpc_channel *, int); +extern void xpc_create_kthreads(struct xpc_channel *, int, int); +extern void xpc_disconnect_wait(int); +extern int (*xpc_setup_partitions_sn) (void); +extern enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *, u64 *, + unsigned long *, + size_t *); +extern int (*xpc_setup_rsvd_page_sn) (struct xpc_rsvd_page *); +extern void (*xpc_heartbeat_init) (void); +extern void (*xpc_heartbeat_exit) (void); +extern void (*xpc_increment_heartbeat) (void); +extern void (*xpc_offline_heartbeat) (void); +extern void (*xpc_online_heartbeat) (void); +extern enum xp_retval (*xpc_get_remote_heartbeat) (struct xpc_partition *); +extern enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *); +extern u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *); +extern enum xp_retval (*xpc_setup_msg_structures) (struct xpc_channel *); +extern void (*xpc_teardown_msg_structures) (struct xpc_channel *); +extern void 
(*xpc_notify_senders_of_disconnect) (struct xpc_channel *); +extern void (*xpc_process_msg_chctl_flags) (struct xpc_partition *, int); +extern int (*xpc_n_of_deliverable_payloads) (struct xpc_channel *); +extern void *(*xpc_get_deliverable_payload) (struct xpc_channel *); +extern void (*xpc_request_partition_activation) (struct xpc_rsvd_page *, + unsigned long, int); +extern void (*xpc_request_partition_reactivation) (struct xpc_partition *); +extern void (*xpc_request_partition_deactivation) (struct xpc_partition *); +extern void (*xpc_cancel_partition_deactivation_request) ( + struct xpc_partition *); +extern void (*xpc_process_activate_IRQ_rcvd) (void); +extern enum xp_retval (*xpc_setup_ch_structures_sn) (struct xpc_partition *); +extern void (*xpc_teardown_ch_structures_sn) (struct xpc_partition *); + +extern void (*xpc_indicate_partition_engaged) (struct xpc_partition *); +extern int (*xpc_partition_engaged) (short); +extern int (*xpc_any_partition_engaged) (void); +extern void (*xpc_indicate_partition_disengaged) (struct xpc_partition *); +extern void (*xpc_assume_partition_disengaged) (short); + +extern void (*xpc_send_chctl_closerequest) (struct xpc_channel *, + unsigned long *); +extern void (*xpc_send_chctl_closereply) (struct xpc_channel *, + unsigned long *); +extern void (*xpc_send_chctl_openrequest) (struct xpc_channel *, + unsigned long *); +extern void (*xpc_send_chctl_openreply) (struct xpc_channel *, unsigned long *); + +extern void (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *, + unsigned long); + +extern enum xp_retval (*xpc_send_payload) (struct xpc_channel *, u32, void *, + u16, u8, xpc_notify_func, void *); +extern void (*xpc_received_payload) (struct xpc_channel *, void *); + +/* found in xpc_sn2.c */ +extern int xpc_init_sn2(void); +extern void xpc_exit_sn2(void); + +/* found in xpc_uv.c */ +extern int xpc_init_uv(void); +extern void xpc_exit_uv(void); + +/* found in xpc_partition.c */ +extern int xpc_exiting; +extern int xpc_nasid_mask_nlongs; +extern struct xpc_rsvd_page *xpc_rsvd_page; +extern unsigned long *xpc_mach_nasids; +extern struct xpc_partition *xpc_partitions; +extern void *xpc_kmalloc_cacheline_aligned(size_t, gfp_t, void **); +extern int xpc_setup_rsvd_page(void); +extern void xpc_teardown_rsvd_page(void); +extern int xpc_identify_activate_IRQ_sender(void); +extern int xpc_partition_disengaged(struct xpc_partition *); +extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *); +extern void xpc_mark_partition_inactive(struct xpc_partition *); +extern void xpc_discovery(void); +extern enum xp_retval xpc_get_remote_rp(int, unsigned long *, + struct xpc_rsvd_page *, + unsigned long *); +extern void xpc_deactivate_partition(const int, struct xpc_partition *, + enum xp_retval); +extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *); + +/* found in xpc_channel.c */ +extern void xpc_initiate_connect(int); +extern void xpc_initiate_disconnect(int); +extern enum xp_retval xpc_allocate_msg_wait(struct xpc_channel *); +extern enum xp_retval xpc_initiate_send(short, int, u32, void *, u16); +extern enum xp_retval xpc_initiate_send_notify(short, int, u32, void *, u16, + xpc_notify_func, void *); +extern void xpc_initiate_received(short, int, void *); +extern void xpc_process_sent_chctl_flags(struct xpc_partition *); +extern void xpc_connected_callout(struct xpc_channel *); +extern void xpc_deliver_payload(struct xpc_channel *); +extern void xpc_disconnect_channel(const int, struct xpc_channel *, + enum xp_retval, unsigned long *); 
+extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval); +extern void xpc_partition_going_down(struct xpc_partition *, enum xp_retval); + +static inline int +xpc_hb_allowed(short partid, void *heartbeating_to_mask) +{ + return test_bit(partid, heartbeating_to_mask); +} + +static inline int +xpc_any_hbs_allowed(void) +{ + DBUG_ON(xpc_heartbeating_to_mask == NULL); + return !bitmap_empty(xpc_heartbeating_to_mask, xp_max_npartitions); +} + +static inline void +xpc_allow_hb(short partid) +{ + DBUG_ON(xpc_heartbeating_to_mask == NULL); + set_bit(partid, xpc_heartbeating_to_mask); +} + +static inline void +xpc_disallow_hb(short partid) +{ + DBUG_ON(xpc_heartbeating_to_mask == NULL); + clear_bit(partid, xpc_heartbeating_to_mask); +} + +static inline void +xpc_disallow_all_hbs(void) +{ + DBUG_ON(xpc_heartbeating_to_mask == NULL); + bitmap_zero(xpc_heartbeating_to_mask, xp_max_npartitions); +} + +static inline void +xpc_wakeup_channel_mgr(struct xpc_partition *part) +{ + if (atomic_inc_return(&part->channel_mgr_requests) == 1) + wake_up(&part->channel_mgr_wq); +} + +/* + * These next two inlines are used to keep us from tearing down a channel's + * msg queues while a thread may be referencing them. + */ +static inline void +xpc_msgqueue_ref(struct xpc_channel *ch) +{ + atomic_inc(&ch->references); +} + +static inline void +xpc_msgqueue_deref(struct xpc_channel *ch) +{ + s32 refs = atomic_dec_return(&ch->references); + + DBUG_ON(refs < 0); + if (refs == 0) + xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]); +} + +#define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \ + xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs) + +/* + * These two inlines are used to keep us from tearing down a partition's + * setup infrastructure while a thread may be referencing it. + */ +static inline void +xpc_part_deref(struct xpc_partition *part) +{ + s32 refs = atomic_dec_return(&part->references); + + DBUG_ON(refs < 0); + if (refs == 0 && part->setup_state == XPC_P_SS_WTEARDOWN) + wake_up(&part->teardown_wq); +} + +static inline int +xpc_part_ref(struct xpc_partition *part) +{ + int setup; + + atomic_inc(&part->references); + setup = (part->setup_state == XPC_P_SS_SETUP); + if (!setup) + xpc_part_deref(part); + + return setup; +} + +/* + * The following macro is to be used for the setting of the reason and + * reason_line fields in both the struct xpc_channel and struct xpc_partition + * structures. + */ +#define XPC_SET_REASON(_p, _reason, _line) \ + { \ + (_p)->reason = _reason; \ + (_p)->reason_line = _line; \ + } + +#endif /* _DRIVERS_MISC_SGIXP_XPC_H */ diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c new file mode 100644 index 0000000..9cd2ebe --- /dev/null +++ b/drivers/misc/sgi-xp/xpc_channel.c @@ -0,0 +1,988 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. + */ + +/* + * Cross Partition Communication (XPC) channel support. + * + * This is the part of XPC that manages the channels and + * sends/receives messages across them to/from other partitions. + * + */ + +#include <linux/device.h> +#include "xpc.h" + +/* + * Process a connect message from a remote partition. + * + * Note: xpc_process_connect() is expecting to be called with the + * spin_lock_irqsave held and will leave it locked upon return. 
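+ *
+ * That is, callers follow the pattern:
+ *
+ * spin_lock_irqsave(&ch->lock, irq_flags);
+ * xpc_process_connect(ch, &irq_flags);
+ * spin_unlock_irqrestore(&ch->lock, irq_flags);
+ *
+ * irq_flags is passed by pointer because this function may drop and
+ * retake the lock internally (around xpc_setup_msg_structures() and
+ * xpc_create_kthreads()).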
+ */ +static void +xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags) +{ + enum xp_retval ret; + + DBUG_ON(!spin_is_locked(&ch->lock)); + + if (!(ch->flags & XPC_C_OPENREQUEST) || + !(ch->flags & XPC_C_ROPENREQUEST)) { + /* nothing more to do for now */ + return; + } + DBUG_ON(!(ch->flags & XPC_C_CONNECTING)); + + if (!(ch->flags & XPC_C_SETUP)) { + spin_unlock_irqrestore(&ch->lock, *irq_flags); + ret = xpc_setup_msg_structures(ch); + spin_lock_irqsave(&ch->lock, *irq_flags); + + if (ret != xpSuccess) + XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags); + + ch->flags |= XPC_C_SETUP; + + if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) + return; + + DBUG_ON(ch->local_msgqueue == NULL); + DBUG_ON(ch->remote_msgqueue == NULL); + } + + if (!(ch->flags & XPC_C_OPENREPLY)) { + ch->flags |= XPC_C_OPENREPLY; + xpc_send_chctl_openreply(ch, irq_flags); + } + + if (!(ch->flags & XPC_C_ROPENREPLY)) + return; + + ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */ + + dev_info(xpc_chan, "channel %d to partition %d connected\n", + ch->number, ch->partid); + + spin_unlock_irqrestore(&ch->lock, *irq_flags); + xpc_create_kthreads(ch, 1, 0); + spin_lock_irqsave(&ch->lock, *irq_flags); +} + +/* + * spin_lock_irqsave() is expected to be held on entry. + */ +static void +xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_partition *part = &xpc_partitions[ch->partid]; + u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED); + + DBUG_ON(!spin_is_locked(&ch->lock)); + + if (!(ch->flags & XPC_C_DISCONNECTING)) + return; + + DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); + + /* make sure all activity has settled down first */ + + if (atomic_read(&ch->kthreads_assigned) > 0 || + atomic_read(&ch->references) > 0) { + return; + } + DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && + !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE)); + + if (part->act_state == XPC_P_AS_DEACTIVATING) { + /* can't proceed until the other side disengages from us */ + if (xpc_partition_engaged(ch->partid)) + return; + + } else { + + /* as long as the other side is up do the full protocol */ + + if (!(ch->flags & XPC_C_RCLOSEREQUEST)) + return; + + if (!(ch->flags & XPC_C_CLOSEREPLY)) { + ch->flags |= XPC_C_CLOSEREPLY; + xpc_send_chctl_closereply(ch, irq_flags); + } + + if (!(ch->flags & XPC_C_RCLOSEREPLY)) + return; + } + + /* wake those waiting for notify completion */ + if (atomic_read(&ch->n_to_notify) > 0) { + /* we do callout while holding ch->lock, callout can't block */ + xpc_notify_senders_of_disconnect(ch); + } + + /* both sides are disconnected now */ + + if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) { + spin_unlock_irqrestore(&ch->lock, *irq_flags); + xpc_disconnect_callout(ch, xpDisconnected); + spin_lock_irqsave(&ch->lock, *irq_flags); + } + + DBUG_ON(atomic_read(&ch->n_to_notify) != 0); + + /* it's now safe to free the channel's message queues */ + xpc_teardown_msg_structures(ch); + + ch->func = NULL; + ch->key = NULL; + ch->entry_size = 0; + ch->local_nentries = 0; + ch->remote_nentries = 0; + ch->kthreads_assigned_limit = 0; + ch->kthreads_idle_limit = 0; + + /* + * Mark the channel disconnected and clear all other flags, including + * XPC_C_SETUP (because of call to xpc_teardown_msg_structures()) but + * not including XPC_C_WDISCONNECT (if it was set). 
+ */ + ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT)); + + atomic_dec(&part->nchannels_active); + + if (channel_was_connected) { + dev_info(xpc_chan, "channel %d to partition %d disconnected, " + "reason=%d\n", ch->number, ch->partid, ch->reason); + } + + if (ch->flags & XPC_C_WDISCONNECT) { + /* we won't lose the CPU since we're holding ch->lock */ + complete(&ch->wdisconnect_wait); + } else if (ch->delayed_chctl_flags) { + if (part->act_state != XPC_P_AS_DEACTIVATING) { + /* time to take action on any delayed chctl flags */ + spin_lock(&part->chctl_lock); + part->chctl.flags[ch->number] |= + ch->delayed_chctl_flags; + spin_unlock(&part->chctl_lock); + } + ch->delayed_chctl_flags = 0; + } +} + +/* + * Process a change in the channel's remote connection state. + */ +static void +xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number, + u8 chctl_flags) +{ + unsigned long irq_flags; + struct xpc_openclose_args *args = + &part->remote_openclose_args[ch_number]; + struct xpc_channel *ch = &part->channels[ch_number]; + enum xp_retval reason; + + spin_lock_irqsave(&ch->lock, irq_flags); + +again: + + if ((ch->flags & XPC_C_DISCONNECTED) && + (ch->flags & XPC_C_WDISCONNECT)) { + /* + * Delay processing chctl flags until thread waiting disconnect + * has had a chance to see that the channel is disconnected. + */ + ch->delayed_chctl_flags |= chctl_flags; + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + + if (chctl_flags & XPC_CHCTL_CLOSEREQUEST) { + + dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREQUEST (reason=%d) received " + "from partid=%d, channel=%d\n", args->reason, + ch->partid, ch->number); + + /* + * If RCLOSEREQUEST is set, we're probably waiting for + * RCLOSEREPLY. We should find it and a ROPENREQUEST packed + * with this RCLOSEREQUEST in the chctl_flags. 
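+ *
+ * (With the chctl flag values defined in xpc.h, that packed case would
+ * be chctl_flags == (XPC_CHCTL_CLOSEREQUEST | XPC_CHCTL_CLOSEREPLY |
+ * XPC_CHCTL_OPENREQUEST), i.e. 0x07.)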
+ */ + + if (ch->flags & XPC_C_RCLOSEREQUEST) { + DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING)); + DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); + DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY)); + DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY); + + DBUG_ON(!(chctl_flags & XPC_CHCTL_CLOSEREPLY)); + chctl_flags &= ~XPC_CHCTL_CLOSEREPLY; + ch->flags |= XPC_C_RCLOSEREPLY; + + /* both sides have finished disconnecting */ + xpc_process_disconnect(ch, &irq_flags); + DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED)); + goto again; + } + + if (ch->flags & XPC_C_DISCONNECTED) { + if (!(chctl_flags & XPC_CHCTL_OPENREQUEST)) { + if (part->chctl.flags[ch_number] & + XPC_CHCTL_OPENREQUEST) { + + DBUG_ON(ch->delayed_chctl_flags != 0); + spin_lock(&part->chctl_lock); + part->chctl.flags[ch_number] |= + XPC_CHCTL_CLOSEREQUEST; + spin_unlock(&part->chctl_lock); + } + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + + XPC_SET_REASON(ch, 0, 0); + ch->flags &= ~XPC_C_DISCONNECTED; + + atomic_inc(&part->nchannels_active); + ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST); + } + + chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY); + + /* + * The meaningful CLOSEREQUEST connection state fields are: + * reason = reason connection is to be closed + */ + + ch->flags |= XPC_C_RCLOSEREQUEST; + + if (!(ch->flags & XPC_C_DISCONNECTING)) { + reason = args->reason; + if (reason <= xpSuccess || reason > xpUnknownReason) + reason = xpUnknownReason; + else if (reason == xpUnregistering) + reason = xpOtherUnregistering; + + XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); + + DBUG_ON(chctl_flags & XPC_CHCTL_CLOSEREPLY); + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + + xpc_process_disconnect(ch, &irq_flags); + } + + if (chctl_flags & XPC_CHCTL_CLOSEREPLY) { + + dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREPLY received from partid=" + "%d, channel=%d\n", ch->partid, ch->number); + + if (ch->flags & XPC_C_DISCONNECTED) { + DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING); + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + + DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); + + if (!(ch->flags & XPC_C_RCLOSEREQUEST)) { + if (part->chctl.flags[ch_number] & + XPC_CHCTL_CLOSEREQUEST) { + + DBUG_ON(ch->delayed_chctl_flags != 0); + spin_lock(&part->chctl_lock); + part->chctl.flags[ch_number] |= + XPC_CHCTL_CLOSEREPLY; + spin_unlock(&part->chctl_lock); + } + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + + ch->flags |= XPC_C_RCLOSEREPLY; + + if (ch->flags & XPC_C_CLOSEREPLY) { + /* both sides have finished disconnecting */ + xpc_process_disconnect(ch, &irq_flags); + } + } + + if (chctl_flags & XPC_CHCTL_OPENREQUEST) { + + dev_dbg(xpc_chan, "XPC_CHCTL_OPENREQUEST (entry_size=%d, " + "local_nentries=%d) received from partid=%d, " + "channel=%d\n", args->entry_size, args->local_nentries, + ch->partid, ch->number); + + if (part->act_state == XPC_P_AS_DEACTIVATING || + (ch->flags & XPC_C_ROPENREQUEST)) { + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + + if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) { + ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST; + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED | + XPC_C_OPENREQUEST))); + DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY | + XPC_C_OPENREPLY | XPC_C_CONNECTED)); + + /* + * The meaningful OPENREQUEST connection state fields are: + * entry_size = size of channel's messages in bytes + * local_nentries = remote partition's local_nentries + */ + if 
(args->entry_size == 0 || args->local_nentries == 0) { + /* assume OPENREQUEST was delayed by mistake */ + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + + ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING); + ch->remote_nentries = args->local_nentries; + + if (ch->flags & XPC_C_OPENREQUEST) { + if (args->entry_size != ch->entry_size) { + XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes, + &irq_flags); + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + } else { + ch->entry_size = args->entry_size; + + XPC_SET_REASON(ch, 0, 0); + ch->flags &= ~XPC_C_DISCONNECTED; + + atomic_inc(&part->nchannels_active); + } + + xpc_process_connect(ch, &irq_flags); + } + + if (chctl_flags & XPC_CHCTL_OPENREPLY) { + + dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY (local_msgqueue_pa=" + "0x%lx, local_nentries=%d, remote_nentries=%d) " + "received from partid=%d, channel=%d\n", + args->local_msgqueue_pa, args->local_nentries, + args->remote_nentries, ch->partid, ch->number); + + if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) { + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + if (!(ch->flags & XPC_C_OPENREQUEST)) { + XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError, + &irq_flags); + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + + DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST)); + DBUG_ON(ch->flags & XPC_C_CONNECTED); + + /* + * The meaningful OPENREPLY connection state fields are: + * local_msgqueue_pa = physical address of remote + * partition's local_msgqueue + * local_nentries = remote partition's local_nentries + * remote_nentries = remote partition's remote_nentries + */ + DBUG_ON(args->local_msgqueue_pa == 0); + DBUG_ON(args->local_nentries == 0); + DBUG_ON(args->remote_nentries == 0); + + ch->flags |= XPC_C_ROPENREPLY; + xpc_save_remote_msgqueue_pa(ch, args->local_msgqueue_pa); + + if (args->local_nentries < ch->remote_nentries) { + dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new " + "remote_nentries=%d, old remote_nentries=%d, " + "partid=%d, channel=%d\n", + args->local_nentries, ch->remote_nentries, + ch->partid, ch->number); + + ch->remote_nentries = args->local_nentries; + } + if (args->remote_nentries < ch->local_nentries) { + dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new " + "local_nentries=%d, old local_nentries=%d, " + "partid=%d, channel=%d\n", + args->remote_nentries, ch->local_nentries, + ch->partid, ch->number); + + ch->local_nentries = args->remote_nentries; + } + + xpc_process_connect(ch, &irq_flags); + } + + spin_unlock_irqrestore(&ch->lock, irq_flags); +} + +/* + * Attempt to establish a channel connection to a remote partition. 
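+ *
+ * This only succeeds for channels that a local user has registered
+ * via xpc_connect(), whose registration supplies the channel function,
+ * key, message entry size and kthread limits copied into the channel
+ * below. A hypothetical registerer might have done something like
+ * (my_func, my_key and the numeric limits are made-up names/values;
+ * see xp.h for the exact prototype):
+ *
+ *	ret = xpc_connect(ch_number, my_func, my_key, msg_size,
+ *			  nentries, assigned_limit, idle_limit);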
+ */ +static enum xp_retval +xpc_connect_channel(struct xpc_channel *ch) +{ + unsigned long irq_flags; + struct xpc_registration *registration = &xpc_registrations[ch->number]; + + if (mutex_trylock(®istration->mutex) == 0) + return xpRetry; + + if (!XPC_CHANNEL_REGISTERED(ch->number)) { + mutex_unlock(®istration->mutex); + return xpUnregistered; + } + + spin_lock_irqsave(&ch->lock, irq_flags); + + DBUG_ON(ch->flags & XPC_C_CONNECTED); + DBUG_ON(ch->flags & XPC_C_OPENREQUEST); + + if (ch->flags & XPC_C_DISCONNECTING) { + spin_unlock_irqrestore(&ch->lock, irq_flags); + mutex_unlock(®istration->mutex); + return ch->reason; + } + + /* add info from the channel connect registration to the channel */ + + ch->kthreads_assigned_limit = registration->assigned_limit; + ch->kthreads_idle_limit = registration->idle_limit; + DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0); + DBUG_ON(atomic_read(&ch->kthreads_idle) != 0); + DBUG_ON(atomic_read(&ch->kthreads_active) != 0); + + ch->func = registration->func; + DBUG_ON(registration->func == NULL); + ch->key = registration->key; + + ch->local_nentries = registration->nentries; + + if (ch->flags & XPC_C_ROPENREQUEST) { + if (registration->entry_size != ch->entry_size) { + /* the local and remote sides aren't the same */ + + /* + * Because XPC_DISCONNECT_CHANNEL() can block we're + * forced to up the registration sema before we unlock + * the channel lock. But that's okay here because we're + * done with the part that required the registration + * sema. XPC_DISCONNECT_CHANNEL() requires that the + * channel lock be locked and will unlock and relock + * the channel lock as needed. + */ + mutex_unlock(®istration->mutex); + XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes, + &irq_flags); + spin_unlock_irqrestore(&ch->lock, irq_flags); + return xpUnequalMsgSizes; + } + } else { + ch->entry_size = registration->entry_size; + + XPC_SET_REASON(ch, 0, 0); + ch->flags &= ~XPC_C_DISCONNECTED; + + atomic_inc(&xpc_partitions[ch->partid].nchannels_active); + } + + mutex_unlock(®istration->mutex); + + /* initiate the connection */ + + ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING); + xpc_send_chctl_openrequest(ch, &irq_flags); + + xpc_process_connect(ch, &irq_flags); + + spin_unlock_irqrestore(&ch->lock, irq_flags); + + return xpSuccess; +} + +void +xpc_process_sent_chctl_flags(struct xpc_partition *part) +{ + unsigned long irq_flags; + union xpc_channel_ctl_flags chctl; + struct xpc_channel *ch; + int ch_number; + u32 ch_flags; + + chctl.all_flags = xpc_get_chctl_all_flags(part); + + /* + * Initiate channel connections for registered channels. + * + * For each connected channel that has pending messages activate idle + * kthreads and/or create new kthreads as needed. + */ + + for (ch_number = 0; ch_number < part->nchannels; ch_number++) { + ch = &part->channels[ch_number]; + + /* + * Process any open or close related chctl flags, and then deal + * with connecting or disconnecting the channel as required. 
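+ *
+ * The order matters: open/close flags are handled first so that
+ * ch->flags is current, a disconnecting channel is then finished off
+ * before anything else, and message-related flags are only examined
+ * once the channel is fully connected.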
+ */ + + if (chctl.flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS) { + xpc_process_openclose_chctl_flags(part, ch_number, + chctl.flags[ch_number]); + } + + ch_flags = ch->flags; /* need an atomic snapshot of flags */ + + if (ch_flags & XPC_C_DISCONNECTING) { + spin_lock_irqsave(&ch->lock, irq_flags); + xpc_process_disconnect(ch, &irq_flags); + spin_unlock_irqrestore(&ch->lock, irq_flags); + continue; + } + + if (part->act_state == XPC_P_AS_DEACTIVATING) + continue; + + if (!(ch_flags & XPC_C_CONNECTED)) { + if (!(ch_flags & XPC_C_OPENREQUEST)) { + DBUG_ON(ch_flags & XPC_C_SETUP); + (void)xpc_connect_channel(ch); + } else { + spin_lock_irqsave(&ch->lock, irq_flags); + xpc_process_connect(ch, &irq_flags); + spin_unlock_irqrestore(&ch->lock, irq_flags); + } + continue; + } + + /* + * Process any message related chctl flags, this may involve + * the activation of kthreads to deliver any pending messages + * sent from the other partition. + */ + + if (chctl.flags[ch_number] & XPC_MSG_CHCTL_FLAGS) + xpc_process_msg_chctl_flags(part, ch_number); + } +} + +/* + * XPC's heartbeat code calls this function to inform XPC that a partition is + * going down. XPC responds by tearing down the XPartition Communication + * infrastructure used for the just downed partition. + * + * XPC's heartbeat code will never call this function and xpc_partition_up() + * at the same time. Nor will it ever make multiple calls to either function + * at the same time. + */ +void +xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason) +{ + unsigned long irq_flags; + int ch_number; + struct xpc_channel *ch; + + dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n", + XPC_PARTID(part), reason); + + if (!xpc_part_ref(part)) { + /* infrastructure for this partition isn't currently set up */ + return; + } + + /* disconnect channels associated with the partition going down */ + + for (ch_number = 0; ch_number < part->nchannels; ch_number++) { + ch = &part->channels[ch_number]; + + xpc_msgqueue_ref(ch); + spin_lock_irqsave(&ch->lock, irq_flags); + + XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); + + spin_unlock_irqrestore(&ch->lock, irq_flags); + xpc_msgqueue_deref(ch); + } + + xpc_wakeup_channel_mgr(part); + + xpc_part_deref(part); +} + +/* + * Called by XP at the time of channel connection registration to cause + * XPC to establish connections to all currently active partitions. + */ +void +xpc_initiate_connect(int ch_number) +{ + short partid; + struct xpc_partition *part; + struct xpc_channel *ch; + + DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS); + + for (partid = 0; partid < xp_max_npartitions; partid++) { + part = &xpc_partitions[partid]; + + if (xpc_part_ref(part)) { + ch = &part->channels[ch_number]; + + /* + * Initiate the establishment of a connection on the + * newly registered channel to the remote partition. 
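+ *
+ * Nothing is connected here directly; waking the partition's channel
+ * manager suffices, as it will notice the new registration and call
+ * xpc_connect_channel() on our behalf.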
+ */ + xpc_wakeup_channel_mgr(part); + xpc_part_deref(part); + } + } +} + +void +xpc_connected_callout(struct xpc_channel *ch) +{ + /* let the registerer know that a connection has been established */ + + if (ch->func != NULL) { + dev_dbg(xpc_chan, "ch->func() called, reason=xpConnected, " + "partid=%d, channel=%d\n", ch->partid, ch->number); + + ch->func(xpConnected, ch->partid, ch->number, + (void *)(u64)ch->local_nentries, ch->key); + + dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, " + "partid=%d, channel=%d\n", ch->partid, ch->number); + } +} + +/* + * Called by XP at the time of channel connection unregistration to cause + * XPC to teardown all current connections for the specified channel. + * + * Before returning xpc_initiate_disconnect() will wait until all connections + * on the specified channel have been closed/torndown. So the caller can be + * assured that they will not be receiving any more callouts from XPC to the + * function they registered via xpc_connect(). + * + * Arguments: + * + * ch_number - channel # to unregister. + */ +void +xpc_initiate_disconnect(int ch_number) +{ + unsigned long irq_flags; + short partid; + struct xpc_partition *part; + struct xpc_channel *ch; + + DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS); + + /* initiate the channel disconnect for every active partition */ + for (partid = 0; partid < xp_max_npartitions; partid++) { + part = &xpc_partitions[partid]; + + if (xpc_part_ref(part)) { + ch = &part->channels[ch_number]; + xpc_msgqueue_ref(ch); + + spin_lock_irqsave(&ch->lock, irq_flags); + + if (!(ch->flags & XPC_C_DISCONNECTED)) { + ch->flags |= XPC_C_WDISCONNECT; + + XPC_DISCONNECT_CHANNEL(ch, xpUnregistering, + &irq_flags); + } + + spin_unlock_irqrestore(&ch->lock, irq_flags); + + xpc_msgqueue_deref(ch); + xpc_part_deref(part); + } + } + + xpc_disconnect_wait(ch_number); +} + +/* + * To disconnect a channel, and reflect it back to all who may be waiting. + * + * An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by + * xpc_process_disconnect(), and if set, XPC_C_WDISCONNECT is cleared by + * xpc_disconnect_wait(). + * + * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN. 
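+ *
+ * Callers normally go through the XPC_DISCONNECT_CHANNEL() macro,
+ * which supplies the call site automatically, roughly:
+ *
+ *	#define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \
+ *		xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs)
+ *
+ * so the line recorded via XPC_SET_REASON() identifies who initiated
+ * the disconnect.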
+ */ +void +xpc_disconnect_channel(const int line, struct xpc_channel *ch, + enum xp_retval reason, unsigned long *irq_flags) +{ + u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED); + + DBUG_ON(!spin_is_locked(&ch->lock)); + + if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) + return; + + DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED))); + + dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n", + reason, line, ch->partid, ch->number); + + XPC_SET_REASON(ch, reason, line); + + ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING); + /* some of these may not have been set */ + ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY | + XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY | + XPC_C_CONNECTING | XPC_C_CONNECTED); + + xpc_send_chctl_closerequest(ch, irq_flags); + + if (channel_was_connected) + ch->flags |= XPC_C_WASCONNECTED; + + spin_unlock_irqrestore(&ch->lock, *irq_flags); + + /* wake all idle kthreads so they can exit */ + if (atomic_read(&ch->kthreads_idle) > 0) { + wake_up_all(&ch->idle_wq); + + } else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && + !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) { + /* start a kthread that will do the xpDisconnecting callout */ + xpc_create_kthreads(ch, 1, 1); + } + + /* wake those waiting to allocate an entry from the local msg queue */ + if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) + wake_up(&ch->msg_allocate_wq); + + spin_lock_irqsave(&ch->lock, *irq_flags); +} + +void +xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason) +{ + /* + * Let the channel's registerer know that the channel is being + * disconnected. We don't want to do this if the registerer was never + * informed of a connection being made. + */ + + if (ch->func != NULL) { + dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, " + "channel=%d\n", reason, ch->partid, ch->number); + + ch->func(reason, ch->partid, ch->number, NULL, ch->key); + + dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, " + "channel=%d\n", reason, ch->partid, ch->number); + } +} + +/* + * Wait for a message entry to become available for the specified channel, + * but don't wait any longer than 1 jiffy. + */ +enum xp_retval +xpc_allocate_msg_wait(struct xpc_channel *ch) +{ + enum xp_retval ret; + + if (ch->flags & XPC_C_DISCONNECTING) { + DBUG_ON(ch->reason == xpInterrupted); + return ch->reason; + } + + atomic_inc(&ch->n_on_msg_allocate_wq); + ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1); + atomic_dec(&ch->n_on_msg_allocate_wq); + + if (ch->flags & XPC_C_DISCONNECTING) { + ret = ch->reason; + DBUG_ON(ch->reason == xpInterrupted); + } else if (ret == 0) { + ret = xpTimeout; + } else { + ret = xpInterrupted; + } + + return ret; +} + +/* + * Send a message that contains the user's payload on the specified channel + * connected to the specified partition. + * + * NOTE that this routine can sleep waiting for a message entry to become + * available. To not sleep, pass in the XPC_NOWAIT flag. + * + * Once sent, this routine will not wait for the message to be received, nor + * will notification be given when it does happen. + * + * Arguments: + * + * partid - ID of partition to which the channel is connected. + * ch_number - channel # to send message on. + * flags - see xp.h for valid flags. + * payload - pointer to the payload which is to be sent. + * payload_size - size of the payload in bytes. 
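+ *
+ * Users normally reach this routine through the xpc_send() wrapper
+ * installed by xpc_set_interface(); a hypothetical sender (buf and
+ * buf_size are made-up names) might do:
+ *
+ *	ret = xpc_send(partid, ch_number, XPC_WAIT, buf, buf_size);
+ *	if (ret != xpSuccess)
+ *		...handle the error...
+ *
+ * where XPC_WAIT permits sleeping for a free message entry.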
+ */ +enum xp_retval +xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload, + u16 payload_size) +{ + struct xpc_partition *part = &xpc_partitions[partid]; + enum xp_retval ret = xpUnknownReason; + + dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload, + partid, ch_number); + + DBUG_ON(partid < 0 || partid >= xp_max_npartitions); + DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); + DBUG_ON(payload == NULL); + + if (xpc_part_ref(part)) { + ret = xpc_send_payload(&part->channels[ch_number], flags, + payload, payload_size, 0, NULL, NULL); + xpc_part_deref(part); + } + + return ret; +} + +/* + * Send a message that contains the user's payload on the specified channel + * connected to the specified partition. + * + * NOTE that this routine can sleep waiting for a message entry to become + * available. To not sleep, pass in the XPC_NOWAIT flag. + * + * This routine will not wait for the message to be sent or received. + * + * Once the remote end of the channel has received the message, the function + * passed as an argument to xpc_initiate_send_notify() will be called. This + * allows the sender to free up or re-use any buffers referenced by the + * message, but does NOT mean the message has been processed at the remote + * end by a receiver. + * + * If this routine returns an error, the caller's function will NOT be called. + * + * Arguments: + * + * partid - ID of partition to which the channel is connected. + * ch_number - channel # to send message on. + * flags - see xp.h for valid flags. + * payload - pointer to the payload which is to be sent. + * payload_size - size of the payload in bytes. + * func - function to call with asynchronous notification of message + * receipt. THIS FUNCTION MUST BE NON-BLOCKING. + * key - user-defined key to be passed to the function when it's called. + */ +enum xp_retval +xpc_initiate_send_notify(short partid, int ch_number, u32 flags, void *payload, + u16 payload_size, xpc_notify_func func, void *key) +{ + struct xpc_partition *part = &xpc_partitions[partid]; + enum xp_retval ret = xpUnknownReason; + + dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload, + partid, ch_number); + + DBUG_ON(partid < 0 || partid >= xp_max_npartitions); + DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); + DBUG_ON(payload == NULL); + DBUG_ON(func == NULL); + + if (xpc_part_ref(part)) { + ret = xpc_send_payload(&part->channels[ch_number], flags, + payload, payload_size, XPC_N_CALL, func, + key); + xpc_part_deref(part); + } + return ret; +} + +/* + * Deliver a message's payload to its intended recipient. + */ +void +xpc_deliver_payload(struct xpc_channel *ch) +{ + void *payload; + + payload = xpc_get_deliverable_payload(ch); + if (payload != NULL) { + + /* + * This ref is taken to protect the payload itself from being + * freed before the user is finished with it, which the user + * indicates by calling xpc_initiate_received(). + */ + xpc_msgqueue_ref(ch); + + atomic_inc(&ch->kthreads_active); + + if (ch->func != NULL) { + dev_dbg(xpc_chan, "ch->func() called, payload=0x%p " + "partid=%d channel=%d\n", payload, ch->partid, + ch->number); + + /* deliver the message to its intended recipient */ + ch->func(xpMsgReceived, ch->partid, ch->number, payload, + ch->key); + + dev_dbg(xpc_chan, "ch->func() returned, payload=0x%p " + "partid=%d channel=%d\n", payload, ch->partid, + ch->number); + } + + atomic_dec(&ch->kthreads_active); + } +} + +/* + * Acknowledge receipt of a delivered message's payload. 
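+ *
+ * This is normally done from within the registerer's channel function,
+ * once the payload handed to it by xpc_deliver_payload() has been
+ * consumed, typically via the xpc_received() wrapper installed by
+ * xpc_set_interface().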
+ * + * This function, although called by users, does not call xpc_part_ref() to + * ensure that the partition infrastructure is in place. It relies on the + * fact that we called xpc_msgqueue_ref() in xpc_deliver_payload(). + * + * Arguments: + * + * partid - ID of partition to which the channel is connected. + * ch_number - channel # message received on. + * payload - pointer to the payload area allocated via + * xpc_initiate_send() or xpc_initiate_send_notify(). + */ +void +xpc_initiate_received(short partid, int ch_number, void *payload) +{ + struct xpc_partition *part = &xpc_partitions[partid]; + struct xpc_channel *ch; + + DBUG_ON(partid < 0 || partid >= xp_max_npartitions); + DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); + + ch = &part->channels[ch_number]; + xpc_received_payload(ch, payload); + + /* the call to xpc_msgqueue_ref() was done by xpc_deliver_payload() */ + xpc_msgqueue_deref(ch); +} diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c new file mode 100644 index 0000000..e8d5cfb --- /dev/null +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -0,0 +1,1399 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. + */ + +/* + * Cross Partition Communication (XPC) support - standard version. + * + * XPC provides a message passing capability that crosses partition + * boundaries. This module is made up of two parts: + * + * partition This part detects the presence/absence of other + * partitions. It provides a heartbeat and monitors + * the heartbeats of other partitions. + * + * channel This part manages the channels and sends/receives + * messages across them to/from other partitions. + * + * There are a couple of additional functions residing in XP, which + * provide an interface to XPC for its users. + * + * + * Caveats: + * + * . Currently on sn2, we have no way to determine which nasid an IRQ + * came from. Thus, xpc_send_IRQ_sn2() does a remote amo write + * followed by an IPI. The amo indicates where data is to be pulled + * from, so after the IPI arrives, the remote partition checks the amo + * word. The IPI can actually arrive before the amo however, so other + * code must periodically check for this case. Also, remote amo + * operations do not reliably time out. Thus we do a remote PIO read + * solely to know whether the remote partition is down and whether we + * should stop sending IPIs to it. This remote PIO read operation is + * set up in a special nofault region so SAL knows to ignore (and + * cleanup) any errors due to the remote amo write, PIO read, and/or + * PIO write operations. + * + * If/when new hardware solves this IPI problem, we should abandon + * the current approach. 
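+ *
+ * This race is also why xpc_hb_checker() below periodically forces a
+ * pass through xpc_process_activate_IRQ_rcvd() on sn2 (see its
+ * force_IRQ handling) rather than trusting the IRQ count alone.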
+ * + */ + +#include <linux/module.h> +#include <linux/sysctl.h> +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/reboot.h> +#include <linux/kdebug.h> +#include <linux/kthread.h> +#include "xpc.h" + +/* define two XPC debug device structures to be used with dev_dbg() et al */ + +struct device_driver xpc_dbg_name = { + .name = "xpc" +}; + +struct device xpc_part_dbg_subname = { + .bus_id = {0}, /* set to "part" at xpc_init() time */ + .driver = &xpc_dbg_name +}; + +struct device xpc_chan_dbg_subname = { + .bus_id = {0}, /* set to "chan" at xpc_init() time */ + .driver = &xpc_dbg_name +}; + +struct device *xpc_part = &xpc_part_dbg_subname; +struct device *xpc_chan = &xpc_chan_dbg_subname; + +static int xpc_kdebug_ignore; + +/* systune related variables for /proc/sys directories */ + +static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL; +static int xpc_hb_min_interval = 1; +static int xpc_hb_max_interval = 10; + +static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL; +static int xpc_hb_check_min_interval = 10; +static int xpc_hb_check_max_interval = 120; + +int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT; +static int xpc_disengage_min_timelimit; /* = 0 */ +static int xpc_disengage_max_timelimit = 120; + +static ctl_table xpc_sys_xpc_hb_dir[] = { + { + .ctl_name = CTL_UNNUMBERED, + .procname = "hb_interval", + .data = &xpc_hb_interval, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &xpc_hb_min_interval, + .extra2 = &xpc_hb_max_interval}, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "hb_check_interval", + .data = &xpc_hb_check_interval, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &xpc_hb_check_min_interval, + .extra2 = &xpc_hb_check_max_interval}, + {} +}; +static ctl_table xpc_sys_xpc_dir[] = { + { + .ctl_name = CTL_UNNUMBERED, + .procname = "hb", + .mode = 0555, + .child = xpc_sys_xpc_hb_dir}, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "disengage_timelimit", + .data = &xpc_disengage_timelimit, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &xpc_disengage_min_timelimit, + .extra2 = &xpc_disengage_max_timelimit}, + {} +}; +static ctl_table xpc_sys_dir[] = { + { + .ctl_name = CTL_UNNUMBERED, + .procname = "xpc", + .mode = 0555, + .child = xpc_sys_xpc_dir}, + {} +}; +static struct ctl_table_header *xpc_sysctl; + +/* non-zero if any remote partition disengage was timed out */ +int xpc_disengage_timedout; + +/* #of activate IRQs received and not yet processed */ +int xpc_activate_IRQ_rcvd; +DEFINE_SPINLOCK(xpc_activate_IRQ_rcvd_lock); + +/* IRQ handler notifies this wait queue on receipt of an IRQ */ +DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq); + +static unsigned long xpc_hb_check_timeout; +static struct timer_list xpc_hb_timer; +void *xpc_heartbeating_to_mask; + +/* notification that the xpc_hb_checker thread has exited */ +static DECLARE_COMPLETION(xpc_hb_checker_exited); + +/* notification that the xpc_discovery thread has exited */ +static DECLARE_COMPLETION(xpc_discovery_exited); + +static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *); + +static int xpc_system_reboot(struct notifier_block *, unsigned long, void *); +static struct notifier_block xpc_reboot_notifier = { + .notifier_call = xpc_system_reboot, +}; + +static int xpc_system_die(struct notifier_block *, unsigned long, void 
*); +static struct notifier_block xpc_die_notifier = { + .notifier_call = xpc_system_die, +}; + +int (*xpc_setup_partitions_sn) (void); +enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *buf, u64 *cookie, + unsigned long *rp_pa, + size_t *len); +int (*xpc_setup_rsvd_page_sn) (struct xpc_rsvd_page *rp); +void (*xpc_heartbeat_init) (void); +void (*xpc_heartbeat_exit) (void); +void (*xpc_increment_heartbeat) (void); +void (*xpc_offline_heartbeat) (void); +void (*xpc_online_heartbeat) (void); +enum xp_retval (*xpc_get_remote_heartbeat) (struct xpc_partition *part); + +enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part); +void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *ch); +u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *part); +enum xp_retval (*xpc_setup_msg_structures) (struct xpc_channel *ch); +void (*xpc_teardown_msg_structures) (struct xpc_channel *ch); +void (*xpc_process_msg_chctl_flags) (struct xpc_partition *part, int ch_number); +int (*xpc_n_of_deliverable_payloads) (struct xpc_channel *ch); +void *(*xpc_get_deliverable_payload) (struct xpc_channel *ch); + +void (*xpc_request_partition_activation) (struct xpc_rsvd_page *remote_rp, + unsigned long remote_rp_pa, + int nasid); +void (*xpc_request_partition_reactivation) (struct xpc_partition *part); +void (*xpc_request_partition_deactivation) (struct xpc_partition *part); +void (*xpc_cancel_partition_deactivation_request) (struct xpc_partition *part); + +void (*xpc_process_activate_IRQ_rcvd) (void); +enum xp_retval (*xpc_setup_ch_structures_sn) (struct xpc_partition *part); +void (*xpc_teardown_ch_structures_sn) (struct xpc_partition *part); + +void (*xpc_indicate_partition_engaged) (struct xpc_partition *part); +int (*xpc_partition_engaged) (short partid); +int (*xpc_any_partition_engaged) (void); +void (*xpc_indicate_partition_disengaged) (struct xpc_partition *part); +void (*xpc_assume_partition_disengaged) (short partid); + +void (*xpc_send_chctl_closerequest) (struct xpc_channel *ch, + unsigned long *irq_flags); +void (*xpc_send_chctl_closereply) (struct xpc_channel *ch, + unsigned long *irq_flags); +void (*xpc_send_chctl_openrequest) (struct xpc_channel *ch, + unsigned long *irq_flags); +void (*xpc_send_chctl_openreply) (struct xpc_channel *ch, + unsigned long *irq_flags); + +void (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *ch, + unsigned long msgqueue_pa); + +enum xp_retval (*xpc_send_payload) (struct xpc_channel *ch, u32 flags, + void *payload, u16 payload_size, + u8 notify_type, xpc_notify_func func, + void *key); +void (*xpc_received_payload) (struct xpc_channel *ch, void *payload); + +/* + * Timer function to enforce the timelimit on the partition disengage. + */ +static void +xpc_timeout_partition_disengage(unsigned long data) +{ + struct xpc_partition *part = (struct xpc_partition *)data; + + DBUG_ON(time_is_after_jiffies(part->disengage_timeout)); + + (void)xpc_partition_disengaged(part); + + DBUG_ON(part->disengage_timeout != 0); + DBUG_ON(xpc_partition_engaged(XPC_PARTID(part))); +} + +/* + * Timer to produce the heartbeat. The timer structures function is + * already set when this is initially called. A tunable is used to + * specify when the next timeout should occur. 
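+ *
+ * The beater and the checker run on different periods: with, say,
+ * xpc_hb_interval = 5 and xpc_hb_check_interval = 20 (both sysctl
+ * tunables; these values are purely illustrative), the timer fires
+ * every 5 seconds to increment our own heartbeat, and roughly every
+ * fourth firing finds xpc_hb_check_timeout expired and wakes the
+ * checker thread to inspect the remote heartbeats.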
+ */ +static void +xpc_hb_beater(unsigned long dummy) +{ + xpc_increment_heartbeat(); + + if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) + wake_up_interruptible(&xpc_activate_IRQ_wq); + + xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ); + add_timer(&xpc_hb_timer); +} + +static void +xpc_start_hb_beater(void) +{ + xpc_heartbeat_init(); + init_timer(&xpc_hb_timer); + xpc_hb_timer.function = xpc_hb_beater; + xpc_hb_beater(0); +} + +static void +xpc_stop_hb_beater(void) +{ + del_timer_sync(&xpc_hb_timer); + xpc_heartbeat_exit(); +} + +/* + * At periodic intervals, scan through all active partitions and ensure + * their heartbeat is still active. If not, the partition is deactivated. + */ +static void +xpc_check_remote_hb(void) +{ + struct xpc_partition *part; + short partid; + enum xp_retval ret; + + for (partid = 0; partid < xp_max_npartitions; partid++) { + + if (xpc_exiting) + break; + + if (partid == xp_partition_id) + continue; + + part = &xpc_partitions[partid]; + + if (part->act_state == XPC_P_AS_INACTIVE || + part->act_state == XPC_P_AS_DEACTIVATING) { + continue; + } + + ret = xpc_get_remote_heartbeat(part); + if (ret != xpSuccess) + XPC_DEACTIVATE_PARTITION(part, ret); + } +} + +/* + * This thread is responsible for nearly all of the partition + * activation/deactivation. + */ +static int +xpc_hb_checker(void *ignore) +{ + int force_IRQ = 0; + + /* this thread was marked active by xpc_hb_init() */ + + set_cpus_allowed_ptr(current, &cpumask_of_cpu(XPC_HB_CHECK_CPU)); + + /* set our heartbeating to other partitions into motion */ + xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ); + xpc_start_hb_beater(); + + while (!xpc_exiting) { + + dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have " + "been received\n", + (int)(xpc_hb_check_timeout - jiffies), + xpc_activate_IRQ_rcvd); + + /* checking of remote heartbeats is skewed by IRQ handling */ + if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) { + xpc_hb_check_timeout = jiffies + + (xpc_hb_check_interval * HZ); + + dev_dbg(xpc_part, "checking remote heartbeats\n"); + xpc_check_remote_hb(); + + /* + * On sn2 we need to periodically recheck to ensure no + * IRQ/amo pairs have been missed. + */ + if (is_shub()) + force_IRQ = 1; + } + + /* check for outstanding IRQs */ + if (xpc_activate_IRQ_rcvd > 0 || force_IRQ != 0) { + force_IRQ = 0; + dev_dbg(xpc_part, "processing activate IRQs " + "received\n"); + xpc_process_activate_IRQ_rcvd(); + } + + /* wait for IRQ or timeout */ + (void)wait_event_interruptible(xpc_activate_IRQ_wq, + (time_is_before_eq_jiffies( + xpc_hb_check_timeout) || + xpc_activate_IRQ_rcvd > 0 || + xpc_exiting)); + } + + xpc_stop_hb_beater(); + + dev_dbg(xpc_part, "heartbeat checker is exiting\n"); + + /* mark this thread as having exited */ + complete(&xpc_hb_checker_exited); + return 0; +} + +/* + * This thread will attempt to discover other partitions to activate + * based on info provided by SAL. This new thread is short lived and + * will exit once discovery is complete. + */ +static int +xpc_initiate_discovery(void *ignore) +{ + xpc_discovery(); + + dev_dbg(xpc_part, "discovery thread is exiting\n"); + + /* mark this thread as having exited */ + complete(&xpc_discovery_exited); + return 0; +} + +/* + * The first kthread assigned to a newly activated partition is the one + * created by XPC HB with which it calls xpc_activating(). XPC hangs on to + * that kthread until the partition is brought down, at which time that kthread + * returns back to XPC HB. 
(The return of that kthread will signify to XPC HB + * that XPC has dismantled all communication infrastructure for the associated + * partition.) This kthread becomes the channel manager for that partition. + * + * Each active partition has a channel manager, who, besides connecting and + * disconnecting channels, will ensure that each of the partition's connected + * channels has the required number of assigned kthreads to get the work done. + */ +static void +xpc_channel_mgr(struct xpc_partition *part) +{ + while (part->act_state != XPC_P_AS_DEACTIVATING || + atomic_read(&part->nchannels_active) > 0 || + !xpc_partition_disengaged(part)) { + + xpc_process_sent_chctl_flags(part); + + /* + * Wait until we've been requested to activate kthreads or + * all of the channel's message queues have been torn down or + * a signal is pending. + * + * The channel_mgr_requests is set to 1 after being awakened, + * This is done to prevent the channel mgr from making one pass + * through the loop for each request, since he will + * be servicing all the requests in one pass. The reason it's + * set to 1 instead of 0 is so that other kthreads will know + * that the channel mgr is running and won't bother trying to + * wake him up. + */ + atomic_dec(&part->channel_mgr_requests); + (void)wait_event_interruptible(part->channel_mgr_wq, + (atomic_read(&part->channel_mgr_requests) > 0 || + part->chctl.all_flags != 0 || + (part->act_state == XPC_P_AS_DEACTIVATING && + atomic_read(&part->nchannels_active) == 0 && + xpc_partition_disengaged(part)))); + atomic_set(&part->channel_mgr_requests, 1); + } +} + +/* + * Guarantee that the kzalloc'd memory is cacheline aligned. + */ +void * +xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) +{ + /* see if kzalloc will give us cachline aligned memory by default */ + *base = kzalloc(size, flags); + if (*base == NULL) + return NULL; + + if ((u64)*base == L1_CACHE_ALIGN((u64)*base)) + return *base; + + kfree(*base); + + /* nope, we'll have to do it ourselves */ + *base = kzalloc(size + L1_CACHE_BYTES, flags); + if (*base == NULL) + return NULL; + + return (void *)L1_CACHE_ALIGN((u64)*base); +} + +/* + * Setup the channel structures necessary to support XPartition Communication + * between the specified remote partition and the local one. + */ +static enum xp_retval +xpc_setup_ch_structures(struct xpc_partition *part) +{ + enum xp_retval ret; + int ch_number; + struct xpc_channel *ch; + short partid = XPC_PARTID(part); + + /* + * Allocate all of the channel structures as a contiguous chunk of + * memory. 
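+ *
+ * The remote open/close args allocated just below additionally get
+ * cacheline alignment from xpc_kzalloc_cacheline_aligned() above,
+ * which over-allocates to guarantee it: padding the request by
+ * L1_CACHE_BYTES means L1_CACHE_ALIGN(base) + size still falls inside
+ * the allocation, since rounding up consumes at most
+ * L1_CACHE_BYTES - 1 bytes.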
+ */ + DBUG_ON(part->channels != NULL); + part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_MAX_NCHANNELS, + GFP_KERNEL); + if (part->channels == NULL) { + dev_err(xpc_chan, "can't get memory for channels\n"); + return xpNoMemory; + } + + /* allocate the remote open and close args */ + + part->remote_openclose_args = + xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, + GFP_KERNEL, &part-> + remote_openclose_args_base); + if (part->remote_openclose_args == NULL) { + dev_err(xpc_chan, "can't get memory for remote connect args\n"); + ret = xpNoMemory; + goto out_1; + } + + part->chctl.all_flags = 0; + spin_lock_init(&part->chctl_lock); + + atomic_set(&part->channel_mgr_requests, 1); + init_waitqueue_head(&part->channel_mgr_wq); + + part->nchannels = XPC_MAX_NCHANNELS; + + atomic_set(&part->nchannels_active, 0); + atomic_set(&part->nchannels_engaged, 0); + + for (ch_number = 0; ch_number < part->nchannels; ch_number++) { + ch = &part->channels[ch_number]; + + ch->partid = partid; + ch->number = ch_number; + ch->flags = XPC_C_DISCONNECTED; + + atomic_set(&ch->kthreads_assigned, 0); + atomic_set(&ch->kthreads_idle, 0); + atomic_set(&ch->kthreads_active, 0); + + atomic_set(&ch->references, 0); + atomic_set(&ch->n_to_notify, 0); + + spin_lock_init(&ch->lock); + init_completion(&ch->wdisconnect_wait); + + atomic_set(&ch->n_on_msg_allocate_wq, 0); + init_waitqueue_head(&ch->msg_allocate_wq); + init_waitqueue_head(&ch->idle_wq); + } + + ret = xpc_setup_ch_structures_sn(part); + if (ret != xpSuccess) + goto out_2; + + /* + * With the setting of the partition setup_state to XPC_P_SS_SETUP, + * we're declaring that this partition is ready to go. + */ + part->setup_state = XPC_P_SS_SETUP; + + return xpSuccess; + + /* setup of ch structures failed */ +out_2: + kfree(part->remote_openclose_args_base); + part->remote_openclose_args = NULL; +out_1: + kfree(part->channels); + part->channels = NULL; + return ret; +} + +/* + * Teardown the channel structures necessary to support XPartition Communication + * between the specified remote partition and the local one. + */ +static void +xpc_teardown_ch_structures(struct xpc_partition *part) +{ + DBUG_ON(atomic_read(&part->nchannels_engaged) != 0); + DBUG_ON(atomic_read(&part->nchannels_active) != 0); + + /* + * Make this partition inaccessible to local processes by marking it + * as no longer setup. Then wait before proceeding with the teardown + * until all existing references cease. + */ + DBUG_ON(part->setup_state != XPC_P_SS_SETUP); + part->setup_state = XPC_P_SS_WTEARDOWN; + + wait_event(part->teardown_wq, (atomic_read(&part->references) == 0)); + + /* now we can begin tearing down the infrastructure */ + + xpc_teardown_ch_structures_sn(part); + + kfree(part->remote_openclose_args_base); + part->remote_openclose_args = NULL; + kfree(part->channels); + part->channels = NULL; + + part->setup_state = XPC_P_SS_TORNDOWN; +} + +/* + * When XPC HB determines that a partition has come up, it will create a new + * kthread and that kthread will call this function to attempt to set up the + * basic infrastructure used for Cross Partition Communication with the newly + * upped partition. + * + * The kthread that was created by XPC HB and which setup the XPC + * infrastructure will remain assigned to the partition becoming the channel + * manager for that partition until the partition is deactivating, at which + * time the kthread will teardown the XPC infrastructure and then exit. 
+ */ +static int +xpc_activating(void *__partid) +{ + short partid = (u64)__partid; + struct xpc_partition *part = &xpc_partitions[partid]; + unsigned long irq_flags; + + DBUG_ON(partid < 0 || partid >= xp_max_npartitions); + + spin_lock_irqsave(&part->act_lock, irq_flags); + + if (part->act_state == XPC_P_AS_DEACTIVATING) { + part->act_state = XPC_P_AS_INACTIVE; + spin_unlock_irqrestore(&part->act_lock, irq_flags); + part->remote_rp_pa = 0; + return 0; + } + + /* indicate the thread is activating */ + DBUG_ON(part->act_state != XPC_P_AS_ACTIVATION_REQ); + part->act_state = XPC_P_AS_ACTIVATING; + + XPC_SET_REASON(part, 0, 0); + spin_unlock_irqrestore(&part->act_lock, irq_flags); + + dev_dbg(xpc_part, "activating partition %d\n", partid); + + xpc_allow_hb(partid); + + if (xpc_setup_ch_structures(part) == xpSuccess) { + (void)xpc_part_ref(part); /* this will always succeed */ + + if (xpc_make_first_contact(part) == xpSuccess) { + xpc_mark_partition_active(part); + xpc_channel_mgr(part); + /* won't return until partition is deactivating */ + } + + xpc_part_deref(part); + xpc_teardown_ch_structures(part); + } + + xpc_disallow_hb(partid); + xpc_mark_partition_inactive(part); + + if (part->reason == xpReactivating) { + /* interrupting ourselves results in activating partition */ + xpc_request_partition_reactivation(part); + } + + return 0; +} + +void +xpc_activate_partition(struct xpc_partition *part) +{ + short partid = XPC_PARTID(part); + unsigned long irq_flags; + struct task_struct *kthread; + + spin_lock_irqsave(&part->act_lock, irq_flags); + + DBUG_ON(part->act_state != XPC_P_AS_INACTIVE); + + part->act_state = XPC_P_AS_ACTIVATION_REQ; + XPC_SET_REASON(part, xpCloneKThread, __LINE__); + + spin_unlock_irqrestore(&part->act_lock, irq_flags); + + kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d", + partid); + if (IS_ERR(kthread)) { + spin_lock_irqsave(&part->act_lock, irq_flags); + part->act_state = XPC_P_AS_INACTIVE; + XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__); + spin_unlock_irqrestore(&part->act_lock, irq_flags); + } +} + +void +xpc_activate_kthreads(struct xpc_channel *ch, int needed) +{ + int idle = atomic_read(&ch->kthreads_idle); + int assigned = atomic_read(&ch->kthreads_assigned); + int wakeup; + + DBUG_ON(needed <= 0); + + if (idle > 0) { + wakeup = (needed > idle) ? idle : needed; + needed -= wakeup; + + dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, " + "channel=%d\n", wakeup, ch->partid, ch->number); + + /* only wakeup the requested number of kthreads */ + wake_up_nr(&ch->idle_wq, wakeup); + } + + if (needed <= 0) + return; + + if (needed + assigned > ch->kthreads_assigned_limit) { + needed = ch->kthreads_assigned_limit - assigned; + if (needed <= 0) + return; + } + + dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n", + needed, ch->partid, ch->number); + + xpc_create_kthreads(ch, needed, 0); +} + +/* + * This function is where XPC's kthreads wait for messages to deliver. 
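+ *
+ * A kthread that finds no deliverable payloads parks itself on
+ * ch->idle_wq, but only if doing so would not push the idle count past
+ * ch->kthreads_idle_limit; a would-be surplus idler exits instead,
+ * which is how the pool shrinks back down after a burst of traffic.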
+ */ +static void +xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch) +{ + do { + /* deliver messages to their intended recipients */ + + while (xpc_n_of_deliverable_payloads(ch) > 0 && + !(ch->flags & XPC_C_DISCONNECTING)) { + xpc_deliver_payload(ch); + } + + if (atomic_inc_return(&ch->kthreads_idle) > + ch->kthreads_idle_limit) { + /* too many idle kthreads on this channel */ + atomic_dec(&ch->kthreads_idle); + break; + } + + dev_dbg(xpc_chan, "idle kthread calling " + "wait_event_interruptible_exclusive()\n"); + + (void)wait_event_interruptible_exclusive(ch->idle_wq, + (xpc_n_of_deliverable_payloads(ch) > 0 || + (ch->flags & XPC_C_DISCONNECTING))); + + atomic_dec(&ch->kthreads_idle); + + } while (!(ch->flags & XPC_C_DISCONNECTING)); +} + +static int +xpc_kthread_start(void *args) +{ + short partid = XPC_UNPACK_ARG1(args); + u16 ch_number = XPC_UNPACK_ARG2(args); + struct xpc_partition *part = &xpc_partitions[partid]; + struct xpc_channel *ch; + int n_needed; + unsigned long irq_flags; + + dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n", + partid, ch_number); + + ch = &part->channels[ch_number]; + + if (!(ch->flags & XPC_C_DISCONNECTING)) { + + /* let registerer know that connection has been established */ + + spin_lock_irqsave(&ch->lock, irq_flags); + if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) { + ch->flags |= XPC_C_CONNECTEDCALLOUT; + spin_unlock_irqrestore(&ch->lock, irq_flags); + + xpc_connected_callout(ch); + + spin_lock_irqsave(&ch->lock, irq_flags); + ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE; + spin_unlock_irqrestore(&ch->lock, irq_flags); + + /* + * It is possible that while the callout was being + * made that the remote partition sent some messages. + * If that is the case, we may need to activate + * additional kthreads to help deliver them. We only + * need one less than total #of messages to deliver. + */ + n_needed = xpc_n_of_deliverable_payloads(ch) - 1; + if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING)) + xpc_activate_kthreads(ch, n_needed); + + } else { + spin_unlock_irqrestore(&ch->lock, irq_flags); + } + + xpc_kthread_waitmsgs(part, ch); + } + + /* let registerer know that connection is disconnecting */ + + spin_lock_irqsave(&ch->lock, irq_flags); + if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && + !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) { + ch->flags |= XPC_C_DISCONNECTINGCALLOUT; + spin_unlock_irqrestore(&ch->lock, irq_flags); + + xpc_disconnect_callout(ch, xpDisconnecting); + + spin_lock_irqsave(&ch->lock, irq_flags); + ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE; + } + spin_unlock_irqrestore(&ch->lock, irq_flags); + + if (atomic_dec_return(&ch->kthreads_assigned) == 0 && + atomic_dec_return(&part->nchannels_engaged) == 0) { + xpc_indicate_partition_disengaged(part); + } + + xpc_msgqueue_deref(ch); + + dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n", + partid, ch_number); + + xpc_part_deref(part); + return 0; +} + +/* + * For each partition that XPC has established communications with, there is + * a minimum of one kernel thread assigned to perform any operation that + * may potentially sleep or block (basically the callouts to the asynchronous + * functions registered via xpc_connect()). + * + * Additional kthreads are created and destroyed by XPC as the workload + * demands. + * + * A kthread is assigned to one of the active channels that exists for a given + * partition. 
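+ *
+ * Both pool bounds come from the registration: with, for example,
+ * assigned_limit = 8 and idle_limit = 2 passed to xpc_connect()
+ * (illustrative values only), a busy channel can grow to eight
+ * kthreads, while no more than two of them will ever sit idle waiting
+ * for the next message.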
+ */ +void +xpc_create_kthreads(struct xpc_channel *ch, int needed, + int ignore_disconnecting) +{ + unsigned long irq_flags; + u64 args = XPC_PACK_ARGS(ch->partid, ch->number); + struct xpc_partition *part = &xpc_partitions[ch->partid]; + struct task_struct *kthread; + + while (needed-- > 0) { + + /* + * The following is done on behalf of the newly created + * kthread. That kthread is responsible for doing the + * counterpart to the following before it exits. + */ + if (ignore_disconnecting) { + if (!atomic_inc_not_zero(&ch->kthreads_assigned)) { + /* kthreads assigned had gone to zero */ + BUG_ON(!(ch->flags & + XPC_C_DISCONNECTINGCALLOUT_MADE)); + break; + } + + } else if (ch->flags & XPC_C_DISCONNECTING) { + break; + + } else if (atomic_inc_return(&ch->kthreads_assigned) == 1 && + atomic_inc_return(&part->nchannels_engaged) == 1) { + xpc_indicate_partition_engaged(part); + } + (void)xpc_part_ref(part); + xpc_msgqueue_ref(ch); + + kthread = kthread_run(xpc_kthread_start, (void *)args, + "xpc%02dc%d", ch->partid, ch->number); + if (IS_ERR(kthread)) { + /* the fork failed */ + + /* + * NOTE: if (ignore_disconnecting && + * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true, + * then we'll deadlock if all other kthreads assigned + * to this channel are blocked in the channel's + * registerer, because the only thing that will unblock + * them is the xpDisconnecting callout that this + * failed kthread_run() would have made. + */ + + if (atomic_dec_return(&ch->kthreads_assigned) == 0 && + atomic_dec_return(&part->nchannels_engaged) == 0) { + xpc_indicate_partition_disengaged(part); + } + xpc_msgqueue_deref(ch); + xpc_part_deref(part); + + if (atomic_read(&ch->kthreads_assigned) < + ch->kthreads_idle_limit) { + /* + * Flag this as an error only if we have an + * insufficient #of kthreads for the channel + * to function. 
+ */ + spin_lock_irqsave(&ch->lock, irq_flags); + XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources, + &irq_flags); + spin_unlock_irqrestore(&ch->lock, irq_flags); + } + break; + } + } +} + +void +xpc_disconnect_wait(int ch_number) +{ + unsigned long irq_flags; + short partid; + struct xpc_partition *part; + struct xpc_channel *ch; + int wakeup_channel_mgr; + + /* now wait for all callouts to the caller's function to cease */ + for (partid = 0; partid < xp_max_npartitions; partid++) { + part = &xpc_partitions[partid]; + + if (!xpc_part_ref(part)) + continue; + + ch = &part->channels[ch_number]; + + if (!(ch->flags & XPC_C_WDISCONNECT)) { + xpc_part_deref(part); + continue; + } + + wait_for_completion(&ch->wdisconnect_wait); + + spin_lock_irqsave(&ch->lock, irq_flags); + DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED)); + wakeup_channel_mgr = 0; + + if (ch->delayed_chctl_flags) { + if (part->act_state != XPC_P_AS_DEACTIVATING) { + spin_lock(&part->chctl_lock); + part->chctl.flags[ch->number] |= + ch->delayed_chctl_flags; + spin_unlock(&part->chctl_lock); + wakeup_channel_mgr = 1; + } + ch->delayed_chctl_flags = 0; + } + + ch->flags &= ~XPC_C_WDISCONNECT; + spin_unlock_irqrestore(&ch->lock, irq_flags); + + if (wakeup_channel_mgr) + xpc_wakeup_channel_mgr(part); + + xpc_part_deref(part); + } +} + +static int +xpc_setup_partitions(void) +{ + short partid; + struct xpc_partition *part; + + xpc_partitions = kzalloc(sizeof(struct xpc_partition) * + xp_max_npartitions, GFP_KERNEL); + if (xpc_partitions == NULL) { + dev_err(xpc_part, "can't get memory for partition structure\n"); + return -ENOMEM; + } + + /* + * The first few fields of each entry of xpc_partitions[] need to + * be initialized now so that calls to xpc_connect() and + * xpc_disconnect() can be made prior to the activation of any remote + * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE + * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING + * PARTITION HAS BEEN ACTIVATED. + */ + for (partid = 0; partid < xp_max_npartitions; partid++) { + part = &xpc_partitions[partid]; + + DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part)); + + part->activate_IRQ_rcvd = 0; + spin_lock_init(&part->act_lock); + part->act_state = XPC_P_AS_INACTIVE; + XPC_SET_REASON(part, 0, 0); + + init_timer(&part->disengage_timer); + part->disengage_timer.function = + xpc_timeout_partition_disengage; + part->disengage_timer.data = (unsigned long)part; + + part->setup_state = XPC_P_SS_UNSET; + init_waitqueue_head(&part->teardown_wq); + atomic_set(&part->references, 0); + } + + return xpc_setup_partitions_sn(); +} + +static void +xpc_teardown_partitions(void) +{ + kfree(xpc_partitions); +} + +static void +xpc_do_exit(enum xp_retval reason) +{ + short partid; + int active_part_count, printed_waiting_msg = 0; + struct xpc_partition *part; + unsigned long printmsg_time, disengage_timeout = 0; + + /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */ + DBUG_ON(xpc_exiting == 1); + + /* + * Let the heartbeat checker thread and the discovery thread + * (if one is running) know that they should exit. Also wake up + * the heartbeat checker thread in case it's sleeping. 
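+ *
+ * The discovery thread is waited on first since it may still be
+ * requesting partition activations; only once both helper threads are
+ * gone do we begin deactivating partitions.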
+ */ + xpc_exiting = 1; + wake_up_interruptible(&xpc_activate_IRQ_wq); + + /* wait for the discovery thread to exit */ + wait_for_completion(&xpc_discovery_exited); + + /* wait for the heartbeat checker thread to exit */ + wait_for_completion(&xpc_hb_checker_exited); + + /* sleep for a 1/3 of a second or so */ + (void)msleep_interruptible(300); + + /* wait for all partitions to become inactive */ + + printmsg_time = jiffies + (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ); + xpc_disengage_timedout = 0; + + do { + active_part_count = 0; + + for (partid = 0; partid < xp_max_npartitions; partid++) { + part = &xpc_partitions[partid]; + + if (xpc_partition_disengaged(part) && + part->act_state == XPC_P_AS_INACTIVE) { + continue; + } + + active_part_count++; + + XPC_DEACTIVATE_PARTITION(part, reason); + + if (part->disengage_timeout > disengage_timeout) + disengage_timeout = part->disengage_timeout; + } + + if (xpc_any_partition_engaged()) { + if (time_is_before_jiffies(printmsg_time)) { + dev_info(xpc_part, "waiting for remote " + "partitions to deactivate, timeout in " + "%ld seconds\n", (disengage_timeout - + jiffies) / HZ); + printmsg_time = jiffies + + (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ); + printed_waiting_msg = 1; + } + + } else if (active_part_count > 0) { + if (printed_waiting_msg) { + dev_info(xpc_part, "waiting for local partition" + " to deactivate\n"); + printed_waiting_msg = 0; + } + + } else { + if (!xpc_disengage_timedout) { + dev_info(xpc_part, "all partitions have " + "deactivated\n"); + } + break; + } + + /* sleep for a 1/3 of a second or so */ + (void)msleep_interruptible(300); + + } while (1); + + DBUG_ON(xpc_any_partition_engaged()); + DBUG_ON(xpc_any_hbs_allowed() != 0); + + xpc_teardown_rsvd_page(); + + if (reason == xpUnloading) { + (void)unregister_die_notifier(&xpc_die_notifier); + (void)unregister_reboot_notifier(&xpc_reboot_notifier); + } + + /* clear the interface to XPC's functions */ + xpc_clear_interface(); + + if (xpc_sysctl) + unregister_sysctl_table(xpc_sysctl); + + xpc_teardown_partitions(); + + if (is_shub()) + xpc_exit_sn2(); + else if (is_uv()) + xpc_exit_uv(); +} + +/* + * This function is called when the system is being rebooted. + */ +static int +xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused) +{ + enum xp_retval reason; + + switch (event) { + case SYS_RESTART: + reason = xpSystemReboot; + break; + case SYS_HALT: + reason = xpSystemHalt; + break; + case SYS_POWER_OFF: + reason = xpSystemPoweroff; + break; + default: + reason = xpSystemGoingDown; + } + + xpc_do_exit(reason); + return NOTIFY_DONE; +} + +/* + * Notify other partitions to deactivate from us by first disengaging from all + * references to our memory. + */ +static void +xpc_die_deactivate(void) +{ + struct xpc_partition *part; + short partid; + int any_engaged; + long keep_waiting; + long wait_to_print; + + /* keep xpc_hb_checker thread from doing anything (just in case) */ + xpc_exiting = 1; + + xpc_disallow_all_hbs(); /*indicate we're deactivated */ + + for (partid = 0; partid < xp_max_npartitions; partid++) { + part = &xpc_partitions[partid]; + + if (xpc_partition_engaged(partid) || + part->act_state != XPC_P_AS_INACTIVE) { + xpc_request_partition_deactivation(part); + xpc_indicate_partition_disengaged(part); + } + } + + /* + * Though we requested that all other partitions deactivate from us, + * we only wait until they've all disengaged or we've reached the + * defined timelimit. 
+ * + * Given that one iteration through the following while-loop takes + * approximately 200 microseconds, calculate the #of loops to take + * before bailing and the #of loops before printing a waiting message. + */ + keep_waiting = xpc_disengage_timelimit * 1000 * 5; + wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5; + + while (1) { + any_engaged = xpc_any_partition_engaged(); + if (!any_engaged) { + dev_info(xpc_part, "all partitions have deactivated\n"); + break; + } + + if (!keep_waiting--) { + for (partid = 0; partid < xp_max_npartitions; + partid++) { + if (xpc_partition_engaged(partid)) { + dev_info(xpc_part, "deactivate from " + "remote partition %d timed " + "out\n", partid); + } + } + break; + } + + if (!wait_to_print--) { + dev_info(xpc_part, "waiting for remote partitions to " + "deactivate, timeout in %ld seconds\n", + keep_waiting / (1000 * 5)); + wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * + 1000 * 5; + } + + udelay(200); + } +} + +/* + * This function is called when the system is being restarted or halted due + * to some sort of system failure. If this is the case we need to notify the + * other partitions to disengage from all references to our memory. + * This function can also be called when our heartbeater could be offlined + * for a time. In this case we need to notify other partitions to not worry + * about the lack of a heartbeat. + */ +static int +xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused) +{ +#ifdef CONFIG_IA64 /* !!! temporary kludge */ + switch (event) { + case DIE_MACHINE_RESTART: + case DIE_MACHINE_HALT: + xpc_die_deactivate(); + break; + + case DIE_KDEBUG_ENTER: + /* Should lack of heartbeat be ignored by other partitions? */ + if (!xpc_kdebug_ignore) + break; + + /* fall through */ + case DIE_MCA_MONARCH_ENTER: + case DIE_INIT_MONARCH_ENTER: + xpc_offline_heartbeat(); + break; + + case DIE_KDEBUG_LEAVE: + /* Is lack of heartbeat being ignored by other partitions? */ + if (!xpc_kdebug_ignore) + break; + + /* fall through */ + case DIE_MCA_MONARCH_LEAVE: + case DIE_INIT_MONARCH_LEAVE: + xpc_online_heartbeat(); + break; + } +#else + xpc_die_deactivate(); +#endif + + return NOTIFY_DONE; +} + +int __init +xpc_init(void) +{ + int ret; + struct task_struct *kthread; + + snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part"); + snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan"); + + if (is_shub()) { + /* + * The ia64-sn2 architecture supports at most 64 partitions. + * And the inability to unregister remote amos restricts us + * further to only support exactly 64 partitions on this + * architecture, no less. + */ + if (xp_max_npartitions != 64) { + dev_err(xpc_part, "max #of partitions not set to 64\n"); + ret = -EINVAL; + } else { + ret = xpc_init_sn2(); + } + + } else if (is_uv()) { + ret = xpc_init_uv(); + + } else { + ret = -ENODEV; + } + + if (ret != 0) + return ret; + + ret = xpc_setup_partitions(); + if (ret != 0) { + dev_err(xpc_part, "can't get memory for partition structure\n"); + goto out_1; + } + + xpc_sysctl = register_sysctl_table(xpc_sys_dir); + + /* + * Fill the partition reserved page with the information needed by + * other partitions to discover we are alive and establish initial + * communications. 
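+ *
+ * The "we are alive" signal is simply a nonzero ts_jiffies stamp in
+ * the reserved page; xpc_teardown_rsvd_page() later zeroes it to
+ * withdraw the advertisement.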
+ */ + ret = xpc_setup_rsvd_page(); + if (ret != 0) { + dev_err(xpc_part, "can't setup our reserved page\n"); + goto out_2; + } + + /* add ourselves to the reboot_notifier_list */ + ret = register_reboot_notifier(&xpc_reboot_notifier); + if (ret != 0) + dev_warn(xpc_part, "can't register reboot notifier\n"); + + /* add ourselves to the die_notifier list */ + ret = register_die_notifier(&xpc_die_notifier); + if (ret != 0) + dev_warn(xpc_part, "can't register die notifier\n"); + + /* + * The real work-horse behind xpc. This processes incoming + * interrupts and monitors remote heartbeats. + */ + kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME); + if (IS_ERR(kthread)) { + dev_err(xpc_part, "failed while forking hb check thread\n"); + ret = -EBUSY; + goto out_3; + } + + /* + * Startup a thread that will attempt to discover other partitions to + * activate based on info provided by SAL. This new thread is short + * lived and will exit once discovery is complete. + */ + kthread = kthread_run(xpc_initiate_discovery, NULL, + XPC_DISCOVERY_THREAD_NAME); + if (IS_ERR(kthread)) { + dev_err(xpc_part, "failed while forking discovery thread\n"); + + /* mark this new thread as a non-starter */ + complete(&xpc_discovery_exited); + + xpc_do_exit(xpUnloading); + return -EBUSY; + } + + /* set the interface to point at XPC's functions */ + xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect, + xpc_initiate_send, xpc_initiate_send_notify, + xpc_initiate_received, xpc_initiate_partid_to_nasids); + + return 0; + + /* initialization was not successful */ +out_3: + xpc_teardown_rsvd_page(); + + (void)unregister_die_notifier(&xpc_die_notifier); + (void)unregister_reboot_notifier(&xpc_reboot_notifier); +out_2: + if (xpc_sysctl) + unregister_sysctl_table(xpc_sysctl); + + xpc_teardown_partitions(); +out_1: + if (is_shub()) + xpc_exit_sn2(); + else if (is_uv()) + xpc_exit_uv(); + return ret; +} + +module_init(xpc_init); + +void __exit +xpc_exit(void) +{ + xpc_do_exit(xpUnloading); +} + +module_exit(xpc_exit); + +MODULE_AUTHOR("Silicon Graphics, Inc."); +MODULE_DESCRIPTION("Cross Partition Communication (XPC) support"); +MODULE_LICENSE("GPL"); + +module_param(xpc_hb_interval, int, 0); +MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between " + "heartbeat increments."); + +module_param(xpc_hb_check_interval, int, 0); +MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between " + "heartbeat checks."); + +module_param(xpc_disengage_timelimit, int, 0); +MODULE_PARM_DESC(xpc_disengage_timelimit, "Number of seconds to wait " + "for disengage to complete."); + +module_param(xpc_kdebug_ignore, int, 0); +MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by " + "other partitions when dropping into kdebug."); diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c new file mode 100644 index 0000000..6722f6f --- /dev/null +++ b/drivers/misc/sgi-xp/xpc_partition.c @@ -0,0 +1,528 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. + */ + +/* + * Cross Partition Communication (XPC) partition support. + * + * This is the part of XPC that detects the presence/absence of + * other partitions. It provides a heartbeat and monitors the + * heartbeats of other partitions. 
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/hardirq.h>
+#include "xpc.h"
+
+/* XPC is exiting flag */
+int xpc_exiting;
+
+/* this partition's reserved page pointers */
+struct xpc_rsvd_page *xpc_rsvd_page;
+static unsigned long *xpc_part_nasids;
+unsigned long *xpc_mach_nasids;
+
+static int xpc_nasid_mask_nbytes; /* #of bytes in nasid mask */
+int xpc_nasid_mask_nlongs; /* #of longs in nasid mask */
+
+struct xpc_partition *xpc_partitions;
+
+/*
+ * Guarantee that the kmalloc'd memory is cacheline aligned.
+ */
+void *
+xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
+{
+ /* see if kmalloc will give us cacheline aligned memory by default */
+ *base = kmalloc(size, flags);
+ if (*base == NULL)
+ return NULL;
+
+ if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
+ return *base;
+
+ kfree(*base);
+
+ /* nope, we'll have to do it ourselves */
+ *base = kmalloc(size + L1_CACHE_BYTES, flags);
+ if (*base == NULL)
+ return NULL;
+
+ return (void *)L1_CACHE_ALIGN((u64)*base);
+}
+
+/*
+ * Given a nasid, get the physical address of the partition's reserved page
+ * for that nasid. This function returns 0 on any error.
+ */
+static unsigned long
+xpc_get_rsvd_page_pa(int nasid)
+{
+ enum xp_retval ret;
+ u64 cookie = 0;
+ unsigned long rp_pa = nasid; /* seed with nasid */
+ size_t len = 0;
+ size_t buf_len = 0;
+ void *buf = NULL; /* (re)allocated below once SAL reports the length */
+ void *buf_base = NULL;
+
+ while (1) {
+
+ /* !!! rp_pa will need to be _gpa on UV.
+ * ??? So do we save it into the architecture specific parts
+ * ??? of the xpc_partition structure? Do we rename this
+ * ??? function or have two versions? Rename rp_pa for UV to
+ * ??? rp_gpa?
+ */
+ ret = xpc_get_partition_rsvd_page_pa(buf, &cookie, &rp_pa,
+ &len);
+
+ dev_dbg(xpc_part, "SAL returned with ret=%d, cookie=0x%016lx, "
+ "address=0x%016lx, len=0x%016lx\n", ret,
+ (unsigned long)cookie, rp_pa, len);
+
+ if (ret != xpNeedMoreInfo)
+ break;
+
+ /* !!! L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */
+ if (L1_CACHE_ALIGN(len) > buf_len) {
+ kfree(buf_base);
+ buf_len = L1_CACHE_ALIGN(len);
+ buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
+ &buf_base);
+ if (buf_base == NULL) {
+ dev_err(xpc_part, "unable to kmalloc "
+ "len=0x%016lx\n", buf_len);
+ ret = xpNoMemory;
+ break;
+ }
+ }
+
+ ret = xp_remote_memcpy(xp_pa(buf), rp_pa, buf_len);
+ if (ret != xpSuccess) {
+ dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret);
+ break;
+ }
+ }
+
+ kfree(buf_base);
+
+ if (ret != xpSuccess)
+ rp_pa = 0;
+
+ dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
+ return rp_pa;
+}
+
+/*
+ * Fill the partition reserved page with the information needed by
+ * other partitions to discover we are alive and establish initial
+ * communications.
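+ *
+ * A remote partition treats this page as live only once its ts_jiffies
+ * stamp is nonzero; a minimal sketch of that test (illustrative helper,
+ * not part of the driver):
+ */
+
+/*
+ * Sketch only: xpc_get_remote_rp() performs this check on a pulled copy
+ * of the reserved page header.
+ */
+static inline int
+xpc_sketch_rsvd_page_is_live(struct xpc_rsvd_page *rp)
+{
+ return rp->ts_jiffies != 0; /* zero means "not (re)initialized" */
+}
+
+/*
+ * The setup itself: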
+ */ +int +xpc_setup_rsvd_page(void) +{ + int ret; + struct xpc_rsvd_page *rp; + unsigned long rp_pa; + unsigned long new_ts_jiffies; + + /* get the local reserved page's address */ + + preempt_disable(); + rp_pa = xpc_get_rsvd_page_pa(xp_cpu_to_nasid(smp_processor_id())); + preempt_enable(); + if (rp_pa == 0) { + dev_err(xpc_part, "SAL failed to locate the reserved page\n"); + return -ESRCH; + } + rp = (struct xpc_rsvd_page *)__va(rp_pa); + + if (rp->SAL_version < 3) { + /* SAL_versions < 3 had a SAL_partid defined as a u8 */ + rp->SAL_partid &= 0xff; + } + BUG_ON(rp->SAL_partid != xp_partition_id); + + if (rp->SAL_partid < 0 || rp->SAL_partid >= xp_max_npartitions) { + dev_err(xpc_part, "the reserved page's partid of %d is outside " + "supported range (< 0 || >= %d)\n", rp->SAL_partid, + xp_max_npartitions); + return -EINVAL; + } + + rp->version = XPC_RP_VERSION; + rp->max_npartitions = xp_max_npartitions; + + /* establish the actual sizes of the nasid masks */ + if (rp->SAL_version == 1) { + /* SAL_version 1 didn't set the nasids_size field */ + rp->SAL_nasids_size = 128; + } + xpc_nasid_mask_nbytes = rp->SAL_nasids_size; + xpc_nasid_mask_nlongs = BITS_TO_LONGS(rp->SAL_nasids_size * + BITS_PER_BYTE); + + /* setup the pointers to the various items in the reserved page */ + xpc_part_nasids = XPC_RP_PART_NASIDS(rp); + xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp); + + ret = xpc_setup_rsvd_page_sn(rp); + if (ret != 0) + return ret; + + /* + * Set timestamp of when reserved page was setup by XPC. + * This signifies to the remote partition that our reserved + * page is initialized. + */ + new_ts_jiffies = jiffies; + if (new_ts_jiffies == 0 || new_ts_jiffies == rp->ts_jiffies) + new_ts_jiffies++; + rp->ts_jiffies = new_ts_jiffies; + + xpc_rsvd_page = rp; + return 0; +} + +void +xpc_teardown_rsvd_page(void) +{ + /* a zero timestamp indicates our rsvd page is not initialized */ + xpc_rsvd_page->ts_jiffies = 0; +} + +/* + * Get a copy of a portion of the remote partition's rsvd page. + * + * remote_rp points to a buffer that is cacheline aligned for BTE copies and + * is large enough to contain a copy of their reserved page header and + * part_nasids mask. 
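+ *
+ * The copy pulled below is XPC_RP_HEADER_SIZE + xpc_nasid_mask_nbytes
+ * bytes, so callers (see xpc_discovery()) must size remote_rp's buffer
+ * to at least that many bytes.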
+ */ +enum xp_retval +xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids, + struct xpc_rsvd_page *remote_rp, unsigned long *remote_rp_pa) +{ + int l; + enum xp_retval ret; + + /* get the reserved page's physical address */ + + *remote_rp_pa = xpc_get_rsvd_page_pa(nasid); + if (*remote_rp_pa == 0) + return xpNoRsvdPageAddr; + + /* pull over the reserved page header and part_nasids mask */ + ret = xp_remote_memcpy(xp_pa(remote_rp), *remote_rp_pa, + XPC_RP_HEADER_SIZE + xpc_nasid_mask_nbytes); + if (ret != xpSuccess) + return ret; + + if (discovered_nasids != NULL) { + unsigned long *remote_part_nasids = + XPC_RP_PART_NASIDS(remote_rp); + + for (l = 0; l < xpc_nasid_mask_nlongs; l++) + discovered_nasids[l] |= remote_part_nasids[l]; + } + + /* zero timestamp indicates the reserved page has not been setup */ + if (remote_rp->ts_jiffies == 0) + return xpRsvdPageNotSet; + + if (XPC_VERSION_MAJOR(remote_rp->version) != + XPC_VERSION_MAJOR(XPC_RP_VERSION)) { + return xpBadVersion; + } + + /* check that both remote and local partids are valid for each side */ + if (remote_rp->SAL_partid < 0 || + remote_rp->SAL_partid >= xp_max_npartitions || + remote_rp->max_npartitions <= xp_partition_id) { + return xpInvalidPartid; + } + + if (remote_rp->SAL_partid == xp_partition_id) + return xpLocalPartid; + + return xpSuccess; +} + +/* + * See if the other side has responded to a partition deactivate request + * from us. Though we requested the remote partition to deactivate with regard + * to us, we really only need to wait for the other side to disengage from us. + */ +int +xpc_partition_disengaged(struct xpc_partition *part) +{ + short partid = XPC_PARTID(part); + int disengaged; + + disengaged = !xpc_partition_engaged(partid); + if (part->disengage_timeout) { + if (!disengaged) { + if (time_is_after_jiffies(part->disengage_timeout)) { + /* timelimit hasn't been reached yet */ + return 0; + } + + /* + * Other side hasn't responded to our deactivate + * request in a timely fashion, so assume it's dead. + */ + + dev_info(xpc_part, "deactivate request to remote " + "partition %d timed out\n", partid); + xpc_disengage_timedout = 1; + xpc_assume_partition_disengaged(partid); + disengaged = 1; + } + part->disengage_timeout = 0; + + /* cancel the timer function, provided it's not us */ + if (!in_interrupt()) + del_singleshot_timer_sync(&part->disengage_timer); + + DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING && + part->act_state != XPC_P_AS_INACTIVE); + if (part->act_state != XPC_P_AS_INACTIVE) + xpc_wakeup_channel_mgr(part); + + xpc_cancel_partition_deactivation_request(part); + } + return disengaged; +} + +/* + * Mark specified partition as active. + */ +enum xp_retval +xpc_mark_partition_active(struct xpc_partition *part) +{ + unsigned long irq_flags; + enum xp_retval ret; + + dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part)); + + spin_lock_irqsave(&part->act_lock, irq_flags); + if (part->act_state == XPC_P_AS_ACTIVATING) { + part->act_state = XPC_P_AS_ACTIVE; + ret = xpSuccess; + } else { + DBUG_ON(part->reason == xpSuccess); + ret = part->reason; + } + spin_unlock_irqrestore(&part->act_lock, irq_flags); + + return ret; +} + +/* + * Start the process of deactivating the specified partition. 
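+ *
+ * In outline, the act_state handling below is:
+ *
+ *	XPC_P_AS_INACTIVE	record the reason; re-request activation
+ *				if the reason is xpReactivating
+ *	XPC_P_AS_DEACTIVATING	possibly upgrade the recorded reason;
+ *				otherwise nothing more to do
+ *	any other state		transition to XPC_P_AS_DEACTIVATING and
+ *				start the disengage timer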
+ */
+void
+xpc_deactivate_partition(const int line, struct xpc_partition *part,
+ enum xp_retval reason)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&part->act_lock, irq_flags);
+
+ if (part->act_state == XPC_P_AS_INACTIVE) {
+ XPC_SET_REASON(part, reason, line);
+ spin_unlock_irqrestore(&part->act_lock, irq_flags);
+ if (reason == xpReactivating) {
+ /* we interrupt ourselves to reactivate partition */
+ xpc_request_partition_reactivation(part);
+ }
+ return;
+ }
+ if (part->act_state == XPC_P_AS_DEACTIVATING) {
+ if ((part->reason == xpUnloading && reason != xpUnloading) ||
+ reason == xpReactivating) {
+ XPC_SET_REASON(part, reason, line);
+ }
+ spin_unlock_irqrestore(&part->act_lock, irq_flags);
+ return;
+ }
+
+ part->act_state = XPC_P_AS_DEACTIVATING;
+ XPC_SET_REASON(part, reason, line);
+
+ spin_unlock_irqrestore(&part->act_lock, irq_flags);
+
+ /* ask remote partition to deactivate with regard to us */
+ xpc_request_partition_deactivation(part);
+
+ /* set a timelimit on the disengage phase of the deactivation request */
+ part->disengage_timeout = jiffies + (xpc_disengage_timelimit * HZ);
+ part->disengage_timer.expires = part->disengage_timeout;
+ add_timer(&part->disengage_timer);
+
+ dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n",
+ XPC_PARTID(part), reason);
+
+ xpc_partition_going_down(part, reason);
+}
+
+/*
+ * Mark specified partition as inactive.
+ */
+void
+xpc_mark_partition_inactive(struct xpc_partition *part)
+{
+ unsigned long irq_flags;
+
+ dev_dbg(xpc_part, "setting partition %d to INACTIVE\n",
+ XPC_PARTID(part));
+
+ spin_lock_irqsave(&part->act_lock, irq_flags);
+ part->act_state = XPC_P_AS_INACTIVE;
+ spin_unlock_irqrestore(&part->act_lock, irq_flags);
+ part->remote_rp_pa = 0;
+}
+
+/*
+ * SAL has provided a partition and machine mask. The partition mask
+ * contains a bit for each even nasid in our partition. The machine
+ * mask contains a bit for each even nasid in the entire machine.
+ *
+ * Using those two bit arrays, we can determine which nasids are
+ * known in the machine. Each such nasid should also have a reserved
+ * page initialized if it is available for partitioning.
+ */
+void
+xpc_discovery(void)
+{
+ void *remote_rp_base;
+ struct xpc_rsvd_page *remote_rp;
+ unsigned long remote_rp_pa;
+ int region;
+ int region_size;
+ int max_regions;
+ int nasid;
+ struct xpc_rsvd_page *rp;
+ unsigned long *discovered_nasids;
+ enum xp_retval ret;
+
+ remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
+ xpc_nasid_mask_nbytes,
+ GFP_KERNEL, &remote_rp_base);
+ if (remote_rp == NULL)
+ return;
+
+ discovered_nasids = kzalloc(sizeof(long) * xpc_nasid_mask_nlongs,
+ GFP_KERNEL);
+ if (discovered_nasids == NULL) {
+ kfree(remote_rp_base);
+ return;
+ }
+
+ rp = (struct xpc_rsvd_page *)xpc_rsvd_page;
+
+ /*
+ * The term 'region' in this context refers to the minimum number of
+ * nodes that can comprise an access protection grouping. The access
+ * protection applies to memory, IOI and IPI.
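+ *
+ * The switch below falls through intentionally: each case doubles
+ * max_regions again, preserving the product max_regions * region_size
+ * (i.e., 64 * xp_region_size) while normalizing region_size to 16:
+ *
+ *	xp_region_size	->	max_regions
+ *	      16		     64
+ *	      32		    128
+ *	      64		    256
+ *	     128		    512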
+ */ + max_regions = 64; + region_size = xp_region_size; + + switch (region_size) { + case 128: + max_regions *= 2; + case 64: + max_regions *= 2; + case 32: + max_regions *= 2; + region_size = 16; + DBUG_ON(!is_shub2()); + } + + for (region = 0; region < max_regions; region++) { + + if (xpc_exiting) + break; + + dev_dbg(xpc_part, "searching region %d\n", region); + + for (nasid = (region * region_size * 2); + nasid < ((region + 1) * region_size * 2); nasid += 2) { + + if (xpc_exiting) + break; + + dev_dbg(xpc_part, "checking nasid %d\n", nasid); + + if (test_bit(nasid / 2, xpc_part_nasids)) { + dev_dbg(xpc_part, "PROM indicates Nasid %d is " + "part of the local partition; skipping " + "region\n", nasid); + break; + } + + if (!(test_bit(nasid / 2, xpc_mach_nasids))) { + dev_dbg(xpc_part, "PROM indicates Nasid %d was " + "not on Numa-Link network at reset\n", + nasid); + continue; + } + + if (test_bit(nasid / 2, discovered_nasids)) { + dev_dbg(xpc_part, "Nasid %d is part of a " + "partition which was previously " + "discovered\n", nasid); + continue; + } + + /* pull over the rsvd page header & part_nasids mask */ + + ret = xpc_get_remote_rp(nasid, discovered_nasids, + remote_rp, &remote_rp_pa); + if (ret != xpSuccess) { + dev_dbg(xpc_part, "unable to get reserved page " + "from nasid %d, reason=%d\n", nasid, + ret); + + if (ret == xpLocalPartid) + break; + + continue; + } + + xpc_request_partition_activation(remote_rp, + remote_rp_pa, nasid); + } + } + + kfree(discovered_nasids); + kfree(remote_rp_base); +} + +/* + * Given a partid, get the nasids owned by that partition from the + * remote partition's reserved page. + */ +enum xp_retval +xpc_initiate_partid_to_nasids(short partid, void *nasid_mask) +{ + struct xpc_partition *part; + unsigned long part_nasid_pa; + + part = &xpc_partitions[partid]; + if (part->remote_rp_pa == 0) + return xpPartitionDown; + + memset(nasid_mask, 0, xpc_nasid_mask_nbytes); + + part_nasid_pa = (unsigned long)XPC_RP_PART_NASIDS(part->remote_rp_pa); + + return xp_remote_memcpy(xp_pa(nasid_mask), part_nasid_pa, + xpc_nasid_mask_nbytes); +} diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c new file mode 100644 index 0000000..59ff816 --- /dev/null +++ b/drivers/misc/sgi-xp/xpc_sn2.c @@ -0,0 +1,2407 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. + */ + +/* + * Cross Partition Communication (XPC) sn2-based functions. + * + * Architecture specific implementation of common functions. + * + */ + +#include <linux/delay.h> +#include <asm/uncached.h> +#include <asm/sn/mspec.h> +#include <asm/sn/sn_sal.h> +#include "xpc.h" + +/* + * Define the number of u64s required to represent all the C-brick nasids + * as a bitmap. The cross-partition kernel modules deal only with + * C-brick nasids, thus the need for bitmaps which don't account for + * odd-numbered (non C-brick) nasids. + */ +#define XPC_MAX_PHYSNODES_SN2 (MAX_NUMALINK_NODES / 2) +#define XP_NASID_MASK_BYTES_SN2 ((XPC_MAX_PHYSNODES_SN2 + 7) / 8) +#define XP_NASID_MASK_WORDS_SN2 ((XPC_MAX_PHYSNODES_SN2 + 63) / 64) + +/* + * Memory for XPC's amo variables is allocated by the MSPEC driver. These + * pages are located in the lowest granule. The lowest granule uses 4k pages + * for cached references and an alternate TLB handler to never provide a + * cacheable mapping for the entire region. 
This will prevent speculative + * reading of cached copies of our lines from being issued which will cause + * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64 + * amo variables (based on XP_MAX_NPARTITIONS_SN2) to identify the senders of + * NOTIFY IRQs, 128 amo variables (based on XP_NASID_MASK_WORDS_SN2) to identify + * the senders of ACTIVATE IRQs, 1 amo variable to identify which remote + * partitions (i.e., XPCs) consider themselves currently engaged with the + * local XPC and 1 amo variable to request partition deactivation. + */ +#define XPC_NOTIFY_IRQ_AMOS_SN2 0 +#define XPC_ACTIVATE_IRQ_AMOS_SN2 (XPC_NOTIFY_IRQ_AMOS_SN2 + \ + XP_MAX_NPARTITIONS_SN2) +#define XPC_ENGAGED_PARTITIONS_AMO_SN2 (XPC_ACTIVATE_IRQ_AMOS_SN2 + \ + XP_NASID_MASK_WORDS_SN2) +#define XPC_DEACTIVATE_REQUEST_AMO_SN2 (XPC_ENGAGED_PARTITIONS_AMO_SN2 + 1) + +/* + * Buffer used to store a local copy of portions of a remote partition's + * reserved page (either its header and part_nasids mask, or its vars). + */ +static void *xpc_remote_copy_buffer_base_sn2; +static char *xpc_remote_copy_buffer_sn2; + +static struct xpc_vars_sn2 *xpc_vars_sn2; +static struct xpc_vars_part_sn2 *xpc_vars_part_sn2; + +static int +xpc_setup_partitions_sn_sn2(void) +{ + /* nothing needs to be done */ + return 0; +} + +/* SH_IPI_ACCESS shub register value on startup */ +static u64 xpc_sh1_IPI_access_sn2; +static u64 xpc_sh2_IPI_access0_sn2; +static u64 xpc_sh2_IPI_access1_sn2; +static u64 xpc_sh2_IPI_access2_sn2; +static u64 xpc_sh2_IPI_access3_sn2; + +/* + * Change protections to allow IPI operations. + */ +static void +xpc_allow_IPI_ops_sn2(void) +{ + int node; + int nasid; + + /* !!! The following should get moved into SAL. */ + if (is_shub2()) { + xpc_sh2_IPI_access0_sn2 = + (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0)); + xpc_sh2_IPI_access1_sn2 = + (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1)); + xpc_sh2_IPI_access2_sn2 = + (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2)); + xpc_sh2_IPI_access3_sn2 = + (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3)); + + for_each_online_node(node) { + nasid = cnodeid_to_nasid(node); + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), + -1UL); + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), + -1UL); + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), + -1UL); + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), + -1UL); + } + } else { + xpc_sh1_IPI_access_sn2 = + (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS)); + + for_each_online_node(node) { + nasid = cnodeid_to_nasid(node); + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), + -1UL); + } + } +} + +/* + * Restrict protections to disallow IPI operations. + */ +static void +xpc_disallow_IPI_ops_sn2(void) +{ + int node; + int nasid; + + /* !!! The following should get moved into SAL. */ + if (is_shub2()) { + for_each_online_node(node) { + nasid = cnodeid_to_nasid(node); + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), + xpc_sh2_IPI_access0_sn2); + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), + xpc_sh2_IPI_access1_sn2); + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), + xpc_sh2_IPI_access2_sn2); + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), + xpc_sh2_IPI_access3_sn2); + } + } else { + for_each_online_node(node) { + nasid = cnodeid_to_nasid(node); + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), + xpc_sh1_IPI_access_sn2); + } + } +} + +/* + * The following set of functions are used for the sending and receiving of + * IRQs (also known as IPIs). 
There are two flavors of IRQs, one that is + * associated with partition activity (SGI_XPC_ACTIVATE) and the other that + * is associated with channel activity (SGI_XPC_NOTIFY). + */ + +static u64 +xpc_receive_IRQ_amo_sn2(struct amo *amo) +{ + return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR); +} + +static enum xp_retval +xpc_send_IRQ_sn2(struct amo *amo, u64 flag, int nasid, int phys_cpuid, + int vector) +{ + int ret = 0; + unsigned long irq_flags; + + local_irq_save(irq_flags); + + FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, flag); + sn_send_IPI_phys(nasid, phys_cpuid, vector, 0); + + /* + * We must always use the nofault function regardless of whether we + * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we + * didn't, we'd never know that the other partition is down and would + * keep sending IRQs and amos to it until the heartbeat times out. + */ + ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable), + xp_nofault_PIOR_target)); + + local_irq_restore(irq_flags); + + return (ret == 0) ? xpSuccess : xpPioReadError; +} + +static struct amo * +xpc_init_IRQ_amo_sn2(int index) +{ + struct amo *amo = xpc_vars_sn2->amos_page + index; + + (void)xpc_receive_IRQ_amo_sn2(amo); /* clear amo variable */ + return amo; +} + +/* + * Functions associated with SGI_XPC_ACTIVATE IRQ. + */ + +/* + * Notify the heartbeat check thread that an activate IRQ has been received. + */ +static irqreturn_t +xpc_handle_activate_IRQ_sn2(int irq, void *dev_id) +{ + unsigned long irq_flags; + + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + xpc_activate_IRQ_rcvd++; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + + wake_up_interruptible(&xpc_activate_IRQ_wq); + return IRQ_HANDLED; +} + +/* + * Flag the appropriate amo variable and send an IRQ to the specified node. + */ +static void +xpc_send_activate_IRQ_sn2(unsigned long amos_page_pa, int from_nasid, + int to_nasid, int to_phys_cpuid) +{ + struct amo *amos = (struct amo *)__va(amos_page_pa + + (XPC_ACTIVATE_IRQ_AMOS_SN2 * + sizeof(struct amo))); + + (void)xpc_send_IRQ_sn2(&amos[BIT_WORD(from_nasid / 2)], + BIT_MASK(from_nasid / 2), to_nasid, + to_phys_cpuid, SGI_XPC_ACTIVATE); +} + +static void +xpc_send_local_activate_IRQ_sn2(int from_nasid) +{ + unsigned long irq_flags; + struct amo *amos = (struct amo *)__va(xpc_vars_sn2->amos_page_pa + + (XPC_ACTIVATE_IRQ_AMOS_SN2 * + sizeof(struct amo))); + + /* fake the sending and receipt of an activate IRQ from remote nasid */ + FETCHOP_STORE_OP(TO_AMO((u64)&amos[BIT_WORD(from_nasid / 2)].variable), + FETCHOP_OR, BIT_MASK(from_nasid / 2)); + + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + xpc_activate_IRQ_rcvd++; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + + wake_up_interruptible(&xpc_activate_IRQ_wq); +} + +/* + * Functions associated with SGI_XPC_NOTIFY IRQ. + */ + +/* + * Check to see if any chctl flags were sent from the specified partition. + */ +static void +xpc_check_for_sent_chctl_flags_sn2(struct xpc_partition *part) +{ + union xpc_channel_ctl_flags chctl; + unsigned long irq_flags; + + chctl.all_flags = xpc_receive_IRQ_amo_sn2(part->sn.sn2. 
+ local_chctl_amo_va); + if (chctl.all_flags == 0) + return; + + spin_lock_irqsave(&part->chctl_lock, irq_flags); + part->chctl.all_flags |= chctl.all_flags; + spin_unlock_irqrestore(&part->chctl_lock, irq_flags); + + dev_dbg(xpc_chan, "received notify IRQ from partid=%d, chctl.all_flags=" + "0x%lx\n", XPC_PARTID(part), chctl.all_flags); + + xpc_wakeup_channel_mgr(part); +} + +/* + * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified + * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more + * than one partition, we use an amo structure per partition to indicate + * whether a partition has sent an IRQ or not. If it has, then wake up the + * associated kthread to handle it. + * + * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IRQs sent by XPC + * running on other partitions. + * + * Noteworthy Arguments: + * + * irq - Interrupt ReQuest number. NOT USED. + * + * dev_id - partid of IRQ's potential sender. + */ +static irqreturn_t +xpc_handle_notify_IRQ_sn2(int irq, void *dev_id) +{ + short partid = (short)(u64)dev_id; + struct xpc_partition *part = &xpc_partitions[partid]; + + DBUG_ON(partid < 0 || partid >= XP_MAX_NPARTITIONS_SN2); + + if (xpc_part_ref(part)) { + xpc_check_for_sent_chctl_flags_sn2(part); + + xpc_part_deref(part); + } + return IRQ_HANDLED; +} + +/* + * Check to see if xpc_handle_notify_IRQ_sn2() dropped any IRQs on the floor + * because the write to their associated amo variable completed after the IRQ + * was received. + */ +static void +xpc_check_for_dropped_notify_IRQ_sn2(struct xpc_partition *part) +{ + struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; + + if (xpc_part_ref(part)) { + xpc_check_for_sent_chctl_flags_sn2(part); + + part_sn2->dropped_notify_IRQ_timer.expires = jiffies + + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL; + add_timer(&part_sn2->dropped_notify_IRQ_timer); + xpc_part_deref(part); + } +} + +/* + * Send a notify IRQ to the remote partition that is associated with the + * specified channel. + */ +static void +xpc_send_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag, + char *chctl_flag_string, unsigned long *irq_flags) +{ + struct xpc_partition *part = &xpc_partitions[ch->partid]; + struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; + union xpc_channel_ctl_flags chctl = { 0 }; + enum xp_retval ret; + + if (likely(part->act_state != XPC_P_AS_DEACTIVATING)) { + chctl.flags[ch->number] = chctl_flag; + ret = xpc_send_IRQ_sn2(part_sn2->remote_chctl_amo_va, + chctl.all_flags, + part_sn2->notify_IRQ_nasid, + part_sn2->notify_IRQ_phys_cpuid, + SGI_XPC_NOTIFY); + dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n", + chctl_flag_string, ch->partid, ch->number, ret); + if (unlikely(ret != xpSuccess)) { + if (irq_flags != NULL) + spin_unlock_irqrestore(&ch->lock, *irq_flags); + XPC_DEACTIVATE_PARTITION(part, ret); + if (irq_flags != NULL) + spin_lock_irqsave(&ch->lock, *irq_flags); + } + } +} + +#define XPC_SEND_NOTIFY_IRQ_SN2(_ch, _ipi_f, _irq_f) \ + xpc_send_notify_IRQ_sn2(_ch, _ipi_f, #_ipi_f, _irq_f) + +/* + * Make it look like the remote partition, which is associated with the + * specified channel, sent us a notify IRQ. This faked IRQ will be handled + * by xpc_check_for_dropped_notify_IRQ_sn2(). 
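+ *
+ * The amo payload manipulated here and in xpc_send_notify_IRQ_sn2() is
+ * a union xpc_channel_ctl_flags (see xpc.h): one flag byte per channel
+ * overlaid on a single u64, so that the flags for all channels can be
+ * set or fetched-and-cleared atomically, roughly:
+ *
+ *	union xpc_channel_ctl_flags {
+ *		u64 all_flags;
+ *		u8 flags[XPC_MAX_NCHANNELS];
+ *	};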
+ */ +static void +xpc_send_local_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag, + char *chctl_flag_string) +{ + struct xpc_partition *part = &xpc_partitions[ch->partid]; + union xpc_channel_ctl_flags chctl = { 0 }; + + chctl.flags[ch->number] = chctl_flag; + FETCHOP_STORE_OP(TO_AMO((u64)&part->sn.sn2.local_chctl_amo_va-> + variable), FETCHOP_OR, chctl.all_flags); + dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n", + chctl_flag_string, ch->partid, ch->number); +} + +#define XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(_ch, _ipi_f) \ + xpc_send_local_notify_IRQ_sn2(_ch, _ipi_f, #_ipi_f) + +static void +xpc_send_chctl_closerequest_sn2(struct xpc_channel *ch, + unsigned long *irq_flags) +{ + struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args; + + args->reason = ch->reason; + XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREQUEST, irq_flags); +} + +static void +xpc_send_chctl_closereply_sn2(struct xpc_channel *ch, unsigned long *irq_flags) +{ + XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREPLY, irq_flags); +} + +static void +xpc_send_chctl_openrequest_sn2(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args; + + args->entry_size = ch->entry_size; + args->local_nentries = ch->local_nentries; + XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREQUEST, irq_flags); +} + +static void +xpc_send_chctl_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args; + + args->remote_nentries = ch->remote_nentries; + args->local_nentries = ch->local_nentries; + args->local_msgqueue_pa = xp_pa(ch->sn.sn2.local_msgqueue); + XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREPLY, irq_flags); +} + +static void +xpc_send_chctl_msgrequest_sn2(struct xpc_channel *ch) +{ + XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST, NULL); +} + +static void +xpc_send_chctl_local_msgrequest_sn2(struct xpc_channel *ch) +{ + XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST); +} + +static void +xpc_save_remote_msgqueue_pa_sn2(struct xpc_channel *ch, + unsigned long msgqueue_pa) +{ + ch->sn.sn2.remote_msgqueue_pa = msgqueue_pa; +} + +/* + * This next set of functions are used to keep track of when a partition is + * potentially engaged in accessing memory belonging to another partition. + */ + +static void +xpc_indicate_partition_engaged_sn2(struct xpc_partition *part) +{ + unsigned long irq_flags; + struct amo *amo = (struct amo *)__va(part->sn.sn2.remote_amos_page_pa + + (XPC_ENGAGED_PARTITIONS_AMO_SN2 * + sizeof(struct amo))); + + local_irq_save(irq_flags); + + /* set bit corresponding to our partid in remote partition's amo */ + FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, + BIT(sn_partition_id)); + + /* + * We must always use the nofault function regardless of whether we + * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we + * didn't, we'd never know that the other partition is down and would + * keep sending IRQs and amos to it until the heartbeat times out. 
+ */ + (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo-> + variable), + xp_nofault_PIOR_target)); + + local_irq_restore(irq_flags); +} + +static void +xpc_indicate_partition_disengaged_sn2(struct xpc_partition *part) +{ + struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; + unsigned long irq_flags; + struct amo *amo = (struct amo *)__va(part_sn2->remote_amos_page_pa + + (XPC_ENGAGED_PARTITIONS_AMO_SN2 * + sizeof(struct amo))); + + local_irq_save(irq_flags); + + /* clear bit corresponding to our partid in remote partition's amo */ + FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, + ~BIT(sn_partition_id)); + + /* + * We must always use the nofault function regardless of whether we + * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we + * didn't, we'd never know that the other partition is down and would + * keep sending IRQs and amos to it until the heartbeat times out. + */ + (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo-> + variable), + xp_nofault_PIOR_target)); + + local_irq_restore(irq_flags); + + /* + * Send activate IRQ to get other side to see that we've cleared our + * bit in their engaged partitions amo. + */ + xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa, + cnodeid_to_nasid(0), + part_sn2->activate_IRQ_nasid, + part_sn2->activate_IRQ_phys_cpuid); +} + +static void +xpc_assume_partition_disengaged_sn2(short partid) +{ + struct amo *amo = xpc_vars_sn2->amos_page + + XPC_ENGAGED_PARTITIONS_AMO_SN2; + + /* clear bit(s) based on partid mask in our partition's amo */ + FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, + ~BIT(partid)); +} + +static int +xpc_partition_engaged_sn2(short partid) +{ + struct amo *amo = xpc_vars_sn2->amos_page + + XPC_ENGAGED_PARTITIONS_AMO_SN2; + + /* our partition's amo variable ANDed with partid mask */ + return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) & + BIT(partid)) != 0; +} + +static int +xpc_any_partition_engaged_sn2(void) +{ + struct amo *amo = xpc_vars_sn2->amos_page + + XPC_ENGAGED_PARTITIONS_AMO_SN2; + + /* our partition's amo variable */ + return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) != 0; +} + +/* original protection values for each node */ +static u64 xpc_prot_vec_sn2[MAX_NUMNODES]; + +/* + * Change protections to allow amo operations on non-Shub 1.1 systems. + */ +static enum xp_retval +xpc_allow_amo_ops_sn2(struct amo *amos_page) +{ + u64 nasid_array = 0; + int ret; + + /* + * On SHUB 1.1, we cannot call sn_change_memprotect() since the BIST + * collides with memory operations. On those systems we call + * xpc_allow_amo_ops_shub_wars_1_1_sn2() instead. + */ + if (!enable_shub_wars_1_1()) { + ret = sn_change_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE, + SN_MEMPROT_ACCESS_CLASS_1, + &nasid_array); + if (ret != 0) + return xpSalError; + } + return xpSuccess; +} + +/* + * Change protections to allow amo operations on Shub 1.1 systems. 
+ */ +static void +xpc_allow_amo_ops_shub_wars_1_1_sn2(void) +{ + int node; + int nasid; + + if (!enable_shub_wars_1_1()) + return; + + for_each_online_node(node) { + nasid = cnodeid_to_nasid(node); + /* save current protection values */ + xpc_prot_vec_sn2[node] = + (u64)HUB_L((u64 *)GLOBAL_MMR_ADDR(nasid, + SH1_MD_DQLP_MMR_DIR_PRIVEC0)); + /* open up everything */ + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, + SH1_MD_DQLP_MMR_DIR_PRIVEC0), + -1UL); + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, + SH1_MD_DQRP_MMR_DIR_PRIVEC0), + -1UL); + } +} + +static enum xp_retval +xpc_get_partition_rsvd_page_pa_sn2(void *buf, u64 *cookie, unsigned long *rp_pa, + size_t *len) +{ + s64 status; + enum xp_retval ret; + + status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len); + if (status == SALRET_OK) + ret = xpSuccess; + else if (status == SALRET_MORE_PASSES) + ret = xpNeedMoreInfo; + else + ret = xpSalError; + + return ret; +} + + +static int +xpc_setup_rsvd_page_sn_sn2(struct xpc_rsvd_page *rp) +{ + struct amo *amos_page; + int i; + int ret; + + xpc_vars_sn2 = XPC_RP_VARS(rp); + + rp->sn.vars_pa = xp_pa(xpc_vars_sn2); + + /* vars_part array follows immediately after vars */ + xpc_vars_part_sn2 = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) + + XPC_RP_VARS_SIZE); + + /* + * Before clearing xpc_vars_sn2, see if a page of amos had been + * previously allocated. If not we'll need to allocate one and set + * permissions so that cross-partition amos are allowed. + * + * The allocated amo page needs MCA reporting to remain disabled after + * XPC has unloaded. To make this work, we keep a copy of the pointer + * to this page (i.e., amos_page) in the struct xpc_vars_sn2 structure, + * which is pointed to by the reserved page, and re-use that saved copy + * on subsequent loads of XPC. This amo page is never freed, and its + * memory protections are never restricted. + */ + amos_page = xpc_vars_sn2->amos_page; + if (amos_page == NULL) { + amos_page = (struct amo *)TO_AMO(uncached_alloc_page(0, 1)); + if (amos_page == NULL) { + dev_err(xpc_part, "can't allocate page of amos\n"); + return -ENOMEM; + } + + /* + * Open up amo-R/W to cpu. This is done on Shub 1.1 systems + * when xpc_allow_amo_ops_shub_wars_1_1_sn2() is called. 
+ */ + ret = xpc_allow_amo_ops_sn2(amos_page); + if (ret != xpSuccess) { + dev_err(xpc_part, "can't allow amo operations\n"); + uncached_free_page(__IA64_UNCACHED_OFFSET | + TO_PHYS((u64)amos_page), 1); + return -EPERM; + } + } + + /* clear xpc_vars_sn2 */ + memset(xpc_vars_sn2, 0, sizeof(struct xpc_vars_sn2)); + + xpc_vars_sn2->version = XPC_V_VERSION; + xpc_vars_sn2->activate_IRQ_nasid = cpuid_to_nasid(0); + xpc_vars_sn2->activate_IRQ_phys_cpuid = cpu_physical_id(0); + xpc_vars_sn2->vars_part_pa = xp_pa(xpc_vars_part_sn2); + xpc_vars_sn2->amos_page_pa = ia64_tpa((u64)amos_page); + xpc_vars_sn2->amos_page = amos_page; /* save for next load of XPC */ + + /* clear xpc_vars_part_sn2 */ + memset((u64 *)xpc_vars_part_sn2, 0, sizeof(struct xpc_vars_part_sn2) * + XP_MAX_NPARTITIONS_SN2); + + /* initialize the activate IRQ related amo variables */ + for (i = 0; i < xpc_nasid_mask_nlongs; i++) + (void)xpc_init_IRQ_amo_sn2(XPC_ACTIVATE_IRQ_AMOS_SN2 + i); + + /* initialize the engaged remote partitions related amo variables */ + (void)xpc_init_IRQ_amo_sn2(XPC_ENGAGED_PARTITIONS_AMO_SN2); + (void)xpc_init_IRQ_amo_sn2(XPC_DEACTIVATE_REQUEST_AMO_SN2); + + return 0; +} + +static void +xpc_increment_heartbeat_sn2(void) +{ + xpc_vars_sn2->heartbeat++; +} + +static void +xpc_offline_heartbeat_sn2(void) +{ + xpc_increment_heartbeat_sn2(); + xpc_vars_sn2->heartbeat_offline = 1; +} + +static void +xpc_online_heartbeat_sn2(void) +{ + xpc_increment_heartbeat_sn2(); + xpc_vars_sn2->heartbeat_offline = 0; +} + +static void +xpc_heartbeat_init_sn2(void) +{ + DBUG_ON(xpc_vars_sn2 == NULL); + + bitmap_zero(xpc_vars_sn2->heartbeating_to_mask, XP_MAX_NPARTITIONS_SN2); + xpc_heartbeating_to_mask = &xpc_vars_sn2->heartbeating_to_mask[0]; + xpc_online_heartbeat_sn2(); +} + +static void +xpc_heartbeat_exit_sn2(void) +{ + xpc_offline_heartbeat_sn2(); +} + +static enum xp_retval +xpc_get_remote_heartbeat_sn2(struct xpc_partition *part) +{ + struct xpc_vars_sn2 *remote_vars; + enum xp_retval ret; + + remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer_sn2; + + /* pull the remote vars structure that contains the heartbeat */ + ret = xp_remote_memcpy(xp_pa(remote_vars), + part->sn.sn2.remote_vars_pa, + XPC_RP_VARS_SIZE); + if (ret != xpSuccess) + return ret; + + dev_dbg(xpc_part, "partid=%d, heartbeat=%ld, last_heartbeat=%ld, " + "heartbeat_offline=%ld, HB_mask[0]=0x%lx\n", XPC_PARTID(part), + remote_vars->heartbeat, part->last_heartbeat, + remote_vars->heartbeat_offline, + remote_vars->heartbeating_to_mask[0]); + + if ((remote_vars->heartbeat == part->last_heartbeat && + remote_vars->heartbeat_offline == 0) || + !xpc_hb_allowed(sn_partition_id, + &remote_vars->heartbeating_to_mask)) { + ret = xpNoHeartbeat; + } else { + part->last_heartbeat = remote_vars->heartbeat; + } + + return ret; +} + +/* + * Get a copy of the remote partition's XPC variables from the reserved page. + * + * remote_vars points to a buffer that is cacheline aligned for BTE copies and + * assumed to be of size XPC_RP_VARS_SIZE. 
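+ *
+ * The major-version comparison below assumes the usual XPC version
+ * encoding (major in the upper nibble, minor in the lower nibble of a
+ * u8), roughly:
+ *
+ *	#define XPC_VERSION(maj, min)	(((maj) << 4) | ((min) & 0xf))
+ *	#define XPC_VERSION_MAJOR(v)	((v) >> 4)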
+ */ +static enum xp_retval +xpc_get_remote_vars_sn2(unsigned long remote_vars_pa, + struct xpc_vars_sn2 *remote_vars) +{ + enum xp_retval ret; + + if (remote_vars_pa == 0) + return xpVarsNotSet; + + /* pull over the cross partition variables */ + ret = xp_remote_memcpy(xp_pa(remote_vars), remote_vars_pa, + XPC_RP_VARS_SIZE); + if (ret != xpSuccess) + return ret; + + if (XPC_VERSION_MAJOR(remote_vars->version) != + XPC_VERSION_MAJOR(XPC_V_VERSION)) { + return xpBadVersion; + } + + return xpSuccess; +} + +static void +xpc_request_partition_activation_sn2(struct xpc_rsvd_page *remote_rp, + unsigned long remote_rp_pa, int nasid) +{ + xpc_send_local_activate_IRQ_sn2(nasid); +} + +static void +xpc_request_partition_reactivation_sn2(struct xpc_partition *part) +{ + xpc_send_local_activate_IRQ_sn2(part->sn.sn2.activate_IRQ_nasid); +} + +static void +xpc_request_partition_deactivation_sn2(struct xpc_partition *part) +{ + struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; + unsigned long irq_flags; + struct amo *amo = (struct amo *)__va(part_sn2->remote_amos_page_pa + + (XPC_DEACTIVATE_REQUEST_AMO_SN2 * + sizeof(struct amo))); + + local_irq_save(irq_flags); + + /* set bit corresponding to our partid in remote partition's amo */ + FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, + BIT(sn_partition_id)); + + /* + * We must always use the nofault function regardless of whether we + * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we + * didn't, we'd never know that the other partition is down and would + * keep sending IRQs and amos to it until the heartbeat times out. + */ + (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo-> + variable), + xp_nofault_PIOR_target)); + + local_irq_restore(irq_flags); + + /* + * Send activate IRQ to get other side to see that we've set our + * bit in their deactivate request amo. + */ + xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa, + cnodeid_to_nasid(0), + part_sn2->activate_IRQ_nasid, + part_sn2->activate_IRQ_phys_cpuid); +} + +static void +xpc_cancel_partition_deactivation_request_sn2(struct xpc_partition *part) +{ + unsigned long irq_flags; + struct amo *amo = (struct amo *)__va(part->sn.sn2.remote_amos_page_pa + + (XPC_DEACTIVATE_REQUEST_AMO_SN2 * + sizeof(struct amo))); + + local_irq_save(irq_flags); + + /* clear bit corresponding to our partid in remote partition's amo */ + FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, + ~BIT(sn_partition_id)); + + /* + * We must always use the nofault function regardless of whether we + * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we + * didn't, we'd never know that the other partition is down and would + * keep sending IRQs and amos to it until the heartbeat times out. + */ + (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo-> + variable), + xp_nofault_PIOR_target)); + + local_irq_restore(irq_flags); +} + +static int +xpc_partition_deactivation_requested_sn2(short partid) +{ + struct amo *amo = xpc_vars_sn2->amos_page + + XPC_DEACTIVATE_REQUEST_AMO_SN2; + + /* our partition's amo variable ANDed with partid mask */ + return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) & + BIT(partid)) != 0; +} + +/* + * Update the remote partition's info. 
+ */
+static void
+xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
+ unsigned long *remote_rp_ts_jiffies,
+ unsigned long remote_rp_pa,
+ unsigned long remote_vars_pa,
+ struct xpc_vars_sn2 *remote_vars)
+{
+ struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
+
+ part->remote_rp_version = remote_rp_version;
+ dev_dbg(xpc_part, " remote_rp_version = 0x%016x\n",
+ part->remote_rp_version);
+
+ part->remote_rp_ts_jiffies = *remote_rp_ts_jiffies;
+ dev_dbg(xpc_part, " remote_rp_ts_jiffies = 0x%016lx\n",
+ part->remote_rp_ts_jiffies);
+
+ part->remote_rp_pa = remote_rp_pa;
+ dev_dbg(xpc_part, " remote_rp_pa = 0x%016lx\n", part->remote_rp_pa);
+
+ part_sn2->remote_vars_pa = remote_vars_pa;
+ dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n",
+ part_sn2->remote_vars_pa);
+
+ part->last_heartbeat = remote_vars->heartbeat - 1;
+ dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n",
+ part->last_heartbeat);
+
+ part_sn2->remote_vars_part_pa = remote_vars->vars_part_pa;
+ dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016lx\n",
+ part_sn2->remote_vars_part_pa);
+
+ part_sn2->activate_IRQ_nasid = remote_vars->activate_IRQ_nasid;
+ dev_dbg(xpc_part, " activate_IRQ_nasid = 0x%x\n",
+ part_sn2->activate_IRQ_nasid);
+
+ part_sn2->activate_IRQ_phys_cpuid =
+ remote_vars->activate_IRQ_phys_cpuid;
+ dev_dbg(xpc_part, " activate_IRQ_phys_cpuid = 0x%x\n",
+ part_sn2->activate_IRQ_phys_cpuid);
+
+ part_sn2->remote_amos_page_pa = remote_vars->amos_page_pa;
+ dev_dbg(xpc_part, " remote_amos_page_pa = 0x%lx\n",
+ part_sn2->remote_amos_page_pa);
+
+ part_sn2->remote_vars_version = remote_vars->version;
+ dev_dbg(xpc_part, " remote_vars_version = 0x%x\n",
+ part_sn2->remote_vars_version);
+}
+
+/*
+ * Prior code has determined the nasid which generated an activate IRQ.
+ * Inspect that nasid to determine if its partition needs to be activated
+ * or deactivated.
+ *
+ * A partition is considered "awaiting activation" if our partition
+ * flags indicate it is not active and it has a heartbeat. A
+ * partition is considered "awaiting deactivation" if our partition
+ * flags indicate it is active but it either has no heartbeat or is
+ * not sending its heartbeat to us.
+ *
+ * To determine the heartbeat, the remote nasid must have a properly
+ * initialized reserved page.
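+ *
+ * The reserved page's ts_jiffies stamp doubles as a reboot detector:
+ * if the stamp pulled below differs from the one we cached, the remote
+ * partition has been rebooted, and it is deactivated with
+ * xpReactivating so it can be torn down and activated afresh.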
+ */ +static void +xpc_identify_activate_IRQ_req_sn2(int nasid) +{ + struct xpc_rsvd_page *remote_rp; + struct xpc_vars_sn2 *remote_vars; + unsigned long remote_rp_pa; + unsigned long remote_vars_pa; + int remote_rp_version; + int reactivate = 0; + unsigned long remote_rp_ts_jiffies = 0; + short partid; + struct xpc_partition *part; + struct xpc_partition_sn2 *part_sn2; + enum xp_retval ret; + + /* pull over the reserved page structure */ + + remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer_sn2; + + ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa); + if (ret != xpSuccess) { + dev_warn(xpc_part, "unable to get reserved page from nasid %d, " + "which sent interrupt, reason=%d\n", nasid, ret); + return; + } + + remote_vars_pa = remote_rp->sn.vars_pa; + remote_rp_version = remote_rp->version; + remote_rp_ts_jiffies = remote_rp->ts_jiffies; + + partid = remote_rp->SAL_partid; + part = &xpc_partitions[partid]; + part_sn2 = &part->sn.sn2; + + /* pull over the cross partition variables */ + + remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer_sn2; + + ret = xpc_get_remote_vars_sn2(remote_vars_pa, remote_vars); + if (ret != xpSuccess) { + dev_warn(xpc_part, "unable to get XPC variables from nasid %d, " + "which sent interrupt, reason=%d\n", nasid, ret); + + XPC_DEACTIVATE_PARTITION(part, ret); + return; + } + + part->activate_IRQ_rcvd++; + + dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = " + "%ld:0x%lx\n", (int)nasid, (int)partid, part->activate_IRQ_rcvd, + remote_vars->heartbeat, remote_vars->heartbeating_to_mask[0]); + + if (xpc_partition_disengaged(part) && + part->act_state == XPC_P_AS_INACTIVE) { + + xpc_update_partition_info_sn2(part, remote_rp_version, + &remote_rp_ts_jiffies, + remote_rp_pa, remote_vars_pa, + remote_vars); + + if (xpc_partition_deactivation_requested_sn2(partid)) { + /* + * Other side is waiting on us to deactivate even though + * we already have. + */ + return; + } + + xpc_activate_partition(part); + return; + } + + DBUG_ON(part->remote_rp_version == 0); + DBUG_ON(part_sn2->remote_vars_version == 0); + + if (remote_rp_ts_jiffies != part->remote_rp_ts_jiffies) { + + /* the other side rebooted */ + + DBUG_ON(xpc_partition_engaged_sn2(partid)); + DBUG_ON(xpc_partition_deactivation_requested_sn2(partid)); + + xpc_update_partition_info_sn2(part, remote_rp_version, + &remote_rp_ts_jiffies, + remote_rp_pa, remote_vars_pa, + remote_vars); + reactivate = 1; + } + + if (part->disengage_timeout > 0 && !xpc_partition_disengaged(part)) { + /* still waiting on other side to disengage from us */ + return; + } + + if (reactivate) + XPC_DEACTIVATE_PARTITION(part, xpReactivating); + else if (xpc_partition_deactivation_requested_sn2(partid)) + XPC_DEACTIVATE_PARTITION(part, xpOtherGoingDown); +} + +/* + * Loop through the activation amo variables and process any bits + * which are set. Each bit indicates a nasid sending a partition + * activation or deactivation request. + * + * Return #of IRQs detected. 
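+ *
+ * The bit-to-nasid mapping assumed by this scan: bit b of amo word l
+ * stands for C-brick nasid (l * BITS_PER_LONG + b) * 2 (C-brick nasids
+ * are even). A minimal sketch of that mapping (illustrative helper,
+ * not used by the driver):
+ */
+
+static inline u64
+xpc_sketch_amo_bit_to_nasid(int l, int b)
+{
+ /* each set bit identifies the even nasid that raised the IRQ */
+ return ((u64)l * BITS_PER_LONG + b) * 2;
+}
+
+/*
+ * The scan itself: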
+ */ +int +xpc_identify_activate_IRQ_sender_sn2(void) +{ + int l; + int b; + unsigned long nasid_mask_long; + u64 nasid; /* remote nasid */ + int n_IRQs_detected = 0; + struct amo *act_amos; + + act_amos = xpc_vars_sn2->amos_page + XPC_ACTIVATE_IRQ_AMOS_SN2; + + /* scan through activate amo variables looking for non-zero entries */ + for (l = 0; l < xpc_nasid_mask_nlongs; l++) { + + if (xpc_exiting) + break; + + nasid_mask_long = xpc_receive_IRQ_amo_sn2(&act_amos[l]); + + b = find_first_bit(&nasid_mask_long, BITS_PER_LONG); + if (b >= BITS_PER_LONG) { + /* no IRQs from nasids in this amo variable */ + continue; + } + + dev_dbg(xpc_part, "amo[%d] gave back 0x%lx\n", l, + nasid_mask_long); + + /* + * If this nasid has been added to the machine since + * our partition was reset, this will retain the + * remote nasid in our reserved pages machine mask. + * This is used in the event of module reload. + */ + xpc_mach_nasids[l] |= nasid_mask_long; + + /* locate the nasid(s) which sent interrupts */ + + do { + n_IRQs_detected++; + nasid = (l * BITS_PER_LONG + b) * 2; + dev_dbg(xpc_part, "interrupt from nasid %ld\n", nasid); + xpc_identify_activate_IRQ_req_sn2(nasid); + + b = find_next_bit(&nasid_mask_long, BITS_PER_LONG, + b + 1); + } while (b < BITS_PER_LONG); + } + return n_IRQs_detected; +} + +static void +xpc_process_activate_IRQ_rcvd_sn2(void) +{ + unsigned long irq_flags; + int n_IRQs_expected; + int n_IRQs_detected; + + DBUG_ON(xpc_activate_IRQ_rcvd == 0); + + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + n_IRQs_expected = xpc_activate_IRQ_rcvd; + xpc_activate_IRQ_rcvd = 0; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + + n_IRQs_detected = xpc_identify_activate_IRQ_sender_sn2(); + if (n_IRQs_detected < n_IRQs_expected) { + /* retry once to help avoid missing amo */ + (void)xpc_identify_activate_IRQ_sender_sn2(); + } +} + +/* + * Setup the channel structures that are sn2 specific. 
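+ *
+ * The GET/PUT values allocated below (one pair per channel) hold each
+ * message queue's consumer (get) and producer (put) positions; their
+ * physical address is exported to the remote side via xpc_vars_part_sn2.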
+ */ +static enum xp_retval +xpc_setup_ch_structures_sn_sn2(struct xpc_partition *part) +{ + struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; + struct xpc_channel_sn2 *ch_sn2; + enum xp_retval retval; + int ret; + int cpuid; + int ch_number; + struct timer_list *timer; + short partid = XPC_PARTID(part); + + /* allocate all the required GET/PUT values */ + + part_sn2->local_GPs = + xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL, + &part_sn2->local_GPs_base); + if (part_sn2->local_GPs == NULL) { + dev_err(xpc_chan, "can't get memory for local get/put " + "values\n"); + return xpNoMemory; + } + + part_sn2->remote_GPs = + xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL, + &part_sn2->remote_GPs_base); + if (part_sn2->remote_GPs == NULL) { + dev_err(xpc_chan, "can't get memory for remote get/put " + "values\n"); + retval = xpNoMemory; + goto out_1; + } + + part_sn2->remote_GPs_pa = 0; + + /* allocate all the required open and close args */ + + part_sn2->local_openclose_args = + xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, + GFP_KERNEL, &part_sn2-> + local_openclose_args_base); + if (part_sn2->local_openclose_args == NULL) { + dev_err(xpc_chan, "can't get memory for local connect args\n"); + retval = xpNoMemory; + goto out_2; + } + + part_sn2->remote_openclose_args_pa = 0; + + part_sn2->local_chctl_amo_va = xpc_init_IRQ_amo_sn2(partid); + + part_sn2->notify_IRQ_nasid = 0; + part_sn2->notify_IRQ_phys_cpuid = 0; + part_sn2->remote_chctl_amo_va = NULL; + + sprintf(part_sn2->notify_IRQ_owner, "xpc%02d", partid); + ret = request_irq(SGI_XPC_NOTIFY, xpc_handle_notify_IRQ_sn2, + IRQF_SHARED, part_sn2->notify_IRQ_owner, + (void *)(u64)partid); + if (ret != 0) { + dev_err(xpc_chan, "can't register NOTIFY IRQ handler, " + "errno=%d\n", -ret); + retval = xpLackOfResources; + goto out_3; + } + + /* Setup a timer to check for dropped notify IRQs */ + timer = &part_sn2->dropped_notify_IRQ_timer; + init_timer(timer); + timer->function = + (void (*)(unsigned long))xpc_check_for_dropped_notify_IRQ_sn2; + timer->data = (unsigned long)part; + timer->expires = jiffies + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL; + add_timer(timer); + + for (ch_number = 0; ch_number < part->nchannels; ch_number++) { + ch_sn2 = &part->channels[ch_number].sn.sn2; + + ch_sn2->local_GP = &part_sn2->local_GPs[ch_number]; + ch_sn2->local_openclose_args = + &part_sn2->local_openclose_args[ch_number]; + + mutex_init(&ch_sn2->msg_to_pull_mutex); + } + + /* + * Setup the per partition specific variables required by the + * remote partition to establish channel connections with us. + * + * The setting of the magic # indicates that these per partition + * specific variables are ready to be used. 
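+ *
+ * (XPC_VP_MAGIC1_SN2 set here is later advanced to XPC_VP_MAGIC2_SN2
+ * by xpc_pull_remote_vars_part_sn2() once the remote side's variables
+ * have been pulled and validated.)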
+ */ + xpc_vars_part_sn2[partid].GPs_pa = xp_pa(part_sn2->local_GPs); + xpc_vars_part_sn2[partid].openclose_args_pa = + xp_pa(part_sn2->local_openclose_args); + xpc_vars_part_sn2[partid].chctl_amo_pa = + xp_pa(part_sn2->local_chctl_amo_va); + cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */ + xpc_vars_part_sn2[partid].notify_IRQ_nasid = cpuid_to_nasid(cpuid); + xpc_vars_part_sn2[partid].notify_IRQ_phys_cpuid = + cpu_physical_id(cpuid); + xpc_vars_part_sn2[partid].nchannels = part->nchannels; + xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC1_SN2; + + return xpSuccess; + + /* setup of ch structures failed */ +out_3: + kfree(part_sn2->local_openclose_args_base); + part_sn2->local_openclose_args = NULL; +out_2: + kfree(part_sn2->remote_GPs_base); + part_sn2->remote_GPs = NULL; +out_1: + kfree(part_sn2->local_GPs_base); + part_sn2->local_GPs = NULL; + return retval; +} + +/* + * Teardown the channel structures that are sn2 specific. + */ +static void +xpc_teardown_ch_structures_sn_sn2(struct xpc_partition *part) +{ + struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; + short partid = XPC_PARTID(part); + + /* + * Indicate that the variables specific to the remote partition are no + * longer available for its use. + */ + xpc_vars_part_sn2[partid].magic = 0; + + /* in case we've still got outstanding timers registered... */ + del_timer_sync(&part_sn2->dropped_notify_IRQ_timer); + free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid); + + kfree(part_sn2->local_openclose_args_base); + part_sn2->local_openclose_args = NULL; + kfree(part_sn2->remote_GPs_base); + part_sn2->remote_GPs = NULL; + kfree(part_sn2->local_GPs_base); + part_sn2->local_GPs = NULL; + part_sn2->local_chctl_amo_va = NULL; +} + +/* + * Create a wrapper that hides the underlying mechanism for pulling a cacheline + * (or multiple cachelines) from a remote partition. + * + * src_pa must be a cacheline aligned physical address on the remote partition. + * dst must be a cacheline aligned virtual address on this partition. + * cnt must be cacheline sized + */ +/* ??? Replace this function by call to xp_remote_memcpy() or bte_copy()? */ +static enum xp_retval +xpc_pull_remote_cachelines_sn2(struct xpc_partition *part, void *dst, + const unsigned long src_pa, size_t cnt) +{ + enum xp_retval ret; + + DBUG_ON(src_pa != L1_CACHE_ALIGN(src_pa)); + DBUG_ON((unsigned long)dst != L1_CACHE_ALIGN((unsigned long)dst)); + DBUG_ON(cnt != L1_CACHE_ALIGN(cnt)); + + if (part->act_state == XPC_P_AS_DEACTIVATING) + return part->reason; + + ret = xp_remote_memcpy(xp_pa(dst), src_pa, cnt); + if (ret != xpSuccess) { + dev_dbg(xpc_chan, "xp_remote_memcpy() from partition %d failed," + " ret=%d\n", XPC_PARTID(part), ret); + } + return ret; +} + +/* + * Pull the remote per partition specific variables from the specified + * partition. 
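+ *
+ * Together with xpc_setup_ch_structures_sn_sn2(), this implements a
+ * two-phase handshake via the vars_part magic values:
+ *
+ *	XPC_VP_MAGIC1_SN2	our entry is initialized and ready
+ *	XPC_VP_MAGIC2_SN2	we have also pulled and validated the
+ *				remote side's entry
+ *
+ * xpRetry is returned until the remote side reaches MAGIC2, at which
+ * point first contact is complete.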
+ */
+static enum xp_retval
+xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
+{
+ struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
+ u8 buffer[L1_CACHE_BYTES * 2];
+ struct xpc_vars_part_sn2 *pulled_entry_cacheline =
+ (struct xpc_vars_part_sn2 *)L1_CACHE_ALIGN((u64)buffer);
+ struct xpc_vars_part_sn2 *pulled_entry;
+ unsigned long remote_entry_cacheline_pa;
+ unsigned long remote_entry_pa;
+ short partid = XPC_PARTID(part);
+ enum xp_retval ret;
+
+ /* pull the cacheline that contains the variables we're interested in */
+
+ DBUG_ON(part_sn2->remote_vars_part_pa !=
+ L1_CACHE_ALIGN(part_sn2->remote_vars_part_pa));
+ DBUG_ON(sizeof(struct xpc_vars_part_sn2) != L1_CACHE_BYTES / 2);
+
+ remote_entry_pa = part_sn2->remote_vars_part_pa +
+ sn_partition_id * sizeof(struct xpc_vars_part_sn2);
+
+ remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));
+
+ pulled_entry = (struct xpc_vars_part_sn2 *)((u64)pulled_entry_cacheline +
+ (remote_entry_pa &
+ (L1_CACHE_BYTES - 1)));
+
+ ret = xpc_pull_remote_cachelines_sn2(part, pulled_entry_cacheline,
+ remote_entry_cacheline_pa,
+ L1_CACHE_BYTES);
+ if (ret != xpSuccess) {
+ dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
+ "partition %d, ret=%d\n", partid, ret);
+ return ret;
+ }
+
+ /* see if they've been set up yet */
+
+ if (pulled_entry->magic != XPC_VP_MAGIC1_SN2 &&
+ pulled_entry->magic != XPC_VP_MAGIC2_SN2) {
+
+ if (pulled_entry->magic != 0) {
+ dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
+ "partition %d has bad magic value (=0x%lx)\n",
+ partid, sn_partition_id, pulled_entry->magic);
+ return xpBadMagic;
+ }
+
+ /* they've not been initialized yet */
+ return xpRetry;
+ }
+
+ if (xpc_vars_part_sn2[partid].magic == XPC_VP_MAGIC1_SN2) {
+
+ /* validate the variables */
+
+ if (pulled_entry->GPs_pa == 0 ||
+ pulled_entry->openclose_args_pa == 0 ||
+ pulled_entry->chctl_amo_pa == 0) {
+
+ dev_err(xpc_chan, "partition %d's XPC vars_part for "
+ "partition %d are not valid\n", partid,
+ sn_partition_id);
+ return xpInvalidAddress;
+ }
+
+ /* the variables we imported look to be valid */
+
+ part_sn2->remote_GPs_pa = pulled_entry->GPs_pa;
+ part_sn2->remote_openclose_args_pa =
+ pulled_entry->openclose_args_pa;
+ part_sn2->remote_chctl_amo_va =
+ (struct amo *)__va(pulled_entry->chctl_amo_pa);
+ part_sn2->notify_IRQ_nasid = pulled_entry->notify_IRQ_nasid;
+ part_sn2->notify_IRQ_phys_cpuid =
+ pulled_entry->notify_IRQ_phys_cpuid;
+
+ if (part->nchannels > pulled_entry->nchannels)
+ part->nchannels = pulled_entry->nchannels;
+
+ /* let the other side know that we've pulled their variables */
+
+ xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC2_SN2;
+ }
+
+ if (pulled_entry->magic == XPC_VP_MAGIC1_SN2)
+ return xpRetry;
+
+ return xpSuccess;
+}
+
+/*
+ * Establish first contact with the remote partition. This involves pulling
+ * the XPC per partition variables from the remote partition and waiting for
+ * the remote partition to pull ours.
+ */
+static enum xp_retval
+xpc_make_first_contact_sn2(struct xpc_partition *part)
+{
+ struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
+ enum xp_retval ret;
+
+ /*
+ * Register the remote partition's amos with SAL so it can handle
+ * and clean up errors within that address range should the remote
+ * partition go down. We don't unregister this range because it is
+ * difficult to tell when outstanding writes to the remote partition
+ * are finished and thus when it is safe to unregister.
This should + * not result in wasted space in the SAL xp_addr_region table because + * we should get the same page for remote_amos_page_pa after module + * reloads and system reboots. + */ + if (sn_register_xp_addr_region(part_sn2->remote_amos_page_pa, + PAGE_SIZE, 1) < 0) { + dev_warn(xpc_part, "xpc_activating(%d) failed to register " + "xp_addr region\n", XPC_PARTID(part)); + + ret = xpPhysAddrRegFailed; + XPC_DEACTIVATE_PARTITION(part, ret); + return ret; + } + + /* + * Send activate IRQ to get other side to activate if they've not + * already begun to do so. + */ + xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa, + cnodeid_to_nasid(0), + part_sn2->activate_IRQ_nasid, + part_sn2->activate_IRQ_phys_cpuid); + + while ((ret = xpc_pull_remote_vars_part_sn2(part)) != xpSuccess) { + if (ret != xpRetry) { + XPC_DEACTIVATE_PARTITION(part, ret); + return ret; + } + + dev_dbg(xpc_part, "waiting to make first contact with " + "partition %d\n", XPC_PARTID(part)); + + /* wait a 1/4 of a second or so */ + (void)msleep_interruptible(250); + + if (part->act_state == XPC_P_AS_DEACTIVATING) + return part->reason; + } + + return xpSuccess; +} + +/* + * Get the chctl flags and pull the openclose args and/or remote GPs as needed. + */ +static u64 +xpc_get_chctl_all_flags_sn2(struct xpc_partition *part) +{ + struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; + unsigned long irq_flags; + union xpc_channel_ctl_flags chctl; + enum xp_retval ret; + + /* + * See if there are any chctl flags to be handled. + */ + + spin_lock_irqsave(&part->chctl_lock, irq_flags); + chctl = part->chctl; + if (chctl.all_flags != 0) + part->chctl.all_flags = 0; + + spin_unlock_irqrestore(&part->chctl_lock, irq_flags); + + if (xpc_any_openclose_chctl_flags_set(&chctl)) { + ret = xpc_pull_remote_cachelines_sn2(part, part-> + remote_openclose_args, + part_sn2-> + remote_openclose_args_pa, + XPC_OPENCLOSE_ARGS_SIZE); + if (ret != xpSuccess) { + XPC_DEACTIVATE_PARTITION(part, ret); + + dev_dbg(xpc_chan, "failed to pull openclose args from " + "partition %d, ret=%d\n", XPC_PARTID(part), + ret); + + /* don't bother processing chctl flags anymore */ + chctl.all_flags = 0; + } + } + + if (xpc_any_msg_chctl_flags_set(&chctl)) { + ret = xpc_pull_remote_cachelines_sn2(part, part_sn2->remote_GPs, + part_sn2->remote_GPs_pa, + XPC_GP_SIZE); + if (ret != xpSuccess) { + XPC_DEACTIVATE_PARTITION(part, ret); + + dev_dbg(xpc_chan, "failed to pull GPs from partition " + "%d, ret=%d\n", XPC_PARTID(part), ret); + + /* don't bother processing chctl flags anymore */ + chctl.all_flags = 0; + } + } + + return chctl.all_flags; +} + +/* + * Allocate the local message queue and the notify queue. 
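+ *
+ * If nbytes for the full ch->local_nentries cannot be obtained, the
+ * loop below retries with one entry fewer per pass and shrinks
+ * ch->local_nentries to whatever size was actually allocated.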
+ */ +static enum xp_retval +xpc_allocate_local_msgqueue_sn2(struct xpc_channel *ch) +{ + struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; + unsigned long irq_flags; + int nentries; + size_t nbytes; + + for (nentries = ch->local_nentries; nentries > 0; nentries--) { + + nbytes = nentries * ch->entry_size; + ch_sn2->local_msgqueue = + xpc_kzalloc_cacheline_aligned(nbytes, GFP_KERNEL, + &ch_sn2->local_msgqueue_base); + if (ch_sn2->local_msgqueue == NULL) + continue; + + nbytes = nentries * sizeof(struct xpc_notify_sn2); + ch_sn2->notify_queue = kzalloc(nbytes, GFP_KERNEL); + if (ch_sn2->notify_queue == NULL) { + kfree(ch_sn2->local_msgqueue_base); + ch_sn2->local_msgqueue = NULL; + continue; + } + + spin_lock_irqsave(&ch->lock, irq_flags); + if (nentries < ch->local_nentries) { + dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, " + "partid=%d, channel=%d\n", nentries, + ch->local_nentries, ch->partid, ch->number); + + ch->local_nentries = nentries; + } + spin_unlock_irqrestore(&ch->lock, irq_flags); + return xpSuccess; + } + + dev_dbg(xpc_chan, "can't get memory for local message queue and notify " + "queue, partid=%d, channel=%d\n", ch->partid, ch->number); + return xpNoMemory; +} + +/* + * Allocate the cached remote message queue. + */ +static enum xp_retval +xpc_allocate_remote_msgqueue_sn2(struct xpc_channel *ch) +{ + struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; + unsigned long irq_flags; + int nentries; + size_t nbytes; + + DBUG_ON(ch->remote_nentries <= 0); + + for (nentries = ch->remote_nentries; nentries > 0; nentries--) { + + nbytes = nentries * ch->entry_size; + ch_sn2->remote_msgqueue = + xpc_kzalloc_cacheline_aligned(nbytes, GFP_KERNEL, &ch_sn2-> + remote_msgqueue_base); + if (ch_sn2->remote_msgqueue == NULL) + continue; + + spin_lock_irqsave(&ch->lock, irq_flags); + if (nentries < ch->remote_nentries) { + dev_dbg(xpc_chan, "nentries=%d remote_nentries=%d, " + "partid=%d, channel=%d\n", nentries, + ch->remote_nentries, ch->partid, ch->number); + + ch->remote_nentries = nentries; + } + spin_unlock_irqrestore(&ch->lock, irq_flags); + return xpSuccess; + } + + dev_dbg(xpc_chan, "can't get memory for cached remote message queue, " + "partid=%d, channel=%d\n", ch->partid, ch->number); + return xpNoMemory; +} + +/* + * Allocate message queues and other stuff associated with a channel. + * + * Note: Assumes all of the channel sizes are filled in. + */ +static enum xp_retval +xpc_setup_msg_structures_sn2(struct xpc_channel *ch) +{ + struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; + enum xp_retval ret; + + DBUG_ON(ch->flags & XPC_C_SETUP); + + ret = xpc_allocate_local_msgqueue_sn2(ch); + if (ret == xpSuccess) { + + ret = xpc_allocate_remote_msgqueue_sn2(ch); + if (ret != xpSuccess) { + kfree(ch_sn2->local_msgqueue_base); + ch_sn2->local_msgqueue = NULL; + kfree(ch_sn2->notify_queue); + ch_sn2->notify_queue = NULL; + } + } + return ret; +} + +/* + * Free up message queues and other stuff that were allocated for the specified + * channel. 
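+ *
+ * Assumes the caller holds ch->lock, as asserted by the DBUG_ON below.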
 */
+static void
+xpc_teardown_msg_structures_sn2(struct xpc_channel *ch)
+{
+	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
+
+	DBUG_ON(!spin_is_locked(&ch->lock));
+
+	ch_sn2->remote_msgqueue_pa = 0;
+
+	ch_sn2->local_GP->get = 0;
+	ch_sn2->local_GP->put = 0;
+	ch_sn2->remote_GP.get = 0;
+	ch_sn2->remote_GP.put = 0;
+	ch_sn2->w_local_GP.get = 0;
+	ch_sn2->w_local_GP.put = 0;
+	ch_sn2->w_remote_GP.get = 0;
+	ch_sn2->w_remote_GP.put = 0;
+	ch_sn2->next_msg_to_pull = 0;
+
+	if (ch->flags & XPC_C_SETUP) {
+		dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n",
+			ch->flags, ch->partid, ch->number);
+
+		kfree(ch_sn2->local_msgqueue_base);
+		ch_sn2->local_msgqueue = NULL;
+		kfree(ch_sn2->remote_msgqueue_base);
+		ch_sn2->remote_msgqueue = NULL;
+		kfree(ch_sn2->notify_queue);
+		ch_sn2->notify_queue = NULL;
+	}
+}
+
+/*
+ * Notify those who wanted to be notified upon delivery of their message.
+ */
+static void
+xpc_notify_senders_sn2(struct xpc_channel *ch, enum xp_retval reason, s64 put)
+{
+	struct xpc_notify_sn2 *notify;
+	u8 notify_type;
+	s64 get = ch->sn.sn2.w_remote_GP.get - 1;
+
+	while (++get < put && atomic_read(&ch->n_to_notify) > 0) {
+
+		notify = &ch->sn.sn2.notify_queue[get % ch->local_nentries];
+
+		/*
+		 * See if the notify entry indicates it was associated with
+		 * a message whose sender wants to be notified. It is possible
+		 * that it is, but someone else is doing or has done the
+		 * notification.
+		 */
+		notify_type = notify->type;
+		if (notify_type == 0 ||
+		    cmpxchg(&notify->type, notify_type, 0) != notify_type) {
+			continue;
+		}
+
+		DBUG_ON(notify_type != XPC_N_CALL);
+
+		atomic_dec(&ch->n_to_notify);
+
+		if (notify->func != NULL) {
+			dev_dbg(xpc_chan, "notify->func() called, notify=0x%p "
+				"msg_number=%ld partid=%d channel=%d\n",
+				(void *)notify, get, ch->partid, ch->number);
+
+			notify->func(reason, ch->partid, ch->number,
+				     notify->key);
+
+			dev_dbg(xpc_chan, "notify->func() returned, notify=0x%p"
+				" msg_number=%ld partid=%d channel=%d\n",
+				(void *)notify, get, ch->partid, ch->number);
+		}
+	}
+}
+
+static void
+xpc_notify_senders_of_disconnect_sn2(struct xpc_channel *ch)
+{
+	xpc_notify_senders_sn2(ch, ch->reason, ch->sn.sn2.w_local_GP.put);
+}
+
+/*
+ * Clear some of the msg flags in the local message queue.
+ */
+static inline void
+xpc_clear_local_msgqueue_flags_sn2(struct xpc_channel *ch)
+{
+	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
+	struct xpc_msg_sn2 *msg;
+	s64 get;
+
+	get = ch_sn2->w_remote_GP.get;
+	do {
+		msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->local_msgqueue +
+					     (get % ch->local_nentries) *
+					     ch->entry_size);
+		msg->flags = 0;
+	} while (++get < ch_sn2->remote_GP.get);
+}
+
+/*
+ * Clear some of the msg flags in the remote message queue.
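+ *
+ * Slot addressing is the usual modular ring arithmetic; an illustrative
+ * form of the computation used below, where n is a message number:
+ *
+ *	msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue +
+ *				     (n % ch->remote_nentries) *
+ *				     ch->entry_size);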
+ */ +static inline void +xpc_clear_remote_msgqueue_flags_sn2(struct xpc_channel *ch) +{ + struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; + struct xpc_msg_sn2 *msg; + s64 put; + + put = ch_sn2->w_remote_GP.put; + do { + msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue + + (put % ch->remote_nentries) * + ch->entry_size); + msg->flags = 0; + } while (++put < ch_sn2->remote_GP.put); +} + +static int +xpc_n_of_deliverable_payloads_sn2(struct xpc_channel *ch) +{ + return ch->sn.sn2.w_remote_GP.put - ch->sn.sn2.w_local_GP.get; +} + +static void +xpc_process_msg_chctl_flags_sn2(struct xpc_partition *part, int ch_number) +{ + struct xpc_channel *ch = &part->channels[ch_number]; + struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; + int npayloads_sent; + + ch_sn2->remote_GP = part->sn.sn2.remote_GPs[ch_number]; + + /* See what, if anything, has changed for each connected channel */ + + xpc_msgqueue_ref(ch); + + if (ch_sn2->w_remote_GP.get == ch_sn2->remote_GP.get && + ch_sn2->w_remote_GP.put == ch_sn2->remote_GP.put) { + /* nothing changed since GPs were last pulled */ + xpc_msgqueue_deref(ch); + return; + } + + if (!(ch->flags & XPC_C_CONNECTED)) { + xpc_msgqueue_deref(ch); + return; + } + + /* + * First check to see if messages recently sent by us have been + * received by the other side. (The remote GET value will have + * changed since we last looked at it.) + */ + + if (ch_sn2->w_remote_GP.get != ch_sn2->remote_GP.get) { + + /* + * We need to notify any senders that want to be notified + * that their sent messages have been received by their + * intended recipients. We need to do this before updating + * w_remote_GP.get so that we don't allocate the same message + * queue entries prematurely (see xpc_allocate_msg()). + */ + if (atomic_read(&ch->n_to_notify) > 0) { + /* + * Notify senders that messages sent have been + * received and delivered by the other side. + */ + xpc_notify_senders_sn2(ch, xpMsgDelivered, + ch_sn2->remote_GP.get); + } + + /* + * Clear msg->flags in previously sent messages, so that + * they're ready for xpc_allocate_msg(). + */ + xpc_clear_local_msgqueue_flags_sn2(ch); + + ch_sn2->w_remote_GP.get = ch_sn2->remote_GP.get; + + dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, " + "channel=%d\n", ch_sn2->w_remote_GP.get, ch->partid, + ch->number); + + /* + * If anyone was waiting for message queue entries to become + * available, wake them up. + */ + if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) + wake_up(&ch->msg_allocate_wq); + } + + /* + * Now check for newly sent messages by the other side. (The remote + * PUT value will have changed since we last looked at it.) + */ + + if (ch_sn2->w_remote_GP.put != ch_sn2->remote_GP.put) { + /* + * Clear msg->flags in previously received messages, so that + * they're ready for xpc_get_deliverable_payload_sn2(). 
 */
+		xpc_clear_remote_msgqueue_flags_sn2(ch);
+
+		smp_wmb(); /* ensure flags have been cleared before bte_copy */
+		ch_sn2->w_remote_GP.put = ch_sn2->remote_GP.put;
+
+		dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
+			"channel=%d\n", ch_sn2->w_remote_GP.put, ch->partid,
+			ch->number);
+
+		npayloads_sent = xpc_n_of_deliverable_payloads_sn2(ch);
+		if (npayloads_sent > 0) {
+			dev_dbg(xpc_chan, "msgs waiting to be copied and "
+				"delivered=%d, partid=%d, channel=%d\n",
+				npayloads_sent, ch->partid, ch->number);
+
+			if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)
+				xpc_activate_kthreads(ch, npayloads_sent);
+		}
+	}
+
+	xpc_msgqueue_deref(ch);
+}
+
+static struct xpc_msg_sn2 *
+xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get)
+{
+	struct xpc_partition *part = &xpc_partitions[ch->partid];
+	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
+	unsigned long remote_msg_pa;
+	struct xpc_msg_sn2 *msg;
+	u32 msg_index;
+	u32 nmsgs;
+	u64 msg_offset;
+	enum xp_retval ret;
+
+	if (mutex_lock_interruptible(&ch_sn2->msg_to_pull_mutex) != 0) {
+		/* we were interrupted by a signal */
+		return NULL;
+	}
+
+	while (get >= ch_sn2->next_msg_to_pull) {
+
+		/* pull as many messages as are ready and able to be pulled */
+
+		msg_index = ch_sn2->next_msg_to_pull % ch->remote_nentries;
+
+		DBUG_ON(ch_sn2->next_msg_to_pull >= ch_sn2->w_remote_GP.put);
+		nmsgs = ch_sn2->w_remote_GP.put - ch_sn2->next_msg_to_pull;
+		if (msg_index + nmsgs > ch->remote_nentries) {
+			/* ignore the ones that wrap the msg queue for now */
+			nmsgs = ch->remote_nentries - msg_index;
+		}
+
+		msg_offset = msg_index * ch->entry_size;
+		msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue +
+		    msg_offset);
+		remote_msg_pa = ch_sn2->remote_msgqueue_pa + msg_offset;
+
+		ret = xpc_pull_remote_cachelines_sn2(part, msg, remote_msg_pa,
+						     nmsgs * ch->entry_size);
+		if (ret != xpSuccess) {
+
+			dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
+				" msg %ld from partition %d, channel=%d, "
+				"ret=%d\n", nmsgs, ch_sn2->next_msg_to_pull,
+				ch->partid, ch->number, ret);
+
+			XPC_DEACTIVATE_PARTITION(part, ret);
+
+			mutex_unlock(&ch_sn2->msg_to_pull_mutex);
+			return NULL;
+		}
+
+		ch_sn2->next_msg_to_pull += nmsgs;
+	}
+
+	mutex_unlock(&ch_sn2->msg_to_pull_mutex);
+
+	/* return the message we were looking for */
+	msg_offset = (get % ch->remote_nentries) * ch->entry_size;
+	msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue + msg_offset);
+
+	return msg;
+}
+
+/*
+ * Get the next deliverable message's payload.
+ */
+static void *
+xpc_get_deliverable_payload_sn2(struct xpc_channel *ch)
+{
+	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
+	struct xpc_msg_sn2 *msg;
+	void *payload = NULL;
+	s64 get;
+
+	do {
+		if (ch->flags & XPC_C_DISCONNECTING)
+			break;
+
+		get = ch_sn2->w_local_GP.get;
+		smp_rmb();	/* guarantee that .get loads before .put */
+		if (get == ch_sn2->w_remote_GP.put)
+			break;
+
+		/* There are messages waiting to be pulled and delivered.
+		 * We need to try to secure one for ourselves. We'll do this
+		 * by trying to increment w_local_GP.get and hope that no one
+		 * else beats us to it. If they do, we'll simply have
+		 * to try again for the next one.
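+		 *
+		 * In outline, the lock-free claim below is (sketch only):
+		 *
+		 *	get = w_local_GP.get;
+		 *	if (get != w_remote_GP.put &&
+		 *	    cmpxchg(&w_local_GP.get, get, get + 1) == get)
+		 *		...message number "get" is now ours...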
+ */ + + if (cmpxchg(&ch_sn2->w_local_GP.get, get, get + 1) == get) { + /* we got the entry referenced by get */ + + dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, " + "partid=%d, channel=%d\n", get + 1, + ch->partid, ch->number); + + /* pull the message from the remote partition */ + + msg = xpc_pull_remote_msg_sn2(ch, get); + + if (msg != NULL) { + DBUG_ON(msg->number != get); + DBUG_ON(msg->flags & XPC_M_SN2_DONE); + DBUG_ON(!(msg->flags & XPC_M_SN2_READY)); + + payload = &msg->payload; + } + break; + } + + } while (1); + + return payload; +} + +/* + * Now we actually send the messages that are ready to be sent by advancing + * the local message queue's Put value and then send a chctl msgrequest to the + * recipient partition. + */ +static void +xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put) +{ + struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; + struct xpc_msg_sn2 *msg; + s64 put = initial_put + 1; + int send_msgrequest = 0; + + while (1) { + + while (1) { + if (put == ch_sn2->w_local_GP.put) + break; + + msg = (struct xpc_msg_sn2 *)((u64)ch_sn2-> + local_msgqueue + (put % + ch->local_nentries) * + ch->entry_size); + + if (!(msg->flags & XPC_M_SN2_READY)) + break; + + put++; + } + + if (put == initial_put) { + /* nothing's changed */ + break; + } + + if (cmpxchg_rel(&ch_sn2->local_GP->put, initial_put, put) != + initial_put) { + /* someone else beat us to it */ + DBUG_ON(ch_sn2->local_GP->put < initial_put); + break; + } + + /* we just set the new value of local_GP->put */ + + dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, " + "channel=%d\n", put, ch->partid, ch->number); + + send_msgrequest = 1; + + /* + * We need to ensure that the message referenced by + * local_GP->put is not XPC_M_SN2_READY or that local_GP->put + * equals w_local_GP.put, so we'll go have a look. + */ + initial_put = put; + } + + if (send_msgrequest) + xpc_send_chctl_msgrequest_sn2(ch); +} + +/* + * Allocate an entry for a message from the message queue associated with the + * specified channel. + */ +static enum xp_retval +xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags, + struct xpc_msg_sn2 **address_of_msg) +{ + struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; + struct xpc_msg_sn2 *msg; + enum xp_retval ret; + s64 put; + + /* + * Get the next available message entry from the local message queue. + * If none are available, we'll make sure that we grab the latest + * GP values. + */ + ret = xpTimeout; + + while (1) { + + put = ch_sn2->w_local_GP.put; + smp_rmb(); /* guarantee that .put loads before .get */ + if (put - ch_sn2->w_remote_GP.get < ch->local_nentries) { + + /* There are available message entries. We need to try + * to secure one for ourselves. We'll do this by trying + * to increment w_local_GP.put as long as someone else + * doesn't beat us to it. If they do, we'll have to + * try again. + */ + if (cmpxchg(&ch_sn2->w_local_GP.put, put, put + 1) == + put) { + /* we got the entry referenced by put */ + break; + } + continue; /* try again */ + } + + /* + * There aren't any available msg entries at this time. + * + * In waiting for a message entry to become available, + * we set a timeout in case the other side is not sending + * completion interrupts. This lets us fake a notify IRQ + * that will cause the notify IRQ handler to fetch the latest + * GP values as if an interrupt was sent by the other side. 
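+	 * (ret is primed to xpTimeout above, so the very first pass through
+	 * this slow path also sends the local msgrequest.)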
+ */ + if (ret == xpTimeout) + xpc_send_chctl_local_msgrequest_sn2(ch); + + if (flags & XPC_NOWAIT) + return xpNoWait; + + ret = xpc_allocate_msg_wait(ch); + if (ret != xpInterrupted && ret != xpTimeout) + return ret; + } + + /* get the message's address and initialize it */ + msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->local_msgqueue + + (put % ch->local_nentries) * + ch->entry_size); + + DBUG_ON(msg->flags != 0); + msg->number = put; + + dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, " + "msg_number=%ld, partid=%d, channel=%d\n", put + 1, + (void *)msg, msg->number, ch->partid, ch->number); + + *address_of_msg = msg; + return xpSuccess; +} + +/* + * Common code that does the actual sending of the message by advancing the + * local message queue's Put value and sends a chctl msgrequest to the + * partition the message is being sent to. + */ +static enum xp_retval +xpc_send_payload_sn2(struct xpc_channel *ch, u32 flags, void *payload, + u16 payload_size, u8 notify_type, xpc_notify_func func, + void *key) +{ + enum xp_retval ret = xpSuccess; + struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; + struct xpc_msg_sn2 *msg = msg; + struct xpc_notify_sn2 *notify = notify; + s64 msg_number; + s64 put; + + DBUG_ON(notify_type == XPC_N_CALL && func == NULL); + + if (XPC_MSG_SIZE(payload_size) > ch->entry_size) + return xpPayloadTooBig; + + xpc_msgqueue_ref(ch); + + if (ch->flags & XPC_C_DISCONNECTING) { + ret = ch->reason; + goto out_1; + } + if (!(ch->flags & XPC_C_CONNECTED)) { + ret = xpNotConnected; + goto out_1; + } + + ret = xpc_allocate_msg_sn2(ch, flags, &msg); + if (ret != xpSuccess) + goto out_1; + + msg_number = msg->number; + + if (notify_type != 0) { + /* + * Tell the remote side to send an ACK interrupt when the + * message has been delivered. + */ + msg->flags |= XPC_M_SN2_INTERRUPT; + + atomic_inc(&ch->n_to_notify); + + notify = &ch_sn2->notify_queue[msg_number % ch->local_nentries]; + notify->func = func; + notify->key = key; + notify->type = notify_type; + + /* ??? Is a mb() needed here? */ + + if (ch->flags & XPC_C_DISCONNECTING) { + /* + * An error occurred between our last error check and + * this one. We will try to clear the type field from + * the notify entry. If we succeed then + * xpc_disconnect_channel() didn't already process + * the notify entry. + */ + if (cmpxchg(¬ify->type, notify_type, 0) == + notify_type) { + atomic_dec(&ch->n_to_notify); + ret = ch->reason; + } + goto out_1; + } + } + + memcpy(&msg->payload, payload, payload_size); + + msg->flags |= XPC_M_SN2_READY; + + /* + * The preceding store of msg->flags must occur before the following + * load of local_GP->put. + */ + smp_mb(); + + /* see if the message is next in line to be sent, if so send it */ + + put = ch_sn2->local_GP->put; + if (put == msg_number) + xpc_send_msgs_sn2(ch, put); + +out_1: + xpc_msgqueue_deref(ch); + return ret; +} + +/* + * Now we actually acknowledge the messages that have been delivered and ack'd + * by advancing the cached remote message queue's Get value and if requested + * send a chctl msgrequest to the message sender's partition. + * + * If a message has XPC_M_SN2_INTERRUPT set, send an interrupt to the partition + * that sent the message. 
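+ *
+ * The cached Get is advanced with a release-ordered cmpxchg so that the
+ * earlier msg->flags updates are visible before the new Get value; in
+ * outline (sketch of the code below):
+ *
+ *	if (cmpxchg_rel(&ch_sn2->local_GP->get, initial_get, get) !=
+ *	    initial_get)
+ *		...another CPU advanced it first; nothing more to do...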
+ */ +static void +xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags) +{ + struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; + struct xpc_msg_sn2 *msg; + s64 get = initial_get + 1; + int send_msgrequest = 0; + + while (1) { + + while (1) { + if (get == ch_sn2->w_local_GP.get) + break; + + msg = (struct xpc_msg_sn2 *)((u64)ch_sn2-> + remote_msgqueue + (get % + ch->remote_nentries) * + ch->entry_size); + + if (!(msg->flags & XPC_M_SN2_DONE)) + break; + + msg_flags |= msg->flags; + get++; + } + + if (get == initial_get) { + /* nothing's changed */ + break; + } + + if (cmpxchg_rel(&ch_sn2->local_GP->get, initial_get, get) != + initial_get) { + /* someone else beat us to it */ + DBUG_ON(ch_sn2->local_GP->get <= initial_get); + break; + } + + /* we just set the new value of local_GP->get */ + + dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, " + "channel=%d\n", get, ch->partid, ch->number); + + send_msgrequest = (msg_flags & XPC_M_SN2_INTERRUPT); + + /* + * We need to ensure that the message referenced by + * local_GP->get is not XPC_M_SN2_DONE or that local_GP->get + * equals w_local_GP.get, so we'll go have a look. + */ + initial_get = get; + } + + if (send_msgrequest) + xpc_send_chctl_msgrequest_sn2(ch); +} + +static void +xpc_received_payload_sn2(struct xpc_channel *ch, void *payload) +{ + struct xpc_msg_sn2 *msg; + s64 msg_number; + s64 get; + + msg = container_of(payload, struct xpc_msg_sn2, payload); + msg_number = msg->number; + + dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n", + (void *)msg, msg_number, ch->partid, ch->number); + + DBUG_ON((((u64)msg - (u64)ch->remote_msgqueue) / ch->entry_size) != + msg_number % ch->remote_nentries); + DBUG_ON(msg->flags & XPC_M_SN2_DONE); + + msg->flags |= XPC_M_SN2_DONE; + + /* + * The preceding store of msg->flags must occur before the following + * load of local_GP->get. + */ + smp_mb(); + + /* + * See if this message is next in line to be acknowledged as having + * been delivered. 
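+	 * Only the oldest outstanding message triggers the sweep in
+	 * xpc_acknowledge_msgs_sn2(); later messages marked DONE out of
+	 * order are picked up when their predecessors are acknowledged.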
+ */ + get = ch->sn.sn2.local_GP->get; + if (get == msg_number) + xpc_acknowledge_msgs_sn2(ch, get, msg->flags); +} + +int +xpc_init_sn2(void) +{ + int ret; + size_t buf_size; + + xpc_setup_partitions_sn = xpc_setup_partitions_sn_sn2; + xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_sn2; + xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_sn2; + xpc_increment_heartbeat = xpc_increment_heartbeat_sn2; + xpc_offline_heartbeat = xpc_offline_heartbeat_sn2; + xpc_online_heartbeat = xpc_online_heartbeat_sn2; + xpc_heartbeat_init = xpc_heartbeat_init_sn2; + xpc_heartbeat_exit = xpc_heartbeat_exit_sn2; + xpc_get_remote_heartbeat = xpc_get_remote_heartbeat_sn2; + + xpc_request_partition_activation = xpc_request_partition_activation_sn2; + xpc_request_partition_reactivation = + xpc_request_partition_reactivation_sn2; + xpc_request_partition_deactivation = + xpc_request_partition_deactivation_sn2; + xpc_cancel_partition_deactivation_request = + xpc_cancel_partition_deactivation_request_sn2; + + xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2; + xpc_setup_ch_structures_sn = xpc_setup_ch_structures_sn_sn2; + xpc_teardown_ch_structures_sn = xpc_teardown_ch_structures_sn_sn2; + xpc_make_first_contact = xpc_make_first_contact_sn2; + + xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_sn2; + xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_sn2; + xpc_send_chctl_closereply = xpc_send_chctl_closereply_sn2; + xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_sn2; + xpc_send_chctl_openreply = xpc_send_chctl_openreply_sn2; + + xpc_save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_sn2; + + xpc_setup_msg_structures = xpc_setup_msg_structures_sn2; + xpc_teardown_msg_structures = xpc_teardown_msg_structures_sn2; + + xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2; + xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_sn2; + xpc_n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_sn2; + xpc_get_deliverable_payload = xpc_get_deliverable_payload_sn2; + + xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_sn2; + xpc_indicate_partition_disengaged = + xpc_indicate_partition_disengaged_sn2; + xpc_partition_engaged = xpc_partition_engaged_sn2; + xpc_any_partition_engaged = xpc_any_partition_engaged_sn2; + xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_sn2; + + xpc_send_payload = xpc_send_payload_sn2; + xpc_received_payload = xpc_received_payload_sn2; + + if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) { + dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is " + "larger than %d\n", XPC_MSG_HDR_MAX_SIZE); + return -E2BIG; + } + + buf_size = max(XPC_RP_VARS_SIZE, + XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES_SN2); + xpc_remote_copy_buffer_sn2 = xpc_kmalloc_cacheline_aligned(buf_size, + GFP_KERNEL, + &xpc_remote_copy_buffer_base_sn2); + if (xpc_remote_copy_buffer_sn2 == NULL) { + dev_err(xpc_part, "can't get memory for remote copy buffer\n"); + return -ENOMEM; + } + + /* open up protections for IPI and [potentially] amo operations */ + xpc_allow_IPI_ops_sn2(); + xpc_allow_amo_ops_shub_wars_1_1_sn2(); + + /* + * This is safe to do before the xpc_hb_checker thread has started + * because the handler releases a wait queue. If an interrupt is + * received before the thread is waiting, it will not go to sleep, + * but rather immediately process the interrupt. 
+ */ + ret = request_irq(SGI_XPC_ACTIVATE, xpc_handle_activate_IRQ_sn2, 0, + "xpc hb", NULL); + if (ret != 0) { + dev_err(xpc_part, "can't register ACTIVATE IRQ handler, " + "errno=%d\n", -ret); + xpc_disallow_IPI_ops_sn2(); + kfree(xpc_remote_copy_buffer_base_sn2); + } + return ret; +} + +void +xpc_exit_sn2(void) +{ + free_irq(SGI_XPC_ACTIVATE, NULL); + xpc_disallow_IPI_ops_sn2(); + kfree(xpc_remote_copy_buffer_base_sn2); +} diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c new file mode 100644 index 0000000..1bb8bc5 --- /dev/null +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -0,0 +1,1442 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved. + */ + +/* + * Cross Partition Communication (XPC) uv-based functions. + * + * Architecture specific implementation of common functions. + * + */ + +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <asm/uv/uv_hub.h> +#include "../sgi-gru/gru.h" +#include "../sgi-gru/grukservices.h" +#include "xpc.h" + +static atomic64_t xpc_heartbeat_uv; +static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV); + +#define XPC_ACTIVATE_MSG_SIZE_UV (1 * GRU_CACHE_LINE_BYTES) +#define XPC_NOTIFY_MSG_SIZE_UV (2 * GRU_CACHE_LINE_BYTES) + +#define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \ + XPC_ACTIVATE_MSG_SIZE_UV) +#define XPC_NOTIFY_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \ + XPC_NOTIFY_MSG_SIZE_UV) + +static void *xpc_activate_mq_uv; +static void *xpc_notify_mq_uv; + +static int +xpc_setup_partitions_sn_uv(void) +{ + short partid; + struct xpc_partition_uv *part_uv; + + for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) { + part_uv = &xpc_partitions[partid].sn.uv; + + spin_lock_init(&part_uv->flags_lock); + part_uv->remote_act_state = XPC_P_AS_INACTIVE; + } + return 0; +} + +static void * +xpc_create_gru_mq_uv(unsigned int mq_size, int cpuid, unsigned int irq, + irq_handler_t irq_handler) +{ + int ret; + int nid; + int mq_order; + struct page *page; + void *mq; + + nid = cpu_to_node(cpuid); + mq_order = get_order(mq_size); + page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, + mq_order); + if (page == NULL) { + dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " + "bytes of memory on nid=%d for GRU mq\n", mq_size, nid); + return NULL; + } + + mq = page_address(page); + ret = gru_create_message_queue(mq, mq_size); + if (ret != 0) { + dev_err(xpc_part, "gru_create_message_queue() returned " + "error=%d\n", ret); + free_pages((unsigned long)mq, mq_order); + return NULL; + } + + /* !!! Need to do some other things to set up IRQ */ + + ret = request_irq(irq, irq_handler, 0, "xpc", NULL); + if (ret != 0) { + dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n", + irq, ret); + free_pages((unsigned long)mq, mq_order); + return NULL; + } + + /* !!! enable generation of irq when GRU mq op occurs to this mq */ + + /* ??? allow other partitions to access GRU mq? */ + + return mq; +} + +static void +xpc_destroy_gru_mq_uv(void *mq, unsigned int mq_size, unsigned int irq) +{ + /* ??? disallow other partitions to access GRU mq? */ + + /* !!! 
disable generation of irq when GRU mq op occurs to this mq */ + + free_irq(irq, NULL); + + free_pages((unsigned long)mq, get_order(mq_size)); +} + +static enum xp_retval +xpc_send_gru_msg(unsigned long mq_gpa, void *msg, size_t msg_size) +{ + enum xp_retval xp_ret; + int ret; + + while (1) { + ret = gru_send_message_gpa(mq_gpa, msg, msg_size); + if (ret == MQE_OK) { + xp_ret = xpSuccess; + break; + } + + if (ret == MQE_QUEUE_FULL) { + dev_dbg(xpc_chan, "gru_send_message_gpa() returned " + "error=MQE_QUEUE_FULL\n"); + /* !!! handle QLimit reached; delay & try again */ + /* ??? Do we add a limit to the number of retries? */ + (void)msleep_interruptible(10); + } else if (ret == MQE_CONGESTION) { + dev_dbg(xpc_chan, "gru_send_message_gpa() returned " + "error=MQE_CONGESTION\n"); + /* !!! handle LB Overflow; simply try again */ + /* ??? Do we add a limit to the number of retries? */ + } else { + /* !!! Currently this is MQE_UNEXPECTED_CB_ERR */ + dev_err(xpc_chan, "gru_send_message_gpa() returned " + "error=%d\n", ret); + xp_ret = xpGruSendMqError; + break; + } + } + return xp_ret; +} + +static void +xpc_process_activate_IRQ_rcvd_uv(void) +{ + unsigned long irq_flags; + short partid; + struct xpc_partition *part; + u8 act_state_req; + + DBUG_ON(xpc_activate_IRQ_rcvd == 0); + + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) { + part = &xpc_partitions[partid]; + + if (part->sn.uv.act_state_req == 0) + continue; + + xpc_activate_IRQ_rcvd--; + BUG_ON(xpc_activate_IRQ_rcvd < 0); + + act_state_req = part->sn.uv.act_state_req; + part->sn.uv.act_state_req = 0; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + + if (act_state_req == XPC_P_ASR_ACTIVATE_UV) { + if (part->act_state == XPC_P_AS_INACTIVE) + xpc_activate_partition(part); + else if (part->act_state == XPC_P_AS_DEACTIVATING) + XPC_DEACTIVATE_PARTITION(part, xpReactivating); + + } else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) { + if (part->act_state == XPC_P_AS_INACTIVE) + xpc_activate_partition(part); + else + XPC_DEACTIVATE_PARTITION(part, xpReactivating); + + } else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) { + XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason); + + } else { + BUG(); + } + + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + if (xpc_activate_IRQ_rcvd == 0) + break; + } + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + +} + +static void +xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, + struct xpc_activate_mq_msghdr_uv *msg_hdr, + int *wakeup_hb_checker) +{ + unsigned long irq_flags; + struct xpc_partition_uv *part_uv = &part->sn.uv; + struct xpc_openclose_args *args; + + part_uv->remote_act_state = msg_hdr->act_state; + + switch (msg_hdr->type) { + case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV: + /* syncing of remote_act_state was just done above */ + break; + + case XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV: { + struct xpc_activate_mq_msg_heartbeat_req_uv *msg; + + msg = container_of(msg_hdr, + struct xpc_activate_mq_msg_heartbeat_req_uv, + hdr); + part_uv->heartbeat = msg->heartbeat; + break; + } + case XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV: { + struct xpc_activate_mq_msg_heartbeat_req_uv *msg; + + msg = container_of(msg_hdr, + struct xpc_activate_mq_msg_heartbeat_req_uv, + hdr); + part_uv->heartbeat = msg->heartbeat; + + spin_lock_irqsave(&part_uv->flags_lock, irq_flags); + part_uv->flags |= XPC_P_HEARTBEAT_OFFLINE_UV; + spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); + break; + } + 
case XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV: { + struct xpc_activate_mq_msg_heartbeat_req_uv *msg; + + msg = container_of(msg_hdr, + struct xpc_activate_mq_msg_heartbeat_req_uv, + hdr); + part_uv->heartbeat = msg->heartbeat; + + spin_lock_irqsave(&part_uv->flags_lock, irq_flags); + part_uv->flags &= ~XPC_P_HEARTBEAT_OFFLINE_UV; + spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); + break; + } + case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: { + struct xpc_activate_mq_msg_activate_req_uv *msg; + + /* + * ??? Do we deal here with ts_jiffies being different + * ??? if act_state != XPC_P_AS_INACTIVE instead of + * ??? below? + */ + msg = container_of(msg_hdr, struct + xpc_activate_mq_msg_activate_req_uv, hdr); + + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + if (part_uv->act_state_req == 0) + xpc_activate_IRQ_rcvd++; + part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV; + part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */ + part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies; + part_uv->remote_activate_mq_gpa = msg->activate_mq_gpa; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + + (*wakeup_hb_checker)++; + break; + } + case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: { + struct xpc_activate_mq_msg_deactivate_req_uv *msg; + + msg = container_of(msg_hdr, struct + xpc_activate_mq_msg_deactivate_req_uv, hdr); + + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + if (part_uv->act_state_req == 0) + xpc_activate_IRQ_rcvd++; + part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV; + part_uv->reason = msg->reason; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + + (*wakeup_hb_checker)++; + return; + } + case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: { + struct xpc_activate_mq_msg_chctl_closerequest_uv *msg; + + msg = container_of(msg_hdr, struct + xpc_activate_mq_msg_chctl_closerequest_uv, + hdr); + args = &part->remote_openclose_args[msg->ch_number]; + args->reason = msg->reason; + + spin_lock_irqsave(&part->chctl_lock, irq_flags); + part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST; + spin_unlock_irqrestore(&part->chctl_lock, irq_flags); + + xpc_wakeup_channel_mgr(part); + break; + } + case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: { + struct xpc_activate_mq_msg_chctl_closereply_uv *msg; + + msg = container_of(msg_hdr, struct + xpc_activate_mq_msg_chctl_closereply_uv, + hdr); + + spin_lock_irqsave(&part->chctl_lock, irq_flags); + part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY; + spin_unlock_irqrestore(&part->chctl_lock, irq_flags); + + xpc_wakeup_channel_mgr(part); + break; + } + case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: { + struct xpc_activate_mq_msg_chctl_openrequest_uv *msg; + + msg = container_of(msg_hdr, struct + xpc_activate_mq_msg_chctl_openrequest_uv, + hdr); + args = &part->remote_openclose_args[msg->ch_number]; + args->entry_size = msg->entry_size; + args->local_nentries = msg->local_nentries; + + spin_lock_irqsave(&part->chctl_lock, irq_flags); + part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST; + spin_unlock_irqrestore(&part->chctl_lock, irq_flags); + + xpc_wakeup_channel_mgr(part); + break; + } + case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: { + struct xpc_activate_mq_msg_chctl_openreply_uv *msg; + + msg = container_of(msg_hdr, struct + xpc_activate_mq_msg_chctl_openreply_uv, hdr); + args = &part->remote_openclose_args[msg->ch_number]; + args->remote_nentries = msg->remote_nentries; + args->local_nentries = msg->local_nentries; + args->local_msgqueue_pa = msg->local_notify_mq_gpa; + + 
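+		/* (as noted elsewhere in this file, the _pa stored here
+		 *  is really a gru _gpa) */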
spin_lock_irqsave(&part->chctl_lock, irq_flags); + part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY; + spin_unlock_irqrestore(&part->chctl_lock, irq_flags); + + xpc_wakeup_channel_mgr(part); + break; + } + case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV: + spin_lock_irqsave(&part_uv->flags_lock, irq_flags); + part_uv->flags |= XPC_P_ENGAGED_UV; + spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); + break; + + case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV: + spin_lock_irqsave(&part_uv->flags_lock, irq_flags); + part_uv->flags &= ~XPC_P_ENGAGED_UV; + spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); + break; + + default: + dev_err(xpc_part, "received unknown activate_mq msg type=%d " + "from partition=%d\n", msg_hdr->type, XPC_PARTID(part)); + + /* get hb checker to deactivate from the remote partition */ + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + if (part_uv->act_state_req == 0) + xpc_activate_IRQ_rcvd++; + part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV; + part_uv->reason = xpBadMsgType; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + + (*wakeup_hb_checker)++; + return; + } + + if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies && + part->remote_rp_ts_jiffies != 0) { + /* + * ??? Does what we do here need to be sensitive to + * ??? act_state or remote_act_state? + */ + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + if (part_uv->act_state_req == 0) + xpc_activate_IRQ_rcvd++; + part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + + (*wakeup_hb_checker)++; + } +} + +static irqreturn_t +xpc_handle_activate_IRQ_uv(int irq, void *dev_id) +{ + struct xpc_activate_mq_msghdr_uv *msg_hdr; + short partid; + struct xpc_partition *part; + int wakeup_hb_checker = 0; + + while ((msg_hdr = gru_get_next_message(xpc_activate_mq_uv)) != NULL) { + + partid = msg_hdr->partid; + if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) { + dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() " + "received invalid partid=0x%x in message\n", + partid); + } else { + part = &xpc_partitions[partid]; + if (xpc_part_ref(part)) { + xpc_handle_activate_mq_msg_uv(part, msg_hdr, + &wakeup_hb_checker); + xpc_part_deref(part); + } + } + + gru_free_message(xpc_activate_mq_uv, msg_hdr); + } + + if (wakeup_hb_checker) + wake_up_interruptible(&xpc_activate_IRQ_wq); + + return IRQ_HANDLED; +} + +static enum xp_retval +xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size, + int msg_type) +{ + struct xpc_activate_mq_msghdr_uv *msg_hdr = msg; + + DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV); + + msg_hdr->type = msg_type; + msg_hdr->partid = XPC_PARTID(part); + msg_hdr->act_state = part->act_state; + msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies; + + /* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? 
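+	 * ??? (xpc_send_gru_msg() can msleep_interruptible() when the target
+	 * ??? mq is full, which must not happen while a spinlock is held.)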
 */
+	return xpc_send_gru_msg(part->sn.uv.remote_activate_mq_gpa, msg,
+				msg_size);
+}
+
+static void
+xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
+			      size_t msg_size, int msg_type)
+{
+	enum xp_retval ret;
+
+	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
+	if (unlikely(ret != xpSuccess))
+		XPC_DEACTIVATE_PARTITION(part, ret);
+}
+
+static void
+xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
+			    void *msg, size_t msg_size, int msg_type)
+{
+	struct xpc_partition *part = &xpc_partitions[ch->partid];
+	enum xp_retval ret;
+
+	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
+	if (unlikely(ret != xpSuccess)) {
+		if (irq_flags != NULL)
+			spin_unlock_irqrestore(&ch->lock, *irq_flags);
+
+		XPC_DEACTIVATE_PARTITION(part, ret);
+
+		if (irq_flags != NULL)
+			spin_lock_irqsave(&ch->lock, *irq_flags);
+	}
+}
+
+static void
+xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
+{
+	unsigned long irq_flags;
+	struct xpc_partition_uv *part_uv = &part->sn.uv;
+
+	/*
+	 * !!! Make our side think that the remote partition sent an activate
+	 * !!! message our way by doing what the activate IRQ handler would
+	 * !!! do had one really been sent.
+	 */
+
+	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+	if (part_uv->act_state_req == 0)
+		xpc_activate_IRQ_rcvd++;
+	part_uv->act_state_req = act_state_req;
+	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+
+	wake_up_interruptible(&xpc_activate_IRQ_wq);
+}
+
+static enum xp_retval
+xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
+				  size_t *len)
+{
+	/* !!! call the UV version of sn_partition_reserved_page_pa() */
+	return xpUnsupported;
+}
+
+static int
+xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp)
+{
+	rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv);
+	return 0;
+}
+
+static void
+xpc_send_heartbeat_uv(int msg_type)
+{
+	short partid;
+	struct xpc_partition *part;
+	struct xpc_activate_mq_msg_heartbeat_req_uv msg;
+
+	/*
+	 * !!! On uv we're broadcasting a heartbeat message every 5 seconds.
+	 * !!! Whereas on sn2 we're bte_copy'ng the heartbeat info every 20
+	 * !!! seconds. This is an increase in numalink traffic.
+	 * ??? Is this good?
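+	 * ??? (Rough cost: one message of up to XPC_ACTIVATE_MSG_SIZE_UV
+	 * ??? bytes per heartbeating partition every 5 seconds.)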
+ */ + + msg.heartbeat = atomic64_inc_return(&xpc_heartbeat_uv); + + partid = find_first_bit(xpc_heartbeating_to_mask_uv, + XP_MAX_NPARTITIONS_UV); + + while (partid < XP_MAX_NPARTITIONS_UV) { + part = &xpc_partitions[partid]; + + xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), + msg_type); + + partid = find_next_bit(xpc_heartbeating_to_mask_uv, + XP_MAX_NPARTITIONS_UV, partid + 1); + } +} + +static void +xpc_increment_heartbeat_uv(void) +{ + xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV); +} + +static void +xpc_offline_heartbeat_uv(void) +{ + xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV); +} + +static void +xpc_online_heartbeat_uv(void) +{ + xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV); +} + +static void +xpc_heartbeat_init_uv(void) +{ + atomic64_set(&xpc_heartbeat_uv, 0); + bitmap_zero(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV); + xpc_heartbeating_to_mask = &xpc_heartbeating_to_mask_uv[0]; +} + +static void +xpc_heartbeat_exit_uv(void) +{ + xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV); +} + +static enum xp_retval +xpc_get_remote_heartbeat_uv(struct xpc_partition *part) +{ + struct xpc_partition_uv *part_uv = &part->sn.uv; + enum xp_retval ret = xpNoHeartbeat; + + if (part_uv->remote_act_state != XPC_P_AS_INACTIVE && + part_uv->remote_act_state != XPC_P_AS_DEACTIVATING) { + + if (part_uv->heartbeat != part->last_heartbeat || + (part_uv->flags & XPC_P_HEARTBEAT_OFFLINE_UV)) { + + part->last_heartbeat = part_uv->heartbeat; + ret = xpSuccess; + } + } + return ret; +} + +static void +xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp, + unsigned long remote_rp_gpa, int nasid) +{ + short partid = remote_rp->SAL_partid; + struct xpc_partition *part = &xpc_partitions[partid]; + struct xpc_activate_mq_msg_activate_req_uv msg; + + part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */ + part->remote_rp_ts_jiffies = remote_rp->ts_jiffies; + part->sn.uv.remote_activate_mq_gpa = remote_rp->sn.activate_mq_gpa; + + /* + * ??? Is it a good idea to make this conditional on what is + * ??? potentially stale state information? + */ + if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) { + msg.rp_gpa = uv_gpa(xpc_rsvd_page); + msg.activate_mq_gpa = xpc_rsvd_page->sn.activate_mq_gpa; + xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV); + } + + if (part->act_state == XPC_P_AS_INACTIVE) + xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV); +} + +static void +xpc_request_partition_reactivation_uv(struct xpc_partition *part) +{ + xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV); +} + +static void +xpc_request_partition_deactivation_uv(struct xpc_partition *part) +{ + struct xpc_activate_mq_msg_deactivate_req_uv msg; + + /* + * ??? Is it a good idea to make this conditional on what is + * ??? potentially stale state information? 
 */
+	if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
+	    part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {
+
+		msg.reason = part->reason;
+		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
+					 XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
+	}
+}
+
+static void
+xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
+{
+	/* nothing needs to be done */
+	return;
+}
+
+static void
+xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
+{
+	head->first = NULL;
+	head->last = NULL;
+	spin_lock_init(&head->lock);
+	head->n_entries = 0;
+}
+
+static void *
+xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
+{
+	unsigned long irq_flags;
+	struct xpc_fifo_entry_uv *first;
+
+	spin_lock_irqsave(&head->lock, irq_flags);
+	first = head->first;
+	if (head->first != NULL) {
+		head->first = first->next;
+		if (head->first == NULL)
+			head->last = NULL;
+
+		/* taking an entry off the list shrinks the count */
+		head->n_entries--;
+		BUG_ON(head->n_entries < 0);
+
+		first->next = NULL;
+	}
+	spin_unlock_irqrestore(&head->lock, irq_flags);
+	return first;
+}
+
+static void
+xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
+		      struct xpc_fifo_entry_uv *last)
+{
+	unsigned long irq_flags;
+
+	last->next = NULL;
+	spin_lock_irqsave(&head->lock, irq_flags);
+	if (head->last != NULL)
+		head->last->next = last;
+	else
+		head->first = last;
+	head->last = last;
+	/* adding an entry to the list grows the count */
+	head->n_entries++;
+	spin_unlock_irqrestore(&head->lock, irq_flags);
+}
+
+static int
+xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
+{
+	return head->n_entries;
+}
+
+/*
+ * Setup the channel structures that are uv specific.
+ */
+static enum xp_retval
+xpc_setup_ch_structures_sn_uv(struct xpc_partition *part)
+{
+	struct xpc_channel_uv *ch_uv;
+	int ch_number;
+
+	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
+		ch_uv = &part->channels[ch_number].sn.uv;
+
+		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
+		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
+	}
+
+	return xpSuccess;
+}
+
+/*
+ * Teardown the channel structures that are uv specific.
+ */
+static void
+xpc_teardown_ch_structures_sn_uv(struct xpc_partition *part)
+{
+	/* nothing needs to be done */
+	return;
+}
+
+static enum xp_retval
+xpc_make_first_contact_uv(struct xpc_partition *part)
+{
+	struct xpc_activate_mq_msg_uv msg;
+
+	/*
+	 * We send a sync msg to get the remote partition's remote_act_state
+	 * updated to our current act_state which at this point should
+	 * be XPC_P_AS_ACTIVATING.
 */
+	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
+				      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);
+
+	while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) {
+
+		dev_dbg(xpc_part, "waiting to make first contact with "
+			"partition %d\n", XPC_PARTID(part));
+
+		/* wait a 1/4 of a second or so */
+		(void)msleep_interruptible(250);
+
+		if (part->act_state == XPC_P_AS_DEACTIVATING)
+			return part->reason;
+	}
+
+	return xpSuccess;
+}
+
+static u64
+xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
+{
+	unsigned long irq_flags;
+	union xpc_channel_ctl_flags chctl;
+
+	spin_lock_irqsave(&part->chctl_lock, irq_flags);
+	chctl = part->chctl;
+	if (chctl.all_flags != 0)
+		part->chctl.all_flags = 0;
+
+	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
+	return chctl.all_flags;
+}
+
+static enum xp_retval
+xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
+{
+	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
+	struct xpc_send_msg_slot_uv *msg_slot;
+	unsigned long irq_flags;
+	int nentries;
+	int entry;
+	size_t nbytes;
+
+	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
+		nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
+		ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
+		if (ch_uv->send_msg_slots == NULL)
+			continue;
+
+		for (entry = 0; entry < nentries; entry++) {
+			msg_slot = &ch_uv->send_msg_slots[entry];
+
+			msg_slot->msg_slot_number = entry;
+			xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
+					      &msg_slot->next);
+		}
+
+		spin_lock_irqsave(&ch->lock, irq_flags);
+		if (nentries < ch->local_nentries)
+			ch->local_nentries = nentries;
+		spin_unlock_irqrestore(&ch->lock, irq_flags);
+		return xpSuccess;
+	}
+
+	return xpNoMemory;
+}
+
+static enum xp_retval
+xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
+{
+	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
+	struct xpc_notify_mq_msg_uv *msg_slot;
+	unsigned long irq_flags;
+	int nentries;
+	int entry;
+	size_t nbytes;
+
+	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
+		nbytes = nentries * ch->entry_size;
+		ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
+		if (ch_uv->recv_msg_slots == NULL)
+			continue;
+
+		for (entry = 0; entry < nentries; entry++) {
+			msg_slot = ch_uv->recv_msg_slots +
+			    entry * ch->entry_size;
+
+			msg_slot->hdr.msg_slot_number = entry;
+		}
+
+		spin_lock_irqsave(&ch->lock, irq_flags);
+		if (nentries < ch->remote_nentries)
+			ch->remote_nentries = nentries;
+		spin_unlock_irqrestore(&ch->lock, irq_flags);
+		return xpSuccess;
+	}
+
+	return xpNoMemory;
+}
+
+/*
+ * Allocate msg_slots associated with the channel.
+ */
+static enum xp_retval
+xpc_setup_msg_structures_uv(struct xpc_channel *ch)
+{
+	enum xp_retval ret;	/* must not be static; this can run for
+				   multiple channels concurrently */
+	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
+
+	DBUG_ON(ch->flags & XPC_C_SETUP);
+
+	ret = xpc_allocate_send_msg_slot_uv(ch);
+	if (ret == xpSuccess) {
+
+		ret = xpc_allocate_recv_msg_slot_uv(ch);
+		if (ret != xpSuccess) {
+			kfree(ch_uv->send_msg_slots);
+			xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
+		}
+	}
+	return ret;
+}
+
+/*
+ * Free up msg_slots and clear other stuff that were setup for the specified
+ * channel.
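+ *
+ * Assumes the caller holds ch->lock, as asserted by the DBUG_ON below.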
+ */ +static void +xpc_teardown_msg_structures_uv(struct xpc_channel *ch) +{ + struct xpc_channel_uv *ch_uv = &ch->sn.uv; + + DBUG_ON(!spin_is_locked(&ch->lock)); + + ch_uv->remote_notify_mq_gpa = 0; + + if (ch->flags & XPC_C_SETUP) { + xpc_init_fifo_uv(&ch_uv->msg_slot_free_list); + kfree(ch_uv->send_msg_slots); + xpc_init_fifo_uv(&ch_uv->recv_msg_list); + kfree(ch_uv->recv_msg_slots); + } +} + +static void +xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_activate_mq_msg_chctl_closerequest_uv msg; + + msg.ch_number = ch->number; + msg.reason = ch->reason; + xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV); +} + +static void +xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_activate_mq_msg_chctl_closereply_uv msg; + + msg.ch_number = ch->number; + xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV); +} + +static void +xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_activate_mq_msg_chctl_openrequest_uv msg; + + msg.ch_number = ch->number; + msg.entry_size = ch->entry_size; + msg.local_nentries = ch->local_nentries; + xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV); +} + +static void +xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_activate_mq_msg_chctl_openreply_uv msg; + + msg.ch_number = ch->number; + msg.local_nentries = ch->local_nentries; + msg.remote_nentries = ch->remote_nentries; + msg.local_notify_mq_gpa = uv_gpa(xpc_notify_mq_uv); + xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV); +} + +static void +xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number) +{ + unsigned long irq_flags; + + spin_lock_irqsave(&part->chctl_lock, irq_flags); + part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST; + spin_unlock_irqrestore(&part->chctl_lock, irq_flags); + + xpc_wakeup_channel_mgr(part); +} + +static void +xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch, + unsigned long msgqueue_pa) +{ + ch->sn.uv.remote_notify_mq_gpa = msgqueue_pa; +} + +static void +xpc_indicate_partition_engaged_uv(struct xpc_partition *part) +{ + struct xpc_activate_mq_msg_uv msg; + + xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV); +} + +static void +xpc_indicate_partition_disengaged_uv(struct xpc_partition *part) +{ + struct xpc_activate_mq_msg_uv msg; + + xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV); +} + +static void +xpc_assume_partition_disengaged_uv(short partid) +{ + struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv; + unsigned long irq_flags; + + spin_lock_irqsave(&part_uv->flags_lock, irq_flags); + part_uv->flags &= ~XPC_P_ENGAGED_UV; + spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); +} + +static int +xpc_partition_engaged_uv(short partid) +{ + return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0; +} + +static int +xpc_any_partition_engaged_uv(void) +{ + struct xpc_partition_uv *part_uv; + short partid; + + for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) { + part_uv = &xpc_partitions[partid].sn.uv; + if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0) + return 1; + } + return 0; +} + +static enum xp_retval 
+xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags, + struct xpc_send_msg_slot_uv **address_of_msg_slot) +{ + enum xp_retval ret; + struct xpc_send_msg_slot_uv *msg_slot; + struct xpc_fifo_entry_uv *entry; + + while (1) { + entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list); + if (entry != NULL) + break; + + if (flags & XPC_NOWAIT) + return xpNoWait; + + ret = xpc_allocate_msg_wait(ch); + if (ret != xpInterrupted && ret != xpTimeout) + return ret; + } + + msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next); + *address_of_msg_slot = msg_slot; + return xpSuccess; +} + +static void +xpc_free_msg_slot_uv(struct xpc_channel *ch, + struct xpc_send_msg_slot_uv *msg_slot) +{ + xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next); + + /* wakeup anyone waiting for a free msg slot */ + if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) + wake_up(&ch->msg_allocate_wq); +} + +static void +xpc_notify_sender_uv(struct xpc_channel *ch, + struct xpc_send_msg_slot_uv *msg_slot, + enum xp_retval reason) +{ + xpc_notify_func func = msg_slot->func; + + if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) { + + atomic_dec(&ch->n_to_notify); + + dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p " + "msg_slot_number=%d partid=%d channel=%d\n", msg_slot, + msg_slot->msg_slot_number, ch->partid, ch->number); + + func(reason, ch->partid, ch->number, msg_slot->key); + + dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p " + "msg_slot_number=%d partid=%d channel=%d\n", msg_slot, + msg_slot->msg_slot_number, ch->partid, ch->number); + } +} + +static void +xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch, + struct xpc_notify_mq_msg_uv *msg) +{ + struct xpc_send_msg_slot_uv *msg_slot; + int entry = msg->hdr.msg_slot_number % ch->local_nentries; + + msg_slot = &ch->sn.uv.send_msg_slots[entry]; + + BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number); + msg_slot->msg_slot_number += ch->local_nentries; + + if (msg_slot->func != NULL) + xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered); + + xpc_free_msg_slot_uv(ch, msg_slot); +} + +static void +xpc_handle_notify_mq_msg_uv(struct xpc_partition *part, + struct xpc_notify_mq_msg_uv *msg) +{ + struct xpc_partition_uv *part_uv = &part->sn.uv; + struct xpc_channel *ch; + struct xpc_channel_uv *ch_uv; + struct xpc_notify_mq_msg_uv *msg_slot; + unsigned long irq_flags; + int ch_number = msg->hdr.ch_number; + + if (unlikely(ch_number >= part->nchannels)) { + dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid " + "channel number=0x%x in message from partid=%d\n", + ch_number, XPC_PARTID(part)); + + /* get hb checker to deactivate from the remote partition */ + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + if (part_uv->act_state_req == 0) + xpc_activate_IRQ_rcvd++; + part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV; + part_uv->reason = xpBadChannelNumber; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + + wake_up_interruptible(&xpc_activate_IRQ_wq); + return; + } + + ch = &part->channels[ch_number]; + xpc_msgqueue_ref(ch); + + if (!(ch->flags & XPC_C_CONNECTED)) { + xpc_msgqueue_deref(ch); + return; + } + + /* see if we're really dealing with an ACK for a previously sent msg */ + if (msg->hdr.size == 0) { + xpc_handle_notify_mq_ack_uv(ch, msg); + xpc_msgqueue_deref(ch); + return; + } + + /* we're dealing with a normal message sent via the notify_mq */ + ch_uv = &ch->sn.uv; + + msg_slot = ch_uv->recv_msg_slots + + (msg->hdr.msg_slot_number % 
ch->remote_nentries) * ch->entry_size; + + BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number); + BUG_ON(msg_slot->hdr.size != 0); + + memcpy(msg_slot, msg, msg->hdr.size); + + xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next); + + if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) { + /* + * If there is an existing idle kthread get it to deliver + * the payload, otherwise we'll have to get the channel mgr + * for this partition to create a kthread to do the delivery. + */ + if (atomic_read(&ch->kthreads_idle) > 0) + wake_up_nr(&ch->idle_wq, 1); + else + xpc_send_chctl_local_msgrequest_uv(part, ch->number); + } + xpc_msgqueue_deref(ch); +} + +static irqreturn_t +xpc_handle_notify_IRQ_uv(int irq, void *dev_id) +{ + struct xpc_notify_mq_msg_uv *msg; + short partid; + struct xpc_partition *part; + + while ((msg = gru_get_next_message(xpc_notify_mq_uv)) != NULL) { + + partid = msg->hdr.partid; + if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) { + dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received " + "invalid partid=0x%x in message\n", partid); + } else { + part = &xpc_partitions[partid]; + + if (xpc_part_ref(part)) { + xpc_handle_notify_mq_msg_uv(part, msg); + xpc_part_deref(part); + } + } + + gru_free_message(xpc_notify_mq_uv, msg); + } + + return IRQ_HANDLED; +} + +static int +xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch) +{ + return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list); +} + +static void +xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number) +{ + struct xpc_channel *ch = &part->channels[ch_number]; + int ndeliverable_payloads; + + xpc_msgqueue_ref(ch); + + ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch); + + if (ndeliverable_payloads > 0 && + (ch->flags & XPC_C_CONNECTED) && + (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) { + + xpc_activate_kthreads(ch, ndeliverable_payloads); + } + + xpc_msgqueue_deref(ch); +} + +static enum xp_retval +xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload, + u16 payload_size, u8 notify_type, xpc_notify_func func, + void *key) +{ + enum xp_retval ret = xpSuccess; + struct xpc_send_msg_slot_uv *msg_slot = NULL; + struct xpc_notify_mq_msg_uv *msg; + u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV]; + size_t msg_size; + + DBUG_ON(notify_type != XPC_N_CALL); + + msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size; + if (msg_size > ch->entry_size) + return xpPayloadTooBig; + + xpc_msgqueue_ref(ch); + + if (ch->flags & XPC_C_DISCONNECTING) { + ret = ch->reason; + goto out_1; + } + if (!(ch->flags & XPC_C_CONNECTED)) { + ret = xpNotConnected; + goto out_1; + } + + ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot); + if (ret != xpSuccess) + goto out_1; + + if (func != NULL) { + atomic_inc(&ch->n_to_notify); + + msg_slot->key = key; + smp_wmb(); /* a non-NULL func must hit memory after the key */ + msg_slot->func = func; + + if (ch->flags & XPC_C_DISCONNECTING) { + ret = ch->reason; + goto out_2; + } + } + + msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer; + msg->hdr.partid = xp_partition_id; + msg->hdr.ch_number = ch->number; + msg->hdr.size = msg_size; + msg->hdr.msg_slot_number = msg_slot->msg_slot_number; + memcpy(&msg->payload, payload, payload_size); + + ret = xpc_send_gru_msg(ch->sn.uv.remote_notify_mq_gpa, msg, msg_size); + if (ret == xpSuccess) + goto out_1; + + XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret); +out_2: + if (func != NULL) { + /* + * Try to NULL the msg_slot's func field. 
If we fail, then + * xpc_notify_senders_of_disconnect_uv() beat us to it, in which + * case we need to pretend we succeeded to send the message + * since the user will get a callout for the disconnect error + * by xpc_notify_senders_of_disconnect_uv(), and also getting an + * error returned here would confuse them. Additionally, since + * in this case the channel is being disconnected we don't need + * to put the msg_slot back on the free list. + */ + if (cmpxchg(&msg_slot->func, func, NULL) != func) { + ret = xpSuccess; + goto out_1; + } + + msg_slot->key = NULL; + atomic_dec(&ch->n_to_notify); + } + xpc_free_msg_slot_uv(ch, msg_slot); +out_1: + xpc_msgqueue_deref(ch); + return ret; +} + +/* + * Tell the callers of xpc_send_notify() that the status of their payloads + * is unknown because the channel is now disconnecting. + * + * We don't worry about putting these msg_slots on the free list since the + * msg_slots themselves are about to be kfree'd. + */ +static void +xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch) +{ + struct xpc_send_msg_slot_uv *msg_slot; + int entry; + + DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING)); + + for (entry = 0; entry < ch->local_nentries; entry++) { + + if (atomic_read(&ch->n_to_notify) == 0) + break; + + msg_slot = &ch->sn.uv.send_msg_slots[entry]; + if (msg_slot->func != NULL) + xpc_notify_sender_uv(ch, msg_slot, ch->reason); + } +} + +/* + * Get the next deliverable message's payload. + */ +static void * +xpc_get_deliverable_payload_uv(struct xpc_channel *ch) +{ + struct xpc_fifo_entry_uv *entry; + struct xpc_notify_mq_msg_uv *msg; + void *payload = NULL; + + if (!(ch->flags & XPC_C_DISCONNECTING)) { + entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list); + if (entry != NULL) { + msg = container_of(entry, struct xpc_notify_mq_msg_uv, + hdr.u.next); + payload = &msg->payload; + } + } + return payload; +} + +static void +xpc_received_payload_uv(struct xpc_channel *ch, void *payload) +{ + struct xpc_notify_mq_msg_uv *msg; + enum xp_retval ret; + + msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload); + + /* return an ACK to the sender of this message */ + + msg->hdr.partid = xp_partition_id; + msg->hdr.size = 0; /* size of zero indicates this is an ACK */ + + ret = xpc_send_gru_msg(ch->sn.uv.remote_notify_mq_gpa, msg, + sizeof(struct xpc_notify_mq_msghdr_uv)); + if (ret != xpSuccess) + XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret); + + msg->hdr.msg_slot_number += ch->remote_nentries; +} + +int +xpc_init_uv(void) +{ + xpc_setup_partitions_sn = xpc_setup_partitions_sn_uv; + xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv; + xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv; + xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_uv; + xpc_increment_heartbeat = xpc_increment_heartbeat_uv; + xpc_offline_heartbeat = xpc_offline_heartbeat_uv; + xpc_online_heartbeat = xpc_online_heartbeat_uv; + xpc_heartbeat_init = xpc_heartbeat_init_uv; + xpc_heartbeat_exit = xpc_heartbeat_exit_uv; + xpc_get_remote_heartbeat = xpc_get_remote_heartbeat_uv; + + xpc_request_partition_activation = xpc_request_partition_activation_uv; + xpc_request_partition_reactivation = + xpc_request_partition_reactivation_uv; + xpc_request_partition_deactivation = + xpc_request_partition_deactivation_uv; + xpc_cancel_partition_deactivation_request = + xpc_cancel_partition_deactivation_request_uv; + + xpc_setup_ch_structures_sn = xpc_setup_ch_structures_sn_uv; + xpc_teardown_ch_structures_sn = xpc_teardown_ch_structures_sn_uv; + + 
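/* + * Every assignment in this function installs the UV-specific + * implementation into one of XPC's global function pointers; the + * hardware-independent XPC core only ever calls through this table. + */ + 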
xpc_make_first_contact = xpc_make_first_contact_uv; + + xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_uv; + xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_uv; + xpc_send_chctl_closereply = xpc_send_chctl_closereply_uv; + xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_uv; + xpc_send_chctl_openreply = xpc_send_chctl_openreply_uv; + + xpc_save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv; + + xpc_setup_msg_structures = xpc_setup_msg_structures_uv; + xpc_teardown_msg_structures = xpc_teardown_msg_structures_uv; + + xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_uv; + xpc_indicate_partition_disengaged = + xpc_indicate_partition_disengaged_uv; + xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_uv; + xpc_partition_engaged = xpc_partition_engaged_uv; + xpc_any_partition_engaged = xpc_any_partition_engaged_uv; + + xpc_n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv; + xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv; + xpc_send_payload = xpc_send_payload_uv; + xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv; + xpc_get_deliverable_payload = xpc_get_deliverable_payload_uv; + xpc_received_payload = xpc_received_payload_uv; + + if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) { + dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n", + XPC_MSG_HDR_MAX_SIZE); + return -E2BIG; + } + + /* ??? The cpuid argument's value is 0, is that what we want? */ + /* !!! The irq argument's value isn't correct. */ + xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, 0, + xpc_handle_activate_IRQ_uv); + if (xpc_activate_mq_uv == NULL) + return -ENOMEM; + + /* ??? The cpuid argument's value is 0, is that what we want? */ + /* !!! The irq argument's value isn't correct. */ + xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, 0, + xpc_handle_notify_IRQ_uv); + if (xpc_notify_mq_uv == NULL) { + /* !!! The irq argument's value isn't correct. */ + xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, + XPC_ACTIVATE_MQ_SIZE_UV, 0); + return -ENOMEM; + } + + return 0; +} + +void +xpc_exit_uv(void) +{ + /* !!! The irq argument's value isn't correct. */ + xpc_destroy_gru_mq_uv(xpc_notify_mq_uv, XPC_NOTIFY_MQ_SIZE_UV, 0); + + /* !!! The irq argument's value isn't correct. */ + xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, XPC_ACTIVATE_MQ_SIZE_UV, 0); +} diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c new file mode 100644 index 0000000..71513b3 --- /dev/null +++ b/drivers/misc/sgi-xp/xpnet.c @@ -0,0 +1,638 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1999-2008 Silicon Graphics, Inc. All rights reserved. + */ + +/* + * Cross Partition Network Interface (XPNET) support + * + * XPNET provides a virtual network layered on top of the Cross + * Partition communication layer. + * + * XPNET provides direct point-to-point and broadcast-like support + * for an ethernet-like device. The ethernet broadcast medium is + * replaced with a point-to-point message structure which passes + * pointers to a DMA-capable block that a remote partition should + * retrieve and pass to the upper level networking layer. + * + */ + +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include "xp.h" + +/* + * The message payload transferred by XPC. 
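Rather than + * carrying the packet body itself, the message normally just + * describes where the sender's skb lives; the receiver pulls the data + * across with xp_remote_memcpy(), and small packets can instead be + * embedded directly (see embedded_bytes below).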
+ * + * buf_pa is the physical address where the DMA should pull from. + * + * NOTE: for performance reasons, buf_pa should _ALWAYS_ begin on a + * cacheline boundary. To accomplish this, we record the number of + * bytes from the beginning of the first cacheline to the first useful + * byte of the skb (leadin_ignore) and the number of bytes from the + * last useful byte of the skb to the end of the last cacheline + * (tailout_ignore). + * + * size is the number of bytes to transfer which includes the skb->len + * (useful bytes of the sender's skb) plus the leadin and tailout + */ +struct xpnet_message { + u16 version; /* Version for this message */ + u16 embedded_bytes; /* #of bytes embedded in XPC message */ + u32 magic; /* Special number indicating this is xpnet */ + unsigned long buf_pa; /* phys address of buffer to retrieve */ + u32 size; /* #of bytes in buffer */ + u8 leadin_ignore; /* #of bytes to ignore at the beginning */ + u8 tailout_ignore; /* #of bytes to ignore at the end */ + unsigned char data; /* body of small packets */ +}; + +/* + * Determine the size of our message, the cacheline aligned size, + * and then the number of messages we will request from XPC. + * + * XPC expects each message to exist in an individual cacheline. + */ +#define XPNET_MSG_SIZE XPC_MSG_PAYLOAD_MAX_SIZE +#define XPNET_MSG_DATA_MAX \ + (XPNET_MSG_SIZE - offsetof(struct xpnet_message, data)) +#define XPNET_MSG_NENTRIES (PAGE_SIZE / XPC_MSG_MAX_SIZE) + +#define XPNET_MAX_KTHREADS (XPNET_MSG_NENTRIES + 1) +#define XPNET_MAX_IDLE_KTHREADS (XPNET_MSG_NENTRIES + 1) + +/* + * Version number of XPNET implementation. XPNET can always talk to versions + * with same major #, and never talk to versions with a different major #. + */ +#define _XPNET_VERSION(_major, _minor) (((_major) << 4) | (_minor)) +#define XPNET_VERSION_MAJOR(_v) ((_v) >> 4) +#define XPNET_VERSION_MINOR(_v) ((_v) & 0xf) + +#define XPNET_VERSION _XPNET_VERSION(1, 0) /* version 1.0 */ +#define XPNET_VERSION_EMBED _XPNET_VERSION(1, 1) /* version 1.1 */ +#define XPNET_MAGIC 0x88786984 /* "XNET" */ + +#define XPNET_VALID_MSG(_m) \ + ((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \ + && (msg->magic == XPNET_MAGIC)) + +#define XPNET_DEVICE_NAME "xp0" + +/* + * When messages are queued with xpc_send_notify, a kmalloc'd buffer + * of the following type is passed as a notification cookie. When the + * notification function is called, we use the cookie to decide + * whether all outstanding message sends have completed. The skb can + * then be released. + */ +struct xpnet_pending_msg { + struct sk_buff *skb; + atomic_t use_count; +}; + +/* driver specific structure pointed to by the device structure */ +struct xpnet_dev_private { + struct net_device_stats stats; +}; + +struct net_device *xpnet_device; + +/* + * When we are notified of other partitions activating, we add them to + * our bitmask of partitions to which we broadcast. + */ +static unsigned long *xpnet_broadcast_partitions; +/* protect above */ +static DEFINE_SPINLOCK(xpnet_broadcast_lock); + +/* + * Since the Block Transfer Engine (BTE) is being used for the transfer + * and it relies upon cache-line size transfers, we need to reserve at + * least one cache-line for head and tail alignment. The BTE is + * limited to 8MB transfers. + * + * Testing has shown that changing MTU to greater than 64KB has no effect + * on TCP as the two sides negotiate a Max Segment Size that is limited + * to 64K. 
Other protocols may use packets greater than this, but for + * now, the default is 64KB. + */ +#define XPNET_MAX_MTU (0x800000UL - L1_CACHE_BYTES) +/* 32KB has been determined to be the ideal */ +#define XPNET_DEF_MTU (0x8000UL) + +/* + * The partid is encapsulated in the MAC address beginning in the following + * octet and it consists of two octets. + */ +#define XPNET_PARTID_OCTET 2 + +/* Define the XPNET debug device structures to be used with dev_dbg() et al */ + +struct device_driver xpnet_dbg_name = { + .name = "xpnet" +}; + +struct device xpnet_dbg_subname = { + .bus_id = {0}, /* set to "" */ + .driver = &xpnet_dbg_name +}; + +struct device *xpnet = &xpnet_dbg_subname; + +/* + * Packet was received by XPC and forwarded to us. + */ +static void +xpnet_receive(short partid, int channel, struct xpnet_message *msg) +{ + struct sk_buff *skb; + void *dst; + enum xp_retval ret; + struct xpnet_dev_private *priv = + (struct xpnet_dev_private *)xpnet_device->priv; + + if (!XPNET_VALID_MSG(msg)) { + /* + * Packet with a different XPC version. Ignore. + */ + xpc_received(partid, channel, (void *)msg); + + priv->stats.rx_errors++; + + return; + } + dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size, + msg->leadin_ignore, msg->tailout_ignore); + + /* reserve an extra cache line */ + skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES); + if (!skb) { + dev_err(xpnet, "failed on dev_alloc_skb(%d)\n", + msg->size + L1_CACHE_BYTES); + + xpc_received(partid, channel, (void *)msg); + + priv->stats.rx_errors++; + + return; + } + + /* + * The allocated skb has some reserved space. + * In order to use xp_remote_memcpy(), we need to get the + * skb->data pointer moved forward. + */ + skb_reserve(skb, (L1_CACHE_BYTES - ((u64)skb->data & + (L1_CACHE_BYTES - 1)) + + msg->leadin_ignore)); + + /* + * Update the tail pointer to indicate data actually + * transferred. + */ + skb_put(skb, (msg->size - msg->leadin_ignore - msg->tailout_ignore)); + + /* + * Move the data over from the other side. + */ + if ((XPNET_VERSION_MINOR(msg->version) == 1) && + (msg->embedded_bytes != 0)) { + dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, " + "%lu)\n", skb->data, &msg->data, + (size_t)msg->embedded_bytes); + + skb_copy_to_linear_data(skb, &msg->data, + (size_t)msg->embedded_bytes); + } else { + dst = (void *)((u64)skb->data & ~(L1_CACHE_BYTES - 1)); + dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t" + "xp_remote_memcpy(0x%p, 0x%p, %hu)\n", dst, + (void *)msg->buf_pa, msg->size); + + ret = xp_remote_memcpy(xp_pa(dst), msg->buf_pa, msg->size); + if (ret != xpSuccess) { + /* + * !!! Need better way of cleaning skb. Currently skb + * !!! appears in_use and we can't just call + * !!! dev_kfree_skb. 
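For now we just hand the + * !!! message back to XPC and count an rx_error; the skb itself is + * !!! not reclaimed.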
+ */ + dev_err(xpnet, "xp_remote_memcpy(0x%p, 0x%p, 0x%hx) " + "returned error=0x%x\n", dst, + (void *)msg->buf_pa, msg->size, ret); + + xpc_received(partid, channel, (void *)msg); + + priv->stats.rx_errors++; + + return; + } + } + + dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p " + "skb->end=0x%p skb->len=%d\n", (void *)skb->head, + (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), + skb->len); + + skb->protocol = eth_type_trans(skb, xpnet_device); + skb->ip_summed = CHECKSUM_UNNECESSARY; + + dev_dbg(xpnet, "passing skb to network layer\n" + KERN_DEBUG "\tskb->head=0x%p skb->data=0x%p skb->tail=0x%p " + "skb->end=0x%p skb->len=%d\n", + (void *)skb->head, (void *)skb->data, skb_tail_pointer(skb), + skb_end_pointer(skb), skb->len); + + xpnet_device->last_rx = jiffies; + priv->stats.rx_packets++; + priv->stats.rx_bytes += skb->len + ETH_HLEN; + + netif_rx_ni(skb); + xpc_received(partid, channel, (void *)msg); +} + +/* + * This is the handler which XPC calls during any sort of change in + * state or message reception on a connection. + */ +static void +xpnet_connection_activity(enum xp_retval reason, short partid, int channel, + void *data, void *key) +{ + DBUG_ON(partid < 0 || partid >= xp_max_npartitions); + DBUG_ON(channel != XPC_NET_CHANNEL); + + switch (reason) { + case xpMsgReceived: /* message received */ + DBUG_ON(data == NULL); + + xpnet_receive(partid, channel, (struct xpnet_message *)data); + break; + + case xpConnected: /* connection completed to a partition */ + spin_lock_bh(&xpnet_broadcast_lock); + __set_bit(partid, xpnet_broadcast_partitions); + spin_unlock_bh(&xpnet_broadcast_lock); + + netif_carrier_on(xpnet_device); + + dev_dbg(xpnet, "%s connected to partition %d\n", + xpnet_device->name, partid); + break; + + default: + spin_lock_bh(&xpnet_broadcast_lock); + __clear_bit(partid, xpnet_broadcast_partitions); + spin_unlock_bh(&xpnet_broadcast_lock); + + if (bitmap_empty((unsigned long *)xpnet_broadcast_partitions, + xp_max_npartitions)) { + netif_carrier_off(xpnet_device); + } + + dev_dbg(xpnet, "%s disconnected from partition %d\n", + xpnet_device->name, partid); + break; + } +} + +static int +xpnet_dev_open(struct net_device *dev) +{ + enum xp_retval ret; + + dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, " + "%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity, + (unsigned long)XPNET_MSG_SIZE, + (unsigned long)XPNET_MSG_NENTRIES, + (unsigned long)XPNET_MAX_KTHREADS, + (unsigned long)XPNET_MAX_IDLE_KTHREADS); + + ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL, + XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, + XPNET_MAX_KTHREADS, XPNET_MAX_IDLE_KTHREADS); + if (ret != xpSuccess) { + dev_err(xpnet, "ifconfig up of %s failed on XPC connect, " + "ret=%d\n", dev->name, ret); + + return -ENOMEM; + } + + dev_dbg(xpnet, "ifconfig up of %s; XPC connected\n", dev->name); + + return 0; +} + +static int +xpnet_dev_stop(struct net_device *dev) +{ + xpc_disconnect(XPC_NET_CHANNEL); + + dev_dbg(xpnet, "ifconfig down of %s; XPC disconnected\n", dev->name); + + return 0; +} + +static int +xpnet_dev_change_mtu(struct net_device *dev, int new_mtu) +{ + /* 68 comes from min TCP+IP+MAC header */ + if ((new_mtu < 68) || (new_mtu > XPNET_MAX_MTU)) { + dev_err(xpnet, "ifconfig %s mtu %d failed; value must be " + "between 68 and %ld\n", dev->name, new_mtu, + XPNET_MAX_MTU); + return -EINVAL; + } + + dev->mtu = new_mtu; + dev_dbg(xpnet, "ifconfig %s mtu set to %d\n", dev->name, new_mtu); + return 0; +} + +/* + * Required for the net_device 
structure. + */ +static int +xpnet_dev_set_config(struct net_device *dev, struct ifmap *new_map) +{ + return 0; +} + +/* + * Return statistics to the caller. + */ +static struct net_device_stats * +xpnet_dev_get_stats(struct net_device *dev) +{ + struct xpnet_dev_private *priv; + + priv = (struct xpnet_dev_private *)dev->priv; + + return &priv->stats; +} + +/* + * Notification that the other end has received the message and + * DMA'd the skb information. At this point, they are done with + * our side. When all recipients are done processing, we + * release the skb and then release our pending message structure. + */ +static void +xpnet_send_completed(enum xp_retval reason, short partid, int channel, + void *__qm) +{ + struct xpnet_pending_msg *queued_msg = (struct xpnet_pending_msg *)__qm; + + DBUG_ON(queued_msg == NULL); + + dev_dbg(xpnet, "message to %d notified with reason %d\n", + partid, reason); + + if (atomic_dec_return(&queued_msg->use_count) == 0) { + dev_dbg(xpnet, "all acks for skb->head=0x%p\n", + (void *)queued_msg->skb->head); + + dev_kfree_skb_any(queued_msg->skb); + kfree(queued_msg); + } +} + +static void +xpnet_send(struct sk_buff *skb, struct xpnet_pending_msg *queued_msg, + u64 start_addr, u64 end_addr, u16 embedded_bytes, int dest_partid) +{ + u8 msg_buffer[XPNET_MSG_SIZE]; + struct xpnet_message *msg = (struct xpnet_message *)&msg_buffer; + u16 msg_size = sizeof(struct xpnet_message); + enum xp_retval ret; + + msg->embedded_bytes = embedded_bytes; + if (unlikely(embedded_bytes != 0)) { + msg->version = XPNET_VERSION_EMBED; + dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n", + &msg->data, skb->data, (size_t)embedded_bytes); + skb_copy_from_linear_data(skb, &msg->data, + (size_t)embedded_bytes); + msg_size += embedded_bytes - 1; + } else { + msg->version = XPNET_VERSION; + } + msg->magic = XPNET_MAGIC; + msg->size = end_addr - start_addr; + msg->leadin_ignore = (u64)skb->data - start_addr; + msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb); + msg->buf_pa = xp_pa((void *)start_addr); + + dev_dbg(xpnet, "sending XPC message to %d:%d\n" + KERN_DEBUG "msg->buf_pa=0x%lx, msg->size=%u, " + "msg->leadin_ignore=%u, msg->tailout_ignore=%u\n", + dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size, + msg->leadin_ignore, msg->tailout_ignore); + + atomic_inc(&queued_msg->use_count); + + ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, XPC_NOWAIT, msg, + msg_size, xpnet_send_completed, queued_msg); + if (unlikely(ret != xpSuccess)) + atomic_dec(&queued_msg->use_count); +} + +/* + * Network layer has formatted a packet (skb) and is ready to place it + * "on the wire". Prepare and send an xpnet_message to all partitions + * which have connected with us and are targets of this packet. + * + * MAC-NOTE: For the XPNET driver, the MAC address contains the + * destination partid. If the destination partid octets are 0xffff, + * this packet is to be broadcast to all connected partitions. 
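For example, with + * XPNET_PARTID_OCTET == 2, a frame addressed to partid 3 carries 0x00 + * in octet 2 and 0x03 in octet 3 of the destination MAC; + * xpnet_dev_hard_start_xmit() below rebuilds the partid from exactly + * those two octets.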
+ */ +static int +xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct xpnet_pending_msg *queued_msg; + u64 start_addr, end_addr; + short dest_partid; + struct xpnet_dev_private *priv = (struct xpnet_dev_private *)dev->priv; + u16 embedded_bytes = 0; + + dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p " + "skb->end=0x%p skb->len=%d\n", (void *)skb->head, + (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), + skb->len); + + if (skb->data[0] == 0x33) { + dev_kfree_skb(skb); + return 0; /* nothing needed to be done */ + } + + /* + * The xpnet_pending_msg tracks how many outstanding + * xpc_send_notifies are relying on this skb. When none + * remain, release the skb. + */ + queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC); + if (queued_msg == NULL) { + dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping " + "packet\n", sizeof(struct xpnet_pending_msg)); + + priv->stats.tx_errors++; + return -ENOMEM; + } + + /* get the beginning of the first cacheline and end of last */ + start_addr = ((u64)skb->data & ~(L1_CACHE_BYTES - 1)); + end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb)); + + /* calculate how many bytes to embed in the XPC message */ + if (unlikely(skb->len <= XPNET_MSG_DATA_MAX)) { + /* skb->data does fit so embed */ + embedded_bytes = skb->len; + } + + /* + * Since the send occurs asynchronously, we set the count to one + * and begin sending. Any sends that happen to complete before + * we are done sending will not free the skb. We will be left + * with that task during exit. This also handles the case of + * a packet destined for a partition which is no longer up. + */ + atomic_set(&queued_msg->use_count, 1); + queued_msg->skb = skb; + + if (skb->data[0] == 0xff) { + /* we are being asked to broadcast to all partitions */ + for_each_bit(dest_partid, xpnet_broadcast_partitions, + xp_max_npartitions) { + + xpnet_send(skb, queued_msg, start_addr, end_addr, + embedded_bytes, dest_partid); + } + } else { + dest_partid = (short)skb->data[XPNET_PARTID_OCTET + 1]; + dest_partid |= (short)skb->data[XPNET_PARTID_OCTET + 0] << 8; + + if (dest_partid >= 0 && + dest_partid < xp_max_npartitions && + test_bit(dest_partid, xpnet_broadcast_partitions) != 0) { + + xpnet_send(skb, queued_msg, start_addr, end_addr, + embedded_bytes, dest_partid); + } + } + + if (atomic_dec_return(&queued_msg->use_count) == 0) { + dev_kfree_skb(skb); + kfree(queued_msg); + } + + priv->stats.tx_packets++; + priv->stats.tx_bytes += skb->len; + + return 0; +} + +/* + * Deal with transmit timeouts coming from the network layer. + */ +static void +xpnet_dev_tx_timeout(struct net_device *dev) +{ + struct xpnet_dev_private *priv; + + priv = (struct xpnet_dev_private *)dev->priv; + + priv->stats.tx_errors++; + return; +} + +static int __init +xpnet_init(void) +{ + int result; + + if (!is_shub() && !is_uv()) + return -ENODEV; + + dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME); + + xpnet_broadcast_partitions = kzalloc(BITS_TO_LONGS(xp_max_npartitions) * + sizeof(long), GFP_KERNEL); + if (xpnet_broadcast_partitions == NULL) + return -ENOMEM; + + /* + * use ether_setup() to init the majority of our device + * structure and then override the necessary pieces. 
+ */ + xpnet_device = alloc_netdev(sizeof(struct xpnet_dev_private), + XPNET_DEVICE_NAME, ether_setup); + if (xpnet_device == NULL) { + kfree(xpnet_broadcast_partitions); + return -ENOMEM; + } + + netif_carrier_off(xpnet_device); + + xpnet_device->mtu = XPNET_DEF_MTU; + xpnet_device->change_mtu = xpnet_dev_change_mtu; + xpnet_device->open = xpnet_dev_open; + xpnet_device->get_stats = xpnet_dev_get_stats; + xpnet_device->stop = xpnet_dev_stop; + xpnet_device->hard_start_xmit = xpnet_dev_hard_start_xmit; + xpnet_device->tx_timeout = xpnet_dev_tx_timeout; + xpnet_device->set_config = xpnet_dev_set_config; + + /* + * Multicast assumes the LSB of the first octet is set for multicast + * MAC addresses. We chose the first octet of the MAC to be unlikely + * to collide with any vendor's officially issued MAC. + */ + xpnet_device->dev_addr[0] = 0x02; /* locally administered, no OUI */ + + xpnet_device->dev_addr[XPNET_PARTID_OCTET + 1] = xp_partition_id; + xpnet_device->dev_addr[XPNET_PARTID_OCTET + 0] = (xp_partition_id >> 8); + + /* + * ether_setup() sets this to a multicast device. We are + * really not supporting multicast at this time. + */ + xpnet_device->flags &= ~IFF_MULTICAST; + + /* + * No need to checksum as it is a DMA transfer. The BTE will + * report an error if the data is not retrievable and the + * packet will be dropped. + */ + xpnet_device->features = NETIF_F_NO_CSUM; + + result = register_netdev(xpnet_device); + if (result != 0) { + free_netdev(xpnet_device); + kfree(xpnet_broadcast_partitions); + } + + return result; +} + +module_init(xpnet_init); + +static void __exit +xpnet_exit(void) +{ + dev_info(xpnet, "unregistering network device %s\n", + xpnet_device[0].name); + + unregister_netdev(xpnet_device); + free_netdev(xpnet_device); + kfree(xpnet_broadcast_partitions); +} + +module_exit(xpnet_exit); + +MODULE_AUTHOR("Silicon Graphics, Inc."); +MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/sony-laptop.c b/drivers/misc/sony-laptop.c new file mode 100644 index 0000000..571b211 --- /dev/null +++ b/drivers/misc/sony-laptop.c @@ -0,0 +1,2781 @@ +/* + * ACPI Sony Notebook Control Driver (SNC and SPIC) + * + * Copyright (C) 2004-2005 Stelian Pop <stelian@popies.net> + * Copyright (C) 2007 Mattia Dongili <malattia@linux.it> + * + * Parts of this driver inspired from asus_acpi.c and ibm_acpi.c + * which are copyrighted by their respective authors. + * + * The SNY6001 driver part is based on the sonypi driver which includes + * material from: + * + * Copyright (C) 2001-2005 Stelian Pop <stelian@popies.net> + * + * Copyright (C) 2005 Narayanan R S <nars@kadamba.org> + * + * Copyright (C) 2001-2002 Alcôve <www.alcove.com> + * + * Copyright (C) 2001 Michael Ashley <m.ashley@unsw.edu.au> + * + * Copyright (C) 2001 Junichi Morita <jun1m@mars.dti.ne.jp> + * + * Copyright (C) 2000 Takaya Kinjo <t-kinjo@tc4.so-net.ne.jp> + * + * Copyright (C) 2000 Andrew Tridgell <tridge@valinux.com> + * + * Earlier work by Werner Almesberger, Paul `Rusty' Russell and Paul Mackerras. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/init.h> +#include <linux/smp_lock.h> +#include <linux/types.h> +#include <linux/backlight.h> +#include <linux/platform_device.h> +#include <linux/err.h> +#include <linux/dmi.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/input.h> +#include <linux/kfifo.h> +#include <linux/workqueue.h> +#include <linux/acpi.h> +#include <acpi/acpi_drivers.h> +#include <acpi/acpi_bus.h> +#include <asm/uaccess.h> +#include <linux/sonypi.h> +#include <linux/sony-laptop.h> +#ifdef CONFIG_SONYPI_COMPAT +#include <linux/poll.h> +#include <linux/miscdevice.h> +#endif + +#define DRV_PFX "sony-laptop: " +#define dprintk(msg...) do { \ + if (debug) printk(KERN_WARNING DRV_PFX msg); \ +} while (0) + +#define SONY_LAPTOP_DRIVER_VERSION "0.6" + +#define SONY_NC_CLASS "sony-nc" +#define SONY_NC_HID "SNY5001" +#define SONY_NC_DRIVER_NAME "Sony Notebook Control Driver" + +#define SONY_PIC_CLASS "sony-pic" +#define SONY_PIC_HID "SNY6001" +#define SONY_PIC_DRIVER_NAME "Sony Programmable IO Control Driver" + +MODULE_AUTHOR("Stelian Pop, Mattia Dongili"); +MODULE_DESCRIPTION("Sony laptop extras driver (SPIC and SNC ACPI device)"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(SONY_LAPTOP_DRIVER_VERSION); + +static int debug; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "set this to 1 (and RTFM) if you want to help " + "the development of this driver"); + +static int no_spic; /* = 0 */ +module_param(no_spic, int, 0444); +MODULE_PARM_DESC(no_spic, + "set this if you don't want to enable the SPIC device"); + +static int compat; /* = 0 */ +module_param(compat, int, 0444); +MODULE_PARM_DESC(compat, + "set this if you want to enable backward compatibility mode"); + +static unsigned long mask = 0xffffffff; +module_param(mask, ulong, 0644); +MODULE_PARM_DESC(mask, + "set this to the mask of events you want to enable (see doc)"); + +static int camera; /* = 0 */ +module_param(camera, int, 0444); +MODULE_PARM_DESC(camera, + "set this to 1 to enable Motion Eye camera controls " + "(only use it if you have a C1VE or C1VN model)"); + +#ifdef CONFIG_SONYPI_COMPAT +static int minor = -1; +module_param(minor, int, 0); +MODULE_PARM_DESC(minor, + "minor number of the misc device for the SPIC compatibility code, " + "default is -1 (automatic)"); +#endif + +/*********** Input Devices ***********/ + +#define SONY_LAPTOP_BUF_SIZE 128 +struct sony_laptop_input_s { + atomic_t users; + struct input_dev *jog_dev; + struct input_dev *key_dev; + struct kfifo *fifo; + spinlock_t fifo_lock; + struct workqueue_struct *wq; +}; +static struct sony_laptop_input_s sony_laptop_input = { + .users = ATOMIC_INIT(0), +}; + +struct sony_laptop_keypress { + struct input_dev *dev; + int key; +}; + +/* Correspondence table between sonypi events + * and input layer indexes in the keymap + */ +static int sony_laptop_input_index[] = { + -1, /* 0 no event */ + -1, /* 1 SONYPI_EVENT_JOGDIAL_DOWN */ + -1, /* 2 SONYPI_EVENT_JOGDIAL_UP */ + -1, /* 3 SONYPI_EVENT_JOGDIAL_DOWN_PRESSED */ + -1, /* 4 SONYPI_EVENT_JOGDIAL_UP_PRESSED */ + -1, /* 5 SONYPI_EVENT_JOGDIAL_PRESSED */ + -1, /* 6 SONYPI_EVENT_JOGDIAL_RELEASED */ + 0, /* 7 SONYPI_EVENT_CAPTURE_PRESSED */ + 
1, /* 8 SONYPI_EVENT_CAPTURE_RELEASED */ + 2, /* 9 SONYPI_EVENT_CAPTURE_PARTIALPRESSED */ + 3, /* 10 SONYPI_EVENT_CAPTURE_PARTIALRELEASED */ + 4, /* 11 SONYPI_EVENT_FNKEY_ESC */ + 5, /* 12 SONYPI_EVENT_FNKEY_F1 */ + 6, /* 13 SONYPI_EVENT_FNKEY_F2 */ + 7, /* 14 SONYPI_EVENT_FNKEY_F3 */ + 8, /* 15 SONYPI_EVENT_FNKEY_F4 */ + 9, /* 16 SONYPI_EVENT_FNKEY_F5 */ + 10, /* 17 SONYPI_EVENT_FNKEY_F6 */ + 11, /* 18 SONYPI_EVENT_FNKEY_F7 */ + 12, /* 19 SONYPI_EVENT_FNKEY_F8 */ + 13, /* 20 SONYPI_EVENT_FNKEY_F9 */ + 14, /* 21 SONYPI_EVENT_FNKEY_F10 */ + 15, /* 22 SONYPI_EVENT_FNKEY_F11 */ + 16, /* 23 SONYPI_EVENT_FNKEY_F12 */ + 17, /* 24 SONYPI_EVENT_FNKEY_1 */ + 18, /* 25 SONYPI_EVENT_FNKEY_2 */ + 19, /* 26 SONYPI_EVENT_FNKEY_D */ + 20, /* 27 SONYPI_EVENT_FNKEY_E */ + 21, /* 28 SONYPI_EVENT_FNKEY_F */ + 22, /* 29 SONYPI_EVENT_FNKEY_S */ + 23, /* 30 SONYPI_EVENT_FNKEY_B */ + 24, /* 31 SONYPI_EVENT_BLUETOOTH_PRESSED */ + 25, /* 32 SONYPI_EVENT_PKEY_P1 */ + 26, /* 33 SONYPI_EVENT_PKEY_P2 */ + 27, /* 34 SONYPI_EVENT_PKEY_P3 */ + 28, /* 35 SONYPI_EVENT_BACK_PRESSED */ + -1, /* 36 SONYPI_EVENT_LID_CLOSED */ + -1, /* 37 SONYPI_EVENT_LID_OPENED */ + 29, /* 38 SONYPI_EVENT_BLUETOOTH_ON */ + 30, /* 39 SONYPI_EVENT_BLUETOOTH_OFF */ + 31, /* 40 SONYPI_EVENT_HELP_PRESSED */ + 32, /* 41 SONYPI_EVENT_FNKEY_ONLY */ + 33, /* 42 SONYPI_EVENT_JOGDIAL_FAST_DOWN */ + 34, /* 43 SONYPI_EVENT_JOGDIAL_FAST_UP */ + 35, /* 44 SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED */ + 36, /* 45 SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED */ + 37, /* 46 SONYPI_EVENT_JOGDIAL_VFAST_DOWN */ + 38, /* 47 SONYPI_EVENT_JOGDIAL_VFAST_UP */ + 39, /* 48 SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED */ + 40, /* 49 SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED */ + 41, /* 50 SONYPI_EVENT_ZOOM_PRESSED */ + 42, /* 51 SONYPI_EVENT_THUMBPHRASE_PRESSED */ + 43, /* 52 SONYPI_EVENT_MEYE_FACE */ + 44, /* 53 SONYPI_EVENT_MEYE_OPPOSITE */ + 45, /* 54 SONYPI_EVENT_MEMORYSTICK_INSERT */ + 46, /* 55 SONYPI_EVENT_MEMORYSTICK_EJECT */ + -1, /* 56 SONYPI_EVENT_ANYBUTTON_RELEASED */ + -1, /* 57 SONYPI_EVENT_BATTERY_INSERT */ + -1, /* 58 SONYPI_EVENT_BATTERY_REMOVE */ + -1, /* 59 SONYPI_EVENT_FNKEY_RELEASED */ + 47, /* 60 SONYPI_EVENT_WIRELESS_ON */ + 48, /* 61 SONYPI_EVENT_WIRELESS_OFF */ + 49, /* 62 SONYPI_EVENT_ZOOM_IN_PRESSED */ + 50, /* 63 SONYPI_EVENT_ZOOM_OUT_PRESSED */ +}; + +static int sony_laptop_input_keycode_map[] = { + KEY_CAMERA, /* 0 SONYPI_EVENT_CAPTURE_PRESSED */ + KEY_RESERVED, /* 1 SONYPI_EVENT_CAPTURE_RELEASED */ + KEY_RESERVED, /* 2 SONYPI_EVENT_CAPTURE_PARTIALPRESSED */ + KEY_RESERVED, /* 3 SONYPI_EVENT_CAPTURE_PARTIALRELEASED */ + KEY_FN_ESC, /* 4 SONYPI_EVENT_FNKEY_ESC */ + KEY_FN_F1, /* 5 SONYPI_EVENT_FNKEY_F1 */ + KEY_FN_F2, /* 6 SONYPI_EVENT_FNKEY_F2 */ + KEY_FN_F3, /* 7 SONYPI_EVENT_FNKEY_F3 */ + KEY_FN_F4, /* 8 SONYPI_EVENT_FNKEY_F4 */ + KEY_FN_F5, /* 9 SONYPI_EVENT_FNKEY_F5 */ + KEY_FN_F6, /* 10 SONYPI_EVENT_FNKEY_F6 */ + KEY_FN_F7, /* 11 SONYPI_EVENT_FNKEY_F7 */ + KEY_FN_F8, /* 12 SONYPI_EVENT_FNKEY_F8 */ + KEY_FN_F9, /* 13 SONYPI_EVENT_FNKEY_F9 */ + KEY_FN_F10, /* 14 SONYPI_EVENT_FNKEY_F10 */ + KEY_FN_F11, /* 15 SONYPI_EVENT_FNKEY_F11 */ + KEY_FN_F12, /* 16 SONYPI_EVENT_FNKEY_F12 */ + KEY_FN_F1, /* 17 SONYPI_EVENT_FNKEY_1 */ + KEY_FN_F2, /* 18 SONYPI_EVENT_FNKEY_2 */ + KEY_FN_D, /* 19 SONYPI_EVENT_FNKEY_D */ + KEY_FN_E, /* 20 SONYPI_EVENT_FNKEY_E */ + KEY_FN_F, /* 21 SONYPI_EVENT_FNKEY_F */ + KEY_FN_S, /* 22 SONYPI_EVENT_FNKEY_S */ + KEY_FN_B, /* 23 SONYPI_EVENT_FNKEY_B */ + KEY_BLUETOOTH, /* 24 SONYPI_EVENT_BLUETOOTH_PRESSED */ + KEY_PROG1, /* 25 
SONYPI_EVENT_PKEY_P1 */ + KEY_PROG2, /* 26 SONYPI_EVENT_PKEY_P2 */ + KEY_PROG3, /* 27 SONYPI_EVENT_PKEY_P3 */ + KEY_BACK, /* 28 SONYPI_EVENT_BACK_PRESSED */ + KEY_BLUETOOTH, /* 29 SONYPI_EVENT_BLUETOOTH_ON */ + KEY_BLUETOOTH, /* 30 SONYPI_EVENT_BLUETOOTH_OFF */ + KEY_HELP, /* 31 SONYPI_EVENT_HELP_PRESSED */ + KEY_FN, /* 32 SONYPI_EVENT_FNKEY_ONLY */ + KEY_RESERVED, /* 33 SONYPI_EVENT_JOGDIAL_FAST_DOWN */ + KEY_RESERVED, /* 34 SONYPI_EVENT_JOGDIAL_FAST_UP */ + KEY_RESERVED, /* 35 SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED */ + KEY_RESERVED, /* 36 SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED */ + KEY_RESERVED, /* 37 SONYPI_EVENT_JOGDIAL_VFAST_DOWN */ + KEY_RESERVED, /* 38 SONYPI_EVENT_JOGDIAL_VFAST_UP */ + KEY_RESERVED, /* 39 SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED */ + KEY_RESERVED, /* 40 SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED */ + KEY_ZOOM, /* 41 SONYPI_EVENT_ZOOM_PRESSED */ + BTN_THUMB, /* 42 SONYPI_EVENT_THUMBPHRASE_PRESSED */ + KEY_RESERVED, /* 43 SONYPI_EVENT_MEYE_FACE */ + KEY_RESERVED, /* 44 SONYPI_EVENT_MEYE_OPPOSITE */ + KEY_RESERVED, /* 45 SONYPI_EVENT_MEMORYSTICK_INSERT */ + KEY_RESERVED, /* 46 SONYPI_EVENT_MEMORYSTICK_EJECT */ + KEY_WLAN, /* 47 SONYPI_EVENT_WIRELESS_ON */ + KEY_WLAN, /* 48 SONYPI_EVENT_WIRELESS_OFF */ + KEY_ZOOMIN, /* 49 SONYPI_EVENT_ZOOM_IN_PRESSED */ + KEY_ZOOMOUT /* 50 SONYPI_EVENT_ZOOM_OUT_PRESSED */ +}; + +/* release buttons after a short delay if pressed */ +static void do_sony_laptop_release_key(struct work_struct *work) +{ + struct sony_laptop_keypress kp; + + while (kfifo_get(sony_laptop_input.fifo, (unsigned char *)&kp, + sizeof(kp)) == sizeof(kp)) { + msleep(10); + input_report_key(kp.dev, kp.key, 0); + input_sync(kp.dev); + } +} +static DECLARE_WORK(sony_laptop_release_key_work, + do_sony_laptop_release_key); + +/* forward event to the input subsystem */ +static void sony_laptop_report_input_event(u8 event) +{ + struct input_dev *jog_dev = sony_laptop_input.jog_dev; + struct input_dev *key_dev = sony_laptop_input.key_dev; + struct sony_laptop_keypress kp = { NULL }; + + if (event == SONYPI_EVENT_FNKEY_RELEASED) { + /* Nothing, not all VAIOs generate this event */ + return; + } + + /* report events */ + switch (event) { + /* jog_dev events */ + case SONYPI_EVENT_JOGDIAL_UP: + case SONYPI_EVENT_JOGDIAL_UP_PRESSED: + input_report_rel(jog_dev, REL_WHEEL, 1); + input_sync(jog_dev); + return; + + case SONYPI_EVENT_JOGDIAL_DOWN: + case SONYPI_EVENT_JOGDIAL_DOWN_PRESSED: + input_report_rel(jog_dev, REL_WHEEL, -1); + input_sync(jog_dev); + return; + + /* key_dev events */ + case SONYPI_EVENT_JOGDIAL_PRESSED: + kp.key = BTN_MIDDLE; + kp.dev = jog_dev; + break; + + default: + if (event >= ARRAY_SIZE(sony_laptop_input_index)) { + dprintk("sony_laptop_report_input_event, event not known: %d\n", event); + break; + } + if (sony_laptop_input_index[event] != -1) { + kp.key = sony_laptop_input_keycode_map[sony_laptop_input_index[event]]; + if (kp.key != KEY_UNKNOWN) + kp.dev = key_dev; + } + break; + } + + if (kp.dev) { + input_report_key(kp.dev, kp.key, 1); + /* we emit the scancode so we can always remap the key */ + input_event(kp.dev, EV_MSC, MSC_SCAN, event); + input_sync(kp.dev); + kfifo_put(sony_laptop_input.fifo, + (unsigned char *)&kp, sizeof(kp)); + + if (!work_pending(&sony_laptop_release_key_work)) + queue_work(sony_laptop_input.wq, + &sony_laptop_release_key_work); + } else + dprintk("unknown input event %.2x\n", event); +} + +static int sony_laptop_setup_input(struct acpi_device *acpi_device) +{ + struct input_dev *jog_dev; + struct input_dev *key_dev; + int i; + int 
error; + + /* don't run again if already initialized */ + if (atomic_add_return(1, &sony_laptop_input.users) > 1) + return 0; + + /* kfifo */ + spin_lock_init(&sony_laptop_input.fifo_lock); + sony_laptop_input.fifo = + kfifo_alloc(SONY_LAPTOP_BUF_SIZE, GFP_KERNEL, + &sony_laptop_input.fifo_lock); + if (IS_ERR(sony_laptop_input.fifo)) { + printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n"); + error = PTR_ERR(sony_laptop_input.fifo); + goto err_dec_users; + } + + /* init workqueue */ + sony_laptop_input.wq = create_singlethread_workqueue("sony-laptop"); + if (!sony_laptop_input.wq) { + printk(KERN_ERR DRV_PFX + "Unable to create workqueue.\n"); + error = -ENXIO; + goto err_free_kfifo; + } + + /* input keys */ + key_dev = input_allocate_device(); + if (!key_dev) { + error = -ENOMEM; + goto err_destroy_wq; + } + + key_dev->name = "Sony Vaio Keys"; + key_dev->id.bustype = BUS_ISA; + key_dev->id.vendor = PCI_VENDOR_ID_SONY; + key_dev->dev.parent = &acpi_device->dev; + + /* Initialize the Input Drivers: special keys */ + set_bit(EV_KEY, key_dev->evbit); + set_bit(EV_MSC, key_dev->evbit); + set_bit(MSC_SCAN, key_dev->mscbit); + key_dev->keycodesize = sizeof(sony_laptop_input_keycode_map[0]); + key_dev->keycodemax = ARRAY_SIZE(sony_laptop_input_keycode_map); + key_dev->keycode = &sony_laptop_input_keycode_map; + for (i = 0; i < ARRAY_SIZE(sony_laptop_input_keycode_map); i++) { + if (sony_laptop_input_keycode_map[i] != KEY_RESERVED) { + set_bit(sony_laptop_input_keycode_map[i], + key_dev->keybit); + } + } + + error = input_register_device(key_dev); + if (error) + goto err_free_keydev; + + sony_laptop_input.key_dev = key_dev; + + /* jogdial */ + jog_dev = input_allocate_device(); + if (!jog_dev) { + error = -ENOMEM; + goto err_unregister_keydev; + } + + jog_dev->name = "Sony Vaio Jogdial"; + jog_dev->id.bustype = BUS_ISA; + jog_dev->id.vendor = PCI_VENDOR_ID_SONY; + jog_dev->dev.parent = &acpi_device->dev; + + jog_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); + jog_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_MIDDLE); + jog_dev->relbit[0] = BIT_MASK(REL_WHEEL); + + error = input_register_device(jog_dev); + if (error) + goto err_free_jogdev; + + sony_laptop_input.jog_dev = jog_dev; + + return 0; + +err_free_jogdev: + input_free_device(jog_dev); + +err_unregister_keydev: + input_unregister_device(key_dev); + /* to avoid kref underflow below at input_free_device */ + key_dev = NULL; + +err_free_keydev: + input_free_device(key_dev); + +err_destroy_wq: + destroy_workqueue(sony_laptop_input.wq); + +err_free_kfifo: + kfifo_free(sony_laptop_input.fifo); + +err_dec_users: + atomic_dec(&sony_laptop_input.users); + return error; +} + +static void sony_laptop_remove_input(void) +{ + /* cleanup only after the last user has gone */ + if (!atomic_dec_and_test(&sony_laptop_input.users)) + return; + + /* flush workqueue first */ + flush_workqueue(sony_laptop_input.wq); + + /* destroy input devs */ + input_unregister_device(sony_laptop_input.key_dev); + sony_laptop_input.key_dev = NULL; + + if (sony_laptop_input.jog_dev) { + input_unregister_device(sony_laptop_input.jog_dev); + sony_laptop_input.jog_dev = NULL; + } + + destroy_workqueue(sony_laptop_input.wq); + kfifo_free(sony_laptop_input.fifo); +} + +/*********** Platform Device ***********/ + +static atomic_t sony_pf_users = ATOMIC_INIT(0); +static struct platform_driver sony_pf_driver = { + .driver = { + .name = "sony-laptop", + .owner = THIS_MODULE, + } +}; +static struct platform_device *sony_pf_device; + +static int sony_pf_add(void) +{ + int ret = 0; + + 
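/* + * More than one ACPI device (SNC and SPIC) may grab a reference to + * this platform device, so sony_pf_users counts them: only the first + * caller registers the driver and device, and sony_pf_remove() tears + * them down only when the last reference is dropped. + */ + 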
/* don't run again if already initialized */ + if (atomic_add_return(1, &sony_pf_users) > 1) + return 0; + + ret = platform_driver_register(&sony_pf_driver); + if (ret) + goto out; + + sony_pf_device = platform_device_alloc("sony-laptop", -1); + if (!sony_pf_device) { + ret = -ENOMEM; + goto out_platform_registered; + } + + ret = platform_device_add(sony_pf_device); + if (ret) + goto out_platform_alloced; + + return 0; + + out_platform_alloced: + platform_device_put(sony_pf_device); + sony_pf_device = NULL; + out_platform_registered: + platform_driver_unregister(&sony_pf_driver); + out: + atomic_dec(&sony_pf_users); + return ret; +} + +static void sony_pf_remove(void) +{ + /* deregister only after the last user has gone */ + if (!atomic_dec_and_test(&sony_pf_users)) + return; + + platform_device_del(sony_pf_device); + platform_device_put(sony_pf_device); + platform_driver_unregister(&sony_pf_driver); +} + +/*********** SNC (SNY5001) Device ***********/ + +/* the device uses 1-based values, while the backlight subsystem uses + 0-based values */ +#define SONY_MAX_BRIGHTNESS 8 + +#define SNC_VALIDATE_IN 0 +#define SNC_VALIDATE_OUT 1 + +static ssize_t sony_nc_sysfs_show(struct device *, struct device_attribute *, + char *); +static ssize_t sony_nc_sysfs_store(struct device *, struct device_attribute *, + const char *, size_t); +static int boolean_validate(const int, const int); +static int brightness_default_validate(const int, const int); + +struct sony_nc_value { + char *name; /* name of the entry */ + char **acpiget; /* names of the ACPI get functions */ + char **acpiset; /* names of the ACPI set functions */ + int (*validate)(const int, const int); /* input/output validation */ + int value; /* current setting */ + int valid; /* Has ever been set */ + int debug; /* active only in debug mode ? */ + struct device_attribute devattr; /* sysfs attribute */ +}; + +#define SNC_HANDLE_NAMES(_name, _values...) 
\ + static char *snc_##_name[] = { _values, NULL } + +#define SNC_HANDLE(_name, _getters, _setters, _validate, _debug) \ + { \ + .name = __stringify(_name), \ + .acpiget = _getters, \ + .acpiset = _setters, \ + .validate = _validate, \ + .debug = _debug, \ + .devattr = __ATTR(_name, 0, sony_nc_sysfs_show, sony_nc_sysfs_store), \ + } + +#define SNC_HANDLE_NULL { .name = NULL } + +SNC_HANDLE_NAMES(fnkey_get, "GHKE"); + +SNC_HANDLE_NAMES(brightness_def_get, "GPBR"); +SNC_HANDLE_NAMES(brightness_def_set, "SPBR"); + +SNC_HANDLE_NAMES(cdpower_get, "GCDP"); +SNC_HANDLE_NAMES(cdpower_set, "SCDP", "CDPW"); + +SNC_HANDLE_NAMES(audiopower_get, "GAZP"); +SNC_HANDLE_NAMES(audiopower_set, "AZPW"); + +SNC_HANDLE_NAMES(lanpower_get, "GLNP"); +SNC_HANDLE_NAMES(lanpower_set, "LNPW"); + +SNC_HANDLE_NAMES(lidstate_get, "GLID"); + +SNC_HANDLE_NAMES(indicatorlamp_get, "GILS"); +SNC_HANDLE_NAMES(indicatorlamp_set, "SILS"); + +SNC_HANDLE_NAMES(gainbass_get, "GMGB"); +SNC_HANDLE_NAMES(gainbass_set, "CMGB"); + +SNC_HANDLE_NAMES(PID_get, "GPID"); + +SNC_HANDLE_NAMES(CTR_get, "GCTR"); +SNC_HANDLE_NAMES(CTR_set, "SCTR"); + +SNC_HANDLE_NAMES(PCR_get, "GPCR"); +SNC_HANDLE_NAMES(PCR_set, "SPCR"); + +SNC_HANDLE_NAMES(CMI_get, "GCMI"); +SNC_HANDLE_NAMES(CMI_set, "SCMI"); + +static struct sony_nc_value sony_nc_values[] = { + SNC_HANDLE(brightness_default, snc_brightness_def_get, + snc_brightness_def_set, brightness_default_validate, 0), + SNC_HANDLE(fnkey, snc_fnkey_get, NULL, NULL, 0), + SNC_HANDLE(cdpower, snc_cdpower_get, snc_cdpower_set, boolean_validate, 0), + SNC_HANDLE(audiopower, snc_audiopower_get, snc_audiopower_set, + boolean_validate, 0), + SNC_HANDLE(lanpower, snc_lanpower_get, snc_lanpower_set, + boolean_validate, 1), + SNC_HANDLE(lidstate, snc_lidstate_get, NULL, + boolean_validate, 0), + SNC_HANDLE(indicatorlamp, snc_indicatorlamp_get, snc_indicatorlamp_set, + boolean_validate, 0), + SNC_HANDLE(gainbass, snc_gainbass_get, snc_gainbass_set, + boolean_validate, 0), + /* unknown methods */ + SNC_HANDLE(PID, snc_PID_get, NULL, NULL, 1), + SNC_HANDLE(CTR, snc_CTR_get, snc_CTR_set, NULL, 1), + SNC_HANDLE(PCR, snc_PCR_get, snc_PCR_set, NULL, 1), + SNC_HANDLE(CMI, snc_CMI_get, snc_CMI_set, NULL, 1), + SNC_HANDLE_NULL +}; + +static acpi_handle sony_nc_acpi_handle; +static struct acpi_device *sony_nc_acpi_device = NULL; + +/* + * acpi_evaluate_object wrappers + */ +static int acpi_callgetfunc(acpi_handle handle, char *name, int *result) +{ + struct acpi_buffer output; + union acpi_object out_obj; + acpi_status status; + + output.length = sizeof(out_obj); + output.pointer = &out_obj; + + status = acpi_evaluate_object(handle, name, NULL, &output); + if ((status == AE_OK) && (out_obj.type == ACPI_TYPE_INTEGER)) { + *result = out_obj.integer.value; + return 0; + } + + printk(KERN_WARNING DRV_PFX "acpi_callgetfunc failed\n"); + + return -1; +} + +static int acpi_callsetfunc(acpi_handle handle, char *name, int value, + int *result) +{ + struct acpi_object_list params; + union acpi_object in_obj; + struct acpi_buffer output; + union acpi_object out_obj; + acpi_status status; + + params.count = 1; + params.pointer = &in_obj; + in_obj.type = ACPI_TYPE_INTEGER; + in_obj.integer.value = value; + + output.length = sizeof(out_obj); + output.pointer = &out_obj; + + status = acpi_evaluate_object(handle, name, &params, &output); + if (status == AE_OK) { + if (result != NULL) { + if (out_obj.type != ACPI_TYPE_INTEGER) { + printk(KERN_WARNING DRV_PFX "acpi_evaluate_object bad " + "return type\n"); + return -1; + } + *result = 
out_obj.integer.value; + } + return 0; + } + + printk(KERN_WARNING DRV_PFX "acpi_evaluate_object failed\n"); + + return -1; +} + +/* + * sony_nc_values input/output validate functions + */ + +/* brightness_default_validate: + * + * manipulate input output values to keep consistency with the + * backlight framework for which brightness values are 0-based. + */ +static int brightness_default_validate(const int direction, const int value) +{ + switch (direction) { + case SNC_VALIDATE_OUT: + return value - 1; + case SNC_VALIDATE_IN: + if (value >= 0 && value < SONY_MAX_BRIGHTNESS) + return value + 1; + } + return -EINVAL; +} + +/* boolean_validate: + * + * on input validate boolean values 0/1, on output just pass the + * received value. + */ +static int boolean_validate(const int direction, const int value) +{ + if (direction == SNC_VALIDATE_IN) { + if (value != 0 && value != 1) + return -EINVAL; + } + return value; +} + +/* + * Sysfs show/store common to all sony_nc_values + */ +static ssize_t sony_nc_sysfs_show(struct device *dev, struct device_attribute *attr, + char *buffer) +{ + int value; + struct sony_nc_value *item = + container_of(attr, struct sony_nc_value, devattr); + + if (!*item->acpiget) + return -EIO; + + if (acpi_callgetfunc(sony_nc_acpi_handle, *item->acpiget, &value) < 0) + return -EIO; + + if (item->validate) + value = item->validate(SNC_VALIDATE_OUT, value); + + return snprintf(buffer, PAGE_SIZE, "%d\n", value); +} + +static ssize_t sony_nc_sysfs_store(struct device *dev, + struct device_attribute *attr, + const char *buffer, size_t count) +{ + int value; + struct sony_nc_value *item = + container_of(attr, struct sony_nc_value, devattr); + + if (!item->acpiset) + return -EIO; + + if (count > 31) + return -EINVAL; + + value = simple_strtoul(buffer, NULL, 10); + + if (item->validate) + value = item->validate(SNC_VALIDATE_IN, value); + + if (value < 0) + return value; + + if (acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, value, NULL) < 0) + return -EIO; + item->value = value; + item->valid = 1; + return count; +} + + +/* + * Backlight device + */ +static int sony_backlight_update_status(struct backlight_device *bd) +{ + return acpi_callsetfunc(sony_nc_acpi_handle, "SBRT", + bd->props.brightness + 1, NULL); +} + +static int sony_backlight_get_brightness(struct backlight_device *bd) +{ + int value; + + if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &value)) + return 0; + /* brightness levels are 1-based, while backlight ones are 0-based */ + return value - 1; +} + +static struct backlight_device *sony_backlight_device; +static struct backlight_ops sony_backlight_ops = { + .update_status = sony_backlight_update_status, + .get_brightness = sony_backlight_get_brightness, +}; + +/* + * New SNC-only Vaios event mapping to driver known keys + */ +struct sony_nc_event { + u8 data; + u8 event; +}; + +static struct sony_nc_event *sony_nc_events; + +/* Vaio C* --maybe also FE*, N* and AR* ?-- special init sequence + * for Fn keys + */ +static int sony_nc_C_enable(const struct dmi_system_id *id) +{ + int result = 0; + + printk(KERN_NOTICE DRV_PFX "detected %s\n", id->ident); + + sony_nc_events = id->driver_data; + + if (acpi_callsetfunc(sony_nc_acpi_handle, "SN02", 0x4, &result) < 0 + || acpi_callsetfunc(sony_nc_acpi_handle, "SN07", 0x2, &result) < 0 + || acpi_callsetfunc(sony_nc_acpi_handle, "SN02", 0x10, &result) < 0 + || acpi_callsetfunc(sony_nc_acpi_handle, "SN07", 0x0, &result) < 0 + || acpi_callsetfunc(sony_nc_acpi_handle, "SN03", 0x2, &result) < 0 + || 
acpi_callsetfunc(sony_nc_acpi_handle, "SN07", 0x101, &result) < 0) { + printk(KERN_WARNING DRV_PFX "failed to initialize SNC, some " + "functionalities may be missing\n"); + return 1; + } + return 0; +} + +static struct sony_nc_event sony_C_events[] = { + { 0x81, SONYPI_EVENT_FNKEY_F1 }, + { 0x01, SONYPI_EVENT_FNKEY_RELEASED }, + { 0x85, SONYPI_EVENT_FNKEY_F5 }, + { 0x05, SONYPI_EVENT_FNKEY_RELEASED }, + { 0x86, SONYPI_EVENT_FNKEY_F6 }, + { 0x06, SONYPI_EVENT_FNKEY_RELEASED }, + { 0x87, SONYPI_EVENT_FNKEY_F7 }, + { 0x07, SONYPI_EVENT_FNKEY_RELEASED }, + { 0x8A, SONYPI_EVENT_FNKEY_F10 }, + { 0x0A, SONYPI_EVENT_FNKEY_RELEASED }, + { 0x8C, SONYPI_EVENT_FNKEY_F12 }, + { 0x0C, SONYPI_EVENT_FNKEY_RELEASED }, + { 0, 0 }, +}; + +/* SNC-only model map */ +static const struct dmi_system_id sony_nc_ids[] = { + { + .ident = "Sony Vaio FE Series", + .callback = sony_nc_C_enable, + .driver_data = sony_C_events, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FE"), + }, + }, + { + .ident = "Sony Vaio FZ Series", + .callback = sony_nc_C_enable, + .driver_data = sony_C_events, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ"), + }, + }, + { + .ident = "Sony Vaio C Series", + .callback = sony_nc_C_enable, + .driver_data = sony_C_events, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-C"), + }, + }, + { + .ident = "Sony Vaio N Series", + .callback = sony_nc_C_enable, + .driver_data = sony_C_events, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-N"), + }, + }, + { } +}; + +/* + * ACPI callbacks + */ +static void sony_acpi_notify(acpi_handle handle, u32 event, void *data) +{ + struct sony_nc_event *evmap; + u32 ev = event; + int result; + + if (ev == 0x92) { + /* read the key pressed from EC.GECR + * A call to SN07 with 0x0202 will do it as well respecting + * the current protocol on different OSes + * + * Note: the path for GECR may be + * \_SB.PCI0.LPCB.EC (C, FE, AR, N and friends) + * \_SB.PCI0.PIB.EC0 (VGN-FR notifications are sent directly, no GECR) + * + * TODO: we may want to do the same for the older GHKE -need + * dmi list- so this snippet may become one more callback. 
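Whatever + * byte is recovered here is then translated through the + * sony_nc_events table below, so model-specific codes map onto the + * common SONYPI_EVENT_* values.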
+ */ + if (acpi_callsetfunc(handle, "SN07", 0x0202, &result) < 0) + dprintk("sony_acpi_notify, unable to decode event 0x%.2x\n", ev); + else + ev = result & 0xFF; + } + + if (sony_nc_events) + for (evmap = sony_nc_events; evmap->event; evmap++) { + if (evmap->data == ev) { + ev = evmap->event; + break; + } + } + + dprintk("sony_acpi_notify, event: 0x%.2x\n", ev); + sony_laptop_report_input_event(ev); + acpi_bus_generate_proc_event(sony_nc_acpi_device, 1, ev); +} + +static acpi_status sony_walk_callback(acpi_handle handle, u32 level, + void *context, void **return_value) +{ + struct acpi_namespace_node *node; + union acpi_operand_object *operand; + + node = (struct acpi_namespace_node *)handle; + operand = (union acpi_operand_object *)node->object; + + printk(KERN_WARNING DRV_PFX "method: name: %4.4s, args %X\n", node->name.ascii, + (u32) operand->method.param_count); + + return AE_OK; +} + +/* + * ACPI device + */ +static int sony_nc_resume(struct acpi_device *device) +{ + struct sony_nc_value *item; + + for (item = sony_nc_values; item->name; item++) { + int ret; + + if (!item->valid) + continue; + ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, + item->value, NULL); + if (ret < 0) { + printk("%s: %d\n", __func__, ret); + break; + } + } + + /* set the last requested brightness level */ + if (sony_backlight_device && + !sony_backlight_update_status(sony_backlight_device)) + printk(KERN_WARNING DRV_PFX "unable to restore brightness level\n"); + + /* re-initialize models with specific requirements */ + dmi_check_system(sony_nc_ids); + + return 0; +} + +static int sony_nc_add(struct acpi_device *device) +{ + acpi_status status; + int result = 0; + acpi_handle handle; + struct sony_nc_value *item; + + printk(KERN_INFO DRV_PFX "%s v%s.\n", + SONY_NC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION); + + sony_nc_acpi_device = device; + strcpy(acpi_device_class(device), "sony/hotkey"); + + sony_nc_acpi_handle = device->handle; + + /* read device status */ + result = acpi_bus_get_status(device); + /* bail IFF the above call was successful and the device is not present */ + if (!result && !device->status.present) { + dprintk("Device not present\n"); + result = -ENODEV; + goto outwalk; + } + + if (debug) { + status = acpi_walk_namespace(ACPI_TYPE_METHOD, sony_nc_acpi_handle, + 1, sony_walk_callback, NULL, NULL); + if (ACPI_FAILURE(status)) { + printk(KERN_WARNING DRV_PFX "unable to walk acpi resources\n"); + result = -ENODEV; + goto outwalk; + } + } + + /* try to _INI the device if such method exists (ACPI spec 3.0-6.5.1); + * it should be respected as we already checked for the device presence above */ + if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, METHOD_NAME__INI, &handle))) { + dprintk("Invoking _INI\n"); + if (ACPI_FAILURE(acpi_evaluate_object(sony_nc_acpi_handle, METHOD_NAME__INI, + NULL, NULL))) + dprintk("_INI Method failed\n"); + } + + /* setup input devices and helper fifo */ + result = sony_laptop_setup_input(device); + if (result) { + printk(KERN_ERR DRV_PFX + "Unable to create input devices.\n"); + goto outwalk; + } + + status = acpi_install_notify_handler(sony_nc_acpi_handle, + ACPI_DEVICE_NOTIFY, + sony_acpi_notify, NULL); + if (ACPI_FAILURE(status)) { + printk(KERN_WARNING DRV_PFX "unable to install notify handler (%u)\n", status); + result = -ENODEV; + goto outinput; + } + + if (acpi_video_backlight_support()) { + printk(KERN_INFO DRV_PFX "brightness ignored, must be " + "controlled by ACPI video driver\n"); + } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, 
"GBRT", + &handle))) { + sony_backlight_device = backlight_device_register("sony", NULL, + NULL, + &sony_backlight_ops); + + if (IS_ERR(sony_backlight_device)) { + printk(KERN_WARNING DRV_PFX "unable to register backlight device\n"); + sony_backlight_device = NULL; + } else { + sony_backlight_device->props.brightness = + sony_backlight_get_brightness + (sony_backlight_device); + sony_backlight_device->props.max_brightness = + SONY_MAX_BRIGHTNESS - 1; + } + + } + + /* initialize models with specific requirements */ + dmi_check_system(sony_nc_ids); + + result = sony_pf_add(); + if (result) + goto outbacklight; + + /* create sony_pf sysfs attributes related to the SNC device */ + for (item = sony_nc_values; item->name; ++item) { + + if (!debug && item->debug) + continue; + + /* find the available acpiget as described in the DSDT */ + for (; item->acpiget && *item->acpiget; ++item->acpiget) { + if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, + *item->acpiget, + &handle))) { + dprintk("Found %s getter: %s\n", + item->name, *item->acpiget); + item->devattr.attr.mode |= S_IRUGO; + break; + } + } + + /* find the available acpiset as described in the DSDT */ + for (; item->acpiset && *item->acpiset; ++item->acpiset) { + if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, + *item->acpiset, + &handle))) { + dprintk("Found %s setter: %s\n", + item->name, *item->acpiset); + item->devattr.attr.mode |= S_IWUSR; + break; + } + } + + if (item->devattr.attr.mode != 0) { + result = + device_create_file(&sony_pf_device->dev, + &item->devattr); + if (result) + goto out_sysfs; + } + } + + return 0; + + out_sysfs: + for (item = sony_nc_values; item->name; ++item) { + device_remove_file(&sony_pf_device->dev, &item->devattr); + } + sony_pf_remove(); + + outbacklight: + if (sony_backlight_device) + backlight_device_unregister(sony_backlight_device); + + status = acpi_remove_notify_handler(sony_nc_acpi_handle, + ACPI_DEVICE_NOTIFY, + sony_acpi_notify); + if (ACPI_FAILURE(status)) + printk(KERN_WARNING DRV_PFX "unable to remove notify handler\n"); + + outinput: + sony_laptop_remove_input(); + + outwalk: + return result; +} + +static int sony_nc_remove(struct acpi_device *device, int type) +{ + acpi_status status; + struct sony_nc_value *item; + + if (sony_backlight_device) + backlight_device_unregister(sony_backlight_device); + + sony_nc_acpi_device = NULL; + + status = acpi_remove_notify_handler(sony_nc_acpi_handle, + ACPI_DEVICE_NOTIFY, + sony_acpi_notify); + if (ACPI_FAILURE(status)) + printk(KERN_WARNING DRV_PFX "unable to remove notify handler\n"); + + for (item = sony_nc_values; item->name; ++item) { + device_remove_file(&sony_pf_device->dev, &item->devattr); + } + + sony_pf_remove(); + sony_laptop_remove_input(); + dprintk(SONY_NC_DRIVER_NAME " removed.\n"); + + return 0; +} + +static const struct acpi_device_id sony_device_ids[] = { + {SONY_NC_HID, 0}, + {SONY_PIC_HID, 0}, + {"", 0}, +}; +MODULE_DEVICE_TABLE(acpi, sony_device_ids); + +static const struct acpi_device_id sony_nc_device_ids[] = { + {SONY_NC_HID, 0}, + {"", 0}, +}; + +static struct acpi_driver sony_nc_driver = { + .name = SONY_NC_DRIVER_NAME, + .class = SONY_NC_CLASS, + .ids = sony_nc_device_ids, + .owner = THIS_MODULE, + .ops = { + .add = sony_nc_add, + .remove = sony_nc_remove, + .resume = sony_nc_resume, + }, +}; + +/*********** SPIC (SNY6001) Device ***********/ + +#define SONYPI_DEVICE_TYPE1 0x00000001 +#define SONYPI_DEVICE_TYPE2 0x00000002 +#define SONYPI_DEVICE_TYPE3 0x00000004 +#define SONYPI_DEVICE_TYPE4 0x00000008 + +#define 
SONYPI_TYPE1_OFFSET 0x04 +#define SONYPI_TYPE2_OFFSET 0x12 +#define SONYPI_TYPE3_OFFSET 0x12 +#define SONYPI_TYPE4_OFFSET 0x12 + +struct sony_pic_ioport { + struct acpi_resource_io io1; + struct acpi_resource_io io2; + struct list_head list; +}; + +struct sony_pic_irq { + struct acpi_resource_irq irq; + struct list_head list; +}; + +struct sonypi_eventtypes { + u8 data; + unsigned long mask; + struct sonypi_event *events; +}; + +struct device_ctrl { + int model; + int (*handle_irq)(const u8, const u8); + u16 evport_offset; + u8 has_camera; + u8 has_bluetooth; + u8 has_wwan; + struct sonypi_eventtypes *event_types; +}; + +struct sony_pic_dev { + struct device_ctrl *control; + struct acpi_device *acpi_dev; + struct sony_pic_irq *cur_irq; + struct sony_pic_ioport *cur_ioport; + struct list_head interrupts; + struct list_head ioports; + struct mutex lock; + u8 camera_power; + u8 bluetooth_power; + u8 wwan_power; +}; + +static struct sony_pic_dev spic_dev = { + .interrupts = LIST_HEAD_INIT(spic_dev.interrupts), + .ioports = LIST_HEAD_INIT(spic_dev.ioports), +}; + +/* Event masks */ +#define SONYPI_JOGGER_MASK 0x00000001 +#define SONYPI_CAPTURE_MASK 0x00000002 +#define SONYPI_FNKEY_MASK 0x00000004 +#define SONYPI_BLUETOOTH_MASK 0x00000008 +#define SONYPI_PKEY_MASK 0x00000010 +#define SONYPI_BACK_MASK 0x00000020 +#define SONYPI_HELP_MASK 0x00000040 +#define SONYPI_LID_MASK 0x00000080 +#define SONYPI_ZOOM_MASK 0x00000100 +#define SONYPI_THUMBPHRASE_MASK 0x00000200 +#define SONYPI_MEYE_MASK 0x00000400 +#define SONYPI_MEMORYSTICK_MASK 0x00000800 +#define SONYPI_BATTERY_MASK 0x00001000 +#define SONYPI_WIRELESS_MASK 0x00002000 + +struct sonypi_event { + u8 data; + u8 event; +}; + +/* The set of possible button release events */ +static struct sonypi_event sonypi_releaseev[] = { + { 0x00, SONYPI_EVENT_ANYBUTTON_RELEASED }, + { 0, 0 } +}; + +/* The set of possible jogger events */ +static struct sonypi_event sonypi_joggerev[] = { + { 0x1f, SONYPI_EVENT_JOGDIAL_UP }, + { 0x01, SONYPI_EVENT_JOGDIAL_DOWN }, + { 0x5f, SONYPI_EVENT_JOGDIAL_UP_PRESSED }, + { 0x41, SONYPI_EVENT_JOGDIAL_DOWN_PRESSED }, + { 0x1e, SONYPI_EVENT_JOGDIAL_FAST_UP }, + { 0x02, SONYPI_EVENT_JOGDIAL_FAST_DOWN }, + { 0x5e, SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED }, + { 0x42, SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED }, + { 0x1d, SONYPI_EVENT_JOGDIAL_VFAST_UP }, + { 0x03, SONYPI_EVENT_JOGDIAL_VFAST_DOWN }, + { 0x5d, SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED }, + { 0x43, SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED }, + { 0x40, SONYPI_EVENT_JOGDIAL_PRESSED }, + { 0, 0 } +}; + +/* The set of possible capture button events */ +static struct sonypi_event sonypi_captureev[] = { + { 0x05, SONYPI_EVENT_CAPTURE_PARTIALPRESSED }, + { 0x07, SONYPI_EVENT_CAPTURE_PRESSED }, + { 0x40, SONYPI_EVENT_CAPTURE_PRESSED }, + { 0x01, SONYPI_EVENT_CAPTURE_PARTIALRELEASED }, + { 0, 0 } +}; + +/* The set of possible fnkeys events */ +static struct sonypi_event sonypi_fnkeyev[] = { + { 0x10, SONYPI_EVENT_FNKEY_ESC }, + { 0x11, SONYPI_EVENT_FNKEY_F1 }, + { 0x12, SONYPI_EVENT_FNKEY_F2 }, + { 0x13, SONYPI_EVENT_FNKEY_F3 }, + { 0x14, SONYPI_EVENT_FNKEY_F4 }, + { 0x15, SONYPI_EVENT_FNKEY_F5 }, + { 0x16, SONYPI_EVENT_FNKEY_F6 }, + { 0x17, SONYPI_EVENT_FNKEY_F7 }, + { 0x18, SONYPI_EVENT_FNKEY_F8 }, + { 0x19, SONYPI_EVENT_FNKEY_F9 }, + { 0x1a, SONYPI_EVENT_FNKEY_F10 }, + { 0x1b, SONYPI_EVENT_FNKEY_F11 }, + { 0x1c, SONYPI_EVENT_FNKEY_F12 }, + { 0x1f, SONYPI_EVENT_FNKEY_RELEASED }, + { 0x21, SONYPI_EVENT_FNKEY_1 }, + { 0x22, SONYPI_EVENT_FNKEY_2 }, + { 0x31, SONYPI_EVENT_FNKEY_D }, + { 
0x32, SONYPI_EVENT_FNKEY_E }, + { 0x33, SONYPI_EVENT_FNKEY_F }, + { 0x34, SONYPI_EVENT_FNKEY_S }, + { 0x35, SONYPI_EVENT_FNKEY_B }, + { 0x36, SONYPI_EVENT_FNKEY_ONLY }, + { 0, 0 } +}; + +/* The set of possible program key events */ +static struct sonypi_event sonypi_pkeyev[] = { + { 0x01, SONYPI_EVENT_PKEY_P1 }, + { 0x02, SONYPI_EVENT_PKEY_P2 }, + { 0x04, SONYPI_EVENT_PKEY_P3 }, + { 0, 0 } +}; + +/* The set of possible bluetooth events */ +static struct sonypi_event sonypi_blueev[] = { + { 0x55, SONYPI_EVENT_BLUETOOTH_PRESSED }, + { 0x59, SONYPI_EVENT_BLUETOOTH_ON }, + { 0x5a, SONYPI_EVENT_BLUETOOTH_OFF }, + { 0, 0 } +}; + +/* The set of possible wireless events */ +static struct sonypi_event sonypi_wlessev[] = { + { 0x59, SONYPI_EVENT_WIRELESS_ON }, + { 0x5a, SONYPI_EVENT_WIRELESS_OFF }, + { 0, 0 } +}; + +/* The set of possible back button events */ +static struct sonypi_event sonypi_backev[] = { + { 0x20, SONYPI_EVENT_BACK_PRESSED }, + { 0, 0 } +}; + +/* The set of possible help button events */ +static struct sonypi_event sonypi_helpev[] = { + { 0x3b, SONYPI_EVENT_HELP_PRESSED }, + { 0, 0 } +}; + + +/* The set of possible lid events */ +static struct sonypi_event sonypi_lidev[] = { + { 0x51, SONYPI_EVENT_LID_CLOSED }, + { 0x50, SONYPI_EVENT_LID_OPENED }, + { 0, 0 } +}; + +/* The set of possible zoom events */ +static struct sonypi_event sonypi_zoomev[] = { + { 0x39, SONYPI_EVENT_ZOOM_PRESSED }, + { 0x10, SONYPI_EVENT_ZOOM_IN_PRESSED }, + { 0x20, SONYPI_EVENT_ZOOM_OUT_PRESSED }, + { 0, 0 } +}; + +/* The set of possible thumbphrase events */ +static struct sonypi_event sonypi_thumbphraseev[] = { + { 0x3a, SONYPI_EVENT_THUMBPHRASE_PRESSED }, + { 0, 0 } +}; + +/* The set of possible motioneye camera events */ +static struct sonypi_event sonypi_meyeev[] = { + { 0x00, SONYPI_EVENT_MEYE_FACE }, + { 0x01, SONYPI_EVENT_MEYE_OPPOSITE }, + { 0, 0 } +}; + +/* The set of possible memorystick events */ +static struct sonypi_event sonypi_memorystickev[] = { + { 0x53, SONYPI_EVENT_MEMORYSTICK_INSERT }, + { 0x54, SONYPI_EVENT_MEMORYSTICK_EJECT }, + { 0, 0 } +}; + +/* The set of possible battery events */ +static struct sonypi_event sonypi_batteryev[] = { + { 0x20, SONYPI_EVENT_BATTERY_INSERT }, + { 0x30, SONYPI_EVENT_BATTERY_REMOVE }, + { 0, 0 } +}; + +static struct sonypi_eventtypes type1_events[] = { + { 0, 0xffffffff, sonypi_releaseev }, + { 0x70, SONYPI_MEYE_MASK, sonypi_meyeev }, + { 0x30, SONYPI_LID_MASK, sonypi_lidev }, + { 0x60, SONYPI_CAPTURE_MASK, sonypi_captureev }, + { 0x10, SONYPI_JOGGER_MASK, sonypi_joggerev }, + { 0x20, SONYPI_FNKEY_MASK, sonypi_fnkeyev }, + { 0x30, SONYPI_BLUETOOTH_MASK, sonypi_blueev }, + { 0x40, SONYPI_PKEY_MASK, sonypi_pkeyev }, + { 0x30, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev }, + { 0x40, SONYPI_BATTERY_MASK, sonypi_batteryev }, + { 0 }, +}; +static struct sonypi_eventtypes type2_events[] = { + { 0, 0xffffffff, sonypi_releaseev }, + { 0x38, SONYPI_LID_MASK, sonypi_lidev }, + { 0x11, SONYPI_JOGGER_MASK, sonypi_joggerev }, + { 0x61, SONYPI_CAPTURE_MASK, sonypi_captureev }, + { 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev }, + { 0x31, SONYPI_BLUETOOTH_MASK, sonypi_blueev }, + { 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev }, + { 0x11, SONYPI_BACK_MASK, sonypi_backev }, + { 0x21, SONYPI_HELP_MASK, sonypi_helpev }, + { 0x21, SONYPI_ZOOM_MASK, sonypi_zoomev }, + { 0x20, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev }, + { 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev }, + { 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev }, + { 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev }, + { 0 
}, +}; +static struct sonypi_eventtypes type3_events[] = { + { 0, 0xffffffff, sonypi_releaseev }, + { 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev }, + { 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev }, + { 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev }, + { 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev }, + { 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev }, + { 0 }, +}; +static struct sonypi_eventtypes type4_events[] = { + { 0, 0xffffffff, sonypi_releaseev }, + { 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev }, + { 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev }, + { 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev }, + { 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev }, + { 0x05, SONYPI_PKEY_MASK, sonypi_pkeyev }, + { 0x05, SONYPI_ZOOM_MASK, sonypi_zoomev }, + { 0x05, SONYPI_CAPTURE_MASK, sonypi_captureev }, + { 0 }, +}; + +/* low level spic calls */ +#define ITERATIONS_LONG 10000 +#define ITERATIONS_SHORT 10 +#define wait_on_command(command, iterations) { \ + unsigned int n = iterations; \ + while (--n && (command)) \ + udelay(1); \ + if (!n) \ + dprintk("command failed at %s : %s (line %d)\n", \ + __FILE__, __func__, __LINE__); \ +} + +static u8 sony_pic_call1(u8 dev) +{ + u8 v1, v2; + + wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, + ITERATIONS_LONG); + outb(dev, spic_dev.cur_ioport->io1.minimum + 4); + v1 = inb_p(spic_dev.cur_ioport->io1.minimum + 4); + v2 = inb_p(spic_dev.cur_ioport->io1.minimum); + dprintk("sony_pic_call1(0x%.2x): 0x%.4x\n", dev, (v2 << 8) | v1); + return v2; +} + +static u8 sony_pic_call2(u8 dev, u8 fn) +{ + u8 v1; + + wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, + ITERATIONS_LONG); + outb(dev, spic_dev.cur_ioport->io1.minimum + 4); + wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, + ITERATIONS_LONG); + outb(fn, spic_dev.cur_ioport->io1.minimum); + v1 = inb_p(spic_dev.cur_ioport->io1.minimum); + dprintk("sony_pic_call2(0x%.2x - 0x%.2x): 0x%.4x\n", dev, fn, v1); + return v1; +} + +static u8 sony_pic_call3(u8 dev, u8 fn, u8 v) +{ + u8 v1; + + wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG); + outb(dev, spic_dev.cur_ioport->io1.minimum + 4); + wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG); + outb(fn, spic_dev.cur_ioport->io1.minimum); + wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG); + outb(v, spic_dev.cur_ioport->io1.minimum); + v1 = inb_p(spic_dev.cur_ioport->io1.minimum); + dprintk("sony_pic_call3(0x%.2x - 0x%.2x - 0x%.2x): 0x%.4x\n", + dev, fn, v, v1); + return v1; +} + +/* + * minidrivers for SPIC models + */ +static int type4_handle_irq(const u8 data_mask, const u8 ev) +{ + /* + * 0x31 could mean we have to take some extra action and wait for + * the next irq for some Type4 models, it will generate a new + * irq and we can read new data from the device: + * - 0x5c and 0x5f requires 0xA0 + * - 0x61 requires 0xB3 + */ + if (data_mask == 0x31) { + if (ev == 0x5c || ev == 0x5f) + sony_pic_call1(0xA0); + else if (ev == 0x61) + sony_pic_call1(0xB3); + return 0; + } + return 1; +} + +static struct device_ctrl spic_types[] = { + { + .model = SONYPI_DEVICE_TYPE1, + .handle_irq = NULL, + .evport_offset = SONYPI_TYPE1_OFFSET, + .event_types = type1_events, + }, + { + .model = SONYPI_DEVICE_TYPE2, + .handle_irq = NULL, + .evport_offset = SONYPI_TYPE2_OFFSET, + .event_types = type2_events, + }, + { + .model = SONYPI_DEVICE_TYPE3, + .handle_irq = NULL, + .evport_offset = SONYPI_TYPE3_OFFSET, + .event_types = type3_events, + }, + { + 
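+ /* Type4 boards (the ICH7/ICH8-based models matched in + * sony_pic_detect_device_type() below) are the only entries with a + * handle_irq hook; the other types decode every event directly from + * the two status reads in sony_pic_irq() */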
.model = SONYPI_DEVICE_TYPE4, + .handle_irq = type4_handle_irq, + .evport_offset = SONYPI_TYPE4_OFFSET, + .event_types = type4_events, + }, +}; + +static void sony_pic_detect_device_type(struct sony_pic_dev *dev) +{ + struct pci_dev *pcidev; + + pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_82371AB_3, NULL); + if (pcidev) { + dev->control = &spic_types[0]; + goto out; + } + + pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_ICH6_1, NULL); + if (pcidev) { + dev->control = &spic_types[2]; + goto out; + } + + pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_ICH7_1, NULL); + if (pcidev) { + dev->control = &spic_types[3]; + goto out; + } + + pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_ICH8_4, NULL); + if (pcidev) { + dev->control = &spic_types[3]; + goto out; + } + + /* default */ + dev->control = &spic_types[1]; + +out: + if (pcidev) + pci_dev_put(pcidev); + + printk(KERN_INFO DRV_PFX "detected Type%d model\n", + dev->control->model == SONYPI_DEVICE_TYPE1 ? 1 : + dev->control->model == SONYPI_DEVICE_TYPE2 ? 2 : + dev->control->model == SONYPI_DEVICE_TYPE3 ? 3 : 4); +} + +/* camera tests and poweron/poweroff */ +#define SONYPI_CAMERA_PICTURE 5 +#define SONYPI_CAMERA_CONTROL 0x10 + +#define SONYPI_CAMERA_BRIGHTNESS 0 +#define SONYPI_CAMERA_CONTRAST 1 +#define SONYPI_CAMERA_HUE 2 +#define SONYPI_CAMERA_COLOR 3 +#define SONYPI_CAMERA_SHARPNESS 4 + +#define SONYPI_CAMERA_EXPOSURE_MASK 0xC +#define SONYPI_CAMERA_WHITE_BALANCE_MASK 0x3 +#define SONYPI_CAMERA_PICTURE_MODE_MASK 0x30 +#define SONYPI_CAMERA_MUTE_MASK 0x40 + +/* the rest don't need a loop until not 0xff */ +#define SONYPI_CAMERA_AGC 6 +#define SONYPI_CAMERA_AGC_MASK 0x30 +#define SONYPI_CAMERA_SHUTTER_MASK 0x7 + +#define SONYPI_CAMERA_SHUTDOWN_REQUEST 7 +#define SONYPI_CAMERA_CONTROL 0x10 + +#define SONYPI_CAMERA_STATUS 7 +#define SONYPI_CAMERA_STATUS_READY 0x2 +#define SONYPI_CAMERA_STATUS_POSITION 0x4 + +#define SONYPI_DIRECTION_BACKWARDS 0x4 + +#define SONYPI_CAMERA_REVISION 8 +#define SONYPI_CAMERA_ROMVERSION 9 + +static int __sony_pic_camera_ready(void) +{ + u8 v; + + v = sony_pic_call2(0x8f, SONYPI_CAMERA_STATUS); + return (v != 0xff && (v & SONYPI_CAMERA_STATUS_READY)); +} + +static int __sony_pic_camera_off(void) +{ + if (!camera) { + printk(KERN_WARNING DRV_PFX "camera control not enabled\n"); + return -ENODEV; + } + + wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_PICTURE, + SONYPI_CAMERA_MUTE_MASK), + ITERATIONS_SHORT); + + if (spic_dev.camera_power) { + sony_pic_call2(0x91, 0); + spic_dev.camera_power = 0; + } + return 0; +} + +static int __sony_pic_camera_on(void) +{ + int i, j, x; + + if (!camera) { + printk(KERN_WARNING DRV_PFX "camera control not enabled\n"); + return -ENODEV; + } + + if (spic_dev.camera_power) + return 0; + + for (j = 5; j > 0; j--) { + + for (x = 0; x < 100 && sony_pic_call2(0x91, 0x1); x++) + msleep(10); + sony_pic_call1(0x93); + + for (i = 400; i > 0; i--) { + if (__sony_pic_camera_ready()) + break; + msleep(10); + } + if (i) + break; + } + + if (j == 0) { + printk(KERN_WARNING DRV_PFX "failed to power on camera\n"); + return -ENODEV; + } + + wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_CONTROL, + 0x5a), + ITERATIONS_SHORT); + + spic_dev.camera_power = 1; + return 0; +} + +/* External camera command (exported to the motion eye v4l driver) */ +int sony_pic_camera_command(int command, u8 value) +{ + if (!camera) + return -EIO; + + mutex_lock(&spic_dev.lock); + + switch (command) { + case SONY_PIC_COMMAND_SETCAMERA: 
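+ /* a non-zero value powers the camera up (with the retry loop in + * __sony_pic_camera_on), zero powers it back down */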
+ if (value) + __sony_pic_camera_on(); + else + __sony_pic_camera_off(); + break; + case SONY_PIC_COMMAND_SETCAMERABRIGHTNESS: + wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_BRIGHTNESS, value), + ITERATIONS_SHORT); + break; + case SONY_PIC_COMMAND_SETCAMERACONTRAST: + wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_CONTRAST, value), + ITERATIONS_SHORT); + break; + case SONY_PIC_COMMAND_SETCAMERAHUE: + wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_HUE, value), + ITERATIONS_SHORT); + break; + case SONY_PIC_COMMAND_SETCAMERACOLOR: + wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_COLOR, value), + ITERATIONS_SHORT); + break; + case SONY_PIC_COMMAND_SETCAMERASHARPNESS: + wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_SHARPNESS, value), + ITERATIONS_SHORT); + break; + case SONY_PIC_COMMAND_SETCAMERAPICTURE: + wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_PICTURE, value), + ITERATIONS_SHORT); + break; + case SONY_PIC_COMMAND_SETCAMERAAGC: + wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_AGC, value), + ITERATIONS_SHORT); + break; + default: + printk(KERN_ERR DRV_PFX "sony_pic_camera_command invalid: %d\n", + command); + break; + } + mutex_unlock(&spic_dev.lock); + return 0; +} +EXPORT_SYMBOL(sony_pic_camera_command); + +/* gprs/edge modem (SZ460N and SZ210P), thanks to Joshua Wise */ +static void sony_pic_set_wwanpower(u8 state) +{ + state = !!state; + mutex_lock(&spic_dev.lock); + if (spic_dev.wwan_power == state) { + mutex_unlock(&spic_dev.lock); + return; + } + sony_pic_call2(0xB0, state); + spic_dev.wwan_power = state; + mutex_unlock(&spic_dev.lock); +} + +static ssize_t sony_pic_wwanpower_store(struct device *dev, + struct device_attribute *attr, + const char *buffer, size_t count) +{ + unsigned long value; + if (count > 31) + return -EINVAL; + + value = simple_strtoul(buffer, NULL, 10); + sony_pic_set_wwanpower(value); + + return count; +} + +static ssize_t sony_pic_wwanpower_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + ssize_t count; + mutex_lock(&spic_dev.lock); + count = snprintf(buffer, PAGE_SIZE, "%d\n", spic_dev.wwan_power); + mutex_unlock(&spic_dev.lock); + return count; +} + +/* bluetooth subsystem power state */ +static void __sony_pic_set_bluetoothpower(u8 state) +{ + state = !!state; + if (spic_dev.bluetooth_power == state) + return; + sony_pic_call2(0x96, state); + sony_pic_call1(0x82); + spic_dev.bluetooth_power = state; +} + +static ssize_t sony_pic_bluetoothpower_store(struct device *dev, + struct device_attribute *attr, + const char *buffer, size_t count) +{ + unsigned long value; + if (count > 31) + return -EINVAL; + + value = simple_strtoul(buffer, NULL, 10); + mutex_lock(&spic_dev.lock); + __sony_pic_set_bluetoothpower(value); + mutex_unlock(&spic_dev.lock); + + return count; +} + +static ssize_t sony_pic_bluetoothpower_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + ssize_t count = 0; + mutex_lock(&spic_dev.lock); + count = snprintf(buffer, PAGE_SIZE, "%d\n", spic_dev.bluetooth_power); + mutex_unlock(&spic_dev.lock); + return count; +} + +/* fan speed */ +/* FAN0 information (reverse engineered from ACPI tables) */ +#define SONY_PIC_FAN0_STATUS 0x93 +static int sony_pic_set_fanspeed(unsigned long value) +{ + return ec_write(SONY_PIC_FAN0_STATUS, value); +} + +static int sony_pic_get_fanspeed(u8 *value) +{ + return ec_read(SONY_PIC_FAN0_STATUS, value); +} + +static ssize_t sony_pic_fanspeed_store(struct device *dev, + struct device_attribute *attr, + const char *buffer, size_t count) 
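+ /* sysfs store handler: parses a decimal speed from the buffer and + * forwards it to EC register SONY_PIC_FAN0_STATUS via + * sony_pic_set_fanspeed() */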
+{ + unsigned long value; + if (count > 31) + return -EINVAL; + + value = simple_strtoul(buffer, NULL, 10); + if (sony_pic_set_fanspeed(value)) + return -EIO; + + return count; +} + +static ssize_t sony_pic_fanspeed_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + u8 value = 0; + if (sony_pic_get_fanspeed(&value)) + return -EIO; + + return snprintf(buffer, PAGE_SIZE, "%d\n", value); +} + +#define SPIC_ATTR(_name, _mode) \ +struct device_attribute spic_attr_##_name = __ATTR(_name, \ + _mode, sony_pic_## _name ##_show, \ + sony_pic_## _name ##_store) + +static SPIC_ATTR(bluetoothpower, 0644); +static SPIC_ATTR(wwanpower, 0644); +static SPIC_ATTR(fanspeed, 0644); + +static struct attribute *spic_attributes[] = { + &spic_attr_bluetoothpower.attr, + &spic_attr_wwanpower.attr, + &spic_attr_fanspeed.attr, + NULL +}; + +static struct attribute_group spic_attribute_group = { + .attrs = spic_attributes +}; + +/******** SONYPI compatibility **********/ +#ifdef CONFIG_SONYPI_COMPAT + +/* battery / brightness / temperature addresses */ +#define SONYPI_BAT_FLAGS 0x81 +#define SONYPI_LCD_LIGHT 0x96 +#define SONYPI_BAT1_PCTRM 0xa0 +#define SONYPI_BAT1_LEFT 0xa2 +#define SONYPI_BAT1_MAXRT 0xa4 +#define SONYPI_BAT2_PCTRM 0xa8 +#define SONYPI_BAT2_LEFT 0xaa +#define SONYPI_BAT2_MAXRT 0xac +#define SONYPI_BAT1_MAXTK 0xb0 +#define SONYPI_BAT1_FULL 0xb2 +#define SONYPI_BAT2_MAXTK 0xb8 +#define SONYPI_BAT2_FULL 0xba +#define SONYPI_TEMP_STATUS 0xC1 + +struct sonypi_compat_s { + struct fasync_struct *fifo_async; + struct kfifo *fifo; + spinlock_t fifo_lock; + wait_queue_head_t fifo_proc_list; + atomic_t open_count; +}; +static struct sonypi_compat_s sonypi_compat = { + .open_count = ATOMIC_INIT(0), +}; + +static int sonypi_misc_fasync(int fd, struct file *filp, int on) +{ + int retval; + + retval = fasync_helper(fd, filp, on, &sonypi_compat.fifo_async); + if (retval < 0) + return retval; + return 0; +} + +static int sonypi_misc_release(struct inode *inode, struct file *file) +{ + atomic_dec(&sonypi_compat.open_count); + return 0; +} + +static int sonypi_misc_open(struct inode *inode, struct file *file) +{ + /* Flush input queue on first open */ + lock_kernel(); + if (atomic_inc_return(&sonypi_compat.open_count) == 1) + kfifo_reset(sonypi_compat.fifo); + unlock_kernel(); + return 0; +} + +static ssize_t sonypi_misc_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) +{ + ssize_t ret; + unsigned char c; + + if ((kfifo_len(sonypi_compat.fifo) == 0) && + (file->f_flags & O_NONBLOCK)) + return -EAGAIN; + + ret = wait_event_interruptible(sonypi_compat.fifo_proc_list, + kfifo_len(sonypi_compat.fifo) != 0); + if (ret) + return ret; + + while (ret < count && + (kfifo_get(sonypi_compat.fifo, &c, sizeof(c)) == sizeof(c))) { + if (put_user(c, buf++)) + return -EFAULT; + ret++; + } + + if (ret > 0) { + struct inode *inode = file->f_path.dentry->d_inode; + inode->i_atime = current_fs_time(inode->i_sb); + } + + return ret; +} + +static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait) +{ + poll_wait(file, &sonypi_compat.fifo_proc_list, wait); + if (kfifo_len(sonypi_compat.fifo)) + return POLLIN | POLLRDNORM; + return 0; +} + +static int ec_read16(u8 addr, u16 *value) +{ + u8 val_lb, val_hb; + if (ec_read(addr, &val_lb)) + return -1; + if (ec_read(addr + 1, &val_hb)) + return -1; + *value = val_lb | (val_hb << 8); + return 0; +} + +static int sonypi_misc_ioctl(struct inode *ip, struct file *fp, + unsigned int cmd, unsigned long arg) +{ + int ret = 0; + void __user 
*argp = (void __user *)arg; + u8 val8; + u16 val16; + int value; + + mutex_lock(&spic_dev.lock); + switch (cmd) { + case SONYPI_IOCGBRT: + if (sony_backlight_device == NULL) { + ret = -EIO; + break; + } + if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &value)) { + ret = -EIO; + break; + } + val8 = ((value & 0xff) - 1) << 5; + if (copy_to_user(argp, &val8, sizeof(val8))) + ret = -EFAULT; + break; + case SONYPI_IOCSBRT: + if (sony_backlight_device == NULL) { + ret = -EIO; + break; + } + if (copy_from_user(&val8, argp, sizeof(val8))) { + ret = -EFAULT; + break; + } + if (acpi_callsetfunc(sony_nc_acpi_handle, "SBRT", + (val8 >> 5) + 1, NULL)) { + ret = -EIO; + break; + } + /* sync the backlight device status */ + sony_backlight_device->props.brightness = + sony_backlight_get_brightness(sony_backlight_device); + break; + case SONYPI_IOCGBAT1CAP: + if (ec_read16(SONYPI_BAT1_FULL, &val16)) { + ret = -EIO; + break; + } + if (copy_to_user(argp, &val16, sizeof(val16))) + ret = -EFAULT; + break; + case SONYPI_IOCGBAT1REM: + if (ec_read16(SONYPI_BAT1_LEFT, &val16)) { + ret = -EIO; + break; + } + if (copy_to_user(argp, &val16, sizeof(val16))) + ret = -EFAULT; + break; + case SONYPI_IOCGBAT2CAP: + if (ec_read16(SONYPI_BAT2_FULL, &val16)) { + ret = -EIO; + break; + } + if (copy_to_user(argp, &val16, sizeof(val16))) + ret = -EFAULT; + break; + case SONYPI_IOCGBAT2REM: + if (ec_read16(SONYPI_BAT2_LEFT, &val16)) { + ret = -EIO; + break; + } + if (copy_to_user(argp, &val16, sizeof(val16))) + ret = -EFAULT; + break; + case SONYPI_IOCGBATFLAGS: + if (ec_read(SONYPI_BAT_FLAGS, &val8)) { + ret = -EIO; + break; + } + val8 &= 0x07; + if (copy_to_user(argp, &val8, sizeof(val8))) + ret = -EFAULT; + break; + case SONYPI_IOCGBLUE: + val8 = spic_dev.bluetooth_power; + if (copy_to_user(argp, &val8, sizeof(val8))) + ret = -EFAULT; + break; + case SONYPI_IOCSBLUE: + if (copy_from_user(&val8, argp, sizeof(val8))) { + ret = -EFAULT; + break; + } + __sony_pic_set_bluetoothpower(val8); + break; + /* FAN Controls */ + case SONYPI_IOCGFAN: + if (sony_pic_get_fanspeed(&val8)) { + ret = -EIO; + break; + } + if (copy_to_user(argp, &val8, sizeof(val8))) + ret = -EFAULT; + break; + case SONYPI_IOCSFAN: + if (copy_from_user(&val8, argp, sizeof(val8))) { + ret = -EFAULT; + break; + } + if (sony_pic_set_fanspeed(val8)) + ret = -EIO; + break; + /* GET Temperature (useful under APM) */ + case SONYPI_IOCGTEMP: + if (ec_read(SONYPI_TEMP_STATUS, &val8)) { + ret = -EIO; + break; + } + if (copy_to_user(argp, &val8, sizeof(val8))) + ret = -EFAULT; + break; + default: + ret = -EINVAL; + } + mutex_unlock(&spic_dev.lock); + return ret; +} + +static const struct file_operations sonypi_misc_fops = { + .owner = THIS_MODULE, + .read = sonypi_misc_read, + .poll = sonypi_misc_poll, + .open = sonypi_misc_open, + .release = sonypi_misc_release, + .fasync = sonypi_misc_fasync, + .ioctl = sonypi_misc_ioctl, +}; + +static struct miscdevice sonypi_misc_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "sonypi", + .fops = &sonypi_misc_fops, +}; + +static void sonypi_compat_report_event(u8 event) +{ + kfifo_put(sonypi_compat.fifo, (unsigned char *)&event, sizeof(event)); + kill_fasync(&sonypi_compat.fifo_async, SIGIO, POLL_IN); + wake_up_interruptible(&sonypi_compat.fifo_proc_list); +} + +static int sonypi_compat_init(void) +{ + int error; + + spin_lock_init(&sonypi_compat.fifo_lock); + sonypi_compat.fifo = kfifo_alloc(SONY_LAPTOP_BUF_SIZE, GFP_KERNEL, + &sonypi_compat.fifo_lock); + if (IS_ERR(sonypi_compat.fifo)) { + printk(KERN_ERR DRV_PFX "kfifo_alloc 
failed\n"); + return PTR_ERR(sonypi_compat.fifo); + } + + init_waitqueue_head(&sonypi_compat.fifo_proc_list); + + if (minor != -1) + sonypi_misc_device.minor = minor; + error = misc_register(&sonypi_misc_device); + if (error) { + printk(KERN_ERR DRV_PFX "misc_register failed\n"); + goto err_free_kfifo; + } + if (minor == -1) + printk(KERN_INFO DRV_PFX "device allocated minor is %d\n", + sonypi_misc_device.minor); + + return 0; + +err_free_kfifo: + kfifo_free(sonypi_compat.fifo); + return error; +} + +static void sonypi_compat_exit(void) +{ + misc_deregister(&sonypi_misc_device); + kfifo_free(sonypi_compat.fifo); +} +#else +static int sonypi_compat_init(void) { return 0; } +static void sonypi_compat_exit(void) { } +static void sonypi_compat_report_event(u8 event) { } +#endif /* CONFIG_SONYPI_COMPAT */ + +/* + * ACPI callbacks + */ +static acpi_status +sony_pic_read_possible_resource(struct acpi_resource *resource, void *context) +{ + u32 i; + struct sony_pic_dev *dev = (struct sony_pic_dev *)context; + + switch (resource->type) { + case ACPI_RESOURCE_TYPE_START_DEPENDENT: + { + /* start IO enumeration */ + struct sony_pic_ioport *ioport = kzalloc(sizeof(*ioport), GFP_KERNEL); + if (!ioport) + return AE_ERROR; + + list_add(&ioport->list, &dev->ioports); + return AE_OK; + } + + case ACPI_RESOURCE_TYPE_END_DEPENDENT: + /* end IO enumeration */ + return AE_OK; + + case ACPI_RESOURCE_TYPE_IRQ: + { + struct acpi_resource_irq *p = &resource->data.irq; + struct sony_pic_irq *interrupt = NULL; + if (!p || !p->interrupt_count) { + /* + * IRQ descriptors may have no IRQ# bits set, + * particularly those those w/ _STA disabled + */ + dprintk("Blank IRQ resource\n"); + return AE_OK; + } + for (i = 0; i < p->interrupt_count; i++) { + if (!p->interrupts[i]) { + printk(KERN_WARNING DRV_PFX + "Invalid IRQ %d\n", + p->interrupts[i]); + continue; + } + interrupt = kzalloc(sizeof(*interrupt), + GFP_KERNEL); + if (!interrupt) + return AE_ERROR; + + list_add(&interrupt->list, &dev->interrupts); + interrupt->irq.triggering = p->triggering; + interrupt->irq.polarity = p->polarity; + interrupt->irq.sharable = p->sharable; + interrupt->irq.interrupt_count = 1; + interrupt->irq.interrupts[0] = p->interrupts[i]; + } + return AE_OK; + } + case ACPI_RESOURCE_TYPE_IO: + { + struct acpi_resource_io *io = &resource->data.io; + struct sony_pic_ioport *ioport = + list_first_entry(&dev->ioports, struct sony_pic_ioport, list); + if (!io) { + dprintk("Blank IO resource\n"); + return AE_OK; + } + + if (!ioport->io1.minimum) { + memcpy(&ioport->io1, io, sizeof(*io)); + dprintk("IO1 at 0x%.4x (0x%.2x)\n", ioport->io1.minimum, + ioport->io1.address_length); + } + else if (!ioport->io2.minimum) { + memcpy(&ioport->io2, io, sizeof(*io)); + dprintk("IO2 at 0x%.4x (0x%.2x)\n", ioport->io2.minimum, + ioport->io2.address_length); + } + else { + printk(KERN_ERR DRV_PFX "Unknown SPIC Type, more than 2 IO Ports\n"); + return AE_ERROR; + } + return AE_OK; + } + default: + dprintk("Resource %d isn't an IRQ nor an IO port\n", + resource->type); + + case ACPI_RESOURCE_TYPE_END_TAG: + return AE_OK; + } + return AE_CTRL_TERMINATE; +} + +static int sony_pic_possible_resources(struct acpi_device *device) +{ + int result = 0; + acpi_status status = AE_OK; + + if (!device) + return -EINVAL; + + /* get device status */ + /* see acpi_pci_link_get_current acpi_pci_link_get_possible */ + dprintk("Evaluating _STA\n"); + result = acpi_bus_get_status(device); + if (result) { + printk(KERN_WARNING DRV_PFX "Unable to read status\n"); + goto end; + } + + if 
(!device->status.enabled) + dprintk("Device disabled\n"); + else + dprintk("Device enabled\n"); + + /* + * Query and parse 'method' + */ + dprintk("Evaluating %s\n", METHOD_NAME__PRS); + status = acpi_walk_resources(device->handle, METHOD_NAME__PRS, + sony_pic_read_possible_resource, &spic_dev); + if (ACPI_FAILURE(status)) { + printk(KERN_WARNING DRV_PFX + "Failure evaluating %s\n", + METHOD_NAME__PRS); + result = -ENODEV; + } +end: + return result; +} + +/* + * Disable the spic device by calling its _DIS method + */ +static int sony_pic_disable(struct acpi_device *device) +{ + acpi_status ret = acpi_evaluate_object(device->handle, "_DIS", NULL, + NULL); + + if (ACPI_FAILURE(ret) && ret != AE_NOT_FOUND) + return -ENXIO; + + dprintk("Device disabled\n"); + return 0; +} + + +/* + * Based on drivers/acpi/pci_link.c:acpi_pci_link_set + * + * Call _SRS to set current resources + */ +static int sony_pic_enable(struct acpi_device *device, + struct sony_pic_ioport *ioport, struct sony_pic_irq *irq) +{ + acpi_status status; + int result = 0; + /* Type 1 resource layout is: + * IO + * IO + * IRQNoFlags + * End + * + * Type 2 and 3 resource layout is: + * IO + * IRQNoFlags + * End + */ + struct { + struct acpi_resource res1; + struct acpi_resource res2; + struct acpi_resource res3; + struct acpi_resource res4; + } *resource; + struct acpi_buffer buffer = { 0, NULL }; + + if (!ioport || !irq) + return -EINVAL; + + /* init acpi_buffer */ + resource = kzalloc(sizeof(*resource) + 1, GFP_KERNEL); + if (!resource) + return -ENOMEM; + + buffer.length = sizeof(*resource) + 1; + buffer.pointer = resource; + + /* setup Type 1 resources */ + if (spic_dev.control->model == SONYPI_DEVICE_TYPE1) { + + /* setup io resources */ + resource->res1.type = ACPI_RESOURCE_TYPE_IO; + resource->res1.length = sizeof(struct acpi_resource); + memcpy(&resource->res1.data.io, &ioport->io1, + sizeof(struct acpi_resource_io)); + + resource->res2.type = ACPI_RESOURCE_TYPE_IO; + resource->res2.length = sizeof(struct acpi_resource); + memcpy(&resource->res2.data.io, &ioport->io2, + sizeof(struct acpi_resource_io)); + + /* setup irq resource */ + resource->res3.type = ACPI_RESOURCE_TYPE_IRQ; + resource->res3.length = sizeof(struct acpi_resource); + memcpy(&resource->res3.data.irq, &irq->irq, + sizeof(struct acpi_resource_irq)); + /* we requested a shared irq */ + resource->res3.data.irq.sharable = ACPI_SHARED; + + resource->res4.type = ACPI_RESOURCE_TYPE_END_TAG; + + } + /* setup Type 2/3 resources */ + else { + /* setup io resource */ + resource->res1.type = ACPI_RESOURCE_TYPE_IO; + resource->res1.length = sizeof(struct acpi_resource); + memcpy(&resource->res1.data.io, &ioport->io1, + sizeof(struct acpi_resource_io)); + + /* setup irq resource */ + resource->res2.type = ACPI_RESOURCE_TYPE_IRQ; + resource->res2.length = sizeof(struct acpi_resource); + memcpy(&resource->res2.data.irq, &irq->irq, + sizeof(struct acpi_resource_irq)); + /* we requested a shared irq */ + resource->res2.data.irq.sharable = ACPI_SHARED; + + resource->res3.type = ACPI_RESOURCE_TYPE_END_TAG; + } + + /* Attempt to set the resource */ + dprintk("Evaluating _SRS\n"); + status = acpi_set_current_resources(device->handle, &buffer); + + /* check for total failure */ + if (ACPI_FAILURE(status)) { + printk(KERN_ERR DRV_PFX "Error evaluating _SRS\n"); + result = -ENODEV; + goto end; + } + + /* Necessary device initializations calls (from sonypi) */ + sony_pic_call1(0x82); + sony_pic_call2(0x81, 0xff); + sony_pic_call1(compat ? 
0x92 : 0x82); + +end: + kfree(resource); + return result; +} + +/***************** + * + * ISR: some event is available + * + *****************/ +static irqreturn_t sony_pic_irq(int irq, void *dev_id) +{ + int i, j; + u8 ev = 0; + u8 data_mask = 0; + u8 device_event = 0; + + struct sony_pic_dev *dev = (struct sony_pic_dev *) dev_id; + + ev = inb_p(dev->cur_ioport->io1.minimum); + if (dev->cur_ioport->io2.minimum) + data_mask = inb_p(dev->cur_ioport->io2.minimum); + else + data_mask = inb_p(dev->cur_ioport->io1.minimum + + dev->control->evport_offset); + + dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n", + ev, data_mask, dev->cur_ioport->io1.minimum, + dev->control->evport_offset); + + if (ev == 0x00 || ev == 0xff) + return IRQ_HANDLED; + + for (i = 0; dev->control->event_types[i].mask; i++) { + + if ((data_mask & dev->control->event_types[i].data) != + dev->control->event_types[i].data) + continue; + + if (!(mask & dev->control->event_types[i].mask)) + continue; + + for (j = 0; dev->control->event_types[i].events[j].event; j++) { + if (ev == dev->control->event_types[i].events[j].data) { + device_event = + dev->control-> + event_types[i].events[j].event; + goto found; + } + } + } + /* Still not able to decode the event; try to pass + * it over to the minidriver + */ + if (dev->control->handle_irq && + dev->control->handle_irq(data_mask, ev) == 0) + return IRQ_HANDLED; + + dprintk("unknown event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n", + ev, data_mask, dev->cur_ioport->io1.minimum, + dev->control->evport_offset); + return IRQ_HANDLED; + +found: + sony_laptop_report_input_event(device_event); + acpi_bus_generate_proc_event(dev->acpi_dev, 1, device_event); + sonypi_compat_report_event(device_event); + + return IRQ_HANDLED; +} + +/***************** + * + * ACPI driver + * + *****************/ +static int sony_pic_remove(struct acpi_device *device, int type) +{ + struct sony_pic_ioport *io, *tmp_io; + struct sony_pic_irq *irq, *tmp_irq; + + if (sony_pic_disable(device)) { + printk(KERN_ERR DRV_PFX "Couldn't disable device.\n"); + return -ENXIO; + } + + free_irq(spic_dev.cur_irq->irq.interrupts[0], &spic_dev); + release_region(spic_dev.cur_ioport->io1.minimum, + spic_dev.cur_ioport->io1.address_length); + if (spic_dev.cur_ioport->io2.minimum) + release_region(spic_dev.cur_ioport->io2.minimum, + spic_dev.cur_ioport->io2.address_length); + + sonypi_compat_exit(); + + sony_laptop_remove_input(); + + /* pf attrs */ + sysfs_remove_group(&sony_pf_device->dev.kobj, &spic_attribute_group); + sony_pf_remove(); + + list_for_each_entry_safe(io, tmp_io, &spic_dev.ioports, list) { + list_del(&io->list); + kfree(io); + } + list_for_each_entry_safe(irq, tmp_irq, &spic_dev.interrupts, list) { + list_del(&irq->list); + kfree(irq); + } + spic_dev.cur_ioport = NULL; + spic_dev.cur_irq = NULL; + + dprintk(SONY_PIC_DRIVER_NAME " removed.\n"); + return 0; +} + +static int sony_pic_add(struct acpi_device *device) +{ + int result; + struct sony_pic_ioport *io, *tmp_io; + struct sony_pic_irq *irq, *tmp_irq; + + printk(KERN_INFO DRV_PFX "%s v%s.\n", + SONY_PIC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION); + + spic_dev.acpi_dev = device; + strcpy(acpi_device_class(device), "sony/hotkey"); + sony_pic_detect_device_type(&spic_dev); + mutex_init(&spic_dev.lock); + + /* read _PRS resources */ + result = sony_pic_possible_resources(device); + if (result) { + printk(KERN_ERR DRV_PFX + "Unable to read possible resources.\n"); + goto err_free_resources; + } + + /* setup input devices and helper fifo */ + result =
sony_laptop_setup_input(device); + if (result) { + printk(KERN_ERR DRV_PFX + "Unable to create input devices.\n"); + goto err_free_resources; + } + + if (sonypi_compat_init()) + goto err_remove_input; + + /* request io port */ + list_for_each_entry_reverse(io, &spic_dev.ioports, list) { + if (request_region(io->io1.minimum, io->io1.address_length, + "Sony Programmable I/O Device")) { + dprintk("I/O port1: 0x%.4x (0x%.4x) + 0x%.2x\n", + io->io1.minimum, io->io1.maximum, + io->io1.address_length); + /* Type 1 has 2 ioports */ + if (io->io2.minimum) { + if (request_region(io->io2.minimum, + io->io2.address_length, + "Sony Programmable I/O Device")) { + dprintk("I/O port2: 0x%.4x (0x%.4x) + 0x%.2x\n", + io->io2.minimum, io->io2.maximum, + io->io2.address_length); + spic_dev.cur_ioport = io; + break; + } + else { + dprintk("Unable to get I/O port2: " + "0x%.4x (0x%.4x) + 0x%.2x\n", + io->io2.minimum, io->io2.maximum, + io->io2.address_length); + release_region(io->io1.minimum, + io->io1.address_length); + } + } + else { + spic_dev.cur_ioport = io; + break; + } + } + } + if (!spic_dev.cur_ioport) { + printk(KERN_ERR DRV_PFX "Failed to request_region.\n"); + result = -ENODEV; + goto err_remove_compat; + } + + /* request IRQ */ + list_for_each_entry_reverse(irq, &spic_dev.interrupts, list) { + if (!request_irq(irq->irq.interrupts[0], sony_pic_irq, + IRQF_SHARED, "sony-laptop", &spic_dev)) { + dprintk("IRQ: %d - triggering: %d - " + "polarity: %d - shr: %d\n", + irq->irq.interrupts[0], + irq->irq.triggering, + irq->irq.polarity, + irq->irq.sharable); + spic_dev.cur_irq = irq; + break; + } + } + if (!spic_dev.cur_irq) { + printk(KERN_ERR DRV_PFX "Failed to request_irq.\n"); + result = -ENODEV; + goto err_release_region; + } + + /* set resource status _SRS */ + result = sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq); + if (result) { + printk(KERN_ERR DRV_PFX "Couldn't enable device.\n"); + goto err_free_irq; + } + + spic_dev.bluetooth_power = -1; + /* create device attributes */ + result = sony_pf_add(); + if (result) + goto err_disable_device; + + result = sysfs_create_group(&sony_pf_device->dev.kobj, &spic_attribute_group); + if (result) + goto err_remove_pf; + + return 0; + +err_remove_pf: + sony_pf_remove(); + +err_disable_device: + sony_pic_disable(device); + +err_free_irq: + free_irq(spic_dev.cur_irq->irq.interrupts[0], &spic_dev); + +err_release_region: + release_region(spic_dev.cur_ioport->io1.minimum, + spic_dev.cur_ioport->io1.address_length); + if (spic_dev.cur_ioport->io2.minimum) + release_region(spic_dev.cur_ioport->io2.minimum, + spic_dev.cur_ioport->io2.address_length); + +err_remove_compat: + sonypi_compat_exit(); + +err_remove_input: + sony_laptop_remove_input(); + +err_free_resources: + list_for_each_entry_safe(io, tmp_io, &spic_dev.ioports, list) { + list_del(&io->list); + kfree(io); + } + list_for_each_entry_safe(irq, tmp_irq, &spic_dev.interrupts, list) { + list_del(&irq->list); + kfree(irq); + } + spic_dev.cur_ioport = NULL; + spic_dev.cur_irq = NULL; + + return result; +} + +static int sony_pic_suspend(struct acpi_device *device, pm_message_t state) +{ + if (sony_pic_disable(device)) + return -ENXIO; + return 0; +} + +static int sony_pic_resume(struct acpi_device *device) +{ + sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq); + return 0; +} + +static const struct acpi_device_id sony_pic_device_ids[] = { + {SONY_PIC_HID, 0}, + {"", 0}, +}; + +static struct acpi_driver sony_pic_driver = { + .name = SONY_PIC_DRIVER_NAME, + .class = SONY_PIC_CLASS, +
.ids = sony_pic_device_ids, + .owner = THIS_MODULE, + .ops = { + .add = sony_pic_add, + .remove = sony_pic_remove, + .suspend = sony_pic_suspend, + .resume = sony_pic_resume, + }, +}; + +static struct dmi_system_id __initdata sonypi_dmi_table[] = { + { + .ident = "Sony Vaio", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "PCG-"), + }, + }, + { + .ident = "Sony Vaio", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-"), + }, + }, + { } +}; + +static int __init sony_laptop_init(void) +{ + int result; + + if (!no_spic && dmi_check_system(sonypi_dmi_table)) { + result = acpi_bus_register_driver(&sony_pic_driver); + if (result) { + printk(KERN_ERR DRV_PFX + "Unable to register SPIC driver."); + goto out; + } + } + + result = acpi_bus_register_driver(&sony_nc_driver); + if (result) { + printk(KERN_ERR DRV_PFX "Unable to register SNC driver."); + goto out_unregister_pic; + } + + return 0; + +out_unregister_pic: + if (!no_spic) + acpi_bus_unregister_driver(&sony_pic_driver); +out: + return result; +} + +static void __exit sony_laptop_exit(void) +{ + acpi_bus_unregister_driver(&sony_nc_driver); + if (!no_spic) + acpi_bus_unregister_driver(&sony_pic_driver); +} + +module_init(sony_laptop_init); +module_exit(sony_laptop_exit); diff --git a/drivers/misc/tc1100-wmi.c b/drivers/misc/tc1100-wmi.c new file mode 100644 index 0000000..f25e4c9 --- /dev/null +++ b/drivers/misc/tc1100-wmi.c @@ -0,0 +1,290 @@ +/* + * HP Compaq TC1100 Tablet WMI Extras Driver + * + * Copyright (C) 2007 Carlos Corbacho <carlos@strangeworlds.co.uk> + * Copyright (C) 2004 Jamey Hicks <jamey.hicks@hp.com> + * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> + * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <acpi/acpi.h> +#include <acpi/actypes.h> +#include <acpi/acpi_bus.h> +#include <acpi/acpi_drivers.h> +#include <linux/platform_device.h> + +#define GUID "C364AC71-36DB-495A-8494-B439D472A505" + +#define TC1100_INSTANCE_WIRELESS 1 +#define TC1100_INSTANCE_JOGDIAL 2 + +#define TC1100_LOGPREFIX "tc1100-wmi: " +#define TC1100_INFO KERN_INFO TC1100_LOGPREFIX + +MODULE_AUTHOR("Jamey Hicks, Carlos Corbacho"); +MODULE_DESCRIPTION("HP Compaq TC1100 Tablet WMI Extras"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("wmi:C364AC71-36DB-495A-8494-B439D472A505"); + +static int tc1100_probe(struct platform_device *device); +static int tc1100_remove(struct platform_device *device); +static int tc1100_suspend(struct platform_device *device, pm_message_t state); +static int tc1100_resume(struct platform_device *device); + +static struct platform_driver tc1100_driver = { + .driver = { + .name = "tc1100-wmi", + .owner = THIS_MODULE, + }, + .probe = tc1100_probe, + .remove = tc1100_remove, + .suspend = tc1100_suspend, + .resume = tc1100_resume, +}; + +static struct platform_device *tc1100_device; + +struct tc1100_data { + u32 wireless; + u32 jogdial; +}; + +static struct tc1100_data suspend_data; + +/* -------------------------------------------------------------------------- + Device Management + -------------------------------------------------------------------------- */ + +static int get_state(u32 *out, u8 instance) +{ + u32 tmp; + acpi_status status; + struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *obj; + + if (!out) + return -EINVAL; + + if (instance > 2) + return -ENODEV; + + status = wmi_query_block(GUID, instance, &result); + if (ACPI_FAILURE(status)) + return -ENODEV; + + obj = (union acpi_object *) result.pointer; + if (obj && obj->type == ACPI_TYPE_BUFFER && + obj->buffer.length == sizeof(u32)) { + tmp = *((u32 *) obj->buffer.pointer); + } else { + tmp = 0; + } + + if (result.length > 0 && result.pointer) + kfree(result.pointer); + + switch (instance) { + case TC1100_INSTANCE_WIRELESS: + *out = (tmp == 3) ? 1 : 0; + return 0; + case TC1100_INSTANCE_JOGDIAL: + *out = (tmp == 1) ? 1 : 0; + return 0; + default: + return -ENODEV; + } +} + +static int set_state(u32 *in, u8 instance) +{ + u32 value; + acpi_status status; + struct acpi_buffer input; + + if (!in) + return -EINVAL; + + if (instance > 2) + return -ENODEV; + + switch (instance) { + case TC1100_INSTANCE_WIRELESS: + value = (*in) ? 1 : 2; + break; + case TC1100_INSTANCE_JOGDIAL: + value = (*in) ? 
0 : 1; + break; + default: + return -ENODEV; + } + + input.length = sizeof(u32); + input.pointer = &value; + + status = wmi_set_block(GUID, instance, &input); + if (ACPI_FAILURE(status)) + return -ENODEV; + + return 0; +} + +/* -------------------------------------------------------------------------- + FS Interface (/sys) + -------------------------------------------------------------------------- */ + +/* + * Read/ write bool sysfs macro + */ +#define show_set_bool(value, instance) \ +static ssize_t \ +show_bool_##value(struct device *dev, struct device_attribute *attr, \ + char *buf) \ +{ \ + u32 result; \ + acpi_status status = get_state(&result, instance); \ + if (ACPI_SUCCESS(status)) \ + return sprintf(buf, "%d\n", result); \ + return sprintf(buf, "Read error\n"); \ +} \ +\ +static ssize_t \ +set_bool_##value(struct device *dev, struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + u32 tmp = simple_strtoul(buf, NULL, 10); \ + acpi_status status = set_state(&tmp, instance); \ + if (ACPI_FAILURE(status)) \ + return -EINVAL; \ + return count; \ +} \ +static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \ + show_bool_##value, set_bool_##value); + +show_set_bool(wireless, TC1100_INSTANCE_WIRELESS); +show_set_bool(jogdial, TC1100_INSTANCE_JOGDIAL); + +static void remove_fs(void) +{ + device_remove_file(&tc1100_device->dev, &dev_attr_wireless); + device_remove_file(&tc1100_device->dev, &dev_attr_jogdial); +} + +static int add_fs(void) +{ + int ret; + + ret = device_create_file(&tc1100_device->dev, &dev_attr_wireless); + if (ret) + goto add_sysfs_error; + + ret = device_create_file(&tc1100_device->dev, &dev_attr_jogdial); + if (ret) + goto add_sysfs_error; + + return ret; + +add_sysfs_error: + remove_fs(); + return ret; +} + +/* -------------------------------------------------------------------------- + Driver Model + -------------------------------------------------------------------------- */ + +static int tc1100_probe(struct platform_device *device) +{ + int result = 0; + + result = add_fs(); + return result; +} + + +static int tc1100_remove(struct platform_device *device) +{ + remove_fs(); + return 0; +} + +static int tc1100_suspend(struct platform_device *dev, pm_message_t state) +{ + int ret; + + ret = get_state(&suspend_data.wireless, TC1100_INSTANCE_WIRELESS); + if (ret) + return ret; + + ret = get_state(&suspend_data.jogdial, TC1100_INSTANCE_JOGDIAL); + if (ret) + return ret; + + return ret; +} + +static int tc1100_resume(struct platform_device *dev) +{ + int ret; + + ret = set_state(&suspend_data.wireless, TC1100_INSTANCE_WIRELESS); + if (ret) + return ret; + + ret = set_state(&suspend_data.jogdial, TC1100_INSTANCE_JOGDIAL); + if (ret) + return ret; + + return ret; +} + +static int __init tc1100_init(void) +{ + int result = 0; + + if (!wmi_has_guid(GUID)) + return -ENODEV; + + result = platform_driver_register(&tc1100_driver); + if (result) + return result; + + tc1100_device = platform_device_alloc("tc1100-wmi", -1); + platform_device_add(tc1100_device); + + printk(TC1100_INFO "HP Compaq TC1100 Tablet WMI Extras loaded\n"); + + return result; +} + +static void __exit tc1100_exit(void) +{ + platform_device_del(tc1100_device); + platform_driver_unregister(&tc1100_driver); + + printk(TC1100_INFO "HP Compaq TC1100 Tablet WMI Extras unloaded\n"); +} + +module_init(tc1100_init); +module_exit(tc1100_exit); diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/misc/thinkpad_acpi.c new file mode 100644 index 0000000..36b88e7 --- /dev/null +++ 
b/drivers/misc/thinkpad_acpi.c @@ -0,0 +1,6949 @@ +/* + * thinkpad_acpi.c - ThinkPad ACPI Extras + * + * + * Copyright (C) 2004-2005 Borislav Deianov <borislav@users.sf.net> + * Copyright (C) 2006-2008 Henrique de Moraes Holschuh <hmh@hmh.eng.br> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +#define TPACPI_VERSION "0.21" +#define TPACPI_SYSFS_VERSION 0x020200 + +/* + * Changelog: + * 2007-10-20 changelog trimmed down + * + * 2007-03-27 0.14 renamed to thinkpad_acpi and moved to + * drivers/misc. + * + * 2006-11-22 0.13 new maintainer + * changelog now lives in git commit history, and will + * not be updated further in-file. + * + * 2005-03-17 0.11 support for 600e, 770x + * thanks to Jamie Lentin <lentinj@dial.pipex.com> + * + * 2005-01-16 0.9 use MODULE_VERSION + * thanks to Henrik Brix Andersen <brix@gentoo.org> + * fix parameter passing on module loading + * thanks to Rusty Russell <rusty@rustcorp.com.au> + * thanks to Jim Radford <radford@blackbean.org> + * 2004-11-08 0.8 fix init error case, don't return from a macro + * thanks to Chris Wright <chrisw@osdl.org> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/kthread.h> +#include <linux/freezer.h> +#include <linux/delay.h> + +#include <linux/nvram.h> +#include <linux/proc_fs.h> +#include <linux/sysfs.h> +#include <linux/backlight.h> +#include <linux/fb.h> +#include <linux/platform_device.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include <linux/input.h> +#include <linux/leds.h> +#include <linux/rfkill.h> +#include <asm/uaccess.h> + +#include <linux/dmi.h> +#include <linux/jiffies.h> +#include <linux/workqueue.h> + +#include <acpi/acpi_drivers.h> +#include <acpi/acnamesp.h> + +#include <linux/pci_ids.h> + + +/* ThinkPad CMOS commands */ +#define TP_CMOS_VOLUME_DOWN 0 +#define TP_CMOS_VOLUME_UP 1 +#define TP_CMOS_VOLUME_MUTE 2 +#define TP_CMOS_BRIGHTNESS_UP 4 +#define TP_CMOS_BRIGHTNESS_DOWN 5 +#define TP_CMOS_THINKLIGHT_ON 12 +#define TP_CMOS_THINKLIGHT_OFF 13 + +/* NVRAM Addresses */ +enum tp_nvram_addr { + TP_NVRAM_ADDR_HK2 = 0x57, + TP_NVRAM_ADDR_THINKLIGHT = 0x58, + TP_NVRAM_ADDR_VIDEO = 0x59, + TP_NVRAM_ADDR_BRIGHTNESS = 0x5e, + TP_NVRAM_ADDR_MIXER = 0x60, +}; + +/* NVRAM bit masks */ +enum { + TP_NVRAM_MASK_HKT_THINKPAD = 0x08, + TP_NVRAM_MASK_HKT_ZOOM = 0x20, + TP_NVRAM_MASK_HKT_DISPLAY = 0x40, + TP_NVRAM_MASK_HKT_HIBERNATE = 0x80, + TP_NVRAM_MASK_THINKLIGHT = 0x10, + TP_NVRAM_MASK_HKT_DISPEXPND = 0x30, + TP_NVRAM_MASK_HKT_BRIGHTNESS = 0x20, + TP_NVRAM_MASK_LEVEL_BRIGHTNESS = 0x0f, + TP_NVRAM_POS_LEVEL_BRIGHTNESS = 0, + TP_NVRAM_MASK_MUTE = 0x40, + TP_NVRAM_MASK_HKT_VOLUME = 0x80, + TP_NVRAM_MASK_LEVEL_VOLUME = 0x0f, + TP_NVRAM_POS_LEVEL_VOLUME = 0, +}; + +/* ACPI 
HIDs */ +#define TPACPI_ACPI_HKEY_HID "IBM0068" + +/* Input IDs */ +#define TPACPI_HKEY_INPUT_PRODUCT 0x5054 /* "TP" */ +#define TPACPI_HKEY_INPUT_VERSION 0x4101 + + +/**************************************************************************** + * Main driver + */ + +#define TPACPI_NAME "thinkpad" +#define TPACPI_DESC "ThinkPad ACPI Extras" +#define TPACPI_FILE TPACPI_NAME "_acpi" +#define TPACPI_URL "http://ibm-acpi.sf.net/" +#define TPACPI_MAIL "ibm-acpi-devel@lists.sourceforge.net" + +#define TPACPI_PROC_DIR "ibm" +#define TPACPI_ACPI_EVENT_PREFIX "ibm" +#define TPACPI_DRVR_NAME TPACPI_FILE +#define TPACPI_DRVR_SHORTNAME "tpacpi" +#define TPACPI_HWMON_DRVR_NAME TPACPI_NAME "_hwmon" + +#define TPACPI_NVRAM_KTHREAD_NAME "ktpacpi_nvramd" +#define TPACPI_WORKQUEUE_NAME "ktpacpid" + +#define TPACPI_MAX_ACPI_ARGS 3 + +/* rfkill switches */ +enum { + TPACPI_RFK_BLUETOOTH_SW_ID = 0, + TPACPI_RFK_WWAN_SW_ID, +}; + +/* Debugging */ +#define TPACPI_LOG TPACPI_FILE ": " +#define TPACPI_ERR KERN_ERR TPACPI_LOG +#define TPACPI_NOTICE KERN_NOTICE TPACPI_LOG +#define TPACPI_INFO KERN_INFO TPACPI_LOG +#define TPACPI_DEBUG KERN_DEBUG TPACPI_LOG + +#define TPACPI_DBG_ALL 0xffff +#define TPACPI_DBG_INIT 0x0001 +#define TPACPI_DBG_EXIT 0x0002 +#define dbg_printk(a_dbg_level, format, arg...) \ + do { if (dbg_level & a_dbg_level) \ + printk(TPACPI_DEBUG "%s: " format, __func__ , ## arg); \ + } while (0) +#ifdef CONFIG_THINKPAD_ACPI_DEBUG +#define vdbg_printk(a_dbg_level, format, arg...) \ + dbg_printk(a_dbg_level, format, ## arg) +static const char *str_supported(int is_supported); +#else +#define vdbg_printk(a_dbg_level, format, arg...) +#endif + +#define onoff(status, bit) ((status) & (1 << (bit)) ? "on" : "off") +#define enabled(status, bit) ((status) & (1 << (bit)) ? "enabled" : "disabled") +#define strlencmp(a, b) (strncmp((a), (b), strlen(b))) + + +/**************************************************************************** + * Driver-wide structs and misc. 
variables + */ + +struct ibm_struct; + +struct tp_acpi_drv_struct { + const struct acpi_device_id *hid; + struct acpi_driver *driver; + + void (*notify) (struct ibm_struct *, u32); + acpi_handle *handle; + u32 type; + struct acpi_device *device; +}; + +struct ibm_struct { + char *name; + + int (*read) (char *); + int (*write) (char *); + void (*exit) (void); + void (*resume) (void); + void (*suspend) (pm_message_t state); + + struct list_head all_drivers; + + struct tp_acpi_drv_struct *acpi; + + struct { + u8 acpi_driver_registered:1; + u8 acpi_notify_installed:1; + u8 proc_created:1; + u8 init_called:1; + u8 experimental:1; + } flags; +}; + +struct ibm_init_struct { + char param[32]; + + int (*init) (struct ibm_init_struct *); + struct ibm_struct *data; +}; + +static struct { +#ifdef CONFIG_THINKPAD_ACPI_BAY + u32 bay_status:1; + u32 bay_eject:1; + u32 bay_status2:1; + u32 bay_eject2:1; +#endif + u32 bluetooth:1; + u32 hotkey:1; + u32 hotkey_mask:1; + u32 hotkey_wlsw:1; + u32 hotkey_tablet:1; + u32 light:1; + u32 light_status:1; + u32 bright_16levels:1; + u32 bright_acpimode:1; + u32 wan:1; + u32 fan_ctrl_status_undef:1; + u32 input_device_registered:1; + u32 platform_drv_registered:1; + u32 platform_drv_attrs_registered:1; + u32 sensors_pdrv_registered:1; + u32 sensors_pdrv_attrs_registered:1; + u32 sensors_pdev_attrs_registered:1; + u32 hotkey_poll_active:1; +} tp_features; + +static struct { + u16 hotkey_mask_ff:1; + u16 bright_cmos_ec_unsync:1; +} tp_warned; + +struct thinkpad_id_data { + unsigned int vendor; /* ThinkPad vendor: + * PCI_VENDOR_ID_IBM/PCI_VENDOR_ID_LENOVO */ + + char *bios_version_str; /* Something like 1ZET51WW (1.03z) */ + char *ec_version_str; /* Something like 1ZHT51WW-1.04a */ + + u16 bios_model; /* Big Endian, TP-1Y = 0x5931, 0 = unknown */ + u16 ec_model; + + char *model_str; /* ThinkPad T43 */ + char *nummodel_str; /* 9384A9C for a 9384-A9C model */ +}; +static struct thinkpad_id_data thinkpad_id; + +static enum { + TPACPI_LIFE_INIT = 0, + TPACPI_LIFE_RUNNING, + TPACPI_LIFE_EXITING, +} tpacpi_lifecycle; + +static int experimental; +static u32 dbg_level; + +static struct workqueue_struct *tpacpi_wq; + +/* Special LED class that can defer work */ +struct tpacpi_led_classdev { + struct led_classdev led_classdev; + struct work_struct work; + enum led_brightness new_brightness; + unsigned int led; +}; + +/**************************************************************************** + **************************************************************************** + * + * ACPI Helpers and device model + * + **************************************************************************** + ****************************************************************************/ + +/************************************************************************* + * ACPI basic handles + */ + +static acpi_handle root_handle; + +#define TPACPI_HANDLE(object, parent, paths...) 
\ + static acpi_handle object##_handle; \ + static acpi_handle *object##_parent = &parent##_handle; \ + static char *object##_path; \ + static char *object##_paths[] = { paths } + +TPACPI_HANDLE(ec, root, "\\_SB.PCI0.ISA.EC0", /* 240, 240x */ + "\\_SB.PCI.ISA.EC", /* 570 */ + "\\_SB.PCI0.ISA0.EC0", /* 600e/x, 770e, 770x */ + "\\_SB.PCI0.ISA.EC", /* A21e, A2xm/p, T20-22, X20-21 */ + "\\_SB.PCI0.AD4S.EC0", /* i1400, R30 */ + "\\_SB.PCI0.ICH3.EC0", /* R31 */ + "\\_SB.PCI0.LPC.EC", /* all others */ + ); + +TPACPI_HANDLE(ecrd, ec, "ECRD"); /* 570 */ +TPACPI_HANDLE(ecwr, ec, "ECWR"); /* 570 */ + +TPACPI_HANDLE(cmos, root, "\\UCMS", /* R50, R50e, R50p, R51, */ + /* T4x, X31, X40 */ + "\\CMOS", /* A3x, G4x, R32, T23, T30, X22-24, X30 */ + "\\CMS", /* R40, R40e */ + ); /* all others */ + +TPACPI_HANDLE(hkey, ec, "\\_SB.HKEY", /* 600e/x, 770e, 770x */ + "^HKEY", /* R30, R31 */ + "HKEY", /* all others */ + ); /* 570 */ + +TPACPI_HANDLE(vid, root, "\\_SB.PCI.AGP.VGA", /* 570 */ + "\\_SB.PCI0.AGP0.VID0", /* 600e/x, 770x */ + "\\_SB.PCI0.VID0", /* 770e */ + "\\_SB.PCI0.VID", /* A21e, G4x, R50e, X30, X40 */ + "\\_SB.PCI0.AGP.VID", /* all others */ + ); /* R30, R31 */ + + +/************************************************************************* + * ACPI helpers + */ + +static int acpi_evalf(acpi_handle handle, + void *res, char *method, char *fmt, ...) +{ + char *fmt0 = fmt; + struct acpi_object_list params; + union acpi_object in_objs[TPACPI_MAX_ACPI_ARGS]; + struct acpi_buffer result, *resultp; + union acpi_object out_obj; + acpi_status status; + va_list ap; + char res_type; + int success; + int quiet; + + if (!*fmt) { + printk(TPACPI_ERR "acpi_evalf() called with empty format\n"); + return 0; + } + + if (*fmt == 'q') { + quiet = 1; + fmt++; + } else + quiet = 0; + + res_type = *(fmt++); + + params.count = 0; + params.pointer = &in_objs[0]; + + va_start(ap, fmt); + while (*fmt) { + char c = *(fmt++); + switch (c) { + case 'd': /* int */ + in_objs[params.count].integer.value = va_arg(ap, int); + in_objs[params.count++].type = ACPI_TYPE_INTEGER; + break; + /* add more types as needed */ + default: + printk(TPACPI_ERR "acpi_evalf() called " + "with invalid format character '%c'\n", c); + return 0; + } + } + va_end(ap); + + if (res_type != 'v') { + result.length = sizeof(out_obj); + result.pointer = &out_obj; + resultp = &result; + } else + resultp = NULL; + + status = acpi_evaluate_object(handle, method, ¶ms, resultp); + + switch (res_type) { + case 'd': /* int */ + if (res) + *(int *)res = out_obj.integer.value; + success = status == AE_OK && out_obj.type == ACPI_TYPE_INTEGER; + break; + case 'v': /* void */ + success = status == AE_OK; + break; + /* add more types as needed */ + default: + printk(TPACPI_ERR "acpi_evalf() called " + "with invalid format character '%c'\n", res_type); + return 0; + } + + if (!success && !quiet) + printk(TPACPI_ERR "acpi_evalf(%s, %s, ...) 
failed: %d\n", + method, fmt0, status); + + return success; +} + +static int acpi_ec_read(int i, u8 *p) +{ + int v; + + if (ecrd_handle) { + if (!acpi_evalf(ecrd_handle, &v, NULL, "dd", i)) + return 0; + *p = v; + } else { + if (ec_read(i, p) < 0) + return 0; + } + + return 1; +} + +static int acpi_ec_write(int i, u8 v) +{ + if (ecwr_handle) { + if (!acpi_evalf(ecwr_handle, NULL, NULL, "vdd", i, v)) + return 0; + } else { + if (ec_write(i, v) < 0) + return 0; + } + + return 1; +} + +#if defined(CONFIG_THINKPAD_ACPI_DOCK) || defined(CONFIG_THINKPAD_ACPI_BAY) +static int _sta(acpi_handle handle) +{ + int status; + + if (!handle || !acpi_evalf(handle, &status, "_STA", "d")) + status = 0; + + return status; +} +#endif + +static int issue_thinkpad_cmos_command(int cmos_cmd) +{ + if (!cmos_handle) + return -ENXIO; + + if (!acpi_evalf(cmos_handle, NULL, NULL, "vd", cmos_cmd)) + return -EIO; + + return 0; +} + +/************************************************************************* + * ACPI device model + */ + +#define TPACPI_ACPIHANDLE_INIT(object) \ + drv_acpi_handle_init(#object, &object##_handle, *object##_parent, \ + object##_paths, ARRAY_SIZE(object##_paths), &object##_path) + +static void drv_acpi_handle_init(char *name, + acpi_handle *handle, acpi_handle parent, + char **paths, int num_paths, char **path) +{ + int i; + acpi_status status; + + vdbg_printk(TPACPI_DBG_INIT, "trying to locate ACPI handle for %s\n", + name); + + for (i = 0; i < num_paths; i++) { + status = acpi_get_handle(parent, paths[i], handle); + if (ACPI_SUCCESS(status)) { + *path = paths[i]; + dbg_printk(TPACPI_DBG_INIT, + "Found ACPI handle %s for %s\n", + *path, name); + return; + } + } + + vdbg_printk(TPACPI_DBG_INIT, "ACPI handle for %s not found\n", + name); + *handle = NULL; +} + +static void dispatch_acpi_notify(acpi_handle handle, u32 event, void *data) +{ + struct ibm_struct *ibm = data; + + if (tpacpi_lifecycle != TPACPI_LIFE_RUNNING) + return; + + if (!ibm || !ibm->acpi || !ibm->acpi->notify) + return; + + ibm->acpi->notify(ibm, event); +} + +static int __init setup_acpi_notify(struct ibm_struct *ibm) +{ + acpi_status status; + int rc; + + BUG_ON(!ibm->acpi); + + if (!*ibm->acpi->handle) + return 0; + + vdbg_printk(TPACPI_DBG_INIT, + "setting up ACPI notify for %s\n", ibm->name); + + rc = acpi_bus_get_device(*ibm->acpi->handle, &ibm->acpi->device); + if (rc < 0) { + printk(TPACPI_ERR "acpi_bus_get_device(%s) failed: %d\n", + ibm->name, rc); + return -ENODEV; + } + + ibm->acpi->device->driver_data = ibm; + sprintf(acpi_device_class(ibm->acpi->device), "%s/%s", + TPACPI_ACPI_EVENT_PREFIX, + ibm->name); + + status = acpi_install_notify_handler(*ibm->acpi->handle, + ibm->acpi->type, dispatch_acpi_notify, ibm); + if (ACPI_FAILURE(status)) { + if (status == AE_ALREADY_EXISTS) { + printk(TPACPI_NOTICE + "another device driver is already " + "handling %s events\n", ibm->name); + } else { + printk(TPACPI_ERR + "acpi_install_notify_handler(%s) failed: %d\n", + ibm->name, status); + } + return -ENODEV; + } + ibm->flags.acpi_notify_installed = 1; + return 0; +} + +static int __init tpacpi_device_add(struct acpi_device *device) +{ + return 0; +} + +static int __init register_tpacpi_subdriver(struct ibm_struct *ibm) +{ + int rc; + + dbg_printk(TPACPI_DBG_INIT, + "registering %s as an ACPI driver\n", ibm->name); + + BUG_ON(!ibm->acpi); + + ibm->acpi->driver = kzalloc(sizeof(struct acpi_driver), GFP_KERNEL); + if (!ibm->acpi->driver) { + printk(TPACPI_ERR + "failed to allocate memory for ibm->acpi->driver\n"); + return 
-ENOMEM; + } + + sprintf(ibm->acpi->driver->name, "%s_%s", TPACPI_NAME, ibm->name); + ibm->acpi->driver->ids = ibm->acpi->hid; + + ibm->acpi->driver->ops.add = &tpacpi_device_add; + + rc = acpi_bus_register_driver(ibm->acpi->driver); + if (rc < 0) { + printk(TPACPI_ERR "acpi_bus_register_driver(%s) failed: %d\n", + ibm->name, rc); + kfree(ibm->acpi->driver); + ibm->acpi->driver = NULL; + } else if (!rc) + ibm->flags.acpi_driver_registered = 1; + + return rc; +} + + +/**************************************************************************** + **************************************************************************** + * + * Procfs Helpers + * + **************************************************************************** + ****************************************************************************/ + +static int dispatch_procfs_read(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + struct ibm_struct *ibm = data; + int len; + + if (!ibm || !ibm->read) + return -EINVAL; + + len = ibm->read(page); + if (len < 0) + return len; + + if (len <= off + count) + *eof = 1; + *start = page + off; + len -= off; + if (len > count) + len = count; + if (len < 0) + len = 0; + + return len; +} + +static int dispatch_procfs_write(struct file *file, + const char __user *userbuf, + unsigned long count, void *data) +{ + struct ibm_struct *ibm = data; + char *kernbuf; + int ret; + + if (!ibm || !ibm->write) + return -EINVAL; + + kernbuf = kmalloc(count + 2, GFP_KERNEL); + if (!kernbuf) + return -ENOMEM; + + if (copy_from_user(kernbuf, userbuf, count)) { + kfree(kernbuf); + return -EFAULT; + } + + kernbuf[count] = 0; + strcat(kernbuf, ","); + ret = ibm->write(kernbuf); + if (ret == 0) + ret = count; + + kfree(kernbuf); + + return ret; +} + +static char *next_cmd(char **cmds) +{ + char *start = *cmds; + char *end; + + while ((end = strchr(start, ',')) && end == start) + start = end + 1; + + if (!end) + return NULL; + + *end = 0; + *cmds = end + 1; + return start; +} + + +/**************************************************************************** + **************************************************************************** + * + * Device model: input, hwmon and platform + * + **************************************************************************** + ****************************************************************************/ + +static struct platform_device *tpacpi_pdev; +static struct platform_device *tpacpi_sensors_pdev; +static struct device *tpacpi_hwmon; +static struct input_dev *tpacpi_inputdev; +static struct mutex tpacpi_inputdev_send_mutex; +static LIST_HEAD(tpacpi_all_drivers); + +static int tpacpi_suspend_handler(struct platform_device *pdev, + pm_message_t state) +{ + struct ibm_struct *ibm, *itmp; + + list_for_each_entry_safe(ibm, itmp, + &tpacpi_all_drivers, + all_drivers) { + if (ibm->suspend) + (ibm->suspend)(state); + } + + return 0; +} + +static int tpacpi_resume_handler(struct platform_device *pdev) +{ + struct ibm_struct *ibm, *itmp; + + list_for_each_entry_safe(ibm, itmp, + &tpacpi_all_drivers, + all_drivers) { + if (ibm->resume) + (ibm->resume)(); + } + + return 0; +} + +static struct platform_driver tpacpi_pdriver = { + .driver = { + .name = TPACPI_DRVR_NAME, + .owner = THIS_MODULE, + }, + .suspend = tpacpi_suspend_handler, + .resume = tpacpi_resume_handler, +}; + +static struct platform_driver tpacpi_hwmon_pdriver = { + .driver = { + .name = TPACPI_HWMON_DRVR_NAME, + .owner = THIS_MODULE, + }, +}; + 
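The two dispatchers above define the whole procfs command protocol for the subdrivers: dispatch_procfs_read() hands the subdriver a page to fill, while dispatch_procfs_write() copies the user buffer, NUL-terminates it, and appends a trailing ',' so that next_cmd() always finds a delimiter after the last command. A minimal sketch of a subdriver built on these helpers (illustrative only — the "example" names are hypothetical; strlencmp() is this driver's length-checked strncmp wrapper, defined earlier in the file):

static int example_state;	/* hypothetical subdriver state */

static int example_read(char *p)
{
	int len = 0;

	len += sprintf(p + len, "status:\t\t%s\n",
		       example_state ? "enabled" : "disabled");
	len += sprintf(p + len, "commands:\tenable, disable\n");
	return len;
}

static int example_write(char *buf)
{
	char *cmd;

	/* next_cmd() NUL-terminates each comma-delimited command in
	 * place, skips empty ones, and returns NULL once the buffer
	 * is exhausted */
	while ((cmd = next_cmd(&buf))) {
		if (strlencmp(cmd, "enable") == 0)
			example_state = 1;
		else if (strlencmp(cmd, "disable") == 0)
			example_state = 0;
		else
			return -EINVAL;
	}
	return 0;	/* dispatch_procfs_write() reports success as "count" */
}

static struct ibm_struct example_driver_data = {
	.name	= "example",
	.read	= example_read,
	.write	= example_write,
};

Under the driver's usual /proc/acpi/ibm directory, "echo enable > /proc/acpi/ibm/example" would then reach example_write() with "enable," as its buffer, and several commands can be chained as "enable,disable".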
+/************************************************************************* + * sysfs support helpers + */ + +struct attribute_set { + unsigned int members, max_members; + struct attribute_group group; +}; + +struct attribute_set_obj { + struct attribute_set s; + struct attribute *a; +} __attribute__((packed)); + +static struct attribute_set *create_attr_set(unsigned int max_members, + const char *name) +{ + struct attribute_set_obj *sobj; + + if (max_members == 0) + return NULL; + + /* Allocates space for implicit NULL at the end too */ + sobj = kzalloc(sizeof(struct attribute_set_obj) + + max_members * sizeof(struct attribute *), + GFP_KERNEL); + if (!sobj) + return NULL; + sobj->s.max_members = max_members; + sobj->s.group.attrs = &sobj->a; + sobj->s.group.name = name; + + return &sobj->s; +} + +#define destroy_attr_set(_set) \ + kfree(_set); + +/* not multi-threaded safe, use it in a single thread per set */ +static int add_to_attr_set(struct attribute_set *s, struct attribute *attr) +{ + if (!s || !attr) + return -EINVAL; + + if (s->members >= s->max_members) + return -ENOMEM; + + s->group.attrs[s->members] = attr; + s->members++; + + return 0; +} + +static int add_many_to_attr_set(struct attribute_set *s, + struct attribute **attr, + unsigned int count) +{ + int i, res; + + for (i = 0; i < count; i++) { + res = add_to_attr_set(s, attr[i]); + if (res) + return res; + } + + return 0; +} + +static void delete_attr_set(struct attribute_set *s, struct kobject *kobj) +{ + sysfs_remove_group(kobj, &s->group); + destroy_attr_set(s); +} + +#define register_attr_set_with_sysfs(_attr_set, _kobj) \ + sysfs_create_group(_kobj, &_attr_set->group) + +static int parse_strtoul(const char *buf, + unsigned long max, unsigned long *value) +{ + char *endp; + + while (*buf && isspace(*buf)) + buf++; + *value = simple_strtoul(buf, &endp, 0); + while (*endp && isspace(*endp)) + endp++; + if (*endp || *value > max) + return -EINVAL; + + return 0; +} + +static void tpacpi_disable_brightness_delay(void) +{ + if (acpi_evalf(hkey_handle, NULL, "PWMS", "qvd", 0)) + printk(TPACPI_NOTICE + "ACPI backlight control delay disabled\n"); +} + +static int __init tpacpi_query_bcl_levels(acpi_handle handle) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *obj; + int rc; + + if (ACPI_SUCCESS(acpi_evaluate_object(handle, NULL, NULL, &buffer))) { + obj = (union acpi_object *)buffer.pointer; + if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) { + printk(TPACPI_ERR "Unknown _BCL data, " + "please report this to %s\n", TPACPI_MAIL); + rc = 0; + } else { + rc = obj->package.count; + } + } else { + return 0; + } + + kfree(buffer.pointer); + return rc; +} + +static acpi_status __init tpacpi_acpi_walk_find_bcl(acpi_handle handle, + u32 lvl, void *context, void **rv) +{ + char name[ACPI_PATH_SEGMENT_LENGTH]; + struct acpi_buffer buffer = { sizeof(name), &name }; + + if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer)) && + !strncmp("_BCL", name, sizeof(name) - 1)) { + BUG_ON(!rv || !*rv); + **(int **)rv = tpacpi_query_bcl_levels(handle); + return AE_CTRL_TERMINATE; + } else { + return AE_OK; + } +} + +/* + * Returns 0 (no ACPI _BCL or _BCL invalid), or size of brightness map + */ +static int __init tpacpi_check_std_acpi_brightness_support(void) +{ + int status; + int bcl_levels = 0; + void *bcl_ptr = &bcl_levels; + + if (!vid_handle) { + TPACPI_ACPIHANDLE_INIT(vid); + } + if (!vid_handle) + return 0; + + /* + * Search for a _BCL method, and execute it. 
This is safe on all + * ThinkPads, and as a side-effect, _BCL will place a Lenovo Vista + * BIOS in ACPI backlight control mode. We do NOT have to care + * about calling the _BCL method in an enabled video device, any + * will do for our purposes. + */ + + status = acpi_walk_namespace(ACPI_TYPE_METHOD, vid_handle, 3, + tpacpi_acpi_walk_find_bcl, NULL, + &bcl_ptr); + + if (ACPI_SUCCESS(status) && bcl_levels > 2) { + tp_features.bright_acpimode = 1; + return (bcl_levels - 2); + } + + return 0; +} + +static int __init tpacpi_new_rfkill(const unsigned int id, + struct rfkill **rfk, + const enum rfkill_type rfktype, + const char *name, + int (*toggle_radio)(void *, enum rfkill_state), + int (*get_state)(void *, enum rfkill_state *)) +{ + int res; + enum rfkill_state initial_state; + + *rfk = rfkill_allocate(&tpacpi_pdev->dev, rfktype); + if (!*rfk) { + printk(TPACPI_ERR + "failed to allocate memory for rfkill class\n"); + return -ENOMEM; + } + + (*rfk)->name = name; + (*rfk)->get_state = get_state; + (*rfk)->toggle_radio = toggle_radio; + + if (!get_state(NULL, &initial_state)) + (*rfk)->state = initial_state; + + res = rfkill_register(*rfk); + if (res < 0) { + printk(TPACPI_ERR + "failed to register %s rfkill switch: %d\n", + name, res); + rfkill_free(*rfk); + *rfk = NULL; + return res; + } + + return 0; +} + +/************************************************************************* + * thinkpad-acpi driver attributes + */ + +/* interface_version --------------------------------------------------- */ +static ssize_t tpacpi_driver_interface_version_show( + struct device_driver *drv, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "0x%08x\n", TPACPI_SYSFS_VERSION); +} + +static DRIVER_ATTR(interface_version, S_IRUGO, + tpacpi_driver_interface_version_show, NULL); + +/* debug_level --------------------------------------------------------- */ +static ssize_t tpacpi_driver_debug_show(struct device_driver *drv, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "0x%04x\n", dbg_level); +} + +static ssize_t tpacpi_driver_debug_store(struct device_driver *drv, + const char *buf, size_t count) +{ + unsigned long t; + + if (parse_strtoul(buf, 0xffff, &t)) + return -EINVAL; + + dbg_level = t; + + return count; +} + +static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, + tpacpi_driver_debug_show, tpacpi_driver_debug_store); + +/* version ------------------------------------------------------------- */ +static ssize_t tpacpi_driver_version_show(struct device_driver *drv, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%s v%s\n", + TPACPI_DESC, TPACPI_VERSION); +} + +static DRIVER_ATTR(version, S_IRUGO, + tpacpi_driver_version_show, NULL); + +/* --------------------------------------------------------------------- */ + +static struct driver_attribute *tpacpi_driver_attributes[] = { + &driver_attr_debug_level, &driver_attr_version, + &driver_attr_interface_version, +}; + +static int __init tpacpi_create_driver_attributes(struct device_driver *drv) +{ + int i, res; + + i = 0; + res = 0; + while (!res && i < ARRAY_SIZE(tpacpi_driver_attributes)) { + res = driver_create_file(drv, tpacpi_driver_attributes[i]); + i++; + } + + return res; +} + +static void tpacpi_remove_driver_attributes(struct device_driver *drv) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(tpacpi_driver_attributes); i++) + driver_remove_file(drv, tpacpi_driver_attributes[i]); +} + +/**************************************************************************** + **************************************************************************** + * + * 
Subdrivers + * + **************************************************************************** + ****************************************************************************/ + +/************************************************************************* + * thinkpad-acpi init subdriver + */ + +static int __init thinkpad_acpi_driver_init(struct ibm_init_struct *iibm) +{ + printk(TPACPI_INFO "%s v%s\n", TPACPI_DESC, TPACPI_VERSION); + printk(TPACPI_INFO "%s\n", TPACPI_URL); + + printk(TPACPI_INFO "ThinkPad BIOS %s, EC %s\n", + (thinkpad_id.bios_version_str) ? + thinkpad_id.bios_version_str : "unknown", + (thinkpad_id.ec_version_str) ? + thinkpad_id.ec_version_str : "unknown"); + + if (thinkpad_id.vendor && thinkpad_id.model_str) + printk(TPACPI_INFO "%s %s, model %s\n", + (thinkpad_id.vendor == PCI_VENDOR_ID_IBM) ? + "IBM" : ((thinkpad_id.vendor == + PCI_VENDOR_ID_LENOVO) ? + "Lenovo" : "Unknown vendor"), + thinkpad_id.model_str, + (thinkpad_id.nummodel_str) ? + thinkpad_id.nummodel_str : "unknown"); + + return 0; +} + +static int thinkpad_acpi_driver_read(char *p) +{ + int len = 0; + + len += sprintf(p + len, "driver:\t\t%s\n", TPACPI_DESC); + len += sprintf(p + len, "version:\t%s\n", TPACPI_VERSION); + + return len; +} + +static struct ibm_struct thinkpad_acpi_driver_data = { + .name = "driver", + .read = thinkpad_acpi_driver_read, +}; + +/************************************************************************* + * Hotkey subdriver + */ + +enum { /* hot key scan codes (derived from ACPI DSDT) */ + TP_ACPI_HOTKEYSCAN_FNF1 = 0, + TP_ACPI_HOTKEYSCAN_FNF2, + TP_ACPI_HOTKEYSCAN_FNF3, + TP_ACPI_HOTKEYSCAN_FNF4, + TP_ACPI_HOTKEYSCAN_FNF5, + TP_ACPI_HOTKEYSCAN_FNF6, + TP_ACPI_HOTKEYSCAN_FNF7, + TP_ACPI_HOTKEYSCAN_FNF8, + TP_ACPI_HOTKEYSCAN_FNF9, + TP_ACPI_HOTKEYSCAN_FNF10, + TP_ACPI_HOTKEYSCAN_FNF11, + TP_ACPI_HOTKEYSCAN_FNF12, + TP_ACPI_HOTKEYSCAN_FNBACKSPACE, + TP_ACPI_HOTKEYSCAN_FNINSERT, + TP_ACPI_HOTKEYSCAN_FNDELETE, + TP_ACPI_HOTKEYSCAN_FNHOME, + TP_ACPI_HOTKEYSCAN_FNEND, + TP_ACPI_HOTKEYSCAN_FNPAGEUP, + TP_ACPI_HOTKEYSCAN_FNPAGEDOWN, + TP_ACPI_HOTKEYSCAN_FNSPACE, + TP_ACPI_HOTKEYSCAN_VOLUMEUP, + TP_ACPI_HOTKEYSCAN_VOLUMEDOWN, + TP_ACPI_HOTKEYSCAN_MUTE, + TP_ACPI_HOTKEYSCAN_THINKPAD, +}; + +enum { /* Keys available through NVRAM polling */ + TPACPI_HKEY_NVRAM_KNOWN_MASK = 0x00fb88c0U, + TPACPI_HKEY_NVRAM_GOOD_MASK = 0x00fb8000U, +}; + +enum { /* Positions of some of the keys in hotkey masks */ + TP_ACPI_HKEY_DISPSWTCH_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNF7, + TP_ACPI_HKEY_DISPXPAND_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNF8, + TP_ACPI_HKEY_HIBERNATE_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNF12, + TP_ACPI_HKEY_BRGHTUP_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNHOME, + TP_ACPI_HKEY_BRGHTDWN_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNEND, + TP_ACPI_HKEY_THNKLGHT_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNPAGEUP, + TP_ACPI_HKEY_ZOOM_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNSPACE, + TP_ACPI_HKEY_VOLUP_MASK = 1 << TP_ACPI_HOTKEYSCAN_VOLUMEUP, + TP_ACPI_HKEY_VOLDWN_MASK = 1 << TP_ACPI_HOTKEYSCAN_VOLUMEDOWN, + TP_ACPI_HKEY_MUTE_MASK = 1 << TP_ACPI_HOTKEYSCAN_MUTE, + TP_ACPI_HKEY_THINKPAD_MASK = 1 << TP_ACPI_HOTKEYSCAN_THINKPAD, +}; + +enum { /* NVRAM to ACPI HKEY group map */ + TP_NVRAM_HKEY_GROUP_HK2 = TP_ACPI_HKEY_THINKPAD_MASK | + TP_ACPI_HKEY_ZOOM_MASK | + TP_ACPI_HKEY_DISPSWTCH_MASK | + TP_ACPI_HKEY_HIBERNATE_MASK, + TP_NVRAM_HKEY_GROUP_BRIGHTNESS = TP_ACPI_HKEY_BRGHTUP_MASK | + TP_ACPI_HKEY_BRGHTDWN_MASK, + TP_NVRAM_HKEY_GROUP_VOLUME = TP_ACPI_HKEY_VOLUP_MASK | + TP_ACPI_HKEY_VOLDWN_MASK | + TP_ACPI_HKEY_MUTE_MASK, +}; + +#ifdef 
CONFIG_THINKPAD_ACPI_HOTKEY_POLL +struct tp_nvram_state { + u16 thinkpad_toggle:1; + u16 zoom_toggle:1; + u16 display_toggle:1; + u16 thinklight_toggle:1; + u16 hibernate_toggle:1; + u16 displayexp_toggle:1; + u16 display_state:1; + u16 brightness_toggle:1; + u16 volume_toggle:1; + u16 mute:1; + + u8 brightness_level; + u8 volume_level; +}; + +static struct task_struct *tpacpi_hotkey_task; +static u32 hotkey_source_mask; /* bit mask 0=ACPI,1=NVRAM */ +static int hotkey_poll_freq = 10; /* Hz */ +static struct mutex hotkey_thread_mutex; +static struct mutex hotkey_thread_data_mutex; +static unsigned int hotkey_config_change; + +#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */ + +#define hotkey_source_mask 0U + +#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */ + +static struct mutex hotkey_mutex; + +static enum { /* Reasons for waking up */ + TP_ACPI_WAKEUP_NONE = 0, /* None or unknown */ + TP_ACPI_WAKEUP_BAYEJ, /* Bay ejection request */ + TP_ACPI_WAKEUP_UNDOCK, /* Undock request */ +} hotkey_wakeup_reason; + +static int hotkey_autosleep_ack; + +static int hotkey_orig_status; +static u32 hotkey_orig_mask; +static u32 hotkey_all_mask; +static u32 hotkey_reserved_mask; +static u32 hotkey_mask; + +static unsigned int hotkey_report_mode; + +static u16 *hotkey_keycode_map; + +static struct attribute_set *hotkey_dev_attributes; + +#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL +#define HOTKEY_CONFIG_CRITICAL_START \ + do { \ + mutex_lock(&hotkey_thread_data_mutex); \ + hotkey_config_change++; \ + } while (0); +#define HOTKEY_CONFIG_CRITICAL_END \ + mutex_unlock(&hotkey_thread_data_mutex); +#else +#define HOTKEY_CONFIG_CRITICAL_START +#define HOTKEY_CONFIG_CRITICAL_END +#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */ + +/* HKEY.MHKG() return bits */ +#define TP_HOTKEY_TABLET_MASK (1 << 3) + +static int hotkey_get_wlsw(int *status) +{ + if (!acpi_evalf(hkey_handle, status, "WLSW", "d")) + return -EIO; + return 0; +} + +static int hotkey_get_tablet_mode(int *status) +{ + int s; + + if (!acpi_evalf(hkey_handle, &s, "MHKG", "d")) + return -EIO; + + *status = ((s & TP_HOTKEY_TABLET_MASK) != 0); + return 0; +} + +/* + * Call with hotkey_mutex held + */ +static int hotkey_mask_get(void) +{ + u32 m = 0; + + if (tp_features.hotkey_mask) { + if (!acpi_evalf(hkey_handle, &m, "DHKN", "d")) + return -EIO; + } + hotkey_mask = m | (hotkey_source_mask & hotkey_mask); + + return 0; +} + +/* + * Call with hotkey_mutex held + */ +static int hotkey_mask_set(u32 mask) +{ + int i; + int rc = 0; + + if (tp_features.hotkey_mask) { + if (!tp_warned.hotkey_mask_ff && + (mask == 0xffff || mask == 0xffffff || + mask == 0xffffffff)) { + tp_warned.hotkey_mask_ff = 1; + printk(TPACPI_NOTICE + "setting the hotkey mask to 0x%08x is likely " + "not the best way to go about it\n", mask); + printk(TPACPI_NOTICE + "please consider using the driver defaults, " + "and refer to up-to-date thinkpad-acpi " + "documentation\n"); + } + + HOTKEY_CONFIG_CRITICAL_START + for (i = 0; i < 32; i++) { + u32 m = 1 << i; + /* enable in firmware mask only keys not in NVRAM + * mode, but enable the key in the cached hotkey_mask + * regardless of mode, or the key will end up + * disabled by hotkey_mask_get() */ + if (!acpi_evalf(hkey_handle, + NULL, "MHKM", "vdd", i + 1, + !!((mask & ~hotkey_source_mask) & m))) { + rc = -EIO; + break; + } else { + hotkey_mask = (hotkey_mask & ~m) | (mask & m); + } + } + HOTKEY_CONFIG_CRITICAL_END + + /* hotkey_mask_get must be called unconditionally below */ + if (!hotkey_mask_get() && !rc && + (hotkey_mask & ~hotkey_source_mask) != + 
(mask & ~hotkey_source_mask)) { + printk(TPACPI_NOTICE + "requested hot key mask 0x%08x, but " + "firmware forced it to 0x%08x\n", + mask, hotkey_mask); + } + } else { +#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL + HOTKEY_CONFIG_CRITICAL_START + hotkey_mask = mask & hotkey_source_mask; + HOTKEY_CONFIG_CRITICAL_END + hotkey_mask_get(); + if (hotkey_mask != mask) { + printk(TPACPI_NOTICE + "requested hot key mask 0x%08x, " + "forced to 0x%08x (NVRAM poll mask is " + "0x%08x): no firmware mask support\n", + mask, hotkey_mask, hotkey_source_mask); + } +#else + hotkey_mask_get(); + rc = -ENXIO; +#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */ + } + + return rc; +} + +static int hotkey_status_get(int *status) +{ + if (!acpi_evalf(hkey_handle, status, "DHKC", "d")) + return -EIO; + + return 0; +} + +static int hotkey_status_set(int status) +{ + if (!acpi_evalf(hkey_handle, NULL, "MHKC", "vd", status)) + return -EIO; + + return 0; +} + +static void tpacpi_input_send_tabletsw(void) +{ + int state; + + if (tp_features.hotkey_tablet && + !hotkey_get_tablet_mode(&state)) { + mutex_lock(&tpacpi_inputdev_send_mutex); + + input_report_switch(tpacpi_inputdev, + SW_TABLET_MODE, !!state); + input_sync(tpacpi_inputdev); + + mutex_unlock(&tpacpi_inputdev_send_mutex); + } +} + +static void tpacpi_input_send_key(unsigned int scancode) +{ + unsigned int keycode; + + keycode = hotkey_keycode_map[scancode]; + + if (keycode != KEY_RESERVED) { + mutex_lock(&tpacpi_inputdev_send_mutex); + + input_report_key(tpacpi_inputdev, keycode, 1); + if (keycode == KEY_UNKNOWN) + input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, + scancode); + input_sync(tpacpi_inputdev); + + input_report_key(tpacpi_inputdev, keycode, 0); + if (keycode == KEY_UNKNOWN) + input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, + scancode); + input_sync(tpacpi_inputdev); + + mutex_unlock(&tpacpi_inputdev_send_mutex); + } +} + +#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL +static struct tp_acpi_drv_struct ibm_hotkey_acpidriver; + +static void tpacpi_hotkey_send_key(unsigned int scancode) +{ + tpacpi_input_send_key(scancode); + if (hotkey_report_mode < 2) { + acpi_bus_generate_proc_event(ibm_hotkey_acpidriver.device, + 0x80, 0x1001 + scancode); + } +} + +static void hotkey_read_nvram(struct tp_nvram_state *n, u32 m) +{ + u8 d; + + if (m & TP_NVRAM_HKEY_GROUP_HK2) { + d = nvram_read_byte(TP_NVRAM_ADDR_HK2); + n->thinkpad_toggle = !!(d & TP_NVRAM_MASK_HKT_THINKPAD); + n->zoom_toggle = !!(d & TP_NVRAM_MASK_HKT_ZOOM); + n->display_toggle = !!(d & TP_NVRAM_MASK_HKT_DISPLAY); + n->hibernate_toggle = !!(d & TP_NVRAM_MASK_HKT_HIBERNATE); + } + if (m & TP_ACPI_HKEY_THNKLGHT_MASK) { + d = nvram_read_byte(TP_NVRAM_ADDR_THINKLIGHT); + n->thinklight_toggle = !!(d & TP_NVRAM_MASK_THINKLIGHT); + } + if (m & TP_ACPI_HKEY_DISPXPAND_MASK) { + d = nvram_read_byte(TP_NVRAM_ADDR_VIDEO); + n->displayexp_toggle = + !!(d & TP_NVRAM_MASK_HKT_DISPEXPND); + } + if (m & TP_NVRAM_HKEY_GROUP_BRIGHTNESS) { + d = nvram_read_byte(TP_NVRAM_ADDR_BRIGHTNESS); + n->brightness_level = (d & TP_NVRAM_MASK_LEVEL_BRIGHTNESS) + >> TP_NVRAM_POS_LEVEL_BRIGHTNESS; + n->brightness_toggle = + !!(d & TP_NVRAM_MASK_HKT_BRIGHTNESS); + } + if (m & TP_NVRAM_HKEY_GROUP_VOLUME) { + d = nvram_read_byte(TP_NVRAM_ADDR_MIXER); + n->volume_level = (d & TP_NVRAM_MASK_LEVEL_VOLUME) + >> TP_NVRAM_POS_LEVEL_VOLUME; + n->mute = !!(d & TP_NVRAM_MASK_MUTE); + n->volume_toggle = !!(d & TP_NVRAM_MASK_HKT_VOLUME); + } +} + +#define TPACPI_COMPARE_KEY(__scancode, __member) \ + do { \ + if ((mask & (1 << __scancode)) && \ + oldn->__member != 
newn->__member) \ + tpacpi_hotkey_send_key(__scancode); \ + } while (0) + +#define TPACPI_MAY_SEND_KEY(__scancode) \ + do { if (mask & (1 << __scancode)) \ + tpacpi_hotkey_send_key(__scancode); } while (0) + +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, + struct tp_nvram_state *newn, + u32 mask) +{ + TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle); + TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle); + TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle); + TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF12, hibernate_toggle); + + TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNPAGEUP, thinklight_toggle); + + TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF8, displayexp_toggle); + + /* handle volume */ + if (oldn->volume_toggle != newn->volume_toggle) { + if (oldn->mute != newn->mute) { + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE); + } + if (oldn->volume_level > newn->volume_level) { + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN); + } else if (oldn->volume_level < newn->volume_level) { + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP); + } else if (oldn->mute == newn->mute) { + /* repeated key presses that didn't change state */ + if (newn->mute) { + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE); + } else if (newn->volume_level != 0) { + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP); + } else { + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN); + } + } + } + + /* handle brightness */ + if (oldn->brightness_toggle != newn->brightness_toggle) { + if (oldn->brightness_level < newn->brightness_level) { + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME); + } else if (oldn->brightness_level > newn->brightness_level) { + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND); + } else { + /* repeated key presses that didn't change state */ + if (newn->brightness_level != 0) { + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME); + } else { + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND); + } + } + } +} + +#undef TPACPI_COMPARE_KEY +#undef TPACPI_MAY_SEND_KEY + +static int hotkey_kthread(void *data) +{ + struct tp_nvram_state s[2]; + u32 mask; + unsigned int si, so; + unsigned long t; + unsigned int change_detector, must_reset; + + mutex_lock(&hotkey_thread_mutex); + + if (tpacpi_lifecycle == TPACPI_LIFE_EXITING) + goto exit; + + set_freezable(); + + so = 0; + si = 1; + t = 0; + + /* Initial state for compares */ + mutex_lock(&hotkey_thread_data_mutex); + change_detector = hotkey_config_change; + mask = hotkey_source_mask & hotkey_mask; + mutex_unlock(&hotkey_thread_data_mutex); + hotkey_read_nvram(&s[so], mask); + + while (!kthread_should_stop() && hotkey_poll_freq) { + if (t == 0) + t = 1000/hotkey_poll_freq; + t = msleep_interruptible(t); + if (unlikely(kthread_should_stop())) + break; + must_reset = try_to_freeze(); + if (t > 0 && !must_reset) + continue; + + mutex_lock(&hotkey_thread_data_mutex); + if (must_reset || hotkey_config_change != change_detector) { + /* forget old state on thaw or config change */ + si = so; + t = 0; + change_detector = hotkey_config_change; + } + mask = hotkey_source_mask & hotkey_mask; + mutex_unlock(&hotkey_thread_data_mutex); + + if (likely(mask)) { + hotkey_read_nvram(&s[si], mask); + if (likely(si != so)) { + hotkey_compare_and_issue_event(&s[so], &s[si], + mask); + } + } + + so = si; + si ^= 1; + } + +exit: + mutex_unlock(&hotkey_thread_mutex); + return 0; +} + +static void hotkey_poll_stop_sync(void) +{ + if (tpacpi_hotkey_task) { + if (frozen(tpacpi_hotkey_task) || + freezing(tpacpi_hotkey_task)) + 
thaw_process(tpacpi_hotkey_task); + + kthread_stop(tpacpi_hotkey_task); + tpacpi_hotkey_task = NULL; + mutex_lock(&hotkey_thread_mutex); + /* at this point, the thread did exit */ + mutex_unlock(&hotkey_thread_mutex); + } +} + +/* call with hotkey_mutex held */ +static void hotkey_poll_setup(int may_warn) +{ + if ((hotkey_source_mask & hotkey_mask) != 0 && + hotkey_poll_freq > 0 && + (tpacpi_inputdev->users > 0 || hotkey_report_mode < 2)) { + if (!tpacpi_hotkey_task) { + tpacpi_hotkey_task = kthread_run(hotkey_kthread, + NULL, TPACPI_NVRAM_KTHREAD_NAME); + if (IS_ERR(tpacpi_hotkey_task)) { + tpacpi_hotkey_task = NULL; + printk(TPACPI_ERR + "could not create kernel thread " + "for hotkey polling\n"); + } + } + } else { + hotkey_poll_stop_sync(); + if (may_warn && + hotkey_source_mask != 0 && hotkey_poll_freq == 0) { + printk(TPACPI_NOTICE + "hot keys 0x%08x require polling, " + "which is currently disabled\n", + hotkey_source_mask); + } + } +} + +static void hotkey_poll_setup_safe(int may_warn) +{ + mutex_lock(&hotkey_mutex); + hotkey_poll_setup(may_warn); + mutex_unlock(&hotkey_mutex); +} + +#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */ + +static void hotkey_poll_setup_safe(int __unused) +{ +} + +#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */ + +static int hotkey_inputdev_open(struct input_dev *dev) +{ + switch (tpacpi_lifecycle) { + case TPACPI_LIFE_INIT: + /* + * hotkey_init will call hotkey_poll_setup_safe + * at the appropriate moment + */ + return 0; + case TPACPI_LIFE_EXITING: + return -EBUSY; + case TPACPI_LIFE_RUNNING: + hotkey_poll_setup_safe(0); + return 0; + } + + /* Should only happen if tpacpi_lifecycle is corrupt */ + BUG(); + return -EBUSY; +} + +static void hotkey_inputdev_close(struct input_dev *dev) +{ + /* disable hotkey polling when possible */ + if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING) + hotkey_poll_setup_safe(0); +} + +/* sysfs hotkey enable ------------------------------------------------- */ +static ssize_t hotkey_enable_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int res, status; + + res = hotkey_status_get(&status); + if (res) + return res; + + return snprintf(buf, PAGE_SIZE, "%d\n", status); +} + +static ssize_t hotkey_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned long t; + int res; + + if (parse_strtoul(buf, 1, &t)) + return -EINVAL; + + res = hotkey_status_set(t); + + return (res) ? res : count; +} + +static struct device_attribute dev_attr_hotkey_enable = + __ATTR(hotkey_enable, S_IWUSR | S_IRUGO, + hotkey_enable_show, hotkey_enable_store); + +/* sysfs hotkey mask --------------------------------------------------- */ +static ssize_t hotkey_mask_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int res; + + if (mutex_lock_interruptible(&hotkey_mutex)) + return -ERESTARTSYS; + res = hotkey_mask_get(); + mutex_unlock(&hotkey_mutex); + + return (res)? + res : snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_mask); +} + +static ssize_t hotkey_mask_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned long t; + int res; + + if (parse_strtoul(buf, 0xffffffffUL, &t)) + return -EINVAL; + + if (mutex_lock_interruptible(&hotkey_mutex)) + return -ERESTARTSYS; + + res = hotkey_mask_set(t); + +#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL + hotkey_poll_setup(1); +#endif + + mutex_unlock(&hotkey_mutex); + + return (res) ? 
res : count; +} + +static struct device_attribute dev_attr_hotkey_mask = + __ATTR(hotkey_mask, S_IWUSR | S_IRUGO, + hotkey_mask_show, hotkey_mask_store); + +/* sysfs hotkey bios_enabled ------------------------------------------- */ +static ssize_t hotkey_bios_enabled_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_orig_status); +} + +static struct device_attribute dev_attr_hotkey_bios_enabled = + __ATTR(hotkey_bios_enabled, S_IRUGO, hotkey_bios_enabled_show, NULL); + +/* sysfs hotkey bios_mask ---------------------------------------------- */ +static ssize_t hotkey_bios_mask_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_orig_mask); +} + +static struct device_attribute dev_attr_hotkey_bios_mask = + __ATTR(hotkey_bios_mask, S_IRUGO, hotkey_bios_mask_show, NULL); + +/* sysfs hotkey all_mask ----------------------------------------------- */ +static ssize_t hotkey_all_mask_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "0x%08x\n", + hotkey_all_mask | hotkey_source_mask); +} + +static struct device_attribute dev_attr_hotkey_all_mask = + __ATTR(hotkey_all_mask, S_IRUGO, hotkey_all_mask_show, NULL); + +/* sysfs hotkey recommended_mask --------------------------------------- */ +static ssize_t hotkey_recommended_mask_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "0x%08x\n", + (hotkey_all_mask | hotkey_source_mask) + & ~hotkey_reserved_mask); +} + +static struct device_attribute dev_attr_hotkey_recommended_mask = + __ATTR(hotkey_recommended_mask, S_IRUGO, + hotkey_recommended_mask_show, NULL); + +#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL + +/* sysfs hotkey hotkey_source_mask ------------------------------------- */ +static ssize_t hotkey_source_mask_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_source_mask); +} + +static ssize_t hotkey_source_mask_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned long t; + + if (parse_strtoul(buf, 0xffffffffUL, &t) || + ((t & ~TPACPI_HKEY_NVRAM_KNOWN_MASK) != 0)) + return -EINVAL; + + if (mutex_lock_interruptible(&hotkey_mutex)) + return -ERESTARTSYS; + + HOTKEY_CONFIG_CRITICAL_START + hotkey_source_mask = t; + HOTKEY_CONFIG_CRITICAL_END + + hotkey_poll_setup(1); + + mutex_unlock(&hotkey_mutex); + + return count; +} + +static struct device_attribute dev_attr_hotkey_source_mask = + __ATTR(hotkey_source_mask, S_IWUSR | S_IRUGO, + hotkey_source_mask_show, hotkey_source_mask_store); + +/* sysfs hotkey hotkey_poll_freq --------------------------------------- */ +static ssize_t hotkey_poll_freq_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_poll_freq); +} + +static ssize_t hotkey_poll_freq_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned long t; + + if (parse_strtoul(buf, 25, &t)) + return -EINVAL; + + if (mutex_lock_interruptible(&hotkey_mutex)) + return -ERESTARTSYS; + + hotkey_poll_freq = t; + + hotkey_poll_setup(1); + mutex_unlock(&hotkey_mutex); + + return count; +} + +static struct device_attribute dev_attr_hotkey_poll_freq = + __ATTR(hotkey_poll_freq, S_IWUSR | S_IRUGO, + hotkey_poll_freq_show, hotkey_poll_freq_store); + 
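Both attributes above live on the thinkpad_acpi platform device, so the NVRAM poller can be retuned from userspace at runtime. A usage sketch (the sysfs paths assume the platform device keeps its usual "thinkpad_acpi" name):

    echo 0x00fb8000 > /sys/devices/platform/thinkpad_acpi/hotkey_source_mask
    echo 10 > /sys/devices/platform/thinkpad_acpi/hotkey_poll_freq

hotkey_source_mask_store() rejects bits outside TPACPI_HKEY_NVRAM_KNOWN_MASK, parse_strtoul() caps the polling frequency at 25 Hz, and writing 0 stops the polling kthread, since both store handlers call hotkey_poll_setup(1) themselves.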
+#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */ + +/* sysfs hotkey radio_sw (pollable) ------------------------------------ */ +static ssize_t hotkey_radio_sw_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int res, s; + res = hotkey_get_wlsw(&s); + if (res < 0) + return res; + + return snprintf(buf, PAGE_SIZE, "%d\n", !!s); +} + +static struct device_attribute dev_attr_hotkey_radio_sw = + __ATTR(hotkey_radio_sw, S_IRUGO, hotkey_radio_sw_show, NULL); + +static void hotkey_radio_sw_notify_change(void) +{ + if (tp_features.hotkey_wlsw) + sysfs_notify(&tpacpi_pdev->dev.kobj, NULL, + "hotkey_radio_sw"); +} + +/* sysfs hotkey tablet mode (pollable) --------------------------------- */ +static ssize_t hotkey_tablet_mode_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int res, s; + res = hotkey_get_tablet_mode(&s); + if (res < 0) + return res; + + return snprintf(buf, PAGE_SIZE, "%d\n", !!s); +} + +static struct device_attribute dev_attr_hotkey_tablet_mode = + __ATTR(hotkey_tablet_mode, S_IRUGO, hotkey_tablet_mode_show, NULL); + +static void hotkey_tablet_mode_notify_change(void) +{ + if (tp_features.hotkey_tablet) + sysfs_notify(&tpacpi_pdev->dev.kobj, NULL, + "hotkey_tablet_mode"); +} + +/* sysfs hotkey report_mode -------------------------------------------- */ +static ssize_t hotkey_report_mode_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", + (hotkey_report_mode != 0) ? hotkey_report_mode : 1); +} + +static struct device_attribute dev_attr_hotkey_report_mode = + __ATTR(hotkey_report_mode, S_IRUGO, hotkey_report_mode_show, NULL); + +/* sysfs wakeup reason (pollable) -------------------------------------- */ +static ssize_t hotkey_wakeup_reason_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_wakeup_reason); +} + +static struct device_attribute dev_attr_hotkey_wakeup_reason = + __ATTR(wakeup_reason, S_IRUGO, hotkey_wakeup_reason_show, NULL); + +static void hotkey_wakeup_reason_notify_change(void) +{ + if (tp_features.hotkey_mask) + sysfs_notify(&tpacpi_pdev->dev.kobj, NULL, + "wakeup_reason"); +} + +/* sysfs wakeup hotunplug_complete (pollable) -------------------------- */ +static ssize_t hotkey_wakeup_hotunplug_complete_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_autosleep_ack); +} + +static struct device_attribute dev_attr_hotkey_wakeup_hotunplug_complete = + __ATTR(wakeup_hotunplug_complete, S_IRUGO, + hotkey_wakeup_hotunplug_complete_show, NULL); + +static void hotkey_wakeup_hotunplug_complete_notify_change(void) +{ + if (tp_features.hotkey_mask) + sysfs_notify(&tpacpi_pdev->dev.kobj, NULL, + "wakeup_hotunplug_complete"); +} + +/* --------------------------------------------------------------------- */ + +static struct attribute *hotkey_attributes[] __initdata = { + &dev_attr_hotkey_enable.attr, + &dev_attr_hotkey_bios_enabled.attr, + &dev_attr_hotkey_report_mode.attr, +#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL + &dev_attr_hotkey_mask.attr, + &dev_attr_hotkey_all_mask.attr, + &dev_attr_hotkey_recommended_mask.attr, + &dev_attr_hotkey_source_mask.attr, + &dev_attr_hotkey_poll_freq.attr, +#endif +}; + +static struct attribute *hotkey_mask_attributes[] __initdata = { + &dev_attr_hotkey_bios_mask.attr, +#ifndef CONFIG_THINKPAD_ACPI_HOTKEY_POLL + &dev_attr_hotkey_mask.attr, + &dev_attr_hotkey_all_mask.attr, + 
&dev_attr_hotkey_recommended_mask.attr,
+#endif
+ &dev_attr_hotkey_wakeup_reason.attr,
+ &dev_attr_hotkey_wakeup_hotunplug_complete.attr,
+};
+
+static void bluetooth_update_rfk(void);
+static void wan_update_rfk(void);
+static void tpacpi_send_radiosw_update(void)
+{
+ int wlsw;
+
+ /* Sync these BEFORE sending any rfkill events */
+ if (tp_features.bluetooth)
+ bluetooth_update_rfk();
+ if (tp_features.wan)
+ wan_update_rfk();
+
+ if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&wlsw)) {
+ mutex_lock(&tpacpi_inputdev_send_mutex);
+
+ input_report_switch(tpacpi_inputdev,
+ SW_RFKILL_ALL, !!wlsw);
+ input_sync(tpacpi_inputdev);
+
+ mutex_unlock(&tpacpi_inputdev_send_mutex);
+ }
+ hotkey_radio_sw_notify_change();
+}
+
+static void hotkey_exit(void)
+{
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ hotkey_poll_stop_sync();
+#endif
+
+ if (hotkey_dev_attributes)
+ delete_attr_set(hotkey_dev_attributes, &tpacpi_pdev->dev.kobj);
+
+ kfree(hotkey_keycode_map);
+
+ if (tp_features.hotkey) {
+ dbg_printk(TPACPI_DBG_EXIT,
+ "restoring original hot key mask\n");
+ /* no short-circuit boolean operator below! */
+ if ((hotkey_mask_set(hotkey_orig_mask) |
+ hotkey_status_set(hotkey_orig_status)) != 0)
+ printk(TPACPI_ERR
+ "failed to restore hot key mask "
+ "to BIOS defaults\n");
+ }
+}
+
+static int __init hotkey_init(struct ibm_init_struct *iibm)
+{
+ /* Requirements for changing the default keymaps:
+ *
+ * 1. Many of the keys are mapped to KEY_RESERVED for very
+ * good reasons. Do not change them unless you have deep
+ * knowledge of the IBM and Lenovo ThinkPad firmware for
+ * the various ThinkPad models. The driver behaves
+ * differently for KEY_RESERVED: such keys have their
+ * hot key mask *unset* in mask_recommended, and also
+ * in the initial hot key mask programmed into the
+ * firmware at driver load time, which means the
+ * firmware may react very differently if you change
+ * them to something else;
+ *
+ * 2. You must be subscribed to the linux-thinkpad and
+ * ibm-acpi-devel mailing lists, and you should read the
+ * list archives since 2007 if you want to change the
+ * keymaps. This requirement exists so that you will
+ * know the past history of problems with the thinkpad-
+ * acpi driver keymaps, and also that you will be
+ * listening to any bug reports;
+ *
+ * 3. Do not send thinkpad-acpi specific patches directly
+ * for merging, *ever*. Send them to the linux-acpi
+ * mailing list for comments. Merging is to be done only
+ * through acpi-test and the ACPI maintainer.
+ *
+ * If the above is too much to ask, don't change the keymap.
+ * Ask the thinkpad-acpi maintainer to do it, instead.
+ */
+ static u16 ibm_keycode_map[] __initdata = {
+ /* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */
+ KEY_FN_F1, KEY_FN_F2, KEY_COFFEE, KEY_SLEEP,
+ KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
+ KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND,
+
+ /* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */
+ KEY_UNKNOWN, /* 0x0C: FN+BACKSPACE */
+ KEY_UNKNOWN, /* 0x0D: FN+INSERT */
+ KEY_UNKNOWN, /* 0x0E: FN+DELETE */
+
+ /* brightness: firmware always reacts to them, unless
+ * X.org did some tricks in the radeon BIOS scratch
+ * registers of *some* models */
+ KEY_RESERVED, /* 0x0F: FN+HOME (brightness up) */
+ KEY_RESERVED, /* 0x10: FN+END (brightness down) */
+
+ /* Thinklight: firmware always reacts to it */
+ KEY_RESERVED, /* 0x11: FN+PGUP (thinklight toggle) */
+
+ KEY_UNKNOWN, /* 0x12: FN+PGDOWN */
+ KEY_ZOOM, /* 0x13: FN+SPACE (zoom) */
+
+ /* Volume: firmware always reacts to it and reprograms
+ * the built-in *extra* mixer. Never map it to control
+ * another mixer by default. */
+ KEY_RESERVED, /* 0x14: VOLUME UP */
+ KEY_RESERVED, /* 0x15: VOLUME DOWN */
+ KEY_RESERVED, /* 0x16: MUTE */
+
+ KEY_VENDOR, /* 0x17: Thinkpad/AccessIBM/Lenovo */
+
+ /* (assignments unknown, please report if found) */
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ };
+ static u16 lenovo_keycode_map[] __initdata = {
+ /* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */
+ KEY_FN_F1, KEY_COFFEE, KEY_BATTERY, KEY_SLEEP,
+ KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
+ KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND,
+
+ /* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */
+ KEY_UNKNOWN, /* 0x0C: FN+BACKSPACE */
+ KEY_UNKNOWN, /* 0x0D: FN+INSERT */
+ KEY_UNKNOWN, /* 0x0E: FN+DELETE */
+
+ /* These either have to go through ACPI video, or
+ * behave as they do on the IBM ThinkPads, so don't ever
+ * enable them by default */
+ KEY_RESERVED, /* 0x0F: FN+HOME (brightness up) */
+ KEY_RESERVED, /* 0x10: FN+END (brightness down) */
+
+ KEY_RESERVED, /* 0x11: FN+PGUP (thinklight toggle) */
+
+ KEY_UNKNOWN, /* 0x12: FN+PGDOWN */
+ KEY_ZOOM, /* 0x13: FN+SPACE (zoom) */
+
+ /* Volume: z60/z61, T60 (BIOS version?): firmware always
+ * reacts to it and reprograms the built-in *extra* mixer.
+ * Never map it to control another mixer by default.
+ *
+ * T60?, T61, R60?, R61: firmware and EC try to send
+ * these over the regular keyboard, so these are no-ops,
+ * but there are still weird bugs re. MUTE, so do not
+ * change unless you get test reports from all Lenovo
+ * models. May cause the BIOS to interfere with the
+ * HDA mixer.
+ */ + KEY_RESERVED, /* 0x14: VOLUME UP */ + KEY_RESERVED, /* 0x15: VOLUME DOWN */ + KEY_RESERVED, /* 0x16: MUTE */ + + KEY_VENDOR, /* 0x17: Thinkpad/AccessIBM/Lenovo */ + + /* (assignments unknown, please report if found) */ + KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, + KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, + }; + +#define TPACPI_HOTKEY_MAP_LEN ARRAY_SIZE(ibm_keycode_map) +#define TPACPI_HOTKEY_MAP_SIZE sizeof(ibm_keycode_map) +#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(ibm_keycode_map[0]) + + int res, i; + int status; + int hkeyv; + + vdbg_printk(TPACPI_DBG_INIT, "initializing hotkey subdriver\n"); + + BUG_ON(!tpacpi_inputdev); + BUG_ON(tpacpi_inputdev->open != NULL || + tpacpi_inputdev->close != NULL); + + TPACPI_ACPIHANDLE_INIT(hkey); + mutex_init(&hotkey_mutex); + +#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL + mutex_init(&hotkey_thread_mutex); + mutex_init(&hotkey_thread_data_mutex); +#endif + + /* hotkey not supported on 570 */ + tp_features.hotkey = hkey_handle != NULL; + + vdbg_printk(TPACPI_DBG_INIT, "hotkeys are %s\n", + str_supported(tp_features.hotkey)); + + if (!tp_features.hotkey) + return 1; + + tpacpi_disable_brightness_delay(); + + hotkey_dev_attributes = create_attr_set(13, NULL); + if (!hotkey_dev_attributes) + return -ENOMEM; + res = add_many_to_attr_set(hotkey_dev_attributes, + hotkey_attributes, + ARRAY_SIZE(hotkey_attributes)); + if (res) + goto err_exit; + + /* mask not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p, + A30, R30, R31, T20-22, X20-21, X22-24. Detected by checking + for HKEY interface version 0x100 */ + if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) { + if ((hkeyv >> 8) != 1) { + printk(TPACPI_ERR "unknown version of the " + "HKEY interface: 0x%x\n", hkeyv); + printk(TPACPI_ERR "please report this to %s\n", + TPACPI_MAIL); + } else { + /* + * MHKV 0x100 in A31, R40, R40e, + * T4x, X31, and later + */ + tp_features.hotkey_mask = 1; + } + } + + vdbg_printk(TPACPI_DBG_INIT, "hotkey masks are %s\n", + str_supported(tp_features.hotkey_mask)); + + if (tp_features.hotkey_mask) { + if (!acpi_evalf(hkey_handle, &hotkey_all_mask, + "MHKA", "qd")) { + printk(TPACPI_ERR + "missing MHKA handler, " + "please report this to %s\n", + TPACPI_MAIL); + /* FN+F12, FN+F4, FN+F3 */ + hotkey_all_mask = 0x080cU; + } + } + + /* hotkey_source_mask *must* be zero for + * the first hotkey_mask_get */ + res = hotkey_status_get(&hotkey_orig_status); + if (res) + goto err_exit; + + if (tp_features.hotkey_mask) { + res = hotkey_mask_get(); + if (res) + goto err_exit; + + hotkey_orig_mask = hotkey_mask; + res = add_many_to_attr_set( + hotkey_dev_attributes, + hotkey_mask_attributes, + ARRAY_SIZE(hotkey_mask_attributes)); + if (res) + goto err_exit; + } + +#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL + if (tp_features.hotkey_mask) { + hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK + & ~hotkey_all_mask; + } else { + hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK; + } + + vdbg_printk(TPACPI_DBG_INIT, + "hotkey source mask 0x%08x, polling freq %d\n", + hotkey_source_mask, hotkey_poll_freq); +#endif + + /* Not all thinkpads have a hardware radio switch */ + if (acpi_evalf(hkey_handle, &status, "WLSW", "qd")) { + tp_features.hotkey_wlsw = 1; + printk(TPACPI_INFO + "radio switch found; radios are %s\n", + enabled(status, 0)); + } + if (tp_features.hotkey_wlsw) + res = add_to_attr_set(hotkey_dev_attributes, + &dev_attr_hotkey_radio_sw.attr); + + /* For X41t, X60t, X61t Tablets... 
*/ + if (!res && acpi_evalf(hkey_handle, &status, "MHKG", "qd")) { + tp_features.hotkey_tablet = 1; + printk(TPACPI_INFO + "possible tablet mode switch found; " + "ThinkPad in %s mode\n", + (status & TP_HOTKEY_TABLET_MASK)? + "tablet" : "laptop"); + res = add_to_attr_set(hotkey_dev_attributes, + &dev_attr_hotkey_tablet_mode.attr); + } + + if (!res) + res = register_attr_set_with_sysfs( + hotkey_dev_attributes, + &tpacpi_pdev->dev.kobj); + if (res) + goto err_exit; + + /* Set up key map */ + + hotkey_keycode_map = kmalloc(TPACPI_HOTKEY_MAP_SIZE, + GFP_KERNEL); + if (!hotkey_keycode_map) { + printk(TPACPI_ERR + "failed to allocate memory for key map\n"); + res = -ENOMEM; + goto err_exit; + } + + if (thinkpad_id.vendor == PCI_VENDOR_ID_LENOVO) { + dbg_printk(TPACPI_DBG_INIT, + "using Lenovo default hot key map\n"); + memcpy(hotkey_keycode_map, &lenovo_keycode_map, + TPACPI_HOTKEY_MAP_SIZE); + } else { + dbg_printk(TPACPI_DBG_INIT, + "using IBM default hot key map\n"); + memcpy(hotkey_keycode_map, &ibm_keycode_map, + TPACPI_HOTKEY_MAP_SIZE); + } + + set_bit(EV_KEY, tpacpi_inputdev->evbit); + set_bit(EV_MSC, tpacpi_inputdev->evbit); + set_bit(MSC_SCAN, tpacpi_inputdev->mscbit); + tpacpi_inputdev->keycodesize = TPACPI_HOTKEY_MAP_TYPESIZE; + tpacpi_inputdev->keycodemax = TPACPI_HOTKEY_MAP_LEN; + tpacpi_inputdev->keycode = hotkey_keycode_map; + for (i = 0; i < TPACPI_HOTKEY_MAP_LEN; i++) { + if (hotkey_keycode_map[i] != KEY_RESERVED) { + set_bit(hotkey_keycode_map[i], + tpacpi_inputdev->keybit); + } else { + if (i < sizeof(hotkey_reserved_mask)*8) + hotkey_reserved_mask |= 1 << i; + } + } + + if (tp_features.hotkey_wlsw) { + set_bit(EV_SW, tpacpi_inputdev->evbit); + set_bit(SW_RFKILL_ALL, tpacpi_inputdev->swbit); + } + if (tp_features.hotkey_tablet) { + set_bit(EV_SW, tpacpi_inputdev->evbit); + set_bit(SW_TABLET_MODE, tpacpi_inputdev->swbit); + } + + /* Do not issue duplicate brightness change events to + * userspace */ + if (!tp_features.bright_acpimode) + /* update bright_acpimode... */ + tpacpi_check_std_acpi_brightness_support(); + + if (tp_features.bright_acpimode) { + printk(TPACPI_INFO + "This ThinkPad has standard ACPI backlight " + "brightness control, supported by the ACPI " + "video driver\n"); + printk(TPACPI_NOTICE + "Disabling thinkpad-acpi brightness events " + "by default...\n"); + + /* The hotkey_reserved_mask change below is not + * necessary while the keys are at KEY_RESERVED in the + * default map, but better safe than sorry, leave it + * here as a marker of what we have to do, especially + * when we finally become able to set this at runtime + * on response to X.org requests */ + hotkey_reserved_mask |= + (1 << TP_ACPI_HOTKEYSCAN_FNHOME) + | (1 << TP_ACPI_HOTKEYSCAN_FNEND); + } + + dbg_printk(TPACPI_DBG_INIT, "enabling hot key handling\n"); + res = hotkey_status_set(1); + if (res) { + hotkey_exit(); + return res; + } + res = hotkey_mask_set(((hotkey_all_mask | hotkey_source_mask) + & ~hotkey_reserved_mask) + | hotkey_orig_mask); + if (res < 0 && res != -ENXIO) { + hotkey_exit(); + return res; + } + + dbg_printk(TPACPI_DBG_INIT, + "legacy hot key reporting over procfs %s\n", + (hotkey_report_mode < 2) ? + "enabled" : "disabled"); + + tpacpi_inputdev->open = &hotkey_inputdev_open; + tpacpi_inputdev->close = &hotkey_inputdev_close; + + hotkey_poll_setup_safe(1); + tpacpi_send_radiosw_update(); + tpacpi_input_send_tabletsw(); + + return 0; + +err_exit: + delete_attr_set(hotkey_dev_attributes, &tpacpi_pdev->dev.kobj); + hotkey_dev_attributes = NULL; + + return (res < 0)? 
res : 1; +} + +static void hotkey_notify(struct ibm_struct *ibm, u32 event) +{ + u32 hkey; + unsigned int scancode; + int send_acpi_ev; + int ignore_acpi_ev; + int unk_ev; + + if (event != 0x80) { + printk(TPACPI_ERR + "unknown HKEY notification event %d\n", event); + /* forward it to userspace, maybe it knows how to handle it */ + acpi_bus_generate_netlink_event( + ibm->acpi->device->pnp.device_class, + ibm->acpi->device->dev.bus_id, + event, 0); + return; + } + + while (1) { + if (!acpi_evalf(hkey_handle, &hkey, "MHKP", "d")) { + printk(TPACPI_ERR "failed to retrieve HKEY event\n"); + return; + } + + if (hkey == 0) { + /* queue empty */ + return; + } + + send_acpi_ev = 1; + ignore_acpi_ev = 0; + unk_ev = 0; + + switch (hkey >> 12) { + case 1: + /* 0x1000-0x1FFF: key presses */ + scancode = hkey & 0xfff; + if (scancode > 0 && scancode < 0x21) { + scancode--; + if (!(hotkey_source_mask & (1 << scancode))) { + tpacpi_input_send_key(scancode); + send_acpi_ev = 0; + } else { + ignore_acpi_ev = 1; + } + } else { + unk_ev = 1; + } + break; + case 2: + /* Wakeup reason */ + switch (hkey) { + case 0x2304: /* suspend, undock */ + case 0x2404: /* hibernation, undock */ + hotkey_wakeup_reason = TP_ACPI_WAKEUP_UNDOCK; + ignore_acpi_ev = 1; + break; + case 0x2305: /* suspend, bay eject */ + case 0x2405: /* hibernation, bay eject */ + hotkey_wakeup_reason = TP_ACPI_WAKEUP_BAYEJ; + ignore_acpi_ev = 1; + break; + default: + unk_ev = 1; + } + if (hotkey_wakeup_reason != TP_ACPI_WAKEUP_NONE) { + printk(TPACPI_INFO + "woke up due to a hot-unplug " + "request...\n"); + hotkey_wakeup_reason_notify_change(); + } + break; + case 3: + /* bay-related wakeups */ + if (hkey == 0x3003) { + hotkey_autosleep_ack = 1; + printk(TPACPI_INFO + "bay ejected\n"); + hotkey_wakeup_hotunplug_complete_notify_change(); + } else { + unk_ev = 1; + } + break; + case 4: + /* dock-related wakeups */ + if (hkey == 0x4003) { + hotkey_autosleep_ack = 1; + printk(TPACPI_INFO + "undocked\n"); + hotkey_wakeup_hotunplug_complete_notify_change(); + } else { + unk_ev = 1; + } + break; + case 5: + /* 0x5000-0x5FFF: human interface helpers */ + switch (hkey) { + case 0x5010: /* Lenovo new BIOS: brightness changed */ + case 0x500b: /* X61t: tablet pen inserted into bay */ + case 0x500c: /* X61t: tablet pen removed from bay */ + break; + case 0x5009: /* X41t-X61t: swivel up (tablet mode) */ + case 0x500a: /* X41t-X61t: swivel down (normal mode) */ + tpacpi_input_send_tabletsw(); + hotkey_tablet_mode_notify_change(); + send_acpi_ev = 0; + break; + case 0x5001: + case 0x5002: + /* LID switch events. Do not propagate */ + ignore_acpi_ev = 1; + break; + default: + unk_ev = 1; + } + break; + case 7: + /* 0x7000-0x7FFF: misc */ + if (tp_features.hotkey_wlsw && hkey == 0x7000) { + tpacpi_send_radiosw_update(); + send_acpi_ev = 0; + break; + } + /* fallthrough to default */ + default: + unk_ev = 1; + } + if (unk_ev) { + printk(TPACPI_NOTICE + "unhandled HKEY event 0x%04x\n", hkey); + } + + /* Legacy events */ + if (!ignore_acpi_ev && + (send_acpi_ev || hotkey_report_mode < 2)) { + acpi_bus_generate_proc_event(ibm->acpi->device, + event, hkey); + } + + /* netlink events */ + if (!ignore_acpi_ev && send_acpi_ev) { + acpi_bus_generate_netlink_event( + ibm->acpi->device->pnp.device_class, + ibm->acpi->device->dev.bus_id, + event, hkey); + } + } +} + +static void hotkey_suspend(pm_message_t state) +{ + /* Do these on suspend, we get the events on early resume! 
*/ + hotkey_wakeup_reason = TP_ACPI_WAKEUP_NONE; + hotkey_autosleep_ack = 0; +} + +static void hotkey_resume(void) +{ + tpacpi_disable_brightness_delay(); + + if (hotkey_mask_get()) + printk(TPACPI_ERR + "error while trying to read hot key mask " + "from firmware\n"); + tpacpi_send_radiosw_update(); + hotkey_tablet_mode_notify_change(); + hotkey_wakeup_reason_notify_change(); + hotkey_wakeup_hotunplug_complete_notify_change(); + hotkey_poll_setup_safe(0); +} + +/* procfs -------------------------------------------------------------- */ +static int hotkey_read(char *p) +{ + int res, status; + int len = 0; + + if (!tp_features.hotkey) { + len += sprintf(p + len, "status:\t\tnot supported\n"); + return len; + } + + if (mutex_lock_interruptible(&hotkey_mutex)) + return -ERESTARTSYS; + res = hotkey_status_get(&status); + if (!res) + res = hotkey_mask_get(); + mutex_unlock(&hotkey_mutex); + if (res) + return res; + + len += sprintf(p + len, "status:\t\t%s\n", enabled(status, 0)); + if (tp_features.hotkey_mask) { + len += sprintf(p + len, "mask:\t\t0x%08x\n", hotkey_mask); + len += sprintf(p + len, + "commands:\tenable, disable, reset, <mask>\n"); + } else { + len += sprintf(p + len, "mask:\t\tnot supported\n"); + len += sprintf(p + len, "commands:\tenable, disable, reset\n"); + } + + return len; +} + +static int hotkey_write(char *buf) +{ + int res, status; + u32 mask; + char *cmd; + + if (!tp_features.hotkey) + return -ENODEV; + + if (mutex_lock_interruptible(&hotkey_mutex)) + return -ERESTARTSYS; + + status = -1; + mask = hotkey_mask; + + res = 0; + while ((cmd = next_cmd(&buf))) { + if (strlencmp(cmd, "enable") == 0) { + status = 1; + } else if (strlencmp(cmd, "disable") == 0) { + status = 0; + } else if (strlencmp(cmd, "reset") == 0) { + status = hotkey_orig_status; + mask = hotkey_orig_mask; + } else if (sscanf(cmd, "0x%x", &mask) == 1) { + /* mask set */ + } else if (sscanf(cmd, "%x", &mask) == 1) { + /* mask set */ + } else { + res = -EINVAL; + goto errexit; + } + } + if (status != -1) + res = hotkey_status_set(status); + + if (!res && mask != hotkey_mask) + res = hotkey_mask_set(mask); + +errexit: + mutex_unlock(&hotkey_mutex); + return res; +} + +static const struct acpi_device_id ibm_htk_device_ids[] = { + {TPACPI_ACPI_HKEY_HID, 0}, + {"", 0}, +}; + +static struct tp_acpi_drv_struct ibm_hotkey_acpidriver = { + .hid = ibm_htk_device_ids, + .notify = hotkey_notify, + .handle = &hkey_handle, + .type = ACPI_DEVICE_NOTIFY, +}; + +static struct ibm_struct hotkey_driver_data = { + .name = "hotkey", + .read = hotkey_read, + .write = hotkey_write, + .exit = hotkey_exit, + .resume = hotkey_resume, + .suspend = hotkey_suspend, + .acpi = &ibm_hotkey_acpidriver, +}; + +/************************************************************************* + * Bluetooth subdriver + */ + +enum { + /* ACPI GBDC/SBDC bits */ + TP_ACPI_BLUETOOTH_HWPRESENT = 0x01, /* Bluetooth hw available */ + TP_ACPI_BLUETOOTH_RADIOSSW = 0x02, /* Bluetooth radio enabled */ + TP_ACPI_BLUETOOTH_UNK = 0x04, /* unknown function */ +}; + +static struct rfkill *tpacpi_bluetooth_rfkill; + +static int bluetooth_get_radiosw(void) +{ + int status; + + if (!tp_features.bluetooth) + return -ENODEV; + + /* WLSW overrides bluetooth in firmware/hardware, reflect that */ + if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status) + return RFKILL_STATE_HARD_BLOCKED; + + if (!acpi_evalf(hkey_handle, &status, "GBDC", "d")) + return -EIO; + + return ((status & TP_ACPI_BLUETOOTH_RADIOSSW) != 0) ? 
+ RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED; +} + +static void bluetooth_update_rfk(void) +{ + int status; + + if (!tpacpi_bluetooth_rfkill) + return; + + status = bluetooth_get_radiosw(); + if (status < 0) + return; + rfkill_force_state(tpacpi_bluetooth_rfkill, status); +} + +static int bluetooth_set_radiosw(int radio_on, int update_rfk) +{ + int status; + + if (!tp_features.bluetooth) + return -ENODEV; + + /* WLSW overrides bluetooth in firmware/hardware, but there is no + * reason to risk weird behaviour. */ + if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status + && radio_on) + return -EPERM; + + if (!acpi_evalf(hkey_handle, &status, "GBDC", "d")) + return -EIO; + if (radio_on) + status |= TP_ACPI_BLUETOOTH_RADIOSSW; + else + status &= ~TP_ACPI_BLUETOOTH_RADIOSSW; + if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status)) + return -EIO; + + if (update_rfk) + bluetooth_update_rfk(); + + return 0; +} + +/* sysfs bluetooth enable ---------------------------------------------- */ +static ssize_t bluetooth_enable_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int status; + + status = bluetooth_get_radiosw(); + if (status < 0) + return status; + + return snprintf(buf, PAGE_SIZE, "%d\n", + (status == RFKILL_STATE_UNBLOCKED) ? 1 : 0); +} + +static ssize_t bluetooth_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned long t; + int res; + + if (parse_strtoul(buf, 1, &t)) + return -EINVAL; + + res = bluetooth_set_radiosw(t, 1); + + return (res) ? res : count; +} + +static struct device_attribute dev_attr_bluetooth_enable = + __ATTR(bluetooth_enable, S_IWUSR | S_IRUGO, + bluetooth_enable_show, bluetooth_enable_store); + +/* --------------------------------------------------------------------- */ + +static struct attribute *bluetooth_attributes[] = { + &dev_attr_bluetooth_enable.attr, + NULL +}; + +static const struct attribute_group bluetooth_attr_group = { + .attrs = bluetooth_attributes, +}; + +static int tpacpi_bluetooth_rfk_get(void *data, enum rfkill_state *state) +{ + int bts = bluetooth_get_radiosw(); + + if (bts < 0) + return bts; + + *state = bts; + return 0; +} + +static int tpacpi_bluetooth_rfk_set(void *data, enum rfkill_state state) +{ + return bluetooth_set_radiosw((state == RFKILL_STATE_UNBLOCKED), 0); +} + +static void bluetooth_exit(void) +{ + if (tpacpi_bluetooth_rfkill) + rfkill_unregister(tpacpi_bluetooth_rfkill); + + sysfs_remove_group(&tpacpi_pdev->dev.kobj, + &bluetooth_attr_group); +} + +static int __init bluetooth_init(struct ibm_init_struct *iibm) +{ + int res; + int status = 0; + + vdbg_printk(TPACPI_DBG_INIT, "initializing bluetooth subdriver\n"); + + TPACPI_ACPIHANDLE_INIT(hkey); + + /* bluetooth not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p, + G4x, R30, R31, R40e, R50e, T20-22, X20-21 */ + tp_features.bluetooth = hkey_handle && + acpi_evalf(hkey_handle, &status, "GBDC", "qd"); + + vdbg_printk(TPACPI_DBG_INIT, "bluetooth is %s, status 0x%02x\n", + str_supported(tp_features.bluetooth), + status); + + if (tp_features.bluetooth && + !(status & TP_ACPI_BLUETOOTH_HWPRESENT)) { + /* no bluetooth hardware present in system */ + tp_features.bluetooth = 0; + dbg_printk(TPACPI_DBG_INIT, + "bluetooth hardware not installed\n"); + } + + if (!tp_features.bluetooth) + return 1; + + res = sysfs_create_group(&tpacpi_pdev->dev.kobj, + &bluetooth_attr_group); + if (res) + return res; + + res = tpacpi_new_rfkill(TPACPI_RFK_BLUETOOTH_SW_ID, + 
&tpacpi_bluetooth_rfkill,
+				RFKILL_TYPE_BLUETOOTH,
+				"tpacpi_bluetooth_sw",
+				tpacpi_bluetooth_rfk_set,
+				tpacpi_bluetooth_rfk_get);
+	if (res) {
+		bluetooth_exit();
+		return res;
+	}
+
+	return 0;
+}
+
+/* procfs -------------------------------------------------------------- */
+static int bluetooth_read(char *p)
+{
+	int len = 0;
+	int status = bluetooth_get_radiosw();
+
+	if (!tp_features.bluetooth)
+		len += sprintf(p + len, "status:\t\tnot supported\n");
+	else {
+		len += sprintf(p + len, "status:\t\t%s\n",
+				(status == RFKILL_STATE_UNBLOCKED) ?
+					"enabled" : "disabled");
+		len += sprintf(p + len, "commands:\tenable, disable\n");
+	}
+
+	return len;
+}
+
+static int bluetooth_write(char *buf)
+{
+	char *cmd;
+
+	if (!tp_features.bluetooth)
+		return -ENODEV;
+
+	while ((cmd = next_cmd(&buf))) {
+		if (strlencmp(cmd, "enable") == 0) {
+			bluetooth_set_radiosw(1, 1);
+		} else if (strlencmp(cmd, "disable") == 0) {
+			bluetooth_set_radiosw(0, 1);
+		} else
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct ibm_struct bluetooth_driver_data = {
+	.name = "bluetooth",
+	.read = bluetooth_read,
+	.write = bluetooth_write,
+	.exit = bluetooth_exit,
+};
+
+/*************************************************************************
+ * Wan subdriver
+ */
+
+enum {
+	/* ACPI GWAN/SWAN bits */
+	TP_ACPI_WANCARD_HWPRESENT	= 0x01,	/* Wan hw available */
+	TP_ACPI_WANCARD_RADIOSSW	= 0x02,	/* Wan radio enabled */
+	TP_ACPI_WANCARD_UNK		= 0x04,	/* unknown function */
+};
+
+static struct rfkill *tpacpi_wan_rfkill;
+
+static int wan_get_radiosw(void)
+{
+	int status;
+
+	if (!tp_features.wan)
+		return -ENODEV;
+
+	/* WLSW overrides WWAN in firmware/hardware, reflect that */
+	if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status)
+		return RFKILL_STATE_HARD_BLOCKED;
+
+	if (!acpi_evalf(hkey_handle, &status, "GWAN", "d"))
+		return -EIO;
+
+	return ((status & TP_ACPI_WANCARD_RADIOSSW) != 0) ?
+		RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
+}
+
+static void wan_update_rfk(void)
+{
+	int status;
+
+	if (!tpacpi_wan_rfkill)
+		return;
+
+	status = wan_get_radiosw();
+	if (status < 0)
+		return;
+	rfkill_force_state(tpacpi_wan_rfkill, status);
+}
+
+static int wan_set_radiosw(int radio_on, int update_rfk)
+{
+	int status;
+
+	if (!tp_features.wan)
+		return -ENODEV;
+
+	/* WLSW overrides WWAN in firmware/hardware, but there is no
+	 * reason to risk weird behaviour. */
+	if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status
+	    && radio_on)
+		return -EPERM;
+
+	if (!acpi_evalf(hkey_handle, &status, "GWAN", "d"))
+		return -EIO;
+	if (radio_on)
+		status |= TP_ACPI_WANCARD_RADIOSSW;
+	else
+		status &= ~TP_ACPI_WANCARD_RADIOSSW;
+	if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status))
+		return -EIO;
+
+	if (update_rfk)
+		wan_update_rfk();
+
+	return 0;
+}
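+
+/*
+ * Editor's illustrative sketch -- NOT part of the upstream driver.
+ * wan_set_radiosw() ultimately backs the sysfs attribute defined just
+ * below; assuming the usual thinkpad_acpi platform device path (an
+ * assumption, check your system), toggling the WWAN radio from
+ * userspace could look like:
+ *
+ *   #include <stdio.h>
+ *
+ *   int main(void)
+ *   {
+ *           FILE *f = fopen(
+ *               "/sys/devices/platform/thinkpad_acpi/wwan_enable", "w");
+ *           if (!f)
+ *                   return 1;
+ *           fputs("1\n", f);   // "1" = radio on, "0" = radio off
+ *           return fclose(f) ? 1 : 0;
+ *   }
+ */
+
+/* sysfs wan enable ---------------------------------------------------- */
+static ssize_t wan_enable_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	int status;
+
+	status = wan_get_radiosw();
+	if (status < 0)
+		return status;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			(status == RFKILL_STATE_UNBLOCKED) ? 1 : 0);
+}
+
+static ssize_t wan_enable_store(struct device *dev,
+			    struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	unsigned long t;
+	int res;
+
+	if (parse_strtoul(buf, 1, &t))
+		return -EINVAL;
+
+	res = wan_set_radiosw(t, 1);
+
+	return (res) ?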
res : count; +} + +static struct device_attribute dev_attr_wan_enable = + __ATTR(wwan_enable, S_IWUSR | S_IRUGO, + wan_enable_show, wan_enable_store); + +/* --------------------------------------------------------------------- */ + +static struct attribute *wan_attributes[] = { + &dev_attr_wan_enable.attr, + NULL +}; + +static const struct attribute_group wan_attr_group = { + .attrs = wan_attributes, +}; + +static int tpacpi_wan_rfk_get(void *data, enum rfkill_state *state) +{ + int wans = wan_get_radiosw(); + + if (wans < 0) + return wans; + + *state = wans; + return 0; +} + +static int tpacpi_wan_rfk_set(void *data, enum rfkill_state state) +{ + return wan_set_radiosw((state == RFKILL_STATE_UNBLOCKED), 0); +} + +static void wan_exit(void) +{ + if (tpacpi_wan_rfkill) + rfkill_unregister(tpacpi_wan_rfkill); + + sysfs_remove_group(&tpacpi_pdev->dev.kobj, + &wan_attr_group); +} + +static int __init wan_init(struct ibm_init_struct *iibm) +{ + int res; + int status = 0; + + vdbg_printk(TPACPI_DBG_INIT, "initializing wan subdriver\n"); + + TPACPI_ACPIHANDLE_INIT(hkey); + + tp_features.wan = hkey_handle && + acpi_evalf(hkey_handle, &status, "GWAN", "qd"); + + vdbg_printk(TPACPI_DBG_INIT, "wan is %s, status 0x%02x\n", + str_supported(tp_features.wan), + status); + + if (tp_features.wan && + !(status & TP_ACPI_WANCARD_HWPRESENT)) { + /* no wan hardware present in system */ + tp_features.wan = 0; + dbg_printk(TPACPI_DBG_INIT, + "wan hardware not installed\n"); + } + + if (!tp_features.wan) + return 1; + + res = sysfs_create_group(&tpacpi_pdev->dev.kobj, + &wan_attr_group); + if (res) + return res; + + res = tpacpi_new_rfkill(TPACPI_RFK_WWAN_SW_ID, + &tpacpi_wan_rfkill, + RFKILL_TYPE_WWAN, + "tpacpi_wwan_sw", + tpacpi_wan_rfk_set, + tpacpi_wan_rfk_get); + if (res) { + wan_exit(); + return res; + } + + return 0; +} + +/* procfs -------------------------------------------------------------- */ +static int wan_read(char *p) +{ + int len = 0; + int status = wan_get_radiosw(); + + if (!tp_features.wan) + len += sprintf(p + len, "status:\t\tnot supported\n"); + else { + len += sprintf(p + len, "status:\t\t%s\n", + (status == RFKILL_STATE_UNBLOCKED) ? 
+ "enabled" : "disabled"); + len += sprintf(p + len, "commands:\tenable, disable\n"); + } + + return len; +} + +static int wan_write(char *buf) +{ + char *cmd; + + if (!tp_features.wan) + return -ENODEV; + + while ((cmd = next_cmd(&buf))) { + if (strlencmp(cmd, "enable") == 0) { + wan_set_radiosw(1, 1); + } else if (strlencmp(cmd, "disable") == 0) { + wan_set_radiosw(0, 1); + } else + return -EINVAL; + } + + return 0; +} + +static struct ibm_struct wan_driver_data = { + .name = "wan", + .read = wan_read, + .write = wan_write, + .exit = wan_exit, +}; + +/************************************************************************* + * Video subdriver + */ + +#ifdef CONFIG_THINKPAD_ACPI_VIDEO + +enum video_access_mode { + TPACPI_VIDEO_NONE = 0, + TPACPI_VIDEO_570, /* 570 */ + TPACPI_VIDEO_770, /* 600e/x, 770e, 770x */ + TPACPI_VIDEO_NEW, /* all others */ +}; + +enum { /* video status flags, based on VIDEO_570 */ + TP_ACPI_VIDEO_S_LCD = 0x01, /* LCD output enabled */ + TP_ACPI_VIDEO_S_CRT = 0x02, /* CRT output enabled */ + TP_ACPI_VIDEO_S_DVI = 0x08, /* DVI output enabled */ +}; + +enum { /* TPACPI_VIDEO_570 constants */ + TP_ACPI_VIDEO_570_PHSCMD = 0x87, /* unknown magic constant :( */ + TP_ACPI_VIDEO_570_PHSMASK = 0x03, /* PHS bits that map to + * video_status_flags */ + TP_ACPI_VIDEO_570_PHS2CMD = 0x8b, /* unknown magic constant :( */ + TP_ACPI_VIDEO_570_PHS2SET = 0x80, /* unknown magic constant :( */ +}; + +static enum video_access_mode video_supported; +static int video_orig_autosw; + +static int video_autosw_get(void); +static int video_autosw_set(int enable); + +TPACPI_HANDLE(vid2, root, "\\_SB.PCI0.AGPB.VID"); /* G41 */ + +static int __init video_init(struct ibm_init_struct *iibm) +{ + int ivga; + + vdbg_printk(TPACPI_DBG_INIT, "initializing video subdriver\n"); + + TPACPI_ACPIHANDLE_INIT(vid); + TPACPI_ACPIHANDLE_INIT(vid2); + + if (vid2_handle && acpi_evalf(NULL, &ivga, "\\IVGA", "d") && ivga) + /* G41, assume IVGA doesn't change */ + vid_handle = vid2_handle; + + if (!vid_handle) + /* video switching not supported on R30, R31 */ + video_supported = TPACPI_VIDEO_NONE; + else if (acpi_evalf(vid_handle, &video_orig_autosw, "SWIT", "qd")) + /* 570 */ + video_supported = TPACPI_VIDEO_570; + else if (acpi_evalf(vid_handle, &video_orig_autosw, "^VADL", "qd")) + /* 600e/x, 770e, 770x */ + video_supported = TPACPI_VIDEO_770; + else + /* all others */ + video_supported = TPACPI_VIDEO_NEW; + + vdbg_printk(TPACPI_DBG_INIT, "video is %s, mode %d\n", + str_supported(video_supported != TPACPI_VIDEO_NONE), + video_supported); + + return (video_supported != TPACPI_VIDEO_NONE)? 
0 : 1; +} + +static void video_exit(void) +{ + dbg_printk(TPACPI_DBG_EXIT, + "restoring original video autoswitch mode\n"); + if (video_autosw_set(video_orig_autosw)) + printk(TPACPI_ERR "error while trying to restore original " + "video autoswitch mode\n"); +} + +static int video_outputsw_get(void) +{ + int status = 0; + int i; + + switch (video_supported) { + case TPACPI_VIDEO_570: + if (!acpi_evalf(NULL, &i, "\\_SB.PHS", "dd", + TP_ACPI_VIDEO_570_PHSCMD)) + return -EIO; + status = i & TP_ACPI_VIDEO_570_PHSMASK; + break; + case TPACPI_VIDEO_770: + if (!acpi_evalf(NULL, &i, "\\VCDL", "d")) + return -EIO; + if (i) + status |= TP_ACPI_VIDEO_S_LCD; + if (!acpi_evalf(NULL, &i, "\\VCDC", "d")) + return -EIO; + if (i) + status |= TP_ACPI_VIDEO_S_CRT; + break; + case TPACPI_VIDEO_NEW: + if (!acpi_evalf(NULL, NULL, "\\VUPS", "vd", 1) || + !acpi_evalf(NULL, &i, "\\VCDC", "d")) + return -EIO; + if (i) + status |= TP_ACPI_VIDEO_S_CRT; + + if (!acpi_evalf(NULL, NULL, "\\VUPS", "vd", 0) || + !acpi_evalf(NULL, &i, "\\VCDL", "d")) + return -EIO; + if (i) + status |= TP_ACPI_VIDEO_S_LCD; + if (!acpi_evalf(NULL, &i, "\\VCDD", "d")) + return -EIO; + if (i) + status |= TP_ACPI_VIDEO_S_DVI; + break; + default: + return -ENOSYS; + } + + return status; +} + +static int video_outputsw_set(int status) +{ + int autosw; + int res = 0; + + switch (video_supported) { + case TPACPI_VIDEO_570: + res = acpi_evalf(NULL, NULL, + "\\_SB.PHS2", "vdd", + TP_ACPI_VIDEO_570_PHS2CMD, + status | TP_ACPI_VIDEO_570_PHS2SET); + break; + case TPACPI_VIDEO_770: + autosw = video_autosw_get(); + if (autosw < 0) + return autosw; + + res = video_autosw_set(1); + if (res) + return res; + res = acpi_evalf(vid_handle, NULL, + "ASWT", "vdd", status * 0x100, 0); + if (!autosw && video_autosw_set(autosw)) { + printk(TPACPI_ERR + "video auto-switch left enabled due to error\n"); + return -EIO; + } + break; + case TPACPI_VIDEO_NEW: + res = acpi_evalf(NULL, NULL, "\\VUPS", "vd", 0x80) && + acpi_evalf(NULL, NULL, "\\VSDS", "vdd", status, 1); + break; + default: + return -ENOSYS; + } + + return (res)? 0 : -EIO; +} + +static int video_autosw_get(void) +{ + int autosw = 0; + + switch (video_supported) { + case TPACPI_VIDEO_570: + if (!acpi_evalf(vid_handle, &autosw, "SWIT", "d")) + return -EIO; + break; + case TPACPI_VIDEO_770: + case TPACPI_VIDEO_NEW: + if (!acpi_evalf(vid_handle, &autosw, "^VDEE", "d")) + return -EIO; + break; + default: + return -ENOSYS; + } + + return autosw & 1; +} + +static int video_autosw_set(int enable) +{ + if (!acpi_evalf(vid_handle, NULL, "_DOS", "vd", (enable)? 1 : 0)) + return -EIO; + return 0; +} + +static int video_outputsw_cycle(void) +{ + int autosw = video_autosw_get(); + int res; + + if (autosw < 0) + return autosw; + + switch (video_supported) { + case TPACPI_VIDEO_570: + res = video_autosw_set(1); + if (res) + return res; + res = acpi_evalf(ec_handle, NULL, "_Q16", "v"); + break; + case TPACPI_VIDEO_770: + case TPACPI_VIDEO_NEW: + res = video_autosw_set(1); + if (res) + return res; + res = acpi_evalf(vid_handle, NULL, "VSWT", "v"); + break; + default: + return -ENOSYS; + } + if (!autosw && video_autosw_set(autosw)) { + printk(TPACPI_ERR + "video auto-switch left enabled due to error\n"); + return -EIO; + } + + return (res)? 0 : -EIO; +} + +static int video_expand_toggle(void) +{ + switch (video_supported) { + case TPACPI_VIDEO_570: + return acpi_evalf(ec_handle, NULL, "_Q17", "v")? + 0 : -EIO; + case TPACPI_VIDEO_770: + return acpi_evalf(vid_handle, NULL, "VEXP", "v")? 
+ 0 : -EIO; + case TPACPI_VIDEO_NEW: + return acpi_evalf(NULL, NULL, "\\VEXP", "v")? + 0 : -EIO; + default: + return -ENOSYS; + } + /* not reached */ +} + +static int video_read(char *p) +{ + int status, autosw; + int len = 0; + + if (video_supported == TPACPI_VIDEO_NONE) { + len += sprintf(p + len, "status:\t\tnot supported\n"); + return len; + } + + status = video_outputsw_get(); + if (status < 0) + return status; + + autosw = video_autosw_get(); + if (autosw < 0) + return autosw; + + len += sprintf(p + len, "status:\t\tsupported\n"); + len += sprintf(p + len, "lcd:\t\t%s\n", enabled(status, 0)); + len += sprintf(p + len, "crt:\t\t%s\n", enabled(status, 1)); + if (video_supported == TPACPI_VIDEO_NEW) + len += sprintf(p + len, "dvi:\t\t%s\n", enabled(status, 3)); + len += sprintf(p + len, "auto:\t\t%s\n", enabled(autosw, 0)); + len += sprintf(p + len, "commands:\tlcd_enable, lcd_disable\n"); + len += sprintf(p + len, "commands:\tcrt_enable, crt_disable\n"); + if (video_supported == TPACPI_VIDEO_NEW) + len += sprintf(p + len, "commands:\tdvi_enable, dvi_disable\n"); + len += sprintf(p + len, "commands:\tauto_enable, auto_disable\n"); + len += sprintf(p + len, "commands:\tvideo_switch, expand_toggle\n"); + + return len; +} + +static int video_write(char *buf) +{ + char *cmd; + int enable, disable, status; + int res; + + if (video_supported == TPACPI_VIDEO_NONE) + return -ENODEV; + + enable = 0; + disable = 0; + + while ((cmd = next_cmd(&buf))) { + if (strlencmp(cmd, "lcd_enable") == 0) { + enable |= TP_ACPI_VIDEO_S_LCD; + } else if (strlencmp(cmd, "lcd_disable") == 0) { + disable |= TP_ACPI_VIDEO_S_LCD; + } else if (strlencmp(cmd, "crt_enable") == 0) { + enable |= TP_ACPI_VIDEO_S_CRT; + } else if (strlencmp(cmd, "crt_disable") == 0) { + disable |= TP_ACPI_VIDEO_S_CRT; + } else if (video_supported == TPACPI_VIDEO_NEW && + strlencmp(cmd, "dvi_enable") == 0) { + enable |= TP_ACPI_VIDEO_S_DVI; + } else if (video_supported == TPACPI_VIDEO_NEW && + strlencmp(cmd, "dvi_disable") == 0) { + disable |= TP_ACPI_VIDEO_S_DVI; + } else if (strlencmp(cmd, "auto_enable") == 0) { + res = video_autosw_set(1); + if (res) + return res; + } else if (strlencmp(cmd, "auto_disable") == 0) { + res = video_autosw_set(0); + if (res) + return res; + } else if (strlencmp(cmd, "video_switch") == 0) { + res = video_outputsw_cycle(); + if (res) + return res; + } else if (strlencmp(cmd, "expand_toggle") == 0) { + res = video_expand_toggle(); + if (res) + return res; + } else + return -EINVAL; + } + + if (enable || disable) { + status = video_outputsw_get(); + if (status < 0) + return status; + res = video_outputsw_set((status & ~disable) | enable); + if (res) + return res; + } + + return 0; +} + +static struct ibm_struct video_driver_data = { + .name = "video", + .read = video_read, + .write = video_write, + .exit = video_exit, +}; + +#endif /* CONFIG_THINKPAD_ACPI_VIDEO */ + +/************************************************************************* + * Light (thinklight) subdriver + */ + +TPACPI_HANDLE(lght, root, "\\LGHT"); /* A21e, A2xm/p, T20-22, X20-21 */ +TPACPI_HANDLE(ledb, ec, "LEDB"); /* G4x */ + +static int light_get_status(void) +{ + int status = 0; + + if (tp_features.light_status) { + if (!acpi_evalf(ec_handle, &status, "KBLT", "d")) + return -EIO; + return (!!status); + } + + return -ENXIO; +} + +static int light_set_status(int status) +{ + int rc; + + if (tp_features.light) { + if (cmos_handle) { + rc = acpi_evalf(cmos_handle, NULL, NULL, "vd", + (status)? 
+ TP_CMOS_THINKLIGHT_ON : + TP_CMOS_THINKLIGHT_OFF); + } else { + rc = acpi_evalf(lght_handle, NULL, NULL, "vd", + (status)? 1 : 0); + } + return (rc)? 0 : -EIO; + } + + return -ENXIO; +} + +static void light_set_status_worker(struct work_struct *work) +{ + struct tpacpi_led_classdev *data = + container_of(work, struct tpacpi_led_classdev, work); + + if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING)) + light_set_status((data->new_brightness != LED_OFF)); +} + +static void light_sysfs_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct tpacpi_led_classdev *data = + container_of(led_cdev, + struct tpacpi_led_classdev, + led_classdev); + data->new_brightness = brightness; + queue_work(tpacpi_wq, &data->work); +} + +static enum led_brightness light_sysfs_get(struct led_classdev *led_cdev) +{ + return (light_get_status() == 1)? LED_FULL : LED_OFF; +} + +static struct tpacpi_led_classdev tpacpi_led_thinklight = { + .led_classdev = { + .name = "tpacpi::thinklight", + .brightness_set = &light_sysfs_set, + .brightness_get = &light_sysfs_get, + } +}; + +static int __init light_init(struct ibm_init_struct *iibm) +{ + int rc; + + vdbg_printk(TPACPI_DBG_INIT, "initializing light subdriver\n"); + + TPACPI_ACPIHANDLE_INIT(ledb); + TPACPI_ACPIHANDLE_INIT(lght); + TPACPI_ACPIHANDLE_INIT(cmos); + INIT_WORK(&tpacpi_led_thinklight.work, light_set_status_worker); + + /* light not supported on 570, 600e/x, 770e, 770x, G4x, R30, R31 */ + tp_features.light = (cmos_handle || lght_handle) && !ledb_handle; + + if (tp_features.light) + /* light status not supported on + 570, 600e/x, 770e, 770x, G4x, R30, R31, R32, X20 */ + tp_features.light_status = + acpi_evalf(ec_handle, NULL, "KBLT", "qv"); + + vdbg_printk(TPACPI_DBG_INIT, "light is %s, light status is %s\n", + str_supported(tp_features.light), + str_supported(tp_features.light_status)); + + if (!tp_features.light) + return 1; + + rc = led_classdev_register(&tpacpi_pdev->dev, + &tpacpi_led_thinklight.led_classdev); + + if (rc < 0) { + tp_features.light = 0; + tp_features.light_status = 0; + } else { + rc = 0; + } + + return rc; +} + +static void light_exit(void) +{ + led_classdev_unregister(&tpacpi_led_thinklight.led_classdev); + if (work_pending(&tpacpi_led_thinklight.work)) + flush_workqueue(tpacpi_wq); +} + +static int light_read(char *p) +{ + int len = 0; + int status; + + if (!tp_features.light) { + len += sprintf(p + len, "status:\t\tnot supported\n"); + } else if (!tp_features.light_status) { + len += sprintf(p + len, "status:\t\tunknown\n"); + len += sprintf(p + len, "commands:\ton, off\n"); + } else { + status = light_get_status(); + if (status < 0) + return status; + len += sprintf(p + len, "status:\t\t%s\n", onoff(status, 0)); + len += sprintf(p + len, "commands:\ton, off\n"); + } + + return len; +} + +static int light_write(char *buf) +{ + char *cmd; + int newstatus = 0; + + if (!tp_features.light) + return -ENODEV; + + while ((cmd = next_cmd(&buf))) { + if (strlencmp(cmd, "on") == 0) { + newstatus = 1; + } else if (strlencmp(cmd, "off") == 0) { + newstatus = 0; + } else + return -EINVAL; + } + + return light_set_status(newstatus); +} + +static struct ibm_struct light_driver_data = { + .name = "light", + .read = light_read, + .write = light_write, + .exit = light_exit, +}; + +/************************************************************************* + * Dock subdriver + */ + +#ifdef CONFIG_THINKPAD_ACPI_DOCK + +static void dock_notify(struct ibm_struct *ibm, u32 event); +static int dock_read(char *p); +static int 
dock_write(char *buf); + +TPACPI_HANDLE(dock, root, "\\_SB.GDCK", /* X30, X31, X40 */ + "\\_SB.PCI0.DOCK", /* 600e/x,770e,770x,A2xm/p,T20-22,X20-21 */ + "\\_SB.PCI0.PCI1.DOCK", /* all others */ + "\\_SB.PCI.ISA.SLCE", /* 570 */ + ); /* A21e,G4x,R30,R31,R32,R40,R40e,R50e */ + +/* don't list other alternatives as we install a notify handler on the 570 */ +TPACPI_HANDLE(pci, root, "\\_SB.PCI"); /* 570 */ + +static const struct acpi_device_id ibm_pci_device_ids[] = { + {PCI_ROOT_HID_STRING, 0}, + {"", 0}, +}; + +static struct tp_acpi_drv_struct ibm_dock_acpidriver[2] = { + { + .notify = dock_notify, + .handle = &dock_handle, + .type = ACPI_SYSTEM_NOTIFY, + }, + { + /* THIS ONE MUST NEVER BE USED FOR DRIVER AUTOLOADING. + * We just use it to get notifications of dock hotplug + * in very old thinkpads */ + .hid = ibm_pci_device_ids, + .notify = dock_notify, + .handle = &pci_handle, + .type = ACPI_SYSTEM_NOTIFY, + }, +}; + +static struct ibm_struct dock_driver_data[2] = { + { + .name = "dock", + .read = dock_read, + .write = dock_write, + .acpi = &ibm_dock_acpidriver[0], + }, + { + .name = "dock", + .acpi = &ibm_dock_acpidriver[1], + }, +}; + +#define dock_docked() (_sta(dock_handle) & 1) + +static int __init dock_init(struct ibm_init_struct *iibm) +{ + vdbg_printk(TPACPI_DBG_INIT, "initializing dock subdriver\n"); + + TPACPI_ACPIHANDLE_INIT(dock); + + vdbg_printk(TPACPI_DBG_INIT, "dock is %s\n", + str_supported(dock_handle != NULL)); + + return (dock_handle)? 0 : 1; +} + +static int __init dock_init2(struct ibm_init_struct *iibm) +{ + int dock2_needed; + + vdbg_printk(TPACPI_DBG_INIT, "initializing dock subdriver part 2\n"); + + if (dock_driver_data[0].flags.acpi_driver_registered && + dock_driver_data[0].flags.acpi_notify_installed) { + TPACPI_ACPIHANDLE_INIT(pci); + dock2_needed = (pci_handle != NULL); + vdbg_printk(TPACPI_DBG_INIT, + "dock PCI handler for the TP 570 is %s\n", + str_supported(dock2_needed)); + } else { + vdbg_printk(TPACPI_DBG_INIT, + "dock subdriver part 2 not required\n"); + dock2_needed = 0; + } + + return (dock2_needed)? 
0 : 1; +} + +static void dock_notify(struct ibm_struct *ibm, u32 event) +{ + int docked = dock_docked(); + int pci = ibm->acpi->hid && ibm->acpi->device && + acpi_match_device_ids(ibm->acpi->device, ibm_pci_device_ids); + int data; + + if (event == 1 && !pci) /* 570 */ + data = 1; /* button */ + else if (event == 1 && pci) /* 570 */ + data = 3; /* dock */ + else if (event == 3 && docked) + data = 1; /* button */ + else if (event == 3 && !docked) + data = 2; /* undock */ + else if (event == 0 && docked) + data = 3; /* dock */ + else { + printk(TPACPI_ERR "unknown dock event %d, status %d\n", + event, _sta(dock_handle)); + data = 0; /* unknown */ + } + acpi_bus_generate_proc_event(ibm->acpi->device, event, data); + acpi_bus_generate_netlink_event(ibm->acpi->device->pnp.device_class, + ibm->acpi->device->dev.bus_id, + event, data); +} + +static int dock_read(char *p) +{ + int len = 0; + int docked = dock_docked(); + + if (!dock_handle) + len += sprintf(p + len, "status:\t\tnot supported\n"); + else if (!docked) + len += sprintf(p + len, "status:\t\tundocked\n"); + else { + len += sprintf(p + len, "status:\t\tdocked\n"); + len += sprintf(p + len, "commands:\tdock, undock\n"); + } + + return len; +} + +static int dock_write(char *buf) +{ + char *cmd; + + if (!dock_docked()) + return -ENODEV; + + while ((cmd = next_cmd(&buf))) { + if (strlencmp(cmd, "undock") == 0) { + if (!acpi_evalf(dock_handle, NULL, "_DCK", "vd", 0) || + !acpi_evalf(dock_handle, NULL, "_EJ0", "vd", 1)) + return -EIO; + } else if (strlencmp(cmd, "dock") == 0) { + if (!acpi_evalf(dock_handle, NULL, "_DCK", "vd", 1)) + return -EIO; + } else + return -EINVAL; + } + + return 0; +} + +#endif /* CONFIG_THINKPAD_ACPI_DOCK */ + +/************************************************************************* + * Bay subdriver + */ + +#ifdef CONFIG_THINKPAD_ACPI_BAY + +TPACPI_HANDLE(bay, root, "\\_SB.PCI.IDE.SECN.MAST", /* 570 */ + "\\_SB.PCI0.IDE0.IDES.IDSM", /* 600e/x, 770e, 770x */ + "\\_SB.PCI0.SATA.SCND.MSTR", /* T60, X60, Z60 */ + "\\_SB.PCI0.IDE0.SCND.MSTR", /* all others */ + ); /* A21e, R30, R31 */ +TPACPI_HANDLE(bay_ej, bay, "_EJ3", /* 600e/x, A2xm/p, A3x */ + "_EJ0", /* all others */ + ); /* 570,A21e,G4x,R30,R31,R32,R40e,R50e */ +TPACPI_HANDLE(bay2, root, "\\_SB.PCI0.IDE0.PRIM.SLAV", /* A3x, R32 */ + "\\_SB.PCI0.IDE0.IDEP.IDPS", /* 600e/x, 770e, 770x */ + ); /* all others */ +TPACPI_HANDLE(bay2_ej, bay2, "_EJ3", /* 600e/x, 770e, A3x */ + "_EJ0", /* 770x */ + ); /* all others */ + +static int __init bay_init(struct ibm_init_struct *iibm) +{ + vdbg_printk(TPACPI_DBG_INIT, "initializing bay subdriver\n"); + + TPACPI_ACPIHANDLE_INIT(bay); + if (bay_handle) + TPACPI_ACPIHANDLE_INIT(bay_ej); + TPACPI_ACPIHANDLE_INIT(bay2); + if (bay2_handle) + TPACPI_ACPIHANDLE_INIT(bay2_ej); + + tp_features.bay_status = bay_handle && + acpi_evalf(bay_handle, NULL, "_STA", "qv"); + tp_features.bay_status2 = bay2_handle && + acpi_evalf(bay2_handle, NULL, "_STA", "qv"); + + tp_features.bay_eject = bay_handle && bay_ej_handle && + (strlencmp(bay_ej_path, "_EJ0") == 0 || experimental); + tp_features.bay_eject2 = bay2_handle && bay2_ej_handle && + (strlencmp(bay2_ej_path, "_EJ0") == 0 || experimental); + + vdbg_printk(TPACPI_DBG_INIT, + "bay 1: status %s, eject %s; bay 2: status %s, eject %s\n", + str_supported(tp_features.bay_status), + str_supported(tp_features.bay_eject), + str_supported(tp_features.bay_status2), + str_supported(tp_features.bay_eject2)); + + return (tp_features.bay_status || tp_features.bay_eject || + tp_features.bay_status2 || 
tp_features.bay_eject2)? 0 : 1; +} + +static void bay_notify(struct ibm_struct *ibm, u32 event) +{ + acpi_bus_generate_proc_event(ibm->acpi->device, event, 0); + acpi_bus_generate_netlink_event(ibm->acpi->device->pnp.device_class, + ibm->acpi->device->dev.bus_id, + event, 0); +} + +#define bay_occupied(b) (_sta(b##_handle) & 1) + +static int bay_read(char *p) +{ + int len = 0; + int occupied = bay_occupied(bay); + int occupied2 = bay_occupied(bay2); + int eject, eject2; + + len += sprintf(p + len, "status:\t\t%s\n", + tp_features.bay_status ? + (occupied ? "occupied" : "unoccupied") : + "not supported"); + if (tp_features.bay_status2) + len += sprintf(p + len, "status2:\t%s\n", occupied2 ? + "occupied" : "unoccupied"); + + eject = tp_features.bay_eject && occupied; + eject2 = tp_features.bay_eject2 && occupied2; + + if (eject && eject2) + len += sprintf(p + len, "commands:\teject, eject2\n"); + else if (eject) + len += sprintf(p + len, "commands:\teject\n"); + else if (eject2) + len += sprintf(p + len, "commands:\teject2\n"); + + return len; +} + +static int bay_write(char *buf) +{ + char *cmd; + + if (!tp_features.bay_eject && !tp_features.bay_eject2) + return -ENODEV; + + while ((cmd = next_cmd(&buf))) { + if (tp_features.bay_eject && strlencmp(cmd, "eject") == 0) { + if (!acpi_evalf(bay_ej_handle, NULL, NULL, "vd", 1)) + return -EIO; + } else if (tp_features.bay_eject2 && + strlencmp(cmd, "eject2") == 0) { + if (!acpi_evalf(bay2_ej_handle, NULL, NULL, "vd", 1)) + return -EIO; + } else + return -EINVAL; + } + + return 0; +} + +static struct tp_acpi_drv_struct ibm_bay_acpidriver = { + .notify = bay_notify, + .handle = &bay_handle, + .type = ACPI_SYSTEM_NOTIFY, +}; + +static struct ibm_struct bay_driver_data = { + .name = "bay", + .read = bay_read, + .write = bay_write, + .acpi = &ibm_bay_acpidriver, +}; + +#endif /* CONFIG_THINKPAD_ACPI_BAY */ + +/************************************************************************* + * CMOS subdriver + */ + +/* sysfs cmos_command -------------------------------------------------- */ +static ssize_t cmos_command_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned long cmos_cmd; + int res; + + if (parse_strtoul(buf, 21, &cmos_cmd)) + return -EINVAL; + + res = issue_thinkpad_cmos_command(cmos_cmd); + return (res)? res : count; +} + +static struct device_attribute dev_attr_cmos_command = + __ATTR(cmos_command, S_IWUSR, NULL, cmos_command_store); + +/* --------------------------------------------------------------------- */ + +static int __init cmos_init(struct ibm_init_struct *iibm) +{ + int res; + + vdbg_printk(TPACPI_DBG_INIT, + "initializing cmos commands subdriver\n"); + + TPACPI_ACPIHANDLE_INIT(cmos); + + vdbg_printk(TPACPI_DBG_INIT, "cmos commands are %s\n", + str_supported(cmos_handle != NULL)); + + res = device_create_file(&tpacpi_pdev->dev, &dev_attr_cmos_command); + if (res) + return res; + + return (cmos_handle)? 
0 : 1; +} + +static void cmos_exit(void) +{ + device_remove_file(&tpacpi_pdev->dev, &dev_attr_cmos_command); +} + +static int cmos_read(char *p) +{ + int len = 0; + + /* cmos not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p, + R30, R31, T20-22, X20-21 */ + if (!cmos_handle) + len += sprintf(p + len, "status:\t\tnot supported\n"); + else { + len += sprintf(p + len, "status:\t\tsupported\n"); + len += sprintf(p + len, "commands:\t<cmd> (<cmd> is 0-21)\n"); + } + + return len; +} + +static int cmos_write(char *buf) +{ + char *cmd; + int cmos_cmd, res; + + while ((cmd = next_cmd(&buf))) { + if (sscanf(cmd, "%u", &cmos_cmd) == 1 && + cmos_cmd >= 0 && cmos_cmd <= 21) { + /* cmos_cmd set */ + } else + return -EINVAL; + + res = issue_thinkpad_cmos_command(cmos_cmd); + if (res) + return res; + } + + return 0; +} + +static struct ibm_struct cmos_driver_data = { + .name = "cmos", + .read = cmos_read, + .write = cmos_write, + .exit = cmos_exit, +}; + +/************************************************************************* + * LED subdriver + */ + +enum led_access_mode { + TPACPI_LED_NONE = 0, + TPACPI_LED_570, /* 570 */ + TPACPI_LED_OLD, /* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20-21 */ + TPACPI_LED_NEW, /* all others */ +}; + +enum { /* For TPACPI_LED_OLD */ + TPACPI_LED_EC_HLCL = 0x0c, /* EC reg to get led to power on */ + TPACPI_LED_EC_HLBL = 0x0d, /* EC reg to blink a lit led */ + TPACPI_LED_EC_HLMS = 0x0e, /* EC reg to select led to command */ +}; + +enum led_status_t { + TPACPI_LED_OFF = 0, + TPACPI_LED_ON, + TPACPI_LED_BLINK, +}; + +static enum led_access_mode led_supported; + +TPACPI_HANDLE(led, ec, "SLED", /* 570 */ + "SYSL", /* 600e/x, 770e, 770x, A21e, A2xm/p, */ + /* T20-22, X20-21 */ + "LED", /* all others */ + ); /* R30, R31 */ + +#define TPACPI_LED_NUMLEDS 8 +static struct tpacpi_led_classdev *tpacpi_leds; +static enum led_status_t tpacpi_led_state_cache[TPACPI_LED_NUMLEDS]; +static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = { + /* there's a limit of 19 chars + NULL before 2.6.26 */ + "tpacpi::power", + "tpacpi:orange:batt", + "tpacpi:green:batt", + "tpacpi::dock_active", + "tpacpi::bay_active", + "tpacpi::dock_batt", + "tpacpi::unknown_led", + "tpacpi::standby", +}; + +static int led_get_status(const unsigned int led) +{ + int status; + enum led_status_t led_s; + + switch (led_supported) { + case TPACPI_LED_570: + if (!acpi_evalf(ec_handle, + &status, "GLED", "dd", 1 << led)) + return -EIO; + led_s = (status == 0)? + TPACPI_LED_OFF : + ((status == 1)? + TPACPI_LED_ON : + TPACPI_LED_BLINK); + tpacpi_led_state_cache[led] = led_s; + return led_s; + default: + return -ENXIO; + } + + /* not reached */ +} + +static int led_set_status(const unsigned int led, + const enum led_status_t ledstatus) +{ + /* off, on, blink. 
Index is led_status_t */ + static const unsigned int led_sled_arg1[] = { 0, 1, 3 }; + static const unsigned int led_led_arg1[] = { 0, 0x80, 0xc0 }; + + int rc = 0; + + switch (led_supported) { + case TPACPI_LED_570: + /* 570 */ + if (led > 7) + return -EINVAL; + if (!acpi_evalf(led_handle, NULL, NULL, "vdd", + (1 << led), led_sled_arg1[ledstatus])) + rc = -EIO; + break; + case TPACPI_LED_OLD: + /* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20 */ + if (led > 7) + return -EINVAL; + rc = ec_write(TPACPI_LED_EC_HLMS, (1 << led)); + if (rc >= 0) + rc = ec_write(TPACPI_LED_EC_HLBL, + (ledstatus == TPACPI_LED_BLINK) << led); + if (rc >= 0) + rc = ec_write(TPACPI_LED_EC_HLCL, + (ledstatus != TPACPI_LED_OFF) << led); + break; + case TPACPI_LED_NEW: + /* all others */ + if (!acpi_evalf(led_handle, NULL, NULL, "vdd", + led, led_led_arg1[ledstatus])) + rc = -EIO; + break; + default: + rc = -ENXIO; + } + + if (!rc) + tpacpi_led_state_cache[led] = ledstatus; + + return rc; +} + +static void led_sysfs_set_status(unsigned int led, + enum led_brightness brightness) +{ + led_set_status(led, + (brightness == LED_OFF) ? + TPACPI_LED_OFF : + (tpacpi_led_state_cache[led] == TPACPI_LED_BLINK) ? + TPACPI_LED_BLINK : TPACPI_LED_ON); +} + +static void led_set_status_worker(struct work_struct *work) +{ + struct tpacpi_led_classdev *data = + container_of(work, struct tpacpi_led_classdev, work); + + if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING)) + led_sysfs_set_status(data->led, data->new_brightness); +} + +static void led_sysfs_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct tpacpi_led_classdev *data = container_of(led_cdev, + struct tpacpi_led_classdev, led_classdev); + + data->new_brightness = brightness; + queue_work(tpacpi_wq, &data->work); +} + +static int led_sysfs_blink_set(struct led_classdev *led_cdev, + unsigned long *delay_on, unsigned long *delay_off) +{ + struct tpacpi_led_classdev *data = container_of(led_cdev, + struct tpacpi_led_classdev, led_classdev); + + /* Can we choose the flash rate? */ + if (*delay_on == 0 && *delay_off == 0) { + /* yes. 
set them to the hardware blink rate (1 Hz) */ + *delay_on = 500; /* ms */ + *delay_off = 500; /* ms */ + } else if ((*delay_on != 500) || (*delay_off != 500)) + return -EINVAL; + + data->new_brightness = TPACPI_LED_BLINK; + queue_work(tpacpi_wq, &data->work); + + return 0; +} + +static enum led_brightness led_sysfs_get(struct led_classdev *led_cdev) +{ + int rc; + + struct tpacpi_led_classdev *data = container_of(led_cdev, + struct tpacpi_led_classdev, led_classdev); + + rc = led_get_status(data->led); + + if (rc == TPACPI_LED_OFF || rc < 0) + rc = LED_OFF; /* no error handling in led class :( */ + else + rc = LED_FULL; + + return rc; +} + +static void led_exit(void) +{ + unsigned int i; + + for (i = 0; i < TPACPI_LED_NUMLEDS; i++) { + if (tpacpi_leds[i].led_classdev.name) + led_classdev_unregister(&tpacpi_leds[i].led_classdev); + } + + kfree(tpacpi_leds); +} + +static int __init led_init(struct ibm_init_struct *iibm) +{ + unsigned int i; + int rc; + + vdbg_printk(TPACPI_DBG_INIT, "initializing LED subdriver\n"); + + TPACPI_ACPIHANDLE_INIT(led); + + if (!led_handle) + /* led not supported on R30, R31 */ + led_supported = TPACPI_LED_NONE; + else if (strlencmp(led_path, "SLED") == 0) + /* 570 */ + led_supported = TPACPI_LED_570; + else if (strlencmp(led_path, "SYSL") == 0) + /* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20-21 */ + led_supported = TPACPI_LED_OLD; + else + /* all others */ + led_supported = TPACPI_LED_NEW; + + vdbg_printk(TPACPI_DBG_INIT, "LED commands are %s, mode %d\n", + str_supported(led_supported), led_supported); + + tpacpi_leds = kzalloc(sizeof(*tpacpi_leds) * TPACPI_LED_NUMLEDS, + GFP_KERNEL); + if (!tpacpi_leds) { + printk(TPACPI_ERR "Out of memory for LED data\n"); + return -ENOMEM; + } + + for (i = 0; i < TPACPI_LED_NUMLEDS; i++) { + tpacpi_leds[i].led = i; + + tpacpi_leds[i].led_classdev.brightness_set = &led_sysfs_set; + tpacpi_leds[i].led_classdev.blink_set = &led_sysfs_blink_set; + if (led_supported == TPACPI_LED_570) + tpacpi_leds[i].led_classdev.brightness_get = + &led_sysfs_get; + + tpacpi_leds[i].led_classdev.name = tpacpi_led_names[i]; + + INIT_WORK(&tpacpi_leds[i].work, led_set_status_worker); + + rc = led_classdev_register(&tpacpi_pdev->dev, + &tpacpi_leds[i].led_classdev); + if (rc < 0) { + tpacpi_leds[i].led_classdev.name = NULL; + led_exit(); + return rc; + } + } + + return (led_supported != TPACPI_LED_NONE)? 0 : 1; +} + +#define str_led_status(s) \ + ((s) == TPACPI_LED_OFF ? "off" : \ + ((s) == TPACPI_LED_ON ? 
"on" : "blinking")) + +static int led_read(char *p) +{ + int len = 0; + + if (!led_supported) { + len += sprintf(p + len, "status:\t\tnot supported\n"); + return len; + } + len += sprintf(p + len, "status:\t\tsupported\n"); + + if (led_supported == TPACPI_LED_570) { + /* 570 */ + int i, status; + for (i = 0; i < 8; i++) { + status = led_get_status(i); + if (status < 0) + return -EIO; + len += sprintf(p + len, "%d:\t\t%s\n", + i, str_led_status(status)); + } + } + + len += sprintf(p + len, "commands:\t" + "<led> on, <led> off, <led> blink (<led> is 0-7)\n"); + + return len; +} + +static int led_write(char *buf) +{ + char *cmd; + int led, rc; + enum led_status_t s; + + if (!led_supported) + return -ENODEV; + + while ((cmd = next_cmd(&buf))) { + if (sscanf(cmd, "%d", &led) != 1 || led < 0 || led > 7) + return -EINVAL; + + if (strstr(cmd, "off")) { + s = TPACPI_LED_OFF; + } else if (strstr(cmd, "on")) { + s = TPACPI_LED_ON; + } else if (strstr(cmd, "blink")) { + s = TPACPI_LED_BLINK; + } else { + return -EINVAL; + } + + rc = led_set_status(led, s); + if (rc < 0) + return rc; + } + + return 0; +} + +static struct ibm_struct led_driver_data = { + .name = "led", + .read = led_read, + .write = led_write, + .exit = led_exit, +}; + +/************************************************************************* + * Beep subdriver + */ + +TPACPI_HANDLE(beep, ec, "BEEP"); /* all except R30, R31 */ + +static int __init beep_init(struct ibm_init_struct *iibm) +{ + vdbg_printk(TPACPI_DBG_INIT, "initializing beep subdriver\n"); + + TPACPI_ACPIHANDLE_INIT(beep); + + vdbg_printk(TPACPI_DBG_INIT, "beep is %s\n", + str_supported(beep_handle != NULL)); + + return (beep_handle)? 0 : 1; +} + +static int beep_read(char *p) +{ + int len = 0; + + if (!beep_handle) + len += sprintf(p + len, "status:\t\tnot supported\n"); + else { + len += sprintf(p + len, "status:\t\tsupported\n"); + len += sprintf(p + len, "commands:\t<cmd> (<cmd> is 0-17)\n"); + } + + return len; +} + +static int beep_write(char *buf) +{ + char *cmd; + int beep_cmd; + + if (!beep_handle) + return -ENODEV; + + while ((cmd = next_cmd(&buf))) { + if (sscanf(cmd, "%u", &beep_cmd) == 1 && + beep_cmd >= 0 && beep_cmd <= 17) { + /* beep_cmd set */ + } else + return -EINVAL; + if (!acpi_evalf(beep_handle, NULL, NULL, "vdd", beep_cmd, 0)) + return -EIO; + } + + return 0; +} + +static struct ibm_struct beep_driver_data = { + .name = "beep", + .read = beep_read, + .write = beep_write, +}; + +/************************************************************************* + * Thermal subdriver + */ + +enum thermal_access_mode { + TPACPI_THERMAL_NONE = 0, /* No thermal support */ + TPACPI_THERMAL_ACPI_TMP07, /* Use ACPI TMP0-7 */ + TPACPI_THERMAL_ACPI_UPDT, /* Use ACPI TMP0-7 with UPDT */ + TPACPI_THERMAL_TPEC_8, /* Use ACPI EC regs, 8 sensors */ + TPACPI_THERMAL_TPEC_16, /* Use ACPI EC regs, 16 sensors */ +}; + +enum { /* TPACPI_THERMAL_TPEC_* */ + TP_EC_THERMAL_TMP0 = 0x78, /* ACPI EC regs TMP 0..7 */ + TP_EC_THERMAL_TMP8 = 0xC0, /* ACPI EC regs TMP 8..15 */ + TP_EC_THERMAL_TMP_NA = -128, /* ACPI EC sensor not available */ +}; + +#define TPACPI_MAX_THERMAL_SENSORS 16 /* Max thermal sensors supported */ +struct ibm_thermal_sensors_struct { + s32 temp[TPACPI_MAX_THERMAL_SENSORS]; +}; + +static enum thermal_access_mode thermal_read_mode; + +/* idx is zero-based */ +static int thermal_get_sensor(int idx, s32 *value) +{ + int t; + s8 tmp; + char tmpi[5]; + + t = TP_EC_THERMAL_TMP0; + + switch (thermal_read_mode) { +#if TPACPI_MAX_THERMAL_SENSORS >= 16 + case 
TPACPI_THERMAL_TPEC_16: + if (idx >= 8 && idx <= 15) { + t = TP_EC_THERMAL_TMP8; + idx -= 8; + } + /* fallthrough */ +#endif + case TPACPI_THERMAL_TPEC_8: + if (idx <= 7) { + if (!acpi_ec_read(t + idx, &tmp)) + return -EIO; + *value = tmp * 1000; + return 0; + } + break; + + case TPACPI_THERMAL_ACPI_UPDT: + if (idx <= 7) { + snprintf(tmpi, sizeof(tmpi), "TMP%c", '0' + idx); + if (!acpi_evalf(ec_handle, NULL, "UPDT", "v")) + return -EIO; + if (!acpi_evalf(ec_handle, &t, tmpi, "d")) + return -EIO; + *value = (t - 2732) * 100; + return 0; + } + break; + + case TPACPI_THERMAL_ACPI_TMP07: + if (idx <= 7) { + snprintf(tmpi, sizeof(tmpi), "TMP%c", '0' + idx); + if (!acpi_evalf(ec_handle, &t, tmpi, "d")) + return -EIO; + if (t > 127 || t < -127) + t = TP_EC_THERMAL_TMP_NA; + *value = t * 1000; + return 0; + } + break; + + case TPACPI_THERMAL_NONE: + default: + return -ENOSYS; + } + + return -EINVAL; +} + +static int thermal_get_sensors(struct ibm_thermal_sensors_struct *s) +{ + int res, i; + int n; + + n = 8; + i = 0; + + if (!s) + return -EINVAL; + + if (thermal_read_mode == TPACPI_THERMAL_TPEC_16) + n = 16; + + for (i = 0 ; i < n; i++) { + res = thermal_get_sensor(i, &s->temp[i]); + if (res) + return res; + } + + return n; +} + +/* sysfs temp##_input -------------------------------------------------- */ + +static ssize_t thermal_temp_input_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct sensor_device_attribute *sensor_attr = + to_sensor_dev_attr(attr); + int idx = sensor_attr->index; + s32 value; + int res; + + res = thermal_get_sensor(idx, &value); + if (res) + return res; + if (value == TP_EC_THERMAL_TMP_NA * 1000) + return -ENXIO; + + return snprintf(buf, PAGE_SIZE, "%d\n", value); +} + +#define THERMAL_SENSOR_ATTR_TEMP(_idxA, _idxB) \ + SENSOR_ATTR(temp##_idxA##_input, S_IRUGO, \ + thermal_temp_input_show, NULL, _idxB) + +static struct sensor_device_attribute sensor_dev_attr_thermal_temp_input[] = { + THERMAL_SENSOR_ATTR_TEMP(1, 0), + THERMAL_SENSOR_ATTR_TEMP(2, 1), + THERMAL_SENSOR_ATTR_TEMP(3, 2), + THERMAL_SENSOR_ATTR_TEMP(4, 3), + THERMAL_SENSOR_ATTR_TEMP(5, 4), + THERMAL_SENSOR_ATTR_TEMP(6, 5), + THERMAL_SENSOR_ATTR_TEMP(7, 6), + THERMAL_SENSOR_ATTR_TEMP(8, 7), + THERMAL_SENSOR_ATTR_TEMP(9, 8), + THERMAL_SENSOR_ATTR_TEMP(10, 9), + THERMAL_SENSOR_ATTR_TEMP(11, 10), + THERMAL_SENSOR_ATTR_TEMP(12, 11), + THERMAL_SENSOR_ATTR_TEMP(13, 12), + THERMAL_SENSOR_ATTR_TEMP(14, 13), + THERMAL_SENSOR_ATTR_TEMP(15, 14), + THERMAL_SENSOR_ATTR_TEMP(16, 15), +}; + +#define THERMAL_ATTRS(X) \ + &sensor_dev_attr_thermal_temp_input[X].dev_attr.attr + +static struct attribute *thermal_temp_input_attr[] = { + THERMAL_ATTRS(8), + THERMAL_ATTRS(9), + THERMAL_ATTRS(10), + THERMAL_ATTRS(11), + THERMAL_ATTRS(12), + THERMAL_ATTRS(13), + THERMAL_ATTRS(14), + THERMAL_ATTRS(15), + THERMAL_ATTRS(0), + THERMAL_ATTRS(1), + THERMAL_ATTRS(2), + THERMAL_ATTRS(3), + THERMAL_ATTRS(4), + THERMAL_ATTRS(5), + THERMAL_ATTRS(6), + THERMAL_ATTRS(7), + NULL +}; + +static const struct attribute_group thermal_temp_input16_group = { + .attrs = thermal_temp_input_attr +}; + +static const struct attribute_group thermal_temp_input8_group = { + .attrs = &thermal_temp_input_attr[8] +}; + +#undef THERMAL_SENSOR_ATTR_TEMP +#undef THERMAL_ATTRS + +/* --------------------------------------------------------------------- */ + +static int __init thermal_init(struct ibm_init_struct *iibm) +{ + u8 t, ta1, ta2; + int i; + int acpi_tmp7; + int res; + + vdbg_printk(TPACPI_DBG_INIT, "initializing thermal 
subdriver\n"); + + acpi_tmp7 = acpi_evalf(ec_handle, NULL, "TMP7", "qv"); + + if (thinkpad_id.ec_model) { + /* + * Direct EC access mode: sensors at registers + * 0x78-0x7F, 0xC0-0xC7. Registers return 0x00 for + * non-implemented, thermal sensors return 0x80 when + * not available + */ + + ta1 = ta2 = 0; + for (i = 0; i < 8; i++) { + if (acpi_ec_read(TP_EC_THERMAL_TMP0 + i, &t)) { + ta1 |= t; + } else { + ta1 = 0; + break; + } + if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) { + ta2 |= t; + } else { + ta1 = 0; + break; + } + } + if (ta1 == 0) { + /* This is sheer paranoia, but we handle it anyway */ + if (acpi_tmp7) { + printk(TPACPI_ERR + "ThinkPad ACPI EC access misbehaving, " + "falling back to ACPI TMPx access " + "mode\n"); + thermal_read_mode = TPACPI_THERMAL_ACPI_TMP07; + } else { + printk(TPACPI_ERR + "ThinkPad ACPI EC access misbehaving, " + "disabling thermal sensors access\n"); + thermal_read_mode = TPACPI_THERMAL_NONE; + } + } else { + thermal_read_mode = + (ta2 != 0) ? + TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8; + } + } else if (acpi_tmp7) { + if (acpi_evalf(ec_handle, NULL, "UPDT", "qv")) { + /* 600e/x, 770e, 770x */ + thermal_read_mode = TPACPI_THERMAL_ACPI_UPDT; + } else { + /* Standard ACPI TMPx access, max 8 sensors */ + thermal_read_mode = TPACPI_THERMAL_ACPI_TMP07; + } + } else { + /* temperatures not supported on 570, G4x, R30, R31, R32 */ + thermal_read_mode = TPACPI_THERMAL_NONE; + } + + vdbg_printk(TPACPI_DBG_INIT, "thermal is %s, mode %d\n", + str_supported(thermal_read_mode != TPACPI_THERMAL_NONE), + thermal_read_mode); + + switch (thermal_read_mode) { + case TPACPI_THERMAL_TPEC_16: + res = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj, + &thermal_temp_input16_group); + if (res) + return res; + break; + case TPACPI_THERMAL_TPEC_8: + case TPACPI_THERMAL_ACPI_TMP07: + case TPACPI_THERMAL_ACPI_UPDT: + res = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj, + &thermal_temp_input8_group); + if (res) + return res; + break; + case TPACPI_THERMAL_NONE: + default: + return 1; + } + + return 0; +} + +static void thermal_exit(void) +{ + switch (thermal_read_mode) { + case TPACPI_THERMAL_TPEC_16: + sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj, + &thermal_temp_input16_group); + break; + case TPACPI_THERMAL_TPEC_8: + case TPACPI_THERMAL_ACPI_TMP07: + case TPACPI_THERMAL_ACPI_UPDT: + sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj, + &thermal_temp_input16_group); + break; + case TPACPI_THERMAL_NONE: + default: + break; + } +} + +static int thermal_read(char *p) +{ + int len = 0; + int n, i; + struct ibm_thermal_sensors_struct t; + + n = thermal_get_sensors(&t); + if (unlikely(n < 0)) + return n; + + len += sprintf(p + len, "temperatures:\t"); + + if (n > 0) { + for (i = 0; i < (n - 1); i++) + len += sprintf(p + len, "%d ", t.temp[i] / 1000); + len += sprintf(p + len, "%d\n", t.temp[i] / 1000); + } else + len += sprintf(p + len, "not supported\n"); + + return len; +} + +static struct ibm_struct thermal_driver_data = { + .name = "thermal", + .read = thermal_read, + .exit = thermal_exit, +}; + +/************************************************************************* + * EC Dump subdriver + */ + +static u8 ecdump_regs[256]; + +static int ecdump_read(char *p) +{ + int len = 0; + int i, j; + u8 v; + + len += sprintf(p + len, "EC " + " +00 +01 +02 +03 +04 +05 +06 +07" + " +08 +09 +0a +0b +0c +0d +0e +0f\n"); + for (i = 0; i < 256; i += 16) { + len += sprintf(p + len, "EC 0x%02x:", i); + for (j = 0; j < 16; j++) { + if (!acpi_ec_read(i + j, &v)) + break; + if (v != 
+/*************************************************************************
+ * EC Dump subdriver
+ */
+
+static u8 ecdump_regs[256];
+
+static int ecdump_read(char *p)
+{
+	int len = 0;
+	int i, j;
+	u8 v;
+
+	len += sprintf(p + len, "EC      "
+		       " +00 +01 +02 +03 +04 +05 +06 +07"
+		       " +08 +09 +0a +0b +0c +0d +0e +0f\n");
+	for (i = 0; i < 256; i += 16) {
+		len += sprintf(p + len, "EC 0x%02x:", i);
+		for (j = 0; j < 16; j++) {
+			if (!acpi_ec_read(i + j, &v))
+				break;
+			if (v != ecdump_regs[i + j])
+				len += sprintf(p + len, " *%02x", v);
+			else
+				len += sprintf(p + len, " %02x", v);
+			ecdump_regs[i + j] = v;
+		}
+		len += sprintf(p + len, "\n");
+		if (j != 16)
+			break;
+	}
+
+	/* These are way too dangerous to advertise openly... */
+#if 0
+	len += sprintf(p + len, "commands:\t0x<offset> 0x<value>"
+		       " (<offset> is 00-ff, <value> is 00-ff)\n");
+	len += sprintf(p + len, "commands:\t0x<offset> <value> "
+		       " (<offset> is 00-ff, <value> is 0-255)\n");
+#endif
+	return len;
+}
+
+static int ecdump_write(char *buf)
+{
+	char *cmd;
+	int i, v;
+
+	while ((cmd = next_cmd(&buf))) {
+		if (sscanf(cmd, "0x%x 0x%x", &i, &v) == 2) {
+			/* i and v set */
+		} else if (sscanf(cmd, "0x%x %u", &i, &v) == 2) {
+			/* i and v set */
+		} else
+			return -EINVAL;
+		if (i >= 0 && i < 256 && v >= 0 && v < 256) {
+			if (!acpi_ec_write(i, v))
+				return -EIO;
+		} else
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct ibm_struct ecdump_driver_data = {
+	.name = "ecdump",
+	.read = ecdump_read,
+	.write = ecdump_write,
+	.flags.experimental = 1,
+};
+
+/*************************************************************************
+ * Backlight/brightness subdriver
+ */
+
+#define TPACPI_BACKLIGHT_DEV_NAME "thinkpad_screen"
+
+enum {
+	TP_EC_BACKLIGHT = 0x31,
+
+	/* TP_EC_BACKLIGHT bitmasks */
+	TP_EC_BACKLIGHT_LVLMSK = 0x1F,
+	TP_EC_BACKLIGHT_CMDMSK = 0xE0,
+	TP_EC_BACKLIGHT_MAPSW = 0x20,
+};
+
+static struct backlight_device *ibm_backlight_device;
+static int brightness_mode;
+static unsigned int brightness_enable = 2; /* 2 = auto, 0 = no, 1 = yes */
+
+static struct mutex brightness_mutex;
+
+/*
+ * ThinkPads can read brightness from two places: EC 0x31, or
+ * CMOS NVRAM byte 0x5E, bits 0-3.
+ *
+ * EC 0x31 has the following layout
+ *   Bit 7: unknown function
+ *   Bit 6: unknown function
+ *   Bit 5: Z: honour scale changes, NZ: ignore scale changes
+ *   Bit 4: must be set to zero to avoid problems
+ *   Bit 3-0: backlight brightness level
+ *
+ * brightness_get_raw returns status data in the EC 0x31 layout
+ */
+static int brightness_get_raw(int *status)
+{
+	u8 lec = 0, lcmos = 0, level = 0;
+
+	if (brightness_mode & 1) {
+		if (!acpi_ec_read(TP_EC_BACKLIGHT, &lec))
+			return -EIO;
+		level = lec & TP_EC_BACKLIGHT_LVLMSK;
+	}
+	if (brightness_mode & 2) {
+		lcmos = (nvram_read_byte(TP_NVRAM_ADDR_BRIGHTNESS)
+			 & TP_NVRAM_MASK_LEVEL_BRIGHTNESS)
+			>> TP_NVRAM_POS_LEVEL_BRIGHTNESS;
+		lcmos &= (tp_features.bright_16levels)? 0x0f : 0x07;
+		level = lcmos;
+	}
+
+	if (brightness_mode == 3) {
+		*status = lec;	/* Prefer EC, CMOS is just a backing store */
+		lec &= TP_EC_BACKLIGHT_LVLMSK;
+		if (lec == lcmos)
+			tp_warned.bright_cmos_ec_unsync = 0;
+		else {
+			if (!tp_warned.bright_cmos_ec_unsync) {
+				printk(TPACPI_ERR
+				       "CMOS NVRAM (%u) and EC (%u) do not "
+				       "agree on display brightness level\n",
+				       (unsigned int) lcmos,
+				       (unsigned int) lec);
+				tp_warned.bright_cmos_ec_unsync = 1;
+			}
+			return -EIO;
+		}
+	} else {
+		*status = level;
+	}
+
+	return 0;
+}
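+
+/*
+ * Editor's illustrative sketch -- NOT part of the upstream driver.
+ * How a raw EC 0x31 byte decomposes under the masks defined above
+ * (hypothetical standalone demo; the sample value is arbitrary):
+ *
+ *   #include <stdio.h>
+ *
+ *   int main(void)
+ *   {
+ *           unsigned char ec31  = 0x27;         // sample raw byte
+ *           unsigned char level = ec31 & 0x1F;  // TP_EC_BACKLIGHT_LVLMSK
+ *           unsigned char cmd   = ec31 & 0xE0;  // TP_EC_BACKLIGHT_CMDMSK
+ *
+ *           printf("level=%u cmd=0x%02x mapsw=%d\n",
+ *                  level, cmd, !!(ec31 & 0x20));  // prints level=7 ...
+ *           return 0;
+ *   }
+ */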
+/* May return EINTR which can always be mapped to ERESTARTSYS */
+static int brightness_set(int value)
+{
+	int cmos_cmd, inc, i, res;
+	int current_value;
+	int command_bits;
+
+	if (value > ((tp_features.bright_16levels)? 15 : 7) ||
+	    value < 0)
+		return -EINVAL;
+
+	res = mutex_lock_interruptible(&brightness_mutex);
+	if (res < 0)
+		return res;
+
+	res = brightness_get_raw(&current_value);
+	if (res < 0)
+		goto errout;
+
+	command_bits = current_value & TP_EC_BACKLIGHT_CMDMSK;
+	current_value &= TP_EC_BACKLIGHT_LVLMSK;
+
+	cmos_cmd = value > current_value ?
+			TP_CMOS_BRIGHTNESS_UP :
+			TP_CMOS_BRIGHTNESS_DOWN;
+	inc = (value > current_value)? 1 : -1;
+
+	res = 0;
+	for (i = current_value; i != value; i += inc) {
+		if ((brightness_mode & 2) &&
+		    issue_thinkpad_cmos_command(cmos_cmd)) {
+			res = -EIO;
+			goto errout;
+		}
+		if ((brightness_mode & 1) &&
+		    !acpi_ec_write(TP_EC_BACKLIGHT,
+				   (i + inc) | command_bits)) {
+			res = -EIO;
+			goto errout;
+		}
+	}
+
+errout:
+	mutex_unlock(&brightness_mutex);
+	return res;
+}
+
+/* sysfs backlight class ----------------------------------------------- */
+
+static int brightness_update_status(struct backlight_device *bd)
+{
+	/* it is the backlight class's job (caller) to handle
+	 * EINTR and other errors properly */
+	return brightness_set(
+		(bd->props.fb_blank == FB_BLANK_UNBLANK &&
+		 bd->props.power == FB_BLANK_UNBLANK) ?
+			bd->props.brightness : 0);
+}
+
+static int brightness_get(struct backlight_device *bd)
+{
+	int status, res;
+
+	res = brightness_get_raw(&status);
+	if (res < 0)
+		return 0; /* FIXME: teach backlight about error handling */
+
+	return status & TP_EC_BACKLIGHT_LVLMSK;
+}
+
+static struct backlight_ops ibm_backlight_data = {
+	.get_brightness = brightness_get,
+	.update_status = brightness_update_status,
+};
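+
+/*
+ * Editor's illustrative sketch -- NOT part of the upstream driver.
+ * The backlight_ops above surface through the generic backlight class,
+ * so (assuming the TPACPI_BACKLIGHT_DEV_NAME device registered below
+ * appears under /sys/class/backlight) userspace can drive the
+ * brightness like this:
+ *
+ *   #include <stdio.h>
+ *
+ *   int main(void)
+ *   {
+ *           FILE *f = fopen(
+ *               "/sys/class/backlight/thinkpad_screen/brightness", "w");
+ *           if (!f)
+ *                   return 1;
+ *           fprintf(f, "%d\n", 4);  // 0-7, or 0-15 on 16-level units
+ *           return fclose(f) ? 1 : 0;
+ *   }
+ */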
+/* --------------------------------------------------------------------- */
+
+static int __init brightness_init(struct ibm_init_struct *iibm)
+{
+	int b;
+
+	vdbg_printk(TPACPI_DBG_INIT, "initializing brightness subdriver\n");
+
+	mutex_init(&brightness_mutex);
+
+	/*
+	 * We always attempt to detect acpi support, so as to switch
+	 * Lenovo Vista BIOS to ACPI brightness mode even if we are not
+	 * going to publish a backlight interface
+	 */
+	b = tpacpi_check_std_acpi_brightness_support();
+	if (b > 0) {
+		if (acpi_video_backlight_support()) {
+			if (brightness_enable > 1) {
+				printk(TPACPI_NOTICE
+				       "Standard ACPI backlight interface "
+				       "available, not loading native one.\n");
+				return 1;
+			} else if (brightness_enable == 1) {
+				printk(TPACPI_NOTICE
+				       "Backlight control force enabled, even if standard "
+				       "ACPI backlight interface is available\n");
+			}
+		} else {
+			if (brightness_enable > 1) {
+				printk(TPACPI_NOTICE
+				       "Standard ACPI backlight interface not "
+				       "available, thinkpad_acpi native "
+				       "brightness control enabled\n");
+			}
+		}
+	}
+
+	if (!brightness_enable) {
+		dbg_printk(TPACPI_DBG_INIT,
+			   "brightness support disabled by "
+			   "module parameter\n");
+		return 1;
+	}
+
+	if (b > 16) {
+		printk(TPACPI_ERR
+		       "Unsupported brightness interface, "
+		       "please contact %s\n", TPACPI_MAIL);
+		return 1;
+	}
+	if (b == 16)
+		tp_features.bright_16levels = 1;
+
+	if (!brightness_mode) {
+		if (thinkpad_id.vendor == PCI_VENDOR_ID_LENOVO)
+			brightness_mode = 2;
+		else
+			brightness_mode = 3;
+
+		dbg_printk(TPACPI_DBG_INIT, "selected brightness_mode=%d\n",
+			   brightness_mode);
+	}
+
+	if (brightness_mode > 3)
+		return -EINVAL;
+
+	if (brightness_get_raw(&b) < 0)
+		return 1;
+
+	if (tp_features.bright_16levels)
+		printk(TPACPI_INFO
+		       "detected a 16-level brightness capable ThinkPad\n");
+
+	ibm_backlight_device = backlight_device_register(
+					TPACPI_BACKLIGHT_DEV_NAME, NULL, NULL,
+					&ibm_backlight_data);
+	if (IS_ERR(ibm_backlight_device)) {
+		printk(TPACPI_ERR "Could not register backlight device\n");
+		return PTR_ERR(ibm_backlight_device);
+	}
+	vdbg_printk(TPACPI_DBG_INIT, "brightness is supported\n");
+
+	ibm_backlight_device->props.max_brightness =
+				(tp_features.bright_16levels)? 15 : 7;
+	ibm_backlight_device->props.brightness = b & TP_EC_BACKLIGHT_LVLMSK;
+	backlight_update_status(ibm_backlight_device);
+
+	return 0;
+}
+
+static void brightness_exit(void)
+{
+	if (ibm_backlight_device) {
+		vdbg_printk(TPACPI_DBG_EXIT,
+			    "calling backlight_device_unregister()\n");
+		backlight_device_unregister(ibm_backlight_device);
+	}
+}
+
+static int brightness_read(char *p)
+{
+	int len = 0;
+	int level;
+
+	level = brightness_get(NULL);
+	if (level < 0) {
+		len += sprintf(p + len, "level:\t\tunreadable\n");
+	} else {
+		len += sprintf(p + len, "level:\t\t%d\n", level);
+		len += sprintf(p + len, "commands:\tup, down\n");
+		len += sprintf(p + len, "commands:\tlevel <level>"
+			       " (<level> is 0-%d)\n",
+			       (tp_features.bright_16levels) ? 15 : 7);
+	}
+
+	return len;
+}
+
+static int brightness_write(char *buf)
+{
+	int level;
+	int rc;
+	char *cmd;
+	int max_level = (tp_features.bright_16levels) ? 15 : 7;
+
+	level = brightness_get(NULL);
+	if (level < 0)
+		return level;
+
+	while ((cmd = next_cmd(&buf))) {
+		if (strlencmp(cmd, "up") == 0) {
+			if (level < max_level)
+				level++;
+		} else if (strlencmp(cmd, "down") == 0) {
+			if (level > 0)
+				level--;
+		} else if (sscanf(cmd, "level %d", &level) == 1 &&
+			   level >= 0 && level <= max_level) {
+			/* new level set */
+		} else
+			return -EINVAL;
+	}
+
+	/*
+	 * Now we know what the final level should be, so we try to set it.
+	 * Doing it this way makes the syscall restartable in case of EINTR
+	 */
+	rc = brightness_set(level);
+	return (rc == -EINTR)? -ERESTARTSYS : rc;
+}
+
+static struct ibm_struct brightness_driver_data = {
+	.name = "brightness",
+	.read = brightness_read,
+	.write = brightness_write,
+	.exit = brightness_exit,
+};
+
+/*************************************************************************
+ * Volume subdriver
+ */
+
+static int volume_offset = 0x30;
+
+static int volume_read(char *p)
+{
+	int len = 0;
+	u8 level;
+
+	if (!acpi_ec_read(volume_offset, &level)) {
+		len += sprintf(p + len, "level:\t\tunreadable\n");
+	} else {
+		len += sprintf(p + len, "level:\t\t%d\n", level & 0xf);
+		len += sprintf(p + len, "mute:\t\t%s\n", onoff(level, 6));
+		len += sprintf(p + len, "commands:\tup, down, mute\n");
+		len += sprintf(p + len, "commands:\tlevel <level>"
+			       " (<level> is 0-15)\n");
+	}
+
+	return len;
+}
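+
+/*
+ * Editor's illustrative sketch -- NOT part of the upstream driver.
+ * The write handler below accepts "up", "down", "mute" and
+ * "level <0-15>"; assuming the /proc/acpi/ibm/volume node this driver
+ * creates, a minimal userspace nudge looks like:
+ *
+ *   #include <stdio.h>
+ *
+ *   int main(void)
+ *   {
+ *           FILE *f = fopen("/proc/acpi/ibm/volume", "w");
+ *           if (!f)
+ *                   return 1;
+ *           fputs("up\n", f);   // or "down", "mute", "level 8"
+ *           return fclose(f) ? 1 : 0;
+ *   }
+ */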
1 : -1;
+
+ if (mute && (issue_thinkpad_cmos_command(cmos_cmd) ||
+ !acpi_ec_write(volume_offset, level)))
+ return -EIO;
+
+ for (i = level; i != new_level; i += inc)
+ if (issue_thinkpad_cmos_command(cmos_cmd) ||
+ !acpi_ec_write(volume_offset, i + inc))
+ return -EIO;
+
+ if (mute &&
+ (issue_thinkpad_cmos_command(TP_CMOS_VOLUME_MUTE) ||
+ !acpi_ec_write(volume_offset, new_level + mute))) {
+ return -EIO;
+ }
+ }
+
+ if (new_mute != mute) {
+ /* level doesn't change */
+
+ cmos_cmd = (new_mute) ?
+ TP_CMOS_VOLUME_MUTE : TP_CMOS_VOLUME_UP;
+
+ if (issue_thinkpad_cmos_command(cmos_cmd) ||
+ !acpi_ec_write(volume_offset, level + new_mute))
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static struct ibm_struct volume_driver_data = {
+ .name = "volume",
+ .read = volume_read,
+ .write = volume_write,
+};
+
+/*************************************************************************
+ * Fan subdriver
+ */
+
+/*
+ * FAN ACCESS MODES
+ *
+ * TPACPI_FAN_RD_ACPI_GFAN:
+ * ACPI GFAN method: returns fan level
+ *
+ * see TPACPI_FAN_WR_ACPI_SFAN
+ * EC 0x2f (HFSP) not available if GFAN exists
+ *
+ * TPACPI_FAN_WR_ACPI_SFAN:
+ * ACPI SFAN method: sets fan level, 0 (stop) to 7 (max)
+ *
+ * EC 0x2f (HFSP) might be available *for reading*, but do not use
+ * it for writing.
+ *
+ * TPACPI_FAN_WR_TPEC:
+ * ThinkPad EC register 0x2f (HFSP): fan control loop mode
+ * Supported on almost all ThinkPads
+ *
+ * Fan speed changes of any sort (including those caused by the
+ * disengaged mode) are usually done slowly by the firmware as the
+ * maximum amount of fan duty cycle change per second seems to be
+ * limited.
+ *
+ * Reading is not available if GFAN exists.
+ * Writing is not available if SFAN exists.
+ *
+ * Bits
+ * 7 automatic mode engaged;
+ * (default operation mode of the ThinkPad)
+ * fan level is ignored in this mode.
+ * 6 full speed mode (takes precedence over bit 7);
+ * not available on all thinkpads. May disable
+ * the tachometer while the fan controller ramps up
+ * the speed (which can take up to a few *minutes*).
+ * Speeds up fan to 100% duty-cycle, which is far above
+ * the standard RPM levels. It is not impossible that
+ * it could cause hardware damage.
+ * 5-3 unused in some models. Extra bits for fan level
+ * in others, but still useless as all values above
+ * 7 map to the same speed as level 7 in these models.
+ * 2-0 fan level (0..7 usually)
+ * 0x00 = stop
+ * 0x07 = max (set when temperatures critical)
+ * Some ThinkPads may have other levels, see
+ * TPACPI_FAN_WR_ACPI_FANS (X31/X40/X41)
+ *
+ * FIRMWARE BUG: on some models, EC 0x2f might not be initialized at
+ * boot. Apparently the EC does not initialize it, so unless ACPI DSDT
+ * does so, its initial value is meaningless (0x07).
+ *
+ * For firmware bugs, refer to:
+ * http://thinkwiki.org/wiki/Embedded_Controller_Firmware#Firmware_Issues
+ *
+ * ----
+ *
+ * ThinkPad EC register 0x84 (LSB), 0x85 (MSB):
+ * Main fan tachometer reading (in RPM)
+ *
+ * This register is present on all ThinkPads with a new-style EC, and
+ * it is known not to be present on the A21m/e, and T22, as there is
+ * something else in offset 0x84 according to the ACPI DSDT. Other
+ * ThinkPads from this same time period (and earlier) probably lack the
+ * tachometer as well.
+ *
+ * Unfortunately, a lot of ThinkPads with new-style ECs whose firmware
+ * was never fixed by IBM to report the EC firmware version string
+ * probably support the tachometer (like the early X models), so
+ * detecting it is quite hard.
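+ * (As an illustration of the 0x84/0x85 encoding, with made-up values
+ * rather than a real reading: 0xD0 in 0x84 and 0x11 in 0x85 combine
+ * to (0x11 << 8) | 0xD0 = 4560 RPM.)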
We need more data to know for sure.
+ *
+ * FIRMWARE BUG: always read 0x84 first, otherwise incorrect readings
+ * might result.
+ *
+ * FIRMWARE BUG: may go stale while the EC is switching to full speed
+ * mode.
+ *
+ * For firmware bugs, refer to:
+ * http://thinkwiki.org/wiki/Embedded_Controller_Firmware#Firmware_Issues
+ *
+ * TPACPI_FAN_WR_ACPI_FANS:
+ * ThinkPad X31, X40, X41. Not available in the X60.
+ *
+ * FANS ACPI handle: takes three arguments: low speed, medium speed,
+ * high speed. ACPI DSDT seems to map these three speeds to levels
+ * as follows: STOP LOW LOW MED MED HIGH HIGH HIGH HIGH
+ * (this map is stored on FAN0..FAN8 as "0,1,1,2,2,3,3,3,3")
+ *
+ * The speeds are stored on handles
+ * (FANA:FAN9), (FANC:FANB), (FANE:FAND).
+ *
+ * There are three default speed sets, accessible as handles:
+ * FS1L,FS1M,FS1H; FS2L,FS2M,FS2H; FS3L,FS3M,FS3H
+ *
+ * ACPI DSDT switches which set is in use depending on various
+ * factors.
+ *
+ * TPACPI_FAN_WR_TPEC is also available and should be used to
+ * command the fan. The X31/X40/X41 seems to have 8 fan levels,
+ * but the ACPI tables just mention level 7.
+ */
+
+enum { /* Fan control constants */
+ fan_status_offset = 0x2f, /* EC register 0x2f */
+ fan_rpm_offset = 0x84, /* EC register 0x84: LSB, 0x85 MSB (RPM)
+ * 0x84 must be read before 0x85 */
+
+ TP_EC_FAN_FULLSPEED = 0x40, /* EC fan mode: full speed */
+ TP_EC_FAN_AUTO = 0x80, /* EC fan mode: auto fan control */
+
+ TPACPI_FAN_LAST_LEVEL = 0x100, /* Use cached last-seen fan level */
+};
+
+enum fan_status_access_mode {
+ TPACPI_FAN_NONE = 0, /* No fan status or control */
+ TPACPI_FAN_RD_ACPI_GFAN, /* Use ACPI GFAN */
+ TPACPI_FAN_RD_TPEC, /* Use ACPI EC regs 0x2f, 0x84-0x85 */
+};
+
+enum fan_control_access_mode {
+ TPACPI_FAN_WR_NONE = 0, /* No fan control */
+ TPACPI_FAN_WR_ACPI_SFAN, /* Use ACPI SFAN */
+ TPACPI_FAN_WR_TPEC, /* Use ACPI EC reg 0x2f */
+ TPACPI_FAN_WR_ACPI_FANS, /* Use ACPI FANS and EC reg 0x2f */
+};
+
+enum fan_control_commands {
+ TPACPI_FAN_CMD_SPEED = 0x0001, /* speed command */
+ TPACPI_FAN_CMD_LEVEL = 0x0002, /* level command */
+ TPACPI_FAN_CMD_ENABLE = 0x0004, /* enable/disable cmd,
+ * and also watchdog cmd */
+};
+
+static int fan_control_allowed;
+
+static enum fan_status_access_mode fan_status_access_mode;
+static enum fan_control_access_mode fan_control_access_mode;
+static enum fan_control_commands fan_control_commands;
+
+static u8 fan_control_initial_status;
+static u8 fan_control_desired_level;
+static u8 fan_control_resume_level;
+static int fan_watchdog_maxinterval;
+
+static struct mutex fan_mutex;
+
+static void fan_watchdog_fire(struct work_struct *ignored);
+static DECLARE_DELAYED_WORK(fan_watchdog_task, fan_watchdog_fire);
+
+TPACPI_HANDLE(fans, ec, "FANS"); /* X31, X40, X41 */
+TPACPI_HANDLE(gfan, ec, "GFAN", /* 570 */
+ "\\FSPD", /* 600e/x, 770e, 770x */
+ ); /* all others */
+TPACPI_HANDLE(sfan, ec, "SFAN", /* 570 */
+ "JFNS", /* 770x-JL */
+ ); /* all others */
+
+/*
+ * Call with fan_mutex held
+ */
+static void fan_update_desired_level(u8 status)
+{
+ if ((status &
+ (TP_EC_FAN_AUTO | TP_EC_FAN_FULLSPEED)) == 0) {
+ if (status > 7)
+ fan_control_desired_level = 7;
+ else
+ fan_control_desired_level = status;
+ }
+}
+
+static int fan_get_status(u8 *status)
+{
+ u8 s;
+
+ /* TODO:
+ * Add TPACPI_FAN_RD_ACPI_FANS ?
*/ + + switch (fan_status_access_mode) { + case TPACPI_FAN_RD_ACPI_GFAN: + /* 570, 600e/x, 770e, 770x */ + + if (unlikely(!acpi_evalf(gfan_handle, &s, NULL, "d"))) + return -EIO; + + if (likely(status)) + *status = s & 0x07; + + break; + + case TPACPI_FAN_RD_TPEC: + /* all except 570, 600e/x, 770e, 770x */ + if (unlikely(!acpi_ec_read(fan_status_offset, &s))) + return -EIO; + + if (likely(status)) + *status = s; + + break; + + default: + return -ENXIO; + } + + return 0; +} + +static int fan_get_status_safe(u8 *status) +{ + int rc; + u8 s; + + if (mutex_lock_interruptible(&fan_mutex)) + return -ERESTARTSYS; + rc = fan_get_status(&s); + if (!rc) + fan_update_desired_level(s); + mutex_unlock(&fan_mutex); + + if (status) + *status = s; + + return rc; +} + +static int fan_get_speed(unsigned int *speed) +{ + u8 hi, lo; + + switch (fan_status_access_mode) { + case TPACPI_FAN_RD_TPEC: + /* all except 570, 600e/x, 770e, 770x */ + if (unlikely(!acpi_ec_read(fan_rpm_offset, &lo) || + !acpi_ec_read(fan_rpm_offset + 1, &hi))) + return -EIO; + + if (likely(speed)) + *speed = (hi << 8) | lo; + + break; + + default: + return -ENXIO; + } + + return 0; +} + +static int fan_set_level(int level) +{ + if (!fan_control_allowed) + return -EPERM; + + switch (fan_control_access_mode) { + case TPACPI_FAN_WR_ACPI_SFAN: + if (level >= 0 && level <= 7) { + if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", level)) + return -EIO; + } else + return -EINVAL; + break; + + case TPACPI_FAN_WR_ACPI_FANS: + case TPACPI_FAN_WR_TPEC: + if (!(level & TP_EC_FAN_AUTO) && + !(level & TP_EC_FAN_FULLSPEED) && + ((level < 0) || (level > 7))) + return -EINVAL; + + /* safety net should the EC not support AUTO + * or FULLSPEED mode bits and just ignore them */ + if (level & TP_EC_FAN_FULLSPEED) + level |= 7; /* safety min speed 7 */ + else if (level & TP_EC_FAN_AUTO) + level |= 4; /* safety min speed 4 */ + + if (!acpi_ec_write(fan_status_offset, level)) + return -EIO; + else + tp_features.fan_ctrl_status_undef = 0; + break; + + default: + return -ENXIO; + } + return 0; +} + +static int fan_set_level_safe(int level) +{ + int rc; + + if (!fan_control_allowed) + return -EPERM; + + if (mutex_lock_interruptible(&fan_mutex)) + return -ERESTARTSYS; + + if (level == TPACPI_FAN_LAST_LEVEL) + level = fan_control_desired_level; + + rc = fan_set_level(level); + if (!rc) + fan_update_desired_level(level); + + mutex_unlock(&fan_mutex); + return rc; +} + +static int fan_set_enable(void) +{ + u8 s; + int rc; + + if (!fan_control_allowed) + return -EPERM; + + if (mutex_lock_interruptible(&fan_mutex)) + return -ERESTARTSYS; + + switch (fan_control_access_mode) { + case TPACPI_FAN_WR_ACPI_FANS: + case TPACPI_FAN_WR_TPEC: + rc = fan_get_status(&s); + if (rc < 0) + break; + + /* Don't go out of emergency fan mode */ + if (s != 7) { + s &= 0x07; + s |= TP_EC_FAN_AUTO | 4; /* min fan speed 4 */ + } + + if (!acpi_ec_write(fan_status_offset, s)) + rc = -EIO; + else { + tp_features.fan_ctrl_status_undef = 0; + rc = 0; + } + break; + + case TPACPI_FAN_WR_ACPI_SFAN: + rc = fan_get_status(&s); + if (rc < 0) + break; + + s &= 0x07; + + /* Set fan to at least level 4 */ + s |= 4; + + if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", s)) + rc = -EIO; + else + rc = 0; + break; + + default: + rc = -ENXIO; + } + + mutex_unlock(&fan_mutex); + return rc; +} + +static int fan_set_disable(void) +{ + int rc; + + if (!fan_control_allowed) + return -EPERM; + + if (mutex_lock_interruptible(&fan_mutex)) + return -ERESTARTSYS; + + rc = 0; + switch (fan_control_access_mode) { + case 
TPACPI_FAN_WR_ACPI_FANS: + case TPACPI_FAN_WR_TPEC: + if (!acpi_ec_write(fan_status_offset, 0x00)) + rc = -EIO; + else { + fan_control_desired_level = 0; + tp_features.fan_ctrl_status_undef = 0; + } + break; + + case TPACPI_FAN_WR_ACPI_SFAN: + if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", 0x00)) + rc = -EIO; + else + fan_control_desired_level = 0; + break; + + default: + rc = -ENXIO; + } + + + mutex_unlock(&fan_mutex); + return rc; +} + +static int fan_set_speed(int speed) +{ + int rc; + + if (!fan_control_allowed) + return -EPERM; + + if (mutex_lock_interruptible(&fan_mutex)) + return -ERESTARTSYS; + + rc = 0; + switch (fan_control_access_mode) { + case TPACPI_FAN_WR_ACPI_FANS: + if (speed >= 0 && speed <= 65535) { + if (!acpi_evalf(fans_handle, NULL, NULL, "vddd", + speed, speed, speed)) + rc = -EIO; + } else + rc = -EINVAL; + break; + + default: + rc = -ENXIO; + } + + mutex_unlock(&fan_mutex); + return rc; +} + +static void fan_watchdog_reset(void) +{ + static int fan_watchdog_active; + + if (fan_control_access_mode == TPACPI_FAN_WR_NONE) + return; + + if (fan_watchdog_active) + cancel_delayed_work(&fan_watchdog_task); + + if (fan_watchdog_maxinterval > 0 && + tpacpi_lifecycle != TPACPI_LIFE_EXITING) { + fan_watchdog_active = 1; + if (!queue_delayed_work(tpacpi_wq, &fan_watchdog_task, + msecs_to_jiffies(fan_watchdog_maxinterval + * 1000))) { + printk(TPACPI_ERR + "failed to queue the fan watchdog, " + "watchdog will not trigger\n"); + } + } else + fan_watchdog_active = 0; +} + +static void fan_watchdog_fire(struct work_struct *ignored) +{ + int rc; + + if (tpacpi_lifecycle != TPACPI_LIFE_RUNNING) + return; + + printk(TPACPI_NOTICE "fan watchdog: enabling fan\n"); + rc = fan_set_enable(); + if (rc < 0) { + printk(TPACPI_ERR "fan watchdog: error %d while enabling fan, " + "will try again later...\n", -rc); + /* reschedule for later */ + fan_watchdog_reset(); + } +} + +/* + * SYSFS fan layout: hwmon compatible (device) + * + * pwm*_enable: + * 0: "disengaged" mode + * 1: manual mode + * 2: native EC "auto" mode (recommended, hardware default) + * + * pwm*: set speed in manual mode, ignored otherwise. + * 0 is level 0; 255 is level 7. Intermediate points done with linear + * interpolation. + * + * fan*_input: tachometer reading, RPM + * + * + * SYSFS fan layout: extensions + * + * fan_watchdog (driver): + * fan watchdog interval in seconds, 0 disables (default), max 120 + */ + +/* sysfs fan pwm1_enable ----------------------------------------------- */ +static ssize_t fan_pwm1_enable_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int res, mode; + u8 status; + + res = fan_get_status_safe(&status); + if (res) + return res; + + if (unlikely(tp_features.fan_ctrl_status_undef)) { + if (status != fan_control_initial_status) { + tp_features.fan_ctrl_status_undef = 0; + } else { + /* Return most likely status. 
In fact, it + * might be the only possible status */ + status = TP_EC_FAN_AUTO; + } + } + + if (status & TP_EC_FAN_FULLSPEED) { + mode = 0; + } else if (status & TP_EC_FAN_AUTO) { + mode = 2; + } else + mode = 1; + + return snprintf(buf, PAGE_SIZE, "%d\n", mode); +} + +static ssize_t fan_pwm1_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned long t; + int res, level; + + if (parse_strtoul(buf, 2, &t)) + return -EINVAL; + + switch (t) { + case 0: + level = TP_EC_FAN_FULLSPEED; + break; + case 1: + level = TPACPI_FAN_LAST_LEVEL; + break; + case 2: + level = TP_EC_FAN_AUTO; + break; + case 3: + /* reserved for software-controlled auto mode */ + return -ENOSYS; + default: + return -EINVAL; + } + + res = fan_set_level_safe(level); + if (res == -ENXIO) + return -EINVAL; + else if (res < 0) + return res; + + fan_watchdog_reset(); + + return count; +} + +static struct device_attribute dev_attr_fan_pwm1_enable = + __ATTR(pwm1_enable, S_IWUSR | S_IRUGO, + fan_pwm1_enable_show, fan_pwm1_enable_store); + +/* sysfs fan pwm1 ------------------------------------------------------ */ +static ssize_t fan_pwm1_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int res; + u8 status; + + res = fan_get_status_safe(&status); + if (res) + return res; + + if (unlikely(tp_features.fan_ctrl_status_undef)) { + if (status != fan_control_initial_status) { + tp_features.fan_ctrl_status_undef = 0; + } else { + status = TP_EC_FAN_AUTO; + } + } + + if ((status & + (TP_EC_FAN_AUTO | TP_EC_FAN_FULLSPEED)) != 0) + status = fan_control_desired_level; + + if (status > 7) + status = 7; + + return snprintf(buf, PAGE_SIZE, "%u\n", (status * 255) / 7); +} + +static ssize_t fan_pwm1_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned long s; + int rc; + u8 status, newlevel; + + if (parse_strtoul(buf, 255, &s)) + return -EINVAL; + + /* scale down from 0-255 to 0-7 */ + newlevel = (s >> 5) & 0x07; + + if (mutex_lock_interruptible(&fan_mutex)) + return -ERESTARTSYS; + + rc = fan_get_status(&status); + if (!rc && (status & + (TP_EC_FAN_AUTO | TP_EC_FAN_FULLSPEED)) == 0) { + rc = fan_set_level(newlevel); + if (rc == -ENXIO) + rc = -EINVAL; + else if (!rc) { + fan_update_desired_level(newlevel); + fan_watchdog_reset(); + } + } + + mutex_unlock(&fan_mutex); + return (rc)? 
rc : count;
+}
+
+static struct device_attribute dev_attr_fan_pwm1 =
+ __ATTR(pwm1, S_IWUSR | S_IRUGO,
+ fan_pwm1_show, fan_pwm1_store);
+
+/* sysfs fan fan1_input ------------------------------------------------ */
+static ssize_t fan_fan1_input_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int res;
+ unsigned int speed;
+
+ res = fan_get_speed(&speed);
+ if (res < 0)
+ return res;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", speed);
+}
+
+static struct device_attribute dev_attr_fan_fan1_input =
+ __ATTR(fan1_input, S_IRUGO,
+ fan_fan1_input_show, NULL);
+
+/* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */
+static ssize_t fan_fan_watchdog_show(struct device_driver *drv,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", fan_watchdog_maxinterval);
+}
+
+static ssize_t fan_fan_watchdog_store(struct device_driver *drv,
+ const char *buf, size_t count)
+{
+ unsigned long t;
+
+ if (parse_strtoul(buf, 120, &t))
+ return -EINVAL;
+
+ if (!fan_control_allowed)
+ return -EPERM;
+
+ fan_watchdog_maxinterval = t;
+ fan_watchdog_reset();
+
+ return count;
+}
+
+static DRIVER_ATTR(fan_watchdog, S_IWUSR | S_IRUGO,
+ fan_fan_watchdog_show, fan_fan_watchdog_store);
+
+/* --------------------------------------------------------------------- */
+static struct attribute *fan_attributes[] = {
+ &dev_attr_fan_pwm1_enable.attr, &dev_attr_fan_pwm1.attr,
+ &dev_attr_fan_fan1_input.attr,
+ NULL
+};
+
+static const struct attribute_group fan_attr_group = {
+ .attrs = fan_attributes,
+};
+
+static int __init fan_init(struct ibm_init_struct *iibm)
+{
+ int rc;
+
+ vdbg_printk(TPACPI_DBG_INIT, "initializing fan subdriver\n");
+
+ mutex_init(&fan_mutex);
+ fan_status_access_mode = TPACPI_FAN_NONE;
+ fan_control_access_mode = TPACPI_FAN_WR_NONE;
+ fan_control_commands = 0;
+ fan_watchdog_maxinterval = 0;
+ tp_features.fan_ctrl_status_undef = 0;
+ fan_control_desired_level = 7;
+
+ TPACPI_ACPIHANDLE_INIT(fans);
+ TPACPI_ACPIHANDLE_INIT(gfan);
+ TPACPI_ACPIHANDLE_INIT(sfan);
+
+ if (gfan_handle) {
+ /* 570, 600e/x, 770e, 770x */
+ fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN;
+ } else {
+ /* all other ThinkPads: note that even old-style
+ * ThinkPad ECs support the fan control register */
+ if (likely(acpi_ec_read(fan_status_offset,
+ &fan_control_initial_status))) {
+ fan_status_access_mode = TPACPI_FAN_RD_TPEC;
+
+ /* In some ThinkPads, neither the EC nor the ACPI
+ * DSDT initializes the fan status, and it ends up
+ * being set to 0x07 when it *could* be either
+ * 0x07 or 0x80.
+ *
+ * Enable for TP-1Y (T43), TP-78 (R51e),
+ * TP-76 (R52), TP-70 (T43, R52), which are known
+ * to be buggy.
*/
+ if (fan_control_initial_status == 0x07) {
+ switch (thinkpad_id.ec_model) {
+ case 0x5931: /* TP-1Y */
+ case 0x3837: /* TP-78 */
+ case 0x3637: /* TP-76 */
+ case 0x3037: /* TP-70 */
+ printk(TPACPI_NOTICE
+ "fan_init: initial fan status "
+ "is unknown, assuming it is "
+ "in auto mode\n");
+ tp_features.fan_ctrl_status_undef = 1;
+ break;
+ }
+ }
+ } else {
+ printk(TPACPI_ERR
+ "ThinkPad ACPI EC access misbehaving, "
+ "fan status and control unavailable\n");
+ return 1;
+ }
+ }
+
+ if (sfan_handle) {
+ /* 570, 770x-JL */
+ fan_control_access_mode = TPACPI_FAN_WR_ACPI_SFAN;
+ fan_control_commands |=
+ TPACPI_FAN_CMD_LEVEL | TPACPI_FAN_CMD_ENABLE;
+ } else {
+ if (!gfan_handle) {
+ /* gfan without sfan means no fan control */
+ /* all other models implement TP EC 0x2f control */
+
+ if (fans_handle) {
+ /* X31, X40, X41 */
+ fan_control_access_mode =
+ TPACPI_FAN_WR_ACPI_FANS;
+ fan_control_commands |=
+ TPACPI_FAN_CMD_SPEED |
+ TPACPI_FAN_CMD_LEVEL |
+ TPACPI_FAN_CMD_ENABLE;
+ } else {
+ fan_control_access_mode = TPACPI_FAN_WR_TPEC;
+ fan_control_commands |=
+ TPACPI_FAN_CMD_LEVEL |
+ TPACPI_FAN_CMD_ENABLE;
+ }
+ }
+ }
+
+ vdbg_printk(TPACPI_DBG_INIT, "fan is %s, modes %d, %d\n",
+ str_supported(fan_status_access_mode != TPACPI_FAN_NONE ||
+ fan_control_access_mode != TPACPI_FAN_WR_NONE),
+ fan_status_access_mode, fan_control_access_mode);
+
+ /* fan control master switch */
+ if (!fan_control_allowed) {
+ fan_control_access_mode = TPACPI_FAN_WR_NONE;
+ fan_control_commands = 0;
+ dbg_printk(TPACPI_DBG_INIT,
+ "fan control features disabled by parameter\n");
+ }
+
+ /* update fan_control_desired_level */
+ if (fan_status_access_mode != TPACPI_FAN_NONE)
+ fan_get_status_safe(NULL);
+
+ if (fan_status_access_mode != TPACPI_FAN_NONE ||
+ fan_control_access_mode != TPACPI_FAN_WR_NONE) {
+ rc = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj,
+ &fan_attr_group);
+ if (rc < 0)
+ return rc;
+
+ rc = driver_create_file(&tpacpi_hwmon_pdriver.driver,
+ &driver_attr_fan_watchdog);
+ if (rc < 0) {
+ sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj,
+ &fan_attr_group);
+ return rc;
+ }
+ return 0;
+ } else
+ return 1;
+}
+
+static void fan_exit(void)
+{
+ vdbg_printk(TPACPI_DBG_EXIT,
+ "cancelling any pending fan watchdog tasks\n");
+
+ /* FIXME: can we really do this unconditionally? */
+ sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj, &fan_attr_group);
+ driver_remove_file(&tpacpi_hwmon_pdriver.driver,
+ &driver_attr_fan_watchdog);
+
+ cancel_delayed_work(&fan_watchdog_task);
+ flush_workqueue(tpacpi_wq);
+}
+
+static void fan_suspend(pm_message_t state)
+{
+ int rc;
+
+ if (!fan_control_allowed)
+ return;
+
+ /* Store fan status in cache */
+ fan_control_resume_level = 0;
+ rc = fan_get_status_safe(&fan_control_resume_level);
+ if (rc < 0)
+ printk(TPACPI_NOTICE
+ "failed to read fan level for later "
+ "restore during resume: %d\n", rc);
+
+ /* if it is undefined, don't attempt to restore it.
+ * KEEP THIS LAST */
+ if (tp_features.fan_ctrl_status_undef)
+ fan_control_resume_level = 0;
+}
+
+static void fan_resume(void)
+{
+ u8 current_level = 7;
+ bool do_set = false;
+ int rc;
+
+ /* DSDT *always* updates status on resume */
+ tp_features.fan_ctrl_status_undef = 0;
+
+ if (!fan_control_allowed ||
+ !fan_control_resume_level ||
+ (fan_get_status_safe(&current_level) < 0))
+ return;
+
+ switch (fan_control_access_mode) {
+ case TPACPI_FAN_WR_ACPI_SFAN:
+ /* never decrease fan level */
+ do_set = (fan_control_resume_level > current_level);
+ break;
+ case TPACPI_FAN_WR_ACPI_FANS:
+ case TPACPI_FAN_WR_TPEC:
+ /* never decrease fan level, scale is:
+ * TP_EC_FAN_FULLSPEED > 7 >= TP_EC_FAN_AUTO
+ *
+ * We expect the firmware to set either 7 or AUTO, but we
+ * handle FULLSPEED out of paranoia.
+ *
+ * So, we can safely only restore FULLSPEED or 7, anything
+ * else could slow the fan. Restoring AUTO is useless, at
+ * best that's exactly what the DSDT already set (it is the
+ * slowest setting the firmware itself uses).
+ *
+ * Always keep in mind that the DSDT *will* have set the
+ * fans to what the vendor supposes is the best level. We
+ * muck with it only to speed the fan up.
+ */
+ if (fan_control_resume_level != 7 &&
+ !(fan_control_resume_level & TP_EC_FAN_FULLSPEED))
+ return;
+ else
+ do_set = !(current_level & TP_EC_FAN_FULLSPEED) &&
+ (current_level != fan_control_resume_level);
+ break;
+ default:
+ return;
+ }
+ if (do_set) {
+ printk(TPACPI_NOTICE
+ "restoring fan level to 0x%02x\n",
+ fan_control_resume_level);
+ rc = fan_set_level_safe(fan_control_resume_level);
+ if (rc < 0)
+ printk(TPACPI_NOTICE
+ "failed to restore fan level: %d\n", rc);
+ }
+}
+
+static int fan_read(char *p)
+{
+ int len = 0;
+ int rc;
+ u8 status;
+ unsigned int speed = 0;
+
+ switch (fan_status_access_mode) {
+ case TPACPI_FAN_RD_ACPI_GFAN:
+ /* 570, 600e/x, 770e, 770x */
+ rc = fan_get_status_safe(&status);
+ if (rc < 0)
+ return rc;
+
+ len += sprintf(p + len, "status:\t\t%s\n"
+ "level:\t\t%d\n",
+ (status != 0) ? "enabled" : "disabled", status);
+ break;
+
+ case TPACPI_FAN_RD_TPEC:
+ /* all except 570, 600e/x, 770e, 770x */
+ rc = fan_get_status_safe(&status);
+ if (rc < 0)
+ return rc;
+
+ if (unlikely(tp_features.fan_ctrl_status_undef)) {
+ if (status != fan_control_initial_status)
+ tp_features.fan_ctrl_status_undef = 0;
+ else
+ /* Return most likely status. In fact, it
+ * might be the only possible status */
+ status = TP_EC_FAN_AUTO;
+ }
+
+ len += sprintf(p + len, "status:\t\t%s\n",
+ (status != 0) ?
"enabled" : "disabled"); + + rc = fan_get_speed(&speed); + if (rc < 0) + return rc; + + len += sprintf(p + len, "speed:\t\t%d\n", speed); + + if (status & TP_EC_FAN_FULLSPEED) + /* Disengaged mode takes precedence */ + len += sprintf(p + len, "level:\t\tdisengaged\n"); + else if (status & TP_EC_FAN_AUTO) + len += sprintf(p + len, "level:\t\tauto\n"); + else + len += sprintf(p + len, "level:\t\t%d\n", status); + break; + + case TPACPI_FAN_NONE: + default: + len += sprintf(p + len, "status:\t\tnot supported\n"); + } + + if (fan_control_commands & TPACPI_FAN_CMD_LEVEL) { + len += sprintf(p + len, "commands:\tlevel <level>"); + + switch (fan_control_access_mode) { + case TPACPI_FAN_WR_ACPI_SFAN: + len += sprintf(p + len, " (<level> is 0-7)\n"); + break; + + default: + len += sprintf(p + len, " (<level> is 0-7, " + "auto, disengaged, full-speed)\n"); + break; + } + } + + if (fan_control_commands & TPACPI_FAN_CMD_ENABLE) + len += sprintf(p + len, "commands:\tenable, disable\n" + "commands:\twatchdog <timeout> (<timeout> " + "is 0 (off), 1-120 (seconds))\n"); + + if (fan_control_commands & TPACPI_FAN_CMD_SPEED) + len += sprintf(p + len, "commands:\tspeed <speed>" + " (<speed> is 0-65535)\n"); + + return len; +} + +static int fan_write_cmd_level(const char *cmd, int *rc) +{ + int level; + + if (strlencmp(cmd, "level auto") == 0) + level = TP_EC_FAN_AUTO; + else if ((strlencmp(cmd, "level disengaged") == 0) | + (strlencmp(cmd, "level full-speed") == 0)) + level = TP_EC_FAN_FULLSPEED; + else if (sscanf(cmd, "level %d", &level) != 1) + return 0; + + *rc = fan_set_level_safe(level); + if (*rc == -ENXIO) + printk(TPACPI_ERR "level command accepted for unsupported " + "access mode %d", fan_control_access_mode); + + return 1; +} + +static int fan_write_cmd_enable(const char *cmd, int *rc) +{ + if (strlencmp(cmd, "enable") != 0) + return 0; + + *rc = fan_set_enable(); + if (*rc == -ENXIO) + printk(TPACPI_ERR "enable command accepted for unsupported " + "access mode %d", fan_control_access_mode); + + return 1; +} + +static int fan_write_cmd_disable(const char *cmd, int *rc) +{ + if (strlencmp(cmd, "disable") != 0) + return 0; + + *rc = fan_set_disable(); + if (*rc == -ENXIO) + printk(TPACPI_ERR "disable command accepted for unsupported " + "access mode %d", fan_control_access_mode); + + return 1; +} + +static int fan_write_cmd_speed(const char *cmd, int *rc) +{ + int speed; + + /* TODO: + * Support speed <low> <medium> <high> ? 
*/
+
+ if (sscanf(cmd, "speed %d", &speed) != 1)
+ return 0;
+
+ *rc = fan_set_speed(speed);
+ if (*rc == -ENXIO)
+ printk(TPACPI_ERR "speed command accepted for unsupported "
+ "access mode %d\n", fan_control_access_mode);
+
+ return 1;
+}
+
+static int fan_write_cmd_watchdog(const char *cmd, int *rc)
+{
+ int interval;
+
+ if (sscanf(cmd, "watchdog %d", &interval) != 1)
+ return 0;
+
+ if (interval < 0 || interval > 120)
+ *rc = -EINVAL;
+ else
+ fan_watchdog_maxinterval = interval;
+
+ return 1;
+}
+
+static int fan_write(char *buf)
+{
+ char *cmd;
+ int rc = 0;
+
+ while (!rc && (cmd = next_cmd(&buf))) {
+ if (!((fan_control_commands & TPACPI_FAN_CMD_LEVEL) &&
+ fan_write_cmd_level(cmd, &rc)) &&
+ !((fan_control_commands & TPACPI_FAN_CMD_ENABLE) &&
+ (fan_write_cmd_enable(cmd, &rc) ||
+ fan_write_cmd_disable(cmd, &rc) ||
+ fan_write_cmd_watchdog(cmd, &rc))) &&
+ !((fan_control_commands & TPACPI_FAN_CMD_SPEED) &&
+ fan_write_cmd_speed(cmd, &rc))
+ )
+ rc = -EINVAL;
+ else if (!rc)
+ fan_watchdog_reset();
+ }
+
+ return rc;
+}
+
+static struct ibm_struct fan_driver_data = {
+ .name = "fan",
+ .read = fan_read,
+ .write = fan_write,
+ .exit = fan_exit,
+ .suspend = fan_suspend,
+ .resume = fan_resume,
+};
+
+/****************************************************************************
+ ****************************************************************************
+ *
+ * Infrastructure
+ *
+ ****************************************************************************
+ ****************************************************************************/
+
+/* sysfs name ---------------------------------------------------------- */
+static ssize_t thinkpad_acpi_pdev_name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", TPACPI_NAME);
+}
+
+static struct device_attribute dev_attr_thinkpad_acpi_pdev_name =
+ __ATTR(name, S_IRUGO, thinkpad_acpi_pdev_name_show, NULL);
+
+/* --------------------------------------------------------------------- */
+
+/* /proc support */
+static struct proc_dir_entry *proc_dir;
+
+/*
+ * Module and infrastructure probe, init and exit handling
+ */
+
+static int force_load;
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUG
+static const char * __init str_supported(int is_supported)
+{
+ static char text_unsupported[] __initdata = "not supported";
+
+ return (is_supported)?
&text_unsupported[4] : &text_unsupported[0];
+}
+#endif /* CONFIG_THINKPAD_ACPI_DEBUG */
+
+static void ibm_exit(struct ibm_struct *ibm)
+{
+ dbg_printk(TPACPI_DBG_EXIT, "removing %s\n", ibm->name);
+
+ list_del_init(&ibm->all_drivers);
+
+ if (ibm->flags.acpi_notify_installed) {
+ dbg_printk(TPACPI_DBG_EXIT,
+ "%s: acpi_remove_notify_handler\n", ibm->name);
+ BUG_ON(!ibm->acpi);
+ acpi_remove_notify_handler(*ibm->acpi->handle,
+ ibm->acpi->type,
+ dispatch_acpi_notify);
+ ibm->flags.acpi_notify_installed = 0;
+ }
+
+ if (ibm->flags.proc_created) {
+ dbg_printk(TPACPI_DBG_EXIT,
+ "%s: remove_proc_entry\n", ibm->name);
+ remove_proc_entry(ibm->name, proc_dir);
+ ibm->flags.proc_created = 0;
+ }
+
+ if (ibm->flags.acpi_driver_registered) {
+ dbg_printk(TPACPI_DBG_EXIT,
+ "%s: acpi_bus_unregister_driver\n", ibm->name);
+ BUG_ON(!ibm->acpi);
+ acpi_bus_unregister_driver(ibm->acpi->driver);
+ kfree(ibm->acpi->driver);
+ ibm->acpi->driver = NULL;
+ ibm->flags.acpi_driver_registered = 0;
+ }
+
+ if (ibm->flags.init_called && ibm->exit) {
+ ibm->exit();
+ ibm->flags.init_called = 0;
+ }
+
+ dbg_printk(TPACPI_DBG_INIT, "finished removing %s\n", ibm->name);
+}
+
+static int __init ibm_init(struct ibm_init_struct *iibm)
+{
+ int ret;
+ struct ibm_struct *ibm = iibm->data;
+ struct proc_dir_entry *entry;
+
+ BUG_ON(ibm == NULL);
+
+ INIT_LIST_HEAD(&ibm->all_drivers);
+
+ if (ibm->flags.experimental && !experimental)
+ return 0;
+
+ dbg_printk(TPACPI_DBG_INIT,
+ "probing for %s\n", ibm->name);
+
+ if (iibm->init) {
+ ret = iibm->init(iibm);
+ if (ret > 0)
+ return 0; /* probe failed */
+ if (ret)
+ return ret;
+
+ ibm->flags.init_called = 1;
+ }
+
+ if (ibm->acpi) {
+ if (ibm->acpi->hid) {
+ ret = register_tpacpi_subdriver(ibm);
+ if (ret)
+ goto err_out;
+ }
+
+ if (ibm->acpi->notify) {
+ ret = setup_acpi_notify(ibm);
+ if (ret == -ENODEV) {
+ printk(TPACPI_NOTICE "disabling subdriver %s\n",
+ ibm->name);
+ ret = 0;
+ goto err_out;
+ }
+ if (ret < 0)
+ goto err_out;
+ }
+ }
+
+ dbg_printk(TPACPI_DBG_INIT,
+ "%s installed\n", ibm->name);
+
+ if (ibm->read) {
+ entry = create_proc_entry(ibm->name,
+ S_IFREG | S_IRUGO | S_IWUSR,
+ proc_dir);
+ if (!entry) {
+ printk(TPACPI_ERR "unable to create proc entry %s\n",
+ ibm->name);
+ ret = -ENODEV;
+ goto err_out;
+ }
+ entry->owner = THIS_MODULE;
+ entry->data = ibm;
+ entry->read_proc = &dispatch_procfs_read;
+ if (ibm->write)
+ entry->write_proc = &dispatch_procfs_write;
+ ibm->flags.proc_created = 1;
+ }
+
+ list_add_tail(&ibm->all_drivers, &tpacpi_all_drivers);
+
+ return 0;
+
+err_out:
+ dbg_printk(TPACPI_DBG_INIT,
+ "%s: at error exit path with result %d\n",
+ ibm->name, ret);
+
+ ibm_exit(ibm);
+ return (ret < 0)? ret : 0;
+}
+
+/* Probing */
+
+/* returns 0 - probe ok, or < 0 - probe error.
+ * Probe ok doesn't mean thinkpad found.
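+ * (The caller is expected to check thinkpad_id.vendor / model_str
+ * afterwards, as probe_for_thinkpad() does.)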
+ * On error, kfree() cleanup on tp->* is not performed, caller must do it */ +static int __must_check __init get_thinkpad_model_data( + struct thinkpad_id_data *tp) +{ + const struct dmi_device *dev = NULL; + char ec_fw_string[18]; + char const *s; + + if (!tp) + return -EINVAL; + + memset(tp, 0, sizeof(*tp)); + + if (dmi_name_in_vendors("IBM")) + tp->vendor = PCI_VENDOR_ID_IBM; + else if (dmi_name_in_vendors("LENOVO")) + tp->vendor = PCI_VENDOR_ID_LENOVO; + else + return 0; + + s = dmi_get_system_info(DMI_BIOS_VERSION); + tp->bios_version_str = kstrdup(s, GFP_KERNEL); + if (s && !tp->bios_version_str) + return -ENOMEM; + if (!tp->bios_version_str) + return 0; + tp->bios_model = tp->bios_version_str[0] + | (tp->bios_version_str[1] << 8); + + /* + * ThinkPad T23 or newer, A31 or newer, R50e or newer, + * X32 or newer, all Z series; Some models must have an + * up-to-date BIOS or they will not be detected. + * + * See http://thinkwiki.org/wiki/List_of_DMI_IDs + */ + while ((dev = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, NULL, dev))) { + if (sscanf(dev->name, + "IBM ThinkPad Embedded Controller -[%17c", + ec_fw_string) == 1) { + ec_fw_string[sizeof(ec_fw_string) - 1] = 0; + ec_fw_string[strcspn(ec_fw_string, " ]")] = 0; + + tp->ec_version_str = kstrdup(ec_fw_string, GFP_KERNEL); + if (!tp->ec_version_str) + return -ENOMEM; + tp->ec_model = ec_fw_string[0] + | (ec_fw_string[1] << 8); + break; + } + } + + s = dmi_get_system_info(DMI_PRODUCT_VERSION); + if (s && !strnicmp(s, "ThinkPad", 8)) { + tp->model_str = kstrdup(s, GFP_KERNEL); + if (!tp->model_str) + return -ENOMEM; + } + + s = dmi_get_system_info(DMI_PRODUCT_NAME); + tp->nummodel_str = kstrdup(s, GFP_KERNEL); + if (s && !tp->nummodel_str) + return -ENOMEM; + + return 0; +} + +static int __init probe_for_thinkpad(void) +{ + int is_thinkpad; + + if (acpi_disabled) + return -ENODEV; + + /* + * Non-ancient models have better DMI tagging, but very old models + * don't. 
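+ * (On the models that do, DMI_PRODUCT_VERSION starts with "ThinkPad",
+ * which is what get_thinkpad_model_data() checks for.)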
+ */ + is_thinkpad = (thinkpad_id.model_str != NULL); + + /* ec is required because many other handles are relative to it */ + TPACPI_ACPIHANDLE_INIT(ec); + if (!ec_handle) { + if (is_thinkpad) + printk(TPACPI_ERR + "Not yet supported ThinkPad detected!\n"); + return -ENODEV; + } + + /* + * Risks a regression on very old machines, but reduces potential + * false positives a damn great deal + */ + if (!is_thinkpad) + is_thinkpad = (thinkpad_id.vendor == PCI_VENDOR_ID_IBM); + + if (!is_thinkpad && !force_load) + return -ENODEV; + + return 0; +} + + +/* Module init, exit, parameters */ + +static struct ibm_init_struct ibms_init[] __initdata = { + { + .init = thinkpad_acpi_driver_init, + .data = &thinkpad_acpi_driver_data, + }, + { + .init = hotkey_init, + .data = &hotkey_driver_data, + }, + { + .init = bluetooth_init, + .data = &bluetooth_driver_data, + }, + { + .init = wan_init, + .data = &wan_driver_data, + }, +#ifdef CONFIG_THINKPAD_ACPI_VIDEO + { + .init = video_init, + .data = &video_driver_data, + }, +#endif + { + .init = light_init, + .data = &light_driver_data, + }, +#ifdef CONFIG_THINKPAD_ACPI_DOCK + { + .init = dock_init, + .data = &dock_driver_data[0], + }, + { + .init = dock_init2, + .data = &dock_driver_data[1], + }, +#endif +#ifdef CONFIG_THINKPAD_ACPI_BAY + { + .init = bay_init, + .data = &bay_driver_data, + }, +#endif + { + .init = cmos_init, + .data = &cmos_driver_data, + }, + { + .init = led_init, + .data = &led_driver_data, + }, + { + .init = beep_init, + .data = &beep_driver_data, + }, + { + .init = thermal_init, + .data = &thermal_driver_data, + }, + { + .data = &ecdump_driver_data, + }, + { + .init = brightness_init, + .data = &brightness_driver_data, + }, + { + .data = &volume_driver_data, + }, + { + .init = fan_init, + .data = &fan_driver_data, + }, +}; + +static int __init set_ibm_param(const char *val, struct kernel_param *kp) +{ + unsigned int i; + struct ibm_struct *ibm; + + if (!kp || !kp->name || !val) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(ibms_init); i++) { + ibm = ibms_init[i].data; + WARN_ON(ibm == NULL); + + if (!ibm || !ibm->name) + continue; + + if (strcmp(ibm->name, kp->name) == 0 && ibm->write) { + if (strlen(val) > sizeof(ibms_init[i].param) - 2) + return -ENOSPC; + strcpy(ibms_init[i].param, val); + strcat(ibms_init[i].param, ","); + return 0; + } + } + + return -EINVAL; +} + +module_param(experimental, int, 0); +MODULE_PARM_DESC(experimental, + "Enables experimental features when non-zero"); + +module_param_named(debug, dbg_level, uint, 0); +MODULE_PARM_DESC(debug, "Sets debug level bit-mask"); + +module_param(force_load, bool, 0); +MODULE_PARM_DESC(force_load, + "Attempts to load the driver even on a " + "mis-identified ThinkPad when true"); + +module_param_named(fan_control, fan_control_allowed, bool, 0); +MODULE_PARM_DESC(fan_control, + "Enables setting fan parameters features when true"); + +module_param_named(brightness_mode, brightness_mode, int, 0); +MODULE_PARM_DESC(brightness_mode, + "Selects brightness control strategy: " + "0=auto, 1=EC, 2=CMOS, 3=both"); + +module_param(brightness_enable, uint, 0); +MODULE_PARM_DESC(brightness_enable, + "Enables backlight control when 1, disables when 0"); + +module_param(hotkey_report_mode, uint, 0); +MODULE_PARM_DESC(hotkey_report_mode, + "used for backwards compatibility with userspace, " + "see documentation"); + +#define TPACPI_PARAM(feature) \ + module_param_call(feature, set_ibm_param, NULL, NULL, 0); \ + MODULE_PARM_DESC(feature, "Simulates thinkpad-acpi procfs command " \ + "at module 
load, see documentation") + +TPACPI_PARAM(hotkey); +TPACPI_PARAM(bluetooth); +TPACPI_PARAM(video); +TPACPI_PARAM(light); +#ifdef CONFIG_THINKPAD_ACPI_DOCK +TPACPI_PARAM(dock); +#endif +#ifdef CONFIG_THINKPAD_ACPI_BAY +TPACPI_PARAM(bay); +#endif /* CONFIG_THINKPAD_ACPI_BAY */ +TPACPI_PARAM(cmos); +TPACPI_PARAM(led); +TPACPI_PARAM(beep); +TPACPI_PARAM(ecdump); +TPACPI_PARAM(brightness); +TPACPI_PARAM(volume); +TPACPI_PARAM(fan); + +static void thinkpad_acpi_module_exit(void) +{ + struct ibm_struct *ibm, *itmp; + + tpacpi_lifecycle = TPACPI_LIFE_EXITING; + + list_for_each_entry_safe_reverse(ibm, itmp, + &tpacpi_all_drivers, + all_drivers) { + ibm_exit(ibm); + } + + dbg_printk(TPACPI_DBG_INIT, "finished subdriver exit path...\n"); + + if (tpacpi_inputdev) { + if (tp_features.input_device_registered) + input_unregister_device(tpacpi_inputdev); + else + input_free_device(tpacpi_inputdev); + } + + if (tpacpi_hwmon) + hwmon_device_unregister(tpacpi_hwmon); + + if (tp_features.sensors_pdev_attrs_registered) + device_remove_file(&tpacpi_sensors_pdev->dev, + &dev_attr_thinkpad_acpi_pdev_name); + if (tpacpi_sensors_pdev) + platform_device_unregister(tpacpi_sensors_pdev); + if (tpacpi_pdev) + platform_device_unregister(tpacpi_pdev); + + if (tp_features.sensors_pdrv_attrs_registered) + tpacpi_remove_driver_attributes(&tpacpi_hwmon_pdriver.driver); + if (tp_features.platform_drv_attrs_registered) + tpacpi_remove_driver_attributes(&tpacpi_pdriver.driver); + + if (tp_features.sensors_pdrv_registered) + platform_driver_unregister(&tpacpi_hwmon_pdriver); + + if (tp_features.platform_drv_registered) + platform_driver_unregister(&tpacpi_pdriver); + + if (proc_dir) + remove_proc_entry(TPACPI_PROC_DIR, acpi_root_dir); + + if (tpacpi_wq) + destroy_workqueue(tpacpi_wq); + + kfree(thinkpad_id.bios_version_str); + kfree(thinkpad_id.ec_version_str); + kfree(thinkpad_id.model_str); +} + + +static int __init thinkpad_acpi_module_init(void) +{ + int ret, i; + + tpacpi_lifecycle = TPACPI_LIFE_INIT; + + /* Parameter checking */ + if (hotkey_report_mode > 2) + return -EINVAL; + + /* Driver-level probe */ + + ret = get_thinkpad_model_data(&thinkpad_id); + if (ret) { + printk(TPACPI_ERR + "unable to get DMI data: %d\n", ret); + thinkpad_acpi_module_exit(); + return ret; + } + ret = probe_for_thinkpad(); + if (ret) { + thinkpad_acpi_module_exit(); + return ret; + } + + /* Driver initialization */ + + TPACPI_ACPIHANDLE_INIT(ecrd); + TPACPI_ACPIHANDLE_INIT(ecwr); + + tpacpi_wq = create_singlethread_workqueue(TPACPI_WORKQUEUE_NAME); + if (!tpacpi_wq) { + thinkpad_acpi_module_exit(); + return -ENOMEM; + } + + proc_dir = proc_mkdir(TPACPI_PROC_DIR, acpi_root_dir); + if (!proc_dir) { + printk(TPACPI_ERR + "unable to create proc dir " TPACPI_PROC_DIR); + thinkpad_acpi_module_exit(); + return -ENODEV; + } + proc_dir->owner = THIS_MODULE; + + ret = platform_driver_register(&tpacpi_pdriver); + if (ret) { + printk(TPACPI_ERR + "unable to register main platform driver\n"); + thinkpad_acpi_module_exit(); + return ret; + } + tp_features.platform_drv_registered = 1; + + ret = platform_driver_register(&tpacpi_hwmon_pdriver); + if (ret) { + printk(TPACPI_ERR + "unable to register hwmon platform driver\n"); + thinkpad_acpi_module_exit(); + return ret; + } + tp_features.sensors_pdrv_registered = 1; + + ret = tpacpi_create_driver_attributes(&tpacpi_pdriver.driver); + if (!ret) { + tp_features.platform_drv_attrs_registered = 1; + ret = tpacpi_create_driver_attributes( + &tpacpi_hwmon_pdriver.driver); + } + if (ret) { + printk(TPACPI_ERR + "unable 
to create sysfs driver attributes\n"); + thinkpad_acpi_module_exit(); + return ret; + } + tp_features.sensors_pdrv_attrs_registered = 1; + + + /* Device initialization */ + tpacpi_pdev = platform_device_register_simple(TPACPI_DRVR_NAME, -1, + NULL, 0); + if (IS_ERR(tpacpi_pdev)) { + ret = PTR_ERR(tpacpi_pdev); + tpacpi_pdev = NULL; + printk(TPACPI_ERR "unable to register platform device\n"); + thinkpad_acpi_module_exit(); + return ret; + } + tpacpi_sensors_pdev = platform_device_register_simple( + TPACPI_HWMON_DRVR_NAME, + -1, NULL, 0); + if (IS_ERR(tpacpi_sensors_pdev)) { + ret = PTR_ERR(tpacpi_sensors_pdev); + tpacpi_sensors_pdev = NULL; + printk(TPACPI_ERR + "unable to register hwmon platform device\n"); + thinkpad_acpi_module_exit(); + return ret; + } + ret = device_create_file(&tpacpi_sensors_pdev->dev, + &dev_attr_thinkpad_acpi_pdev_name); + if (ret) { + printk(TPACPI_ERR + "unable to create sysfs hwmon device attributes\n"); + thinkpad_acpi_module_exit(); + return ret; + } + tp_features.sensors_pdev_attrs_registered = 1; + tpacpi_hwmon = hwmon_device_register(&tpacpi_sensors_pdev->dev); + if (IS_ERR(tpacpi_hwmon)) { + ret = PTR_ERR(tpacpi_hwmon); + tpacpi_hwmon = NULL; + printk(TPACPI_ERR "unable to register hwmon device\n"); + thinkpad_acpi_module_exit(); + return ret; + } + mutex_init(&tpacpi_inputdev_send_mutex); + tpacpi_inputdev = input_allocate_device(); + if (!tpacpi_inputdev) { + printk(TPACPI_ERR "unable to allocate input device\n"); + thinkpad_acpi_module_exit(); + return -ENOMEM; + } else { + /* Prepare input device, but don't register */ + tpacpi_inputdev->name = "ThinkPad Extra Buttons"; + tpacpi_inputdev->phys = TPACPI_DRVR_NAME "/input0"; + tpacpi_inputdev->id.bustype = BUS_HOST; + tpacpi_inputdev->id.vendor = (thinkpad_id.vendor) ? + thinkpad_id.vendor : + PCI_VENDOR_ID_IBM; + tpacpi_inputdev->id.product = TPACPI_HKEY_INPUT_PRODUCT; + tpacpi_inputdev->id.version = TPACPI_HKEY_INPUT_VERSION; + } + for (i = 0; i < ARRAY_SIZE(ibms_init); i++) { + ret = ibm_init(&ibms_init[i]); + if (ret >= 0 && *ibms_init[i].param) + ret = ibms_init[i].data->write(ibms_init[i].param); + if (ret < 0) { + thinkpad_acpi_module_exit(); + return ret; + } + } + ret = input_register_device(tpacpi_inputdev); + if (ret < 0) { + printk(TPACPI_ERR "unable to register input device\n"); + thinkpad_acpi_module_exit(); + return ret; + } else { + tp_features.input_device_registered = 1; + } + + tpacpi_lifecycle = TPACPI_LIFE_RUNNING; + return 0; +} + +/* Please remove this in year 2009 */ +MODULE_ALIAS("ibm_acpi"); + +MODULE_ALIAS(TPACPI_DRVR_SHORTNAME); + +/* + * DMI matching for module autoloading + * + * See http://thinkwiki.org/wiki/List_of_DMI_IDs + * See http://thinkwiki.org/wiki/BIOS_Upgrade_Downloads + * + * Only models listed in thinkwiki will be supported, so add yours + * if it is not there yet. + */ +#define IBM_BIOS_MODULE_ALIAS(__type) \ + MODULE_ALIAS("dmi:bvnIBM:bvr" __type "ET??WW*") + +/* Non-ancient thinkpads */ +MODULE_ALIAS("dmi:bvnIBM:*:svnIBM:*:pvrThinkPad*:rvnIBM:*"); +MODULE_ALIAS("dmi:bvnLENOVO:*:svnLENOVO:*:pvrThinkPad*:rvnLENOVO:*"); + +/* Ancient thinkpad BIOSes have to be identified by + * BIOS type or model number, and there are far less + * BIOS types than model numbers... 
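+ * (A worked example with a hypothetical version string: a BIOS
+ * reporting "IBET54WW" has type "IB", so it would be caught by the
+ * "I[BDHIMNOTWVYZ]" alias below.)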
*/ +IBM_BIOS_MODULE_ALIAS("I[BDHIMNOTWVYZ]"); +IBM_BIOS_MODULE_ALIAS("1[0368A-GIKM-PST]"); +IBM_BIOS_MODULE_ALIAS("K[UX-Z]"); + +MODULE_AUTHOR("Borislav Deianov, Henrique de Moraes Holschuh"); +MODULE_DESCRIPTION(TPACPI_DESC); +MODULE_VERSION(TPACPI_VERSION); +MODULE_LICENSE("GPL"); + +module_init(thinkpad_acpi_module_init); +module_exit(thinkpad_acpi_module_exit); diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c new file mode 100644 index 0000000..67503ea --- /dev/null +++ b/drivers/misc/tifm_7xx1.c @@ -0,0 +1,454 @@ +/* + * tifm_7xx1.c - TI FlashMedia driver + * + * Copyright (C) 2006 Alex Dubov <oakad@yahoo.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <linux/tifm.h> +#include <linux/dma-mapping.h> + +#define DRIVER_NAME "tifm_7xx1" +#define DRIVER_VERSION "0.8" + +#define TIFM_IRQ_ENABLE 0x80000000 +#define TIFM_IRQ_SOCKMASK(x) (x) +#define TIFM_IRQ_CARDMASK(x) ((x) << 8) +#define TIFM_IRQ_FIFOMASK(x) ((x) << 16) +#define TIFM_IRQ_SETALL 0xffffffff + +static void tifm_7xx1_dummy_eject(struct tifm_adapter *fm, + struct tifm_dev *sock) +{ +} + +static void tifm_7xx1_eject(struct tifm_adapter *fm, struct tifm_dev *sock) +{ + unsigned long flags; + + spin_lock_irqsave(&fm->lock, flags); + fm->socket_change_set |= 1 << sock->socket_id; + tifm_queue_work(&fm->media_switcher); + spin_unlock_irqrestore(&fm->lock, flags); +} + +static irqreturn_t tifm_7xx1_isr(int irq, void *dev_id) +{ + struct tifm_adapter *fm = dev_id; + struct tifm_dev *sock; + unsigned int irq_status, cnt; + + spin_lock(&fm->lock); + irq_status = readl(fm->addr + FM_INTERRUPT_STATUS); + if (irq_status == 0 || irq_status == (~0)) { + spin_unlock(&fm->lock); + return IRQ_NONE; + } + + if (irq_status & TIFM_IRQ_ENABLE) { + writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); + + for (cnt = 0; cnt < fm->num_sockets; cnt++) { + sock = fm->sockets[cnt]; + if (sock) { + if ((irq_status >> cnt) & TIFM_IRQ_FIFOMASK(1)) + sock->data_event(sock); + if ((irq_status >> cnt) & TIFM_IRQ_CARDMASK(1)) + sock->card_event(sock); + } + } + + fm->socket_change_set |= irq_status + & ((1 << fm->num_sockets) - 1); + } + writel(irq_status, fm->addr + FM_INTERRUPT_STATUS); + + if (fm->finish_me) + complete_all(fm->finish_me); + else if (!fm->socket_change_set) + writel(TIFM_IRQ_ENABLE, fm->addr + FM_SET_INTERRUPT_ENABLE); + else + tifm_queue_work(&fm->media_switcher); + + spin_unlock(&fm->lock); + return IRQ_HANDLED; +} + +static unsigned char tifm_7xx1_toggle_sock_power(char __iomem *sock_addr) +{ + unsigned int s_state; + int cnt; + + writel(0x0e00, sock_addr + SOCK_CONTROL); + + for (cnt = 16; cnt <= 256; cnt <<= 1) { + if (!(TIFM_SOCK_STATE_POWERED + & readl(sock_addr + SOCK_PRESENT_STATE))) + break; + + msleep(cnt); + } + + s_state = readl(sock_addr + SOCK_PRESENT_STATE); + if (!(TIFM_SOCK_STATE_OCCUPIED & s_state)) + return 0; + + writel(readl(sock_addr + SOCK_CONTROL) | TIFM_CTRL_LED, + sock_addr + SOCK_CONTROL); + + /* xd needs some extra time before power on */ + if (((readl(sock_addr + SOCK_PRESENT_STATE) >> 4) & 7) + == TIFM_TYPE_XD) + msleep(40); + + writel((s_state & TIFM_CTRL_POWER_MASK) | 0x0c00, + sock_addr + SOCK_CONTROL); + /* wait for power to stabilize */ + msleep(20); + for (cnt = 16; cnt <= 256; cnt <<= 1) { + if ((TIFM_SOCK_STATE_POWERED + & readl(sock_addr + SOCK_PRESENT_STATE))) + break; + + msleep(cnt); + } + + 
writel(readl(sock_addr + SOCK_CONTROL) & (~TIFM_CTRL_LED),
+ sock_addr + SOCK_CONTROL);
+
+ return (readl(sock_addr + SOCK_PRESENT_STATE) >> 4) & 7;
+}
+
+static inline void tifm_7xx1_sock_power_off(char __iomem *sock_addr)
+{
+ writel((~TIFM_CTRL_POWER_MASK) & readl(sock_addr + SOCK_CONTROL),
+ sock_addr + SOCK_CONTROL);
+}
+
+static inline char __iomem *
+tifm_7xx1_sock_addr(char __iomem *base_addr, unsigned int sock_num)
+{
+ return base_addr + ((sock_num + 1) << 10);
+}
+
+static void tifm_7xx1_switch_media(struct work_struct *work)
+{
+ struct tifm_adapter *fm = container_of(work, struct tifm_adapter,
+ media_switcher);
+ struct tifm_dev *sock;
+ char __iomem *sock_addr;
+ unsigned long flags;
+ unsigned char media_id;
+ unsigned int socket_change_set, cnt;
+
+ spin_lock_irqsave(&fm->lock, flags);
+ socket_change_set = fm->socket_change_set;
+ fm->socket_change_set = 0;
+
+ dev_dbg(fm->dev.parent, "checking media set %x\n",
+ socket_change_set);
+
+ if (!socket_change_set) {
+ spin_unlock_irqrestore(&fm->lock, flags);
+ return;
+ }
+
+ for (cnt = 0; cnt < fm->num_sockets; cnt++) {
+ if (!(socket_change_set & (1 << cnt)))
+ continue;
+ sock = fm->sockets[cnt];
+ if (sock) {
+ printk(KERN_INFO
+ "%s : demand removing card from socket %u:%u\n",
+ fm->dev.bus_id, fm->id, cnt);
+ fm->sockets[cnt] = NULL;
+ sock_addr = sock->addr;
+ spin_unlock_irqrestore(&fm->lock, flags);
+ device_unregister(&sock->dev);
+ spin_lock_irqsave(&fm->lock, flags);
+ tifm_7xx1_sock_power_off(sock_addr);
+ writel(0x0e00, sock_addr + SOCK_CONTROL);
+ }
+
+ spin_unlock_irqrestore(&fm->lock, flags);
+
+ media_id = tifm_7xx1_toggle_sock_power(
+ tifm_7xx1_sock_addr(fm->addr, cnt));
+
+ /* tifm_alloc_device will check if media_id is valid */
+ sock = tifm_alloc_device(fm, cnt, media_id);
+ if (sock) {
+ sock->addr = tifm_7xx1_sock_addr(fm->addr, cnt);
+
+ if (!device_register(&sock->dev)) {
+ spin_lock_irqsave(&fm->lock, flags);
+ if (!fm->sockets[cnt]) {
+ fm->sockets[cnt] = sock;
+ sock = NULL;
+ }
+ spin_unlock_irqrestore(&fm->lock, flags);
+ }
+ if (sock)
+ tifm_free_device(&sock->dev);
+ }
+ spin_lock_irqsave(&fm->lock, flags);
+ }
+
+ writel(TIFM_IRQ_FIFOMASK(socket_change_set)
+ | TIFM_IRQ_CARDMASK(socket_change_set),
+ fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
+
+ writel(TIFM_IRQ_FIFOMASK(socket_change_set)
+ | TIFM_IRQ_CARDMASK(socket_change_set),
+ fm->addr + FM_SET_INTERRUPT_ENABLE);
+
+ writel(TIFM_IRQ_ENABLE, fm->addr + FM_SET_INTERRUPT_ENABLE);
+ spin_unlock_irqrestore(&fm->lock, flags);
+}
+
+#ifdef CONFIG_PM
+
+static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state)
+{
+ struct tifm_adapter *fm = pci_get_drvdata(dev);
+ int cnt;
+
+ dev_dbg(&dev->dev, "suspending host\n");
+
+ for (cnt = 0; cnt < fm->num_sockets; cnt++) {
+ if (fm->sockets[cnt])
+ tifm_7xx1_sock_power_off(fm->sockets[cnt]->addr);
+ }
+
+ pci_save_state(dev);
+ pci_enable_wake(dev, pci_choose_state(dev, state), 0);
+ pci_disable_device(dev);
+ pci_set_power_state(dev, pci_choose_state(dev, state));
+ return 0;
+}
+
+static int tifm_7xx1_resume(struct pci_dev *dev)
+{
+ struct tifm_adapter *fm = pci_get_drvdata(dev);
+ int rc;
+ unsigned int good_sockets = 0, bad_sockets = 0;
+ unsigned long flags;
+ unsigned char new_ids[fm->num_sockets];
+ DECLARE_COMPLETION_ONSTACK(finish_resume);
+
+ pci_set_power_state(dev, PCI_D0);
+ pci_restore_state(dev);
+ rc = pci_enable_device(dev);
+ if (rc)
+ return rc;
+ pci_set_master(dev);
+
+ dev_dbg(&dev->dev, "resuming host\n");
+
+ for (rc = 0; rc < fm->num_sockets; rc++)
+ new_ids[rc] =
tifm_7xx1_toggle_sock_power( + tifm_7xx1_sock_addr(fm->addr, rc)); + spin_lock_irqsave(&fm->lock, flags); + for (rc = 0; rc < fm->num_sockets; rc++) { + if (fm->sockets[rc]) { + if (fm->sockets[rc]->type == new_ids[rc]) + good_sockets |= 1 << rc; + else + bad_sockets |= 1 << rc; + } + } + + writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1), + fm->addr + FM_SET_INTERRUPT_ENABLE); + dev_dbg(&dev->dev, "change sets on resume: good %x, bad %x\n", + good_sockets, bad_sockets); + + fm->socket_change_set = 0; + if (good_sockets) { + fm->finish_me = &finish_resume; + spin_unlock_irqrestore(&fm->lock, flags); + rc = wait_for_completion_timeout(&finish_resume, HZ); + dev_dbg(&dev->dev, "wait returned %d\n", rc); + writel(TIFM_IRQ_FIFOMASK(good_sockets) + | TIFM_IRQ_CARDMASK(good_sockets), + fm->addr + FM_CLEAR_INTERRUPT_ENABLE); + writel(TIFM_IRQ_FIFOMASK(good_sockets) + | TIFM_IRQ_CARDMASK(good_sockets), + fm->addr + FM_SET_INTERRUPT_ENABLE); + spin_lock_irqsave(&fm->lock, flags); + fm->finish_me = NULL; + fm->socket_change_set ^= good_sockets & fm->socket_change_set; + } + + fm->socket_change_set |= bad_sockets; + if (fm->socket_change_set) + tifm_queue_work(&fm->media_switcher); + + spin_unlock_irqrestore(&fm->lock, flags); + writel(TIFM_IRQ_ENABLE, + fm->addr + FM_SET_INTERRUPT_ENABLE); + + return 0; +} + +#else + +#define tifm_7xx1_suspend NULL +#define tifm_7xx1_resume NULL + +#endif /* CONFIG_PM */ + +static int tifm_7xx1_dummy_has_ms_pif(struct tifm_adapter *fm, + struct tifm_dev *sock) +{ + return 0; +} + +static int tifm_7xx1_has_ms_pif(struct tifm_adapter *fm, struct tifm_dev *sock) +{ + if (((fm->num_sockets == 4) && (sock->socket_id == 2)) + || ((fm->num_sockets == 2) && (sock->socket_id == 0))) + return 1; + + return 0; +} + +static int tifm_7xx1_probe(struct pci_dev *dev, + const struct pci_device_id *dev_id) +{ + struct tifm_adapter *fm; + int pci_dev_busy = 0; + int rc; + + rc = pci_set_dma_mask(dev, DMA_32BIT_MASK); + if (rc) + return rc; + + rc = pci_enable_device(dev); + if (rc) + return rc; + + pci_set_master(dev); + + rc = pci_request_regions(dev, DRIVER_NAME); + if (rc) { + pci_dev_busy = 1; + goto err_out; + } + + pci_intx(dev, 1); + + fm = tifm_alloc_adapter(dev->device == PCI_DEVICE_ID_TI_XX21_XX11_FM + ? 
4 : 2, &dev->dev); + if (!fm) { + rc = -ENOMEM; + goto err_out_int; + } + + INIT_WORK(&fm->media_switcher, tifm_7xx1_switch_media); + fm->eject = tifm_7xx1_eject; + fm->has_ms_pif = tifm_7xx1_has_ms_pif; + pci_set_drvdata(dev, fm); + + fm->addr = ioremap(pci_resource_start(dev, 0), + pci_resource_len(dev, 0)); + if (!fm->addr) + goto err_out_free; + + rc = request_irq(dev->irq, tifm_7xx1_isr, IRQF_SHARED, DRIVER_NAME, fm); + if (rc) + goto err_out_unmap; + + rc = tifm_add_adapter(fm); + if (rc) + goto err_out_irq; + + writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1), + fm->addr + FM_CLEAR_INTERRUPT_ENABLE); + writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1), + fm->addr + FM_SET_INTERRUPT_ENABLE); + return 0; + +err_out_irq: + free_irq(dev->irq, fm); +err_out_unmap: + iounmap(fm->addr); +err_out_free: + pci_set_drvdata(dev, NULL); + tifm_free_adapter(fm); +err_out_int: + pci_intx(dev, 0); + pci_release_regions(dev); +err_out: + if (!pci_dev_busy) + pci_disable_device(dev); + return rc; +} + +static void tifm_7xx1_remove(struct pci_dev *dev) +{ + struct tifm_adapter *fm = pci_get_drvdata(dev); + int cnt; + + fm->eject = tifm_7xx1_dummy_eject; + fm->has_ms_pif = tifm_7xx1_dummy_has_ms_pif; + writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); + mmiowb(); + free_irq(dev->irq, fm); + + tifm_remove_adapter(fm); + + for (cnt = 0; cnt < fm->num_sockets; cnt++) + tifm_7xx1_sock_power_off(tifm_7xx1_sock_addr(fm->addr, cnt)); + + pci_set_drvdata(dev, NULL); + + iounmap(fm->addr); + pci_intx(dev, 0); + pci_release_regions(dev); + + pci_disable_device(dev); + tifm_free_adapter(fm); +} + +static struct pci_device_id tifm_7xx1_pci_tbl [] = { + { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX21_XX11_FM, PCI_ANY_ID, + PCI_ANY_ID, 0, 0, 0 }, /* xx21 - the one I have */ + { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX12_FM, PCI_ANY_ID, + PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX20_FM, PCI_ANY_ID, + PCI_ANY_ID, 0, 0, 0 }, + { } +}; + +static struct pci_driver tifm_7xx1_driver = { + .name = DRIVER_NAME, + .id_table = tifm_7xx1_pci_tbl, + .probe = tifm_7xx1_probe, + .remove = tifm_7xx1_remove, + .suspend = tifm_7xx1_suspend, + .resume = tifm_7xx1_resume, +}; + +static int __init tifm_7xx1_init(void) +{ + return pci_register_driver(&tifm_7xx1_driver); +} + +static void __exit tifm_7xx1_exit(void) +{ + pci_unregister_driver(&tifm_7xx1_driver); +} + +MODULE_AUTHOR("Alex Dubov"); +MODULE_DESCRIPTION("TI FlashMedia host driver"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, tifm_7xx1_pci_tbl); +MODULE_VERSION(DRIVER_VERSION); + +module_init(tifm_7xx1_init); +module_exit(tifm_7xx1_exit); diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c new file mode 100644 index 0000000..82dc72a --- /dev/null +++ b/drivers/misc/tifm_core.c @@ -0,0 +1,367 @@ +/* + * tifm_core.c - TI FlashMedia driver + * + * Copyright (C) 2006 Alex Dubov <oakad@yahoo.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include <linux/tifm.h> +#include <linux/init.h> +#include <linux/idr.h> + +#define DRIVER_NAME "tifm_core" +#define DRIVER_VERSION "0.8" + +static struct workqueue_struct *workqueue; +static DEFINE_IDR(tifm_adapter_idr); +static DEFINE_SPINLOCK(tifm_adapter_lock); + +static const char *tifm_media_type_name(unsigned char type, unsigned char nt) +{ + const char *card_type_name[3][3] = { + { "SmartMedia/xD", "MemoryStick", "MMC/SD" }, + { "XD", "MS", "SD"}, + { "xd", "ms", "sd"} + }; + + if (nt > 2 || type < 1 || type > 3) + return NULL; + return card_type_name[nt][type - 1]; +} + +static int tifm_dev_match(struct tifm_dev *sock, struct tifm_device_id *id) +{ + if (sock->type == id->type) + return 1; + return 0; +} + +static int tifm_bus_match(struct device *dev, struct device_driver *drv) +{ + struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); + struct tifm_driver *fm_drv = container_of(drv, struct tifm_driver, + driver); + struct tifm_device_id *ids = fm_drv->id_table; + + if (ids) { + while (ids->type) { + if (tifm_dev_match(sock, ids)) + return 1; + ++ids; + } + } + return 0; +} + +static int tifm_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); + + if (add_uevent_var(env, "TIFM_CARD_TYPE=%s", tifm_media_type_name(sock->type, 1))) + return -ENOMEM; + + return 0; +} + +static int tifm_device_probe(struct device *dev) +{ + struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); + struct tifm_driver *drv = container_of(dev->driver, struct tifm_driver, + driver); + int rc = -ENODEV; + + get_device(dev); + if (dev->driver && drv->probe) { + rc = drv->probe(sock); + if (!rc) + return 0; + } + put_device(dev); + return rc; +} + +static void tifm_dummy_event(struct tifm_dev *sock) +{ + return; +} + +static int tifm_device_remove(struct device *dev) +{ + struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); + struct tifm_driver *drv = container_of(dev->driver, struct tifm_driver, + driver); + + if (dev->driver && drv->remove) { + sock->card_event = tifm_dummy_event; + sock->data_event = tifm_dummy_event; + drv->remove(sock); + sock->dev.driver = NULL; + } + + put_device(dev); + return 0; +} + +#ifdef CONFIG_PM + +static int tifm_device_suspend(struct device *dev, pm_message_t state) +{ + struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); + struct tifm_driver *drv = container_of(dev->driver, struct tifm_driver, + driver); + + if (dev->driver && drv->suspend) + return drv->suspend(sock, state); + return 0; +} + +static int tifm_device_resume(struct device *dev) +{ + struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); + struct tifm_driver *drv = container_of(dev->driver, struct tifm_driver, + driver); + + if (dev->driver && drv->resume) + return drv->resume(sock); + return 0; +} + +#else + +#define tifm_device_suspend NULL +#define tifm_device_resume NULL + +#endif /* CONFIG_PM */ + +static ssize_t type_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); + return sprintf(buf, "%x", sock->type); +} + +static struct device_attribute tifm_dev_attrs[] = { + __ATTR(type, S_IRUGO, type_show, NULL), + __ATTR_NULL +}; + +static struct bus_type tifm_bus_type = { + .name = "tifm", + .dev_attrs = tifm_dev_attrs, + .match = tifm_bus_match, + .uevent = tifm_uevent, + .probe = tifm_device_probe, + .remove = tifm_device_remove, + .suspend = tifm_device_suspend, + .resume = 
+
+static void tifm_free(struct device *dev)
+{
+	struct tifm_adapter *fm = container_of(dev, struct tifm_adapter, dev);
+
+	kfree(fm);
+}
+
+static struct class tifm_adapter_class = {
+	.name        = "tifm_adapter",
+	.dev_release = tifm_free
+};
+
+struct tifm_adapter *tifm_alloc_adapter(unsigned int num_sockets,
+					struct device *dev)
+{
+	struct tifm_adapter *fm;
+
+	fm = kzalloc(sizeof(struct tifm_adapter)
+		     + sizeof(struct tifm_dev*) * num_sockets, GFP_KERNEL);
+	if (fm) {
+		fm->dev.class = &tifm_adapter_class;
+		fm->dev.parent = dev;
+		device_initialize(&fm->dev);
+		spin_lock_init(&fm->lock);
+		fm->num_sockets = num_sockets;
+	}
+	return fm;
+}
+EXPORT_SYMBOL(tifm_alloc_adapter);
+
+int tifm_add_adapter(struct tifm_adapter *fm)
+{
+	int rc;
+
+	if (!idr_pre_get(&tifm_adapter_idr, GFP_KERNEL))
+		return -ENOMEM;
+
+	spin_lock(&tifm_adapter_lock);
+	rc = idr_get_new(&tifm_adapter_idr, fm, &fm->id);
+	spin_unlock(&tifm_adapter_lock);
+	if (rc)
+		return rc;
+
+	snprintf(fm->dev.bus_id, BUS_ID_SIZE, "tifm%u", fm->id);
+	rc = device_add(&fm->dev);
+	if (rc) {
+		spin_lock(&tifm_adapter_lock);
+		idr_remove(&tifm_adapter_idr, fm->id);
+		spin_unlock(&tifm_adapter_lock);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(tifm_add_adapter);
+
+void tifm_remove_adapter(struct tifm_adapter *fm)
+{
+	unsigned int cnt;
+
+	flush_workqueue(workqueue);
+	for (cnt = 0; cnt < fm->num_sockets; ++cnt) {
+		if (fm->sockets[cnt])
+			device_unregister(&fm->sockets[cnt]->dev);
+	}
+
+	spin_lock(&tifm_adapter_lock);
+	idr_remove(&tifm_adapter_idr, fm->id);
+	spin_unlock(&tifm_adapter_lock);
+	device_del(&fm->dev);
+}
+EXPORT_SYMBOL(tifm_remove_adapter);
+
+void tifm_free_adapter(struct tifm_adapter *fm)
+{
+	put_device(&fm->dev);
+}
+EXPORT_SYMBOL(tifm_free_adapter);
+
+void tifm_free_device(struct device *dev)
+{
+	struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev);
+	kfree(sock);
+}
+EXPORT_SYMBOL(tifm_free_device);
+
+struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm, unsigned int id,
+				   unsigned char type)
+{
+	struct tifm_dev *sock = NULL;
+
+	if (!tifm_media_type_name(type, 0))
+		return sock;
+
+	sock = kzalloc(sizeof(struct tifm_dev), GFP_KERNEL);
+	if (sock) {
+		spin_lock_init(&sock->lock);
+		sock->type = type;
+		sock->socket_id = id;
+		sock->card_event = tifm_dummy_event;
+		sock->data_event = tifm_dummy_event;
+
+		sock->dev.parent = fm->dev.parent;
+		sock->dev.bus = &tifm_bus_type;
+		sock->dev.dma_mask = fm->dev.parent->dma_mask;
+		sock->dev.release = tifm_free_device;
+
+		snprintf(sock->dev.bus_id, BUS_ID_SIZE,
+			 "tifm_%s%u:%u", tifm_media_type_name(type, 2),
+			 fm->id, id);
+		printk(KERN_INFO DRIVER_NAME
+		       ": %s card detected in socket %u:%u\n",
+		       tifm_media_type_name(type, 0), fm->id, id);
+	}
+	return sock;
+}
+EXPORT_SYMBOL(tifm_alloc_device);
+
+void tifm_eject(struct tifm_dev *sock)
+{
+	struct tifm_adapter *fm = dev_get_drvdata(sock->dev.parent);
+	fm->eject(fm, sock);
+}
+EXPORT_SYMBOL(tifm_eject);
+
+int tifm_has_ms_pif(struct tifm_dev *sock)
+{
+	struct tifm_adapter *fm = dev_get_drvdata(sock->dev.parent);
+	return fm->has_ms_pif(fm, sock);
+}
+EXPORT_SYMBOL(tifm_has_ms_pif);
+
+int tifm_map_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents,
+		int direction)
+{
+	return pci_map_sg(to_pci_dev(sock->dev.parent), sg, nents, direction);
+}
+EXPORT_SYMBOL(tifm_map_sg);
+
+void tifm_unmap_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents,
+		   int direction)
+{
+	pci_unmap_sg(to_pci_dev(sock->dev.parent), sg, nents, direction);
+}
+EXPORT_SYMBOL(tifm_unmap_sg);
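The two sg helpers above are thin wrappers over the PCI DMA API on the adapter's parent device. A hedged sketch of typical use in a media driver for a single-buffer transfer; foo_start_dma and the register-programming step are hypothetical, only the tifm_* calls come from this file.

	/* Hypothetical: map one buffer, run a DMA write to the card, unmap. */
	static int foo_start_dma(struct tifm_dev *sock, void *buf, size_t len)
	{
		struct scatterlist sg;

		sg_init_one(&sg, buf, len);
		if (tifm_map_sg(sock, &sg, 1, PCI_DMA_TODEVICE) < 1)
			return -ENOMEM;

		/* program the socket's DMA engine from sg_dma_address(&sg) and
		 * sg_dma_len(&sg), then wait for the completion interrupt */

		tifm_unmap_sg(sock, &sg, 1, PCI_DMA_TODEVICE);
		return 0;
	}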
+
+void tifm_queue_work(struct work_struct *work)
+{
+	queue_work(workqueue, work);
+}
+EXPORT_SYMBOL(tifm_queue_work);
+
+int tifm_register_driver(struct tifm_driver *drv)
+{
+	drv->driver.bus = &tifm_bus_type;
+
+	return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(tifm_register_driver);
+
+void tifm_unregister_driver(struct tifm_driver *drv)
+{
+	driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL(tifm_unregister_driver);
+
+static int __init tifm_init(void)
+{
+	int rc;
+
+	workqueue = create_freezeable_workqueue("tifm");
+	if (!workqueue)
+		return -ENOMEM;
+
+	rc = bus_register(&tifm_bus_type);
+
+	if (rc)
+		goto err_out_wq;
+
+	rc = class_register(&tifm_adapter_class);
+	if (!rc)
+		return 0;
+
+	bus_unregister(&tifm_bus_type);
+
+err_out_wq:
+	destroy_workqueue(workqueue);
+
+	return rc;
+}
+
+static void __exit tifm_exit(void)
+{
+	class_unregister(&tifm_adapter_class);
+	bus_unregister(&tifm_bus_type);
+	destroy_workqueue(workqueue);
+}
+
+subsys_initcall(tifm_init);
+module_exit(tifm_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alex Dubov");
+MODULE_DESCRIPTION("TI FlashMedia core driver");
+MODULE_VERSION(DRIVER_VERSION);
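tifm_queue_work() is how host drivers defer socket rescans out of interrupt context onto the core's freezeable workqueue; the tifm_7xx1 driver earlier in this diff sets up its media_switcher work item with INIT_WORK() in probe for exactly this purpose. A minimal sketch of that pattern, with a hypothetical ISR; the name foo_isr and the masking step are illustrative.

	/* Hypothetical ISR deferring the heavy lifting to the tifm workqueue;
	 * fm->media_switcher was initialized with INIT_WORK() during probe. */
	static irqreturn_t foo_isr(int irq, void *dev_id)
	{
		struct tifm_adapter *fm = dev_id;

		/* read and mask the socket interrupt status here, then ... */
		tifm_queue_work(&fm->media_switcher); /* may sleep when it runs */
		return IRQ_HANDLED;
	}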