Diffstat (limited to 'drivers/acpi')
-rw-r--r-- | drivers/acpi/Kconfig             |  10
-rw-r--r-- | drivers/acpi/Makefile            |   2
-rw-r--r-- | drivers/acpi/acpica/tbutils.c    |   7
-rw-r--r-- | drivers/acpi/acpica/uteval.c     |  21
-rw-r--r-- | drivers/acpi/battery.c           |  25
-rw-r--r-- | drivers/acpi/bus.c               |   9
-rw-r--r-- | drivers/acpi/container.c         |   5
-rw-r--r-- | drivers/acpi/dock.c              |  14
-rw-r--r-- | drivers/acpi/ec.c                |  11
-rw-r--r-- | drivers/acpi/glue.c              |   8
-rw-r--r-- | drivers/acpi/numa.c              |   2
-rw-r--r-- | drivers/acpi/osl.c               |   4
-rw-r--r-- | drivers/acpi/pci_link.c          |   2
-rw-r--r-- | drivers/acpi/processor_idle.c    | 667
-rw-r--r-- | drivers/acpi/processor_perflib.c | 105
-rw-r--r-- | drivers/acpi/sleep.c             |  69
-rw-r--r-- | drivers/acpi/tables.c            |   7
-rw-r--r-- | drivers/acpi/video.c             |  16
18 files changed, 140 insertions, 844 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index d7f9839..8a851d0 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -9,6 +9,7 @@ menuconfig ACPI depends on PCI depends on PM select PNP + select CPU_IDLE default y ---help--- Advanced Configuration and Power Interface (ACPI) support for @@ -253,13 +254,6 @@ config ACPI_PCI_SLOT help you correlate PCI bus addresses with the physical geography of your slots. If you are unsure, say N. -config ACPI_SYSTEM - bool - default y - help - This driver will enable your system to shut down using ACPI, and - dump your ACPI DSDT table using /proc/acpi/dsdt. - config X86_PM_TIMER bool "Power Management Timer Support" if EMBEDDED depends on X86 @@ -287,7 +281,7 @@ config ACPI_CONTAINER support physical cpu/memory hot-plug. If one selects "m", this driver can be loaded with - "modprobe acpi_container". + "modprobe container". config ACPI_HOTPLUG_MEMORY tristate "Memory Hotplug" diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 65d90c7..b130ea0 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -52,7 +52,7 @@ obj-$(CONFIG_ACPI_PROCESSOR) += processor.o obj-$(CONFIG_ACPI_CONTAINER) += container.o obj-$(CONFIG_ACPI_THERMAL) += thermal.o obj-y += power.o -obj-$(CONFIG_ACPI_SYSTEM) += system.o event.o +obj-y += system.o event.o obj-$(CONFIG_ACPI_DEBUG) += debug.o obj-$(CONFIG_ACPI_NUMA) += numa.o obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += acpi_memhotplug.o diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index 9684cc8..22ce489 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c @@ -538,10 +538,9 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags) if (ACPI_FAILURE(status)) { ACPI_WARNING((AE_INFO, "Truncating %u table entries!", - (unsigned) - (acpi_gbl_root_table_list.size - - acpi_gbl_root_table_list. - count))); + (unsigned) (table_count - + (acpi_gbl_root_table_list. + count - 2)))); break; } } diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c index da9450bc..9c9897d 100644 --- a/drivers/acpi/acpica/uteval.c +++ b/drivers/acpi/acpica/uteval.c @@ -116,9 +116,9 @@ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state) return_ACPI_STATUS(AE_NO_MEMORY); } - /* Default return value is SUPPORTED */ + /* Default return value is 0, NOT-SUPPORTED */ - return_desc->integer.value = ACPI_UINT32_MAX; + return_desc->integer.value = 0; walk_state->return_desc = return_desc; /* Compare input string to static table of supported interfaces */ @@ -127,10 +127,8 @@ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state) if (!ACPI_STRCMP (string_desc->string.pointer, acpi_interfaces_supported[i])) { - - /* The interface is supported */ - - return_ACPI_STATUS(AE_OK); + return_desc->integer.value = ACPI_UINT32_MAX; + goto done; } } @@ -141,15 +139,14 @@ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state) */ status = acpi_os_validate_interface(string_desc->string.pointer); if (ACPI_SUCCESS(status)) { - - /* The interface is supported */ - - return_ACPI_STATUS(AE_OK); + return_desc->integer.value = ACPI_UINT32_MAX; } - /* The interface is not supported */ +done: + ACPI_DEBUG_PRINT_RAW((ACPI_DB_INFO, "ACPI: BIOS _OSI(%s) %ssupported\n", + string_desc->string.pointer, + return_desc->integer.value == 0 ? 
"not-" : "")); - return_desc->integer.value = 0; return_ACPI_STATUS(AE_OK); } diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 65132f9..69cbc57 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -138,6 +138,29 @@ static int acpi_battery_technology(struct acpi_battery *battery) static int acpi_battery_get_state(struct acpi_battery *battery); +static int acpi_battery_is_charged(struct acpi_battery *battery) +{ + /* either charging or discharging */ + if (battery->state != 0) + return 0; + + /* battery not reporting charge */ + if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN || + battery->capacity_now == 0) + return 0; + + /* good batteries update full_charge as the batteries degrade */ + if (battery->full_charge_capacity == battery->capacity_now) + return 1; + + /* fallback to using design values for broken batteries */ + if (battery->design_capacity == battery->capacity_now) + return 1; + + /* we don't do any sort of metric based on percentages */ + return 0; +} + static int acpi_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) @@ -155,7 +178,7 @@ static int acpi_battery_get_property(struct power_supply *psy, val->intval = POWER_SUPPLY_STATUS_DISCHARGING; else if (battery->state & 0x02) val->intval = POWER_SUPPLY_STATUS_CHARGING; - else if (battery->state == 0) + else if (acpi_battery_is_charged(battery)) val->intval = POWER_SUPPLY_STATUS_FULL; else val->intval = POWER_SUPPLY_STATUS_UNKNOWN; diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 765fd1c..bee64b7 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -758,8 +758,7 @@ static int __init acpi_bus_init(void) acpi_status status = AE_OK; extern acpi_status acpi_os_initialize1(void); - - status = acpi_os_initialize1(); + acpi_os_initialize1(); status = acpi_enable_subsystem(ACPI_NO_HARDWARE_INIT | ACPI_NO_ACPI_ENABLE); @@ -769,12 +768,6 @@ static int __init acpi_bus_init(void) goto error1; } - if (ACPI_FAILURE(status)) { - printk(KERN_ERR PREFIX - "Unable to initialize ACPI OS objects\n"); - goto error1; - } - /* * ACPI 2.0 requires the EC driver to be loaded and work before * the EC device is found in the namespace (i.e. before acpi_initialize_objects() diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c index 17020c1..fe0cdf8 100644 --- a/drivers/acpi/container.c +++ b/drivers/acpi/container.c @@ -163,7 +163,7 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context) case ACPI_NOTIFY_BUS_CHECK: /* Fall through */ case ACPI_NOTIFY_DEVICE_CHECK: - printk("Container driver received %s event\n", + printk(KERN_WARNING "Container driver received %s event\n", (type == ACPI_NOTIFY_BUS_CHECK) ? 
"ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"); status = acpi_bus_get_device(handle, &device); @@ -174,7 +174,8 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context) kobject_uevent(&device->dev.kobj, KOBJ_ONLINE); else - printk("Failed to add container\n"); + printk(KERN_WARNING + "Failed to add container\n"); } } else { if (ACPI_SUCCESS(status)) { diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index 5b30b8d..35094f2 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -855,10 +855,14 @@ fdd_out: static ssize_t show_docked(struct device *dev, struct device_attribute *attr, char *buf) { + struct acpi_device *tmp; + struct dock_station *dock_station = *((struct dock_station **) dev->platform_data); - return snprintf(buf, PAGE_SIZE, "%d\n", dock_present(dock_station)); + if (ACPI_SUCCESS(acpi_bus_get_device(dock_station->handle, &tmp))) + return snprintf(buf, PAGE_SIZE, "1\n"); + return snprintf(buf, PAGE_SIZE, "0\n"); } static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL); @@ -984,7 +988,7 @@ static int dock_add(acpi_handle handle) ret = device_create_file(&dock_device->dev, &dev_attr_docked); if (ret) { - printk("Error %d adding sysfs file\n", ret); + printk(KERN_ERR "Error %d adding sysfs file\n", ret); platform_device_unregister(dock_device); kfree(dock_station); dock_station = NULL; @@ -992,7 +996,7 @@ static int dock_add(acpi_handle handle) } ret = device_create_file(&dock_device->dev, &dev_attr_undock); if (ret) { - printk("Error %d adding sysfs file\n", ret); + printk(KERN_ERR "Error %d adding sysfs file\n", ret); device_remove_file(&dock_device->dev, &dev_attr_docked); platform_device_unregister(dock_device); kfree(dock_station); @@ -1001,7 +1005,7 @@ static int dock_add(acpi_handle handle) } ret = device_create_file(&dock_device->dev, &dev_attr_uid); if (ret) { - printk("Error %d adding sysfs file\n", ret); + printk(KERN_ERR "Error %d adding sysfs file\n", ret); device_remove_file(&dock_device->dev, &dev_attr_docked); device_remove_file(&dock_device->dev, &dev_attr_undock); platform_device_unregister(dock_device); @@ -1011,7 +1015,7 @@ static int dock_add(acpi_handle handle) } ret = device_create_file(&dock_device->dev, &dev_attr_flags); if (ret) { - printk("Error %d adding sysfs file\n", ret); + printk(KERN_ERR "Error %d adding sysfs file\n", ret); device_remove_file(&dock_device->dev, &dev_attr_docked); device_remove_file(&dock_device->dev, &dev_attr_undock); device_remove_file(&dock_device->dev, &dev_attr_uid); diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index a2b82c9..2fe1506 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -120,6 +120,8 @@ static struct acpi_ec { spinlock_t curr_lock; } *boot_ec, *first_ec; +static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */ + /* -------------------------------------------------------------------------- Transaction Management -------------------------------------------------------------------------- */ @@ -259,6 +261,8 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, clear_bit(EC_FLAGS_GPE_MODE, &ec->flags); acpi_disable_gpe(NULL, ec->gpe); } + if (EC_FLAGS_MSI) + udelay(ACPI_EC_DELAY); /* start transaction */ spin_lock_irqsave(&ec->curr_lock, tmp); /* following two actions should be kept atomic */ @@ -967,6 +971,11 @@ int __init acpi_ec_ecdt_probe(void) /* * Generate a boot ec context */ + if (dmi_name_in_vendors("Micro-Star") || + dmi_name_in_vendors("Notebook")) { + pr_info(PREFIX "Enabling special treatment for EC from MSI.\n"); + EC_FLAGS_MSI = 1; + } 
status = acpi_get_table(ACPI_SIG_ECDT, 1, (struct acpi_table_header **)&ecdt_ptr); if (ACPI_SUCCESS(status)) { @@ -982,7 +991,7 @@ int __init acpi_ec_ecdt_probe(void) saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL); if (!saved_ec) return -ENOMEM; - memcpy(&saved_ec, boot_ec, sizeof(saved_ec)); + memcpy(saved_ec, boot_ec, sizeof(*saved_ec)); /* fall through */ } /* This workaround is needed only on some broken machines, diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index adec3d1..5479b9f 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -255,12 +255,12 @@ static int acpi_platform_notify(struct device *dev) } type = acpi_get_bus_type(dev->bus); if (!type) { - DBG("No ACPI bus support for %s\n", dev->bus_id); + DBG("No ACPI bus support for %s\n", dev_name(dev)); ret = -EINVAL; goto end; } if ((ret = type->find_device(dev, &handle)) != 0) - DBG("Can't get handler for %s\n", dev->bus_id); + DBG("Can't get handler for %s\n", dev_name(dev)); end: if (!ret) acpi_bind_one(dev, handle); @@ -271,10 +271,10 @@ static int acpi_platform_notify(struct device *dev) acpi_get_name(dev->archdata.acpi_handle, ACPI_FULL_PATHNAME, &buffer); - DBG("Device %s -> %s\n", dev->bus_id, (char *)buffer.pointer); + DBG("Device %s -> %s\n", dev_name(dev), (char *)buffer.pointer); kfree(buffer.pointer); } else - DBG("Device %s -> No ACPI support\n", dev->bus_id); + DBG("Device %s -> No ACPI support\n", dev_name(dev)); #endif return ret; diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index c5e292a..3a0d8ef 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -277,7 +277,7 @@ int acpi_get_node(acpi_handle *handle) int pxm, node = -1; pxm = acpi_get_pxm(handle); - if (pxm >= 0) + if (pxm >= 0 && pxm < MAX_PXM_DOMAINS) node = acpi_map_pxm_to_node(pxm); return node; diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 6729a49..1e35f34 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -228,10 +228,10 @@ void acpi_os_vprintf(const char *fmt, va_list args) if (acpi_in_debugger) { kdb_printf("%s", buffer); } else { - printk("%s", buffer); + printk(KERN_CONT "%s", buffer); } #else - printk("%s", buffer); + printk(KERN_CONT "%s", buffer); #endif } diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index 1c6e73c..6c772ca 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c @@ -593,7 +593,7 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link) return -ENODEV; } else { acpi_irq_penalty[link->irq.active] += PIRQ_PENALTY_PCI_USING; - printk(PREFIX "%s [%s] enabled at IRQ %d\n", + printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n", acpi_device_name(link->device), acpi_device_bid(link->device), link->irq.active); } diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 66a9d81..7bc22a4 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -66,43 +66,17 @@ ACPI_MODULE_NAME("processor_idle"); #define ACPI_PROCESSOR_FILE_POWER "power" #define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000) #define PM_TIMER_TICK_NS (1000000000ULL/PM_TIMER_FREQUENCY) -#ifndef CONFIG_CPU_IDLE -#define C2_OVERHEAD 4 /* 1us (3.579 ticks per us) */ -#define C3_OVERHEAD 4 /* 1us (3.579 ticks per us) */ -static void (*pm_idle_save) (void) __read_mostly; -#else #define C2_OVERHEAD 1 /* 1us */ #define C3_OVERHEAD 1 /* 1us */ -#endif #define PM_TIMER_TICKS_TO_US(p) (((p) * 1000)/(PM_TIMER_FREQUENCY/1000)) static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER; -#ifdef 
CONFIG_CPU_IDLE module_param(max_cstate, uint, 0000); -#else -module_param(max_cstate, uint, 0644); -#endif static unsigned int nocst __read_mostly; module_param(nocst, uint, 0000); -#ifndef CONFIG_CPU_IDLE -/* - * bm_history -- bit-mask with a bit per jiffy of bus-master activity - * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms - * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms - * 100 HZ: 0x0000000F: 4 jiffies = 40ms - * reduce history for more aggressive entry into C3 - */ -static unsigned int bm_history __read_mostly = - (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1)); -module_param(bm_history, uint, 0644); - -static int acpi_processor_set_power_policy(struct acpi_processor *pr); - -#else /* CONFIG_CPU_IDLE */ static unsigned int latency_factor __read_mostly = 2; module_param(latency_factor, uint, 0644); -#endif /* * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3. @@ -224,71 +198,6 @@ static void acpi_safe_halt(void) current_thread_info()->status |= TS_POLLING; } -#ifndef CONFIG_CPU_IDLE - -static void -acpi_processor_power_activate(struct acpi_processor *pr, - struct acpi_processor_cx *new) -{ - struct acpi_processor_cx *old; - - if (!pr || !new) - return; - - old = pr->power.state; - - if (old) - old->promotion.count = 0; - new->demotion.count = 0; - - /* Cleanup from old state. */ - if (old) { - switch (old->type) { - case ACPI_STATE_C3: - /* Disable bus master reload */ - if (new->type != ACPI_STATE_C3 && pr->flags.bm_check) - acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0); - break; - } - } - - /* Prepare to use new state. */ - switch (new->type) { - case ACPI_STATE_C3: - /* Enable bus master reload */ - if (old->type != ACPI_STATE_C3 && pr->flags.bm_check) - acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1); - break; - } - - pr->power.state = new; - - return; -} - -static atomic_t c3_cpu_count; - -/* Common C-state entry for C2, C3, .. */ -static void acpi_cstate_enter(struct acpi_processor_cx *cstate) -{ - /* Don't trace irqs off for idle */ - stop_critical_timings(); - if (cstate->entry_method == ACPI_CSTATE_FFH) { - /* Call into architectural FFH based C-state */ - acpi_processor_ffh_cstate_enter(cstate); - } else { - int unused; - /* IO port based C-state */ - inb(cstate->address); - /* Dummy wait op - must do something useless after P_LVL2 read - because chipsets cannot guarantee that STPCLK# signal - gets asserted in time to freeze execution properly. */ - unused = inl(acpi_gbl_FADT.xpm_timer_block.address); - } - start_critical_timings(); -} -#endif /* !CONFIG_CPU_IDLE */ - #ifdef ARCH_APICTIMER_STOPS_ON_C3 /* @@ -390,421 +299,6 @@ static int tsc_halts_in_c(int state) } #endif -#ifndef CONFIG_CPU_IDLE -static void acpi_processor_idle(void) -{ - struct acpi_processor *pr = NULL; - struct acpi_processor_cx *cx = NULL; - struct acpi_processor_cx *next_state = NULL; - int sleep_ticks = 0; - u32 t1, t2 = 0; - - /* - * Interrupts must be disabled during bus mastering calculations and - * for C2/C3 transitions. 
- */ - local_irq_disable(); - - pr = __get_cpu_var(processors); - if (!pr) { - local_irq_enable(); - return; - } - - /* - * Check whether we truly need to go idle, or should - * reschedule: - */ - if (unlikely(need_resched())) { - local_irq_enable(); - return; - } - - cx = pr->power.state; - if (!cx || acpi_idle_suspend) { - if (pm_idle_save) { - pm_idle_save(); /* enables IRQs */ - } else { - acpi_safe_halt(); - local_irq_enable(); - } - - return; - } - - /* - * Check BM Activity - * ----------------- - * Check for bus mastering activity (if required), record, and check - * for demotion. - */ - if (pr->flags.bm_check) { - u32 bm_status = 0; - unsigned long diff = jiffies - pr->power.bm_check_timestamp; - - if (diff > 31) - diff = 31; - - pr->power.bm_activity <<= diff; - - acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status); - if (bm_status) { - pr->power.bm_activity |= 0x1; - acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1); - } - /* - * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect - * the true state of bus mastering activity; forcing us to - * manually check the BMIDEA bit of each IDE channel. - */ - else if (errata.piix4.bmisx) { - if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01) - || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01)) - pr->power.bm_activity |= 0x1; - } - - pr->power.bm_check_timestamp = jiffies; - - /* - * If bus mastering is or was active this jiffy, demote - * to avoid a faulty transition. Note that the processor - * won't enter a low-power state during this call (to this - * function) but should upon the next. - * - * TBD: A better policy might be to fallback to the demotion - * state (use it for this quantum only) istead of - * demoting -- and rely on duration as our sole demotion - * qualification. This may, however, introduce DMA - * issues (e.g. floppy DMA transfer overrun/underrun). - */ - if ((pr->power.bm_activity & 0x1) && - cx->demotion.threshold.bm) { - local_irq_enable(); - next_state = cx->demotion.state; - goto end; - } - } - -#ifdef CONFIG_HOTPLUG_CPU - /* - * Check for P_LVL2_UP flag before entering C2 and above on - * an SMP system. We do it here instead of doing it at _CST/P_LVL - * detection phase, to work cleanly with logical CPU hotplug. - */ - if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && - !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) - cx = &pr->power.states[ACPI_STATE_C1]; -#endif - - /* - * Sleep: - * ------ - * Invoke the current Cx state to put the processor to sleep. - */ - if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) { - current_thread_info()->status &= ~TS_POLLING; - /* - * TS_POLLING-cleared state must be visible before we - * test NEED_RESCHED: - */ - smp_mb(); - if (need_resched()) { - current_thread_info()->status |= TS_POLLING; - local_irq_enable(); - return; - } - } - - switch (cx->type) { - - case ACPI_STATE_C1: - /* - * Invoke C1. - * Use the appropriate idle routine, the one that would - * be used without acpi C-states. - */ - if (pm_idle_save) { - pm_idle_save(); /* enables IRQs */ - } else { - acpi_safe_halt(); - local_irq_enable(); - } - - /* - * TBD: Can't get time duration while in C1, as resumes - * go to an ISR rather than here. Need to instrument - * base interrupt handler. - * - * Note: the TSC better not stop in C1, sched_clock() will - * skew otherwise. 
- */ - sleep_ticks = 0xFFFFFFFF; - - break; - - case ACPI_STATE_C2: - /* Get start time (ticks) */ - t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); - /* Tell the scheduler that we are going deep-idle: */ - sched_clock_idle_sleep_event(); - /* Invoke C2 */ - acpi_state_timer_broadcast(pr, cx, 1); - acpi_cstate_enter(cx); - /* Get end time (ticks) */ - t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); - -#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86) - /* TSC halts in C2, so notify users */ - if (tsc_halts_in_c(ACPI_STATE_C2)) - mark_tsc_unstable("possible TSC halt in C2"); -#endif - /* Compute time (ticks) that we were actually asleep */ - sleep_ticks = ticks_elapsed(t1, t2); - - /* Tell the scheduler how much we idled: */ - sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); - - /* Re-enable interrupts */ - local_irq_enable(); - /* Do not account our idle-switching overhead: */ - sleep_ticks -= cx->latency_ticks + C2_OVERHEAD; - - current_thread_info()->status |= TS_POLLING; - acpi_state_timer_broadcast(pr, cx, 0); - break; - - case ACPI_STATE_C3: - acpi_unlazy_tlb(smp_processor_id()); - /* - * Must be done before busmaster disable as we might - * need to access HPET ! - */ - acpi_state_timer_broadcast(pr, cx, 1); - /* - * disable bus master - * bm_check implies we need ARB_DIS - * !bm_check implies we need cache flush - * bm_control implies whether we can do ARB_DIS - * - * That leaves a case where bm_check is set and bm_control is - * not set. In that case we cannot do much, we enter C3 - * without doing anything. - */ - if (pr->flags.bm_check && pr->flags.bm_control) { - if (atomic_inc_return(&c3_cpu_count) == - num_online_cpus()) { - /* - * All CPUs are trying to go to C3 - * Disable bus master arbitration - */ - acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1); - } - } else if (!pr->flags.bm_check) { - /* SMP with no shared cache... Invalidate cache */ - ACPI_FLUSH_CPU_CACHE(); - } - - /* Get start time (ticks) */ - t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); - /* Invoke C3 */ - /* Tell the scheduler that we are going deep-idle: */ - sched_clock_idle_sleep_event(); - acpi_cstate_enter(cx); - /* Get end time (ticks) */ - t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); - if (pr->flags.bm_check && pr->flags.bm_control) { - /* Enable bus master arbitration */ - atomic_dec(&c3_cpu_count); - acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0); - } - -#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86) - /* TSC halts in C3, so notify users */ - if (tsc_halts_in_c(ACPI_STATE_C3)) - mark_tsc_unstable("TSC halts in C3"); -#endif - /* Compute time (ticks) that we were actually asleep */ - sleep_ticks = ticks_elapsed(t1, t2); - /* Tell the scheduler how much we idled: */ - sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); - - /* Re-enable interrupts */ - local_irq_enable(); - /* Do not account our idle-switching overhead: */ - sleep_ticks -= cx->latency_ticks + C3_OVERHEAD; - - current_thread_info()->status |= TS_POLLING; - acpi_state_timer_broadcast(pr, cx, 0); - break; - - default: - local_irq_enable(); - return; - } - cx->usage++; - if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0)) - cx->time += sleep_ticks; - - next_state = pr->power.state; - -#ifdef CONFIG_HOTPLUG_CPU - /* Don't do promotion/demotion */ - if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) && - !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) { - next_state = cx; - goto end; - } -#endif - - /* - * Promotion? 
- * ---------- - * Track the number of longs (time asleep is greater than threshold) - * and promote when the count threshold is reached. Note that bus - * mastering activity may prevent promotions. - * Do not promote above max_cstate. - */ - if (cx->promotion.state && - ((cx->promotion.state - pr->power.states) <= max_cstate)) { - if (sleep_ticks > cx->promotion.threshold.ticks && - cx->promotion.state->latency <= - pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) { - cx->promotion.count++; - cx->demotion.count = 0; - if (cx->promotion.count >= - cx->promotion.threshold.count) { - if (pr->flags.bm_check) { - if (! - (pr->power.bm_activity & cx-> - promotion.threshold.bm)) { - next_state = - cx->promotion.state; - goto end; - } - } else { - next_state = cx->promotion.state; - goto end; - } - } - } - } - - /* - * Demotion? - * --------- - * Track the number of shorts (time asleep is less than time threshold) - * and demote when the usage threshold is reached. - */ - if (cx->demotion.state) { - if (sleep_ticks < cx->demotion.threshold.ticks) { - cx->demotion.count++; - cx->promotion.count = 0; - if (cx->demotion.count >= cx->demotion.threshold.count) { - next_state = cx->demotion.state; - goto end; - } - } - } - - end: - /* - * Demote if current state exceeds max_cstate - * or if the latency of the current state is unacceptable - */ - if ((pr->power.state - pr->power.states) > max_cstate || - pr->power.state->latency > - pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) { - if (cx->demotion.state) - next_state = cx->demotion.state; - } - - /* - * New Cx State? - * ------------- - * If we're going to start using a new Cx state we must clean up - * from the previous and prepare to use the new. - */ - if (next_state != pr->power.state) - acpi_processor_power_activate(pr, next_state); -} - -static int acpi_processor_set_power_policy(struct acpi_processor *pr) -{ - unsigned int i; - unsigned int state_is_set = 0; - struct acpi_processor_cx *lower = NULL; - struct acpi_processor_cx *higher = NULL; - struct acpi_processor_cx *cx; - - - if (!pr) - return -EINVAL; - - /* - * This function sets the default Cx state policy (OS idle handler). - * Our scheme is to promote quickly to C2 but more conservatively - * to C3. We're favoring C2 for its characteristics of low latency - * (quick response), good power savings, and ability to allow bus - * mastering activity. Note that the Cx state policy is completely - * customizable and can be altered dynamically. 
- */ - - /* startup state */ - for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { - cx = &pr->power.states[i]; - if (!cx->valid) - continue; - - if (!state_is_set) - pr->power.state = cx; - state_is_set++; - break; - } - - if (!state_is_set) - return -ENODEV; - - /* demotion */ - for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { - cx = &pr->power.states[i]; - if (!cx->valid) - continue; - - if (lower) { - cx->demotion.state = lower; - cx->demotion.threshold.ticks = cx->latency_ticks; - cx->demotion.threshold.count = 1; - if (cx->type == ACPI_STATE_C3) - cx->demotion.threshold.bm = bm_history; - } - - lower = cx; - } - - /* promotion */ - for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) { - cx = &pr->power.states[i]; - if (!cx->valid) - continue; - - if (higher) { - cx->promotion.state = higher; - cx->promotion.threshold.ticks = cx->latency_ticks; - if (cx->type >= ACPI_STATE_C2) - cx->promotion.threshold.count = 4; - else - cx->promotion.threshold.count = 10; - if (higher->type == ACPI_STATE_C3) - cx->promotion.threshold.bm = bm_history; - } - - higher = cx; - } - - return 0; -} -#endif /* !CONFIG_CPU_IDLE */ - static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) { @@ -1047,11 +541,7 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx) */ cx->valid = 1; -#ifndef CONFIG_CPU_IDLE - cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency); -#else cx->latency_ticks = cx->latency; -#endif return; } @@ -1121,7 +611,6 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr, " for C3 to be enabled on SMP systems\n")); return; } - acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0); } /* @@ -1132,11 +621,16 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr, */ cx->valid = 1; -#ifndef CONFIG_CPU_IDLE - cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency); -#else cx->latency_ticks = cx->latency; -#endif + /* + * On older chipsets, BM_RLD needs to be set + * in order for Bus Master activity to wake the + * system from C3. Newer chipsets handle DMA + * during C3 automatically and BM_RLD is a NOP. + * In either case, the proper way to + * handle BM_RLD is to set it and leave it set. + */ + acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1); return; } @@ -1201,20 +695,6 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr) pr->power.count = acpi_processor_power_verify(pr); -#ifndef CONFIG_CPU_IDLE - /* - * Set Default Policy - * ------------------ - * Now that we know which states are supported, set the default - * policy. Note that this policy can be changed dynamically - * (e.g. encourage deeper sleeps to conserve battery life when - * not on AC). - */ - result = acpi_processor_set_power_policy(pr); - if (result) - return result; -#endif - /* * if one state of type C2 or C3 is available, mark this * CPU as being "idle manageable" @@ -1312,69 +792,6 @@ static const struct file_operations acpi_processor_power_fops = { .release = single_release, }; -#ifndef CONFIG_CPU_IDLE - -int acpi_processor_cst_has_changed(struct acpi_processor *pr) -{ - int result = 0; - - if (boot_option_idle_override) - return 0; - - if (!pr) - return -EINVAL; - - if (nocst) { - return -ENODEV; - } - - if (!pr->flags.power_setup_done) - return -ENODEV; - - /* - * Fall back to the default idle loop, when pm_idle_save had - * been initialized. - */ - if (pm_idle_save) { - pm_idle = pm_idle_save; - /* Relies on interrupts forcing exit from idle. 
*/ - synchronize_sched(); - } - - pr->flags.power = 0; - result = acpi_processor_get_power_info(pr); - if ((pr->flags.power == 1) && (pr->flags.power_setup_done)) - pm_idle = acpi_processor_idle; - - return result; -} - -#ifdef CONFIG_SMP -static void smp_callback(void *v) -{ - /* we already woke the CPU up, nothing more to do */ -} - -/* - * This function gets called when a part of the kernel has a new latency - * requirement. This means we need to get all processors out of their C-state, - * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that - * wakes them all right up. - */ -static int acpi_processor_latency_notify(struct notifier_block *b, - unsigned long l, void *v) -{ - smp_call_function(smp_callback, NULL, 1); - return NOTIFY_OK; -} - -static struct notifier_block acpi_processor_latency_notifier = { - .notifier_call = acpi_processor_latency_notify, -}; - -#endif - -#else /* CONFIG_CPU_IDLE */ /** * acpi_idle_bm_check - checks if bus master activity was detected @@ -1383,7 +800,7 @@ static int acpi_idle_bm_check(void) { u32 bm_status = 0; - acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status); + acpi_get_register_unlocked(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status); if (bm_status) acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1); /* @@ -1400,25 +817,6 @@ static int acpi_idle_bm_check(void) } /** - * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state - * @pr: the processor - * @target: the new target state - */ -static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr, - struct acpi_processor_cx *target) -{ - if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) { - acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0); - pr->flags.bm_rld_set = 0; - } - - if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) { - acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1); - pr->flags.bm_rld_set = 1; - } -} - -/** * acpi_idle_do_entry - a helper function that does C2 and C3 type entry * @cx: cstate data * @@ -1473,9 +871,6 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, return 0; } - if (pr->flags.bm_check) - acpi_idle_update_bm_rld(pr, cx); - t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); acpi_idle_do_entry(cx); t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); @@ -1527,9 +922,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, */ acpi_state_timer_broadcast(pr, cx, 1); - if (pr->flags.bm_check) - acpi_idle_update_bm_rld(pr, cx); - if (cx->type == ACPI_STATE_C3) ACPI_FLUSH_CPU_CACHE(); @@ -1621,8 +1013,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, */ acpi_state_timer_broadcast(pr, cx, 1); - acpi_idle_update_bm_rld(pr, cx); - /* * disable bus master * bm_check implies we need ARB_DIS @@ -1795,8 +1185,6 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr) return ret; } -#endif /* CONFIG_CPU_IDLE */ - int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, struct acpi_device *device) { @@ -1825,10 +1213,6 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, "ACPI: processor limited to max C-state %d\n", max_cstate); first_run++; -#if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP) - pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, - &acpi_processor_latency_notifier); -#endif } if (!pr) @@ -1852,11 +1236,9 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, * platforms that only support C1. 
*/ if (pr->flags.power) { -#ifdef CONFIG_CPU_IDLE acpi_processor_setup_cpuidle(pr); if (cpuidle_register_device(&pr->power.dev)) return -EIO; -#endif printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id); for (i = 1; i <= pr->power.count; i++) @@ -1864,13 +1246,6 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, printk(" C%d[C%d]", i, pr->power.states[i].type); printk(")\n"); - -#ifndef CONFIG_CPU_IDLE - if (pr->id == 0) { - pm_idle_save = pm_idle; - pm_idle = acpi_processor_idle; - } -#endif } /* 'power' [R] */ @@ -1889,34 +1264,12 @@ int acpi_processor_power_exit(struct acpi_processor *pr, if (boot_option_idle_override) return 0; -#ifdef CONFIG_CPU_IDLE cpuidle_unregister_device(&pr->power.dev); -#endif pr->flags.power_setup_done = 0; if (acpi_device_dir(device)) remove_proc_entry(ACPI_PROCESSOR_FILE_POWER, acpi_device_dir(device)); -#ifndef CONFIG_CPU_IDLE - - /* Unregister the idle handler when processor #0 is removed. */ - if (pr->id == 0) { - if (pm_idle_save) - pm_idle = pm_idle_save; - - /* - * We are about to unload the current idle thread pm callback - * (pm_idle), Wait for all processors to update cached/local - * copies of pm_idle before proceeding. - */ - cpu_idle_wait(); -#ifdef CONFIG_SMP - pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY, - &acpi_processor_latency_notifier); -#endif - } -#endif - return 0; } diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 846e227..9cc769b 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -31,14 +31,6 @@ #include <linux/init.h> #include <linux/cpufreq.h> -#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <linux/mutex.h> - -#include <asm/uaccess.h> -#endif - #ifdef CONFIG_X86 #include <asm/cpufeature.h> #endif @@ -434,96 +426,6 @@ int acpi_processor_notify_smm(struct module *calling_module) EXPORT_SYMBOL(acpi_processor_notify_smm); -#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF -/* /proc/acpi/processor/../performance interface (DEPRECATED) */ - -static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file); -static struct file_operations acpi_processor_perf_fops = { - .owner = THIS_MODULE, - .open = acpi_processor_perf_open_fs, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static int acpi_processor_perf_seq_show(struct seq_file *seq, void *offset) -{ - struct acpi_processor *pr = seq->private; - int i; - - - if (!pr) - goto end; - - if (!pr->performance) { - seq_puts(seq, "<not supported>\n"); - goto end; - } - - seq_printf(seq, "state count: %d\n" - "active state: P%d\n", - pr->performance->state_count, pr->performance->state); - - seq_puts(seq, "states:\n"); - for (i = 0; i < pr->performance->state_count; i++) - seq_printf(seq, - " %cP%d: %d MHz, %d mW, %d uS\n", - (i == pr->performance->state ? 
'*' : ' '), i, - (u32) pr->performance->states[i].core_frequency, - (u32) pr->performance->states[i].power, - (u32) pr->performance->states[i].transition_latency); - - end: - return 0; -} - -static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file) -{ - return single_open(file, acpi_processor_perf_seq_show, - PDE(inode)->data); -} - -static void acpi_cpufreq_add_file(struct acpi_processor *pr) -{ - struct acpi_device *device = NULL; - - - if (acpi_bus_get_device(pr->handle, &device)) - return; - - /* add file 'performance' [R/W] */ - proc_create_data(ACPI_PROCESSOR_FILE_PERFORMANCE, S_IFREG | S_IRUGO, - acpi_device_dir(device), - &acpi_processor_perf_fops, acpi_driver_data(device)); - return; -} - -static void acpi_cpufreq_remove_file(struct acpi_processor *pr) -{ - struct acpi_device *device = NULL; - - - if (acpi_bus_get_device(pr->handle, &device)) - return; - - /* remove file 'performance' */ - remove_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE, - acpi_device_dir(device)); - - return; -} - -#else -static void acpi_cpufreq_add_file(struct acpi_processor *pr) -{ - return; -} -static void acpi_cpufreq_remove_file(struct acpi_processor *pr) -{ - return; -} -#endif /* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */ - static int acpi_processor_get_psd(struct acpi_processor *pr) { int result = 0; @@ -747,14 +649,12 @@ err_ret: } EXPORT_SYMBOL(acpi_processor_preregister_performance); - int acpi_processor_register_performance(struct acpi_processor_performance *performance, unsigned int cpu) { struct acpi_processor *pr; - if (!(acpi_processor_ppc_status & PPC_REGISTERED)) return -EINVAL; @@ -781,8 +681,6 @@ acpi_processor_register_performance(struct acpi_processor_performance return -EIO; } - acpi_cpufreq_add_file(pr); - mutex_unlock(&performance_mutex); return 0; } @@ -795,7 +693,6 @@ acpi_processor_unregister_performance(struct acpi_processor_performance { struct acpi_processor *pr; - mutex_lock(&performance_mutex); pr = per_cpu(processors, cpu); @@ -808,8 +705,6 @@ acpi_processor_unregister_performance(struct acpi_processor_performance kfree(pr->performance->states); pr->performance = NULL; - acpi_cpufreq_remove_file(pr); - mutex_unlock(&performance_mutex); return; diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 7e3c609..00456fc 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -90,31 +90,6 @@ void __init acpi_old_suspend_ordering(void) old_suspend_ordering = true; } -/* - * According to the ACPI specification the BIOS should make sure that ACPI is - * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still, - * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI - * on such systems during resume. Unfortunately that doesn't help in - * particularly pathological cases in which SCI_EN has to be set directly on - * resume, although the specification states very clearly that this flag is - * owned by the hardware. The set_sci_en_on_resume variable will be set in such - * cases. - */ -static bool set_sci_en_on_resume; -/* - * The ACPI specification wants us to save NVS memory regions during hibernation - * and to restore them during the subsequent resume. However, it is not certain - * if this mechanism is going to work on all machines, so we allow the user to - * disable this mechanism using the 'acpi_sleep=s4_nonvs' kernel command line - * option. - */ -static bool s4_no_nvs; - -void __init acpi_s4_no_nvs(void) -{ - s4_no_nvs = true; -} - /** * acpi_pm_disable_gpes - Disable the GPEs. 
*/ @@ -193,6 +168,18 @@ static void acpi_pm_end(void) #endif /* CONFIG_ACPI_SLEEP */ #ifdef CONFIG_SUSPEND +/* + * According to the ACPI specification the BIOS should make sure that ACPI is + * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still, + * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI + * on such systems during resume. Unfortunately that doesn't help in + * particularly pathological cases in which SCI_EN has to be set directly on + * resume, although the specification states very clearly that this flag is + * owned by the hardware. The set_sci_en_on_resume variable will be set in such + * cases. + */ +static bool set_sci_en_on_resume; + extern void do_suspend_lowlevel(void); static u32 acpi_suspend_states[] = { @@ -391,11 +378,41 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"), }, }, + { + .callback = init_old_suspend_ordering, + .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "M2N8L"), + }, + }, + { + .callback = init_set_sci_en_on_resume, + .ident = "Toshiba Satellite L300", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"), + }, + }, {}, }; #endif /* CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATION +/* + * The ACPI specification wants us to save NVS memory regions during hibernation + * and to restore them during the subsequent resume. However, it is not certain + * if this mechanism is going to work on all machines, so we allow the user to + * disable this mechanism using the 'acpi_sleep=s4_nonvs' kernel command line + * option. + */ +static bool s4_no_nvs; + +void __init acpi_s4_no_nvs(void) +{ + s4_no_nvs = true; +} + static unsigned long s4_hardware_signature; static struct acpi_table_facs *facs; static bool nosigcheck; @@ -679,7 +696,7 @@ static void acpi_power_off_prepare(void) static void acpi_power_off(void) { /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */ - printk("%s called\n", __func__); + printk(KERN_DEBUG "%s called\n", __func__); local_irq_disable(); acpi_enable_wakeup_device(ACPI_STATE_S5); acpi_enter_sleep_state(ACPI_STATE_S5); diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index 775c97a..a885295 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -293,7 +293,12 @@ static void __init check_multiple_madt(void) int __init acpi_table_init(void) { - acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0); + acpi_status status; + + status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0); + if (ACPI_FAILURE(status)) + return 1; + check_multiple_madt(); return 0; } diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index f261737..bb5ed05 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -1020,7 +1020,7 @@ acpi_video_device_brightness_seq_show(struct seq_file *seq, void *offset) } seq_printf(seq, "levels: "); - for (i = 0; i < dev->brightness->count; i++) + for (i = 2; i < dev->brightness->count; i++) seq_printf(seq, " %d", dev->brightness->levels[i]); seq_printf(seq, "\ncurrent: %d\n", dev->brightness->curr); @@ -1059,7 +1059,7 @@ acpi_video_device_write_brightness(struct file *file, return -EFAULT; /* validate through the list of available levels */ - for (i = 0; i < dev->brightness->count; i++) + for (i = 2; i < dev->brightness->count; i++) if (level == dev->brightness->levels[i]) { if (ACPI_SUCCESS 
(acpi_video_device_lcd_set_level(dev, level))) @@ -1260,7 +1260,7 @@ static int acpi_video_bus_POST_info_seq_show(struct seq_file *seq, void *offset) printk(KERN_WARNING PREFIX "This indicates a BIOS bug. Please contact the manufacturer.\n"); } - printk("%llx\n", options); + printk(KERN_WARNING "%llx\n", options); seq_printf(seq, "can POST: <integrated video>"); if (options & 2) seq_printf(seq, " <PCI video>"); @@ -1712,7 +1712,7 @@ acpi_video_get_next_level(struct acpi_video_device *device, max = max_below = 0; min = min_above = 255; /* Find closest level to level_current */ - for (i = 0; i < device->brightness->count; i++) { + for (i = 2; i < device->brightness->count; i++) { l = device->brightness->levels[i]; if (abs(l - level_current) < abs(delta)) { delta = l - level_current; @@ -1722,7 +1722,7 @@ acpi_video_get_next_level(struct acpi_video_device *device, } /* Ajust level_current to closest available level */ level_current += delta; - for (i = 0; i < device->brightness->count; i++) { + for (i = 2; i < device->brightness->count; i++) { l = device->brightness->levels[i]; if (l < min) min = l; @@ -2006,6 +2006,12 @@ static int acpi_video_bus_add(struct acpi_device *device) device->pnp.bus_id[3] = '0' + instance; instance ++; } + /* a hack to fix the duplicate name "VGA" problem on Pa 3553 */ + if (!strcmp(device->pnp.bus_id, "VGA")) { + if (instance) + device->pnp.bus_id[3] = '0' + instance; + instance++; + } video->device = device; strcpy(acpi_device_name(device), ACPI_VIDEO_BUS_NAME); |