-rw-r--r-- | arch/x86/kernel/io_apic_32.c  |  21
-rw-r--r-- | arch/x86/kernel/io_apic_64.c  |  24
-rw-r--r-- | drivers/acpi/battery.c        |   2
-rw-r--r-- | drivers/acpi/ec.c             |  43
-rw-r--r-- | drivers/acpi/processor_idle.c | 111
-rw-r--r-- | drivers/acpi/sbs.c            |  25
-rw-r--r-- | drivers/pnp/pnpacpi/rsparser.c|  24
-rw-r--r-- | include/acpi/processor.h      |   1
-rw-r--r-- | include/linux/acpi.h          |   5
-rw-r--r-- | include/linux/cpuidle.h       |   1
10 files changed, 176 insertions(+), 81 deletions(-)
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index f35c6eb..6bb80ea 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -962,7 +962,7 @@ static int EISA_ELCR(unsigned int irq)
 #define default_MCA_trigger(idx)        (1)
 #define default_MCA_polarity(idx)       (0)
 
-static int __init MPBIOS_polarity(int idx)
+static int MPBIOS_polarity(int idx)
 {
         int bus = mp_irqs[idx].mpc_srcbus;
         int polarity;
@@ -2830,6 +2830,25 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
         return 0;
 }
 
+int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
+{
+        int i;
+
+        if (skip_ioapic_setup)
+                return -1;
+
+        for (i = 0; i < mp_irq_entries; i++)
+                if (mp_irqs[i].mpc_irqtype == mp_INT &&
+                    mp_irqs[i].mpc_srcbusirq == bus_irq)
+                        break;
+        if (i >= mp_irq_entries)
+                return -1;
+
+        *trigger = irq_trigger(i);
+        *polarity = irq_polarity(i);
+        return 0;
+}
+
 #endif /* CONFIG_ACPI */
 
 static int __init parse_disable_timer_pin_1(char *arg)
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 953328b..435a8c9 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -546,7 +546,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
 #define default_PCI_trigger(idx)        (1)
 #define default_PCI_polarity(idx)       (1)
 
-static int __init MPBIOS_polarity(int idx)
+static int MPBIOS_polarity(int idx)
 {
         int bus = mp_irqs[idx].mpc_srcbus;
         int polarity;
@@ -2222,8 +2222,27 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int p
         return 0;
 }
 
-#endif /* CONFIG_ACPI */
+int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
+{
+        int i;
+
+        if (skip_ioapic_setup)
+                return -1;
+
+        for (i = 0; i < mp_irq_entries; i++)
+                if (mp_irqs[i].mpc_irqtype == mp_INT &&
+                    mp_irqs[i].mpc_srcbusirq == bus_irq)
+                        break;
+        if (i >= mp_irq_entries)
+                return -1;
+
+        *trigger = irq_trigger(i);
+        *polarity = irq_polarity(i);
+        return 0;
+}
+
+#endif /* CONFIG_ACPI */
 
 /*
  * This function currently is only a helper for the i386 smp boot process where
@@ -2260,3 +2279,4 @@ void __init setup_ioapic_dest(void)
         }
 }
 #endif
+
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index b871f28..7d6be23 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -153,6 +153,8 @@ static int acpi_battery_get_property(struct power_supply *psy,
                         val->intval = POWER_SUPPLY_STATUS_CHARGING;
                 else if (battery->state == 0)
                         val->intval = POWER_SUPPLY_STATUS_FULL;
+                else
+                        val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
                 break;
         case POWER_SUPPLY_PROP_PRESENT:
                 val->intval = acpi_battery_present(battery);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 06b78e5..d6ddb54 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -75,7 +75,8 @@ enum {
         EC_FLAGS_WAIT_GPE = 0,  /* Don't check status until GPE arrives */
         EC_FLAGS_QUERY_PENDING, /* Query is pending */
         EC_FLAGS_GPE_MODE,      /* Expect GPE to be sent for status change */
-        EC_FLAGS_ONLY_IBF_GPE,  /* Expect GPE only for IBF = 0 event */
+        EC_FLAGS_NO_ADDRESS_GPE, /* Expect GPE only for non-address event */
+        EC_FLAGS_ADDRESS,       /* Address is being written */
 };
 
 static int acpi_ec_remove(struct acpi_device *device, int type);
@@ -166,38 +167,45 @@ static inline int acpi_ec_check_status(struct acpi_ec *ec, enum ec_event event)
 
 static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
 {
+        int ret = 0;
+        if (unlikely(test_bit(EC_FLAGS_ADDRESS, &ec->flags) &&
+                     test_bit(EC_FLAGS_NO_ADDRESS_GPE, &ec->flags)))
+                force_poll = 1;
         if (likely(test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) &&
             likely(!force_poll)) {
                 if (wait_event_timeout(ec->wait, acpi_ec_check_status(ec, event),
                                        msecs_to_jiffies(ACPI_EC_DELAY)))
-                        return 0;
+                        goto end;
                 clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
                 if (acpi_ec_check_status(ec, event)) {
-                        if (event == ACPI_EC_EVENT_OBF_1) {
-                                /* miss OBF = 1 GPE, don't expect it anymore */
-                                printk(KERN_INFO PREFIX "missing OBF_1 confirmation,"
-                                       "switching to degraded mode.\n");
-                                set_bit(EC_FLAGS_ONLY_IBF_GPE, &ec->flags);
+                        if (test_bit(EC_FLAGS_ADDRESS, &ec->flags)) {
+                                /* miss address GPE, don't expect it anymore */
+                                printk(KERN_INFO PREFIX "missing address confirmation,"
+                                       "don't expect it any longer.\n");
+                                set_bit(EC_FLAGS_NO_ADDRESS_GPE, &ec->flags);
                         } else {
                                 /* missing GPEs, switch back to poll mode */
-                                printk(KERN_INFO PREFIX "missing IBF_1 confirmations,"
+                                printk(KERN_INFO PREFIX "missing confirmations,"
                                        "switch off interrupt mode.\n");
                                 clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
                         }
-                        return 0;
+                        goto end;
                 }
         } else {
                 unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
                 clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
                 while (time_before(jiffies, delay)) {
                         if (acpi_ec_check_status(ec, event))
-                                return 0;
+                                goto end;
                 }
         }
         printk(KERN_ERR PREFIX "acpi_ec_wait timeout,"
                " status = %d, expect_event = %d\n",
                acpi_ec_read_status(ec), event);
-        return -ETIME;
+        ret = -ETIME;
+      end:
+        clear_bit(EC_FLAGS_ADDRESS, &ec->flags);
+        return ret;
 }
 
 static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
@@ -216,6 +224,9 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
                                        "write_cmd timeout, command = %d\n", command);
                                 goto end;
                         }
+                        /* mark the address byte written to EC */
+                        if (rdata_len + wdata_len > 1)
+                                set_bit(EC_FLAGS_ADDRESS, &ec->flags);
                         set_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
                         acpi_ec_write_data(ec, *(wdata++));
                 }
@@ -231,8 +242,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
                 clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
 
         for (; rdata_len > 0; --rdata_len) {
-                if (test_bit(EC_FLAGS_ONLY_IBF_GPE, &ec->flags))
-                        force_poll = 1;
                 result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, force_poll);
                 if (result) {
                         printk(KERN_ERR PREFIX "read timeout, command = %d\n",
@@ -881,12 +890,20 @@ int __init acpi_ec_ecdt_probe(void)
                 boot_ec->gpe = ecdt_ptr->gpe;
                 boot_ec->handle = ACPI_ROOT_OBJECT;
         } else {
+                /* This workaround is needed only on some broken machines,
+                 * which require early EC, but fail to provide ECDT */
+                acpi_handle x;
                 printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n");
                 status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
                                                 boot_ec, NULL);
                 /* Check that acpi_get_devices actually find something */
                 if (ACPI_FAILURE(status) || !boot_ec->handle)
                         goto error;
+                /* We really need to limit this workaround, the only ASUS,
+                 * which needs it, has fake EC._INI method, so use it as flag.
+                 */
+                if (ACPI_FAILURE(acpi_get_handle(boot_ec->handle, "_INI", &x)))
+                        goto error;
         }
 
         ret = ec_install_handlers(boot_ec);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f996d0e..7b6c20e 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -197,6 +197,19 @@ static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
                 return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
 }
 
+static void acpi_safe_halt(void)
+{
+        current_thread_info()->status &= ~TS_POLLING;
+        /*
+         * TS_POLLING-cleared state must be visible before we
+         * test NEED_RESCHED:
+         */
+        smp_mb();
+        if (!need_resched())
+                safe_halt();
+        current_thread_info()->status |= TS_POLLING;
+}
+
 #ifndef CONFIG_CPU_IDLE
 
 static void
@@ -239,19 +252,6 @@ acpi_processor_power_activate(struct acpi_processor *pr,
         return;
 }
 
-static void acpi_safe_halt(void)
-{
-        current_thread_info()->status &= ~TS_POLLING;
-        /*
-         * TS_POLLING-cleared state must be visible before we
-         * test NEED_RESCHED:
-         */
-        smp_mb();
-        if (!need_resched())
-                safe_halt();
-        current_thread_info()->status |= TS_POLLING;
-}
-
 static atomic_t c3_cpu_count;
 
 /* Common C-state entry for C2, C3, .. */
@@ -1373,15 +1373,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
         if (pr->flags.bm_check)
                 acpi_idle_update_bm_rld(pr, cx);
 
-        current_thread_info()->status &= ~TS_POLLING;
-        /*
-         * TS_POLLING-cleared state must be visible before we test
-         * NEED_RESCHED:
-         */
-        smp_mb();
-        if (!need_resched())
-                safe_halt();
-        current_thread_info()->status |= TS_POLLING;
+        acpi_safe_halt();
 
         cx->usage++;
 
@@ -1399,6 +1391,8 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
         struct acpi_processor *pr;
         struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
         u32 t1, t2;
+        int sleep_ticks = 0;
+
         pr = processors[smp_processor_id()];
 
         if (unlikely(!pr))
@@ -1428,6 +1422,8 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
         ACPI_FLUSH_CPU_CACHE();
 
         t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+        /* Tell the scheduler that we are going deep-idle: */
+        sched_clock_idle_sleep_event();
         acpi_state_timer_broadcast(pr, cx, 1);
         acpi_idle_do_entry(cx);
         t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
@@ -1436,6 +1432,10 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
         /* TSC could halt in idle, so notify users */
         mark_tsc_unstable("TSC halts in idle");;
 #endif
+        sleep_ticks = ticks_elapsed(t1, t2);
+
+        /* Tell the scheduler how much we idled: */
+        sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
 
         local_irq_enable();
         current_thread_info()->status |= TS_POLLING;
@@ -1443,7 +1443,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
         cx->usage++;
 
         acpi_state_timer_broadcast(pr, cx, 0);
-        cx->time += ticks_elapsed(t1, t2);
+        cx->time += sleep_ticks;
         return ticks_elapsed_in_us(t1, t2);
 }
 
@@ -1463,6 +1463,8 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
         struct acpi_processor *pr;
         struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
         u32 t1, t2;
+        int sleep_ticks = 0;
+
         pr = processors[smp_processor_id()];
 
         if (unlikely(!pr))
@@ -1471,6 +1473,15 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
         if (acpi_idle_suspend)
                 return(acpi_idle_enter_c1(dev, state));
 
+        if (acpi_idle_bm_check()) {
+                if (dev->safe_state) {
+                        return dev->safe_state->enter(dev, dev->safe_state);
+                } else {
+                        acpi_safe_halt();
+                        return 0;
+                }
+        }
+
         local_irq_disable();
         current_thread_info()->status &= ~TS_POLLING;
         /*
@@ -1485,38 +1496,45 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
                 return 0;
         }
 
+        /* Tell the scheduler that we are going deep-idle: */
+        sched_clock_idle_sleep_event();
         /*
          * Must be done before busmaster disable as we might need to
          * access HPET !
          */
         acpi_state_timer_broadcast(pr, cx, 1);
 
-        if (acpi_idle_bm_check()) {
-                cx = pr->power.bm_state;
-
-                acpi_idle_update_bm_rld(pr, cx);
-
-                t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-                acpi_idle_do_entry(cx);
-                t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-        } else {
-                acpi_idle_update_bm_rld(pr, cx);
+        acpi_idle_update_bm_rld(pr, cx);
 
+        /*
+         * disable bus master
+         * bm_check implies we need ARB_DIS
+         * !bm_check implies we need cache flush
+         * bm_control implies whether we can do ARB_DIS
+         *
+         * That leaves a case where bm_check is set and bm_control is
+         * not set. In that case we cannot do much, we enter C3
+         * without doing anything.
+         */
+        if (pr->flags.bm_check && pr->flags.bm_control) {
                 spin_lock(&c3_lock);
                 c3_cpu_count++;
                 /* Disable bus master arbitration when all CPUs are in C3 */
                 if (c3_cpu_count == num_online_cpus())
                         acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
                 spin_unlock(&c3_lock);
+        } else if (!pr->flags.bm_check) {
+                ACPI_FLUSH_CPU_CACHE();
+        }
 
-                t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-                acpi_idle_do_entry(cx);
-                t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+        t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+        acpi_idle_do_entry(cx);
+        t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 
+        /* Re-enable bus master arbitration */
+        if (pr->flags.bm_check && pr->flags.bm_control) {
                 spin_lock(&c3_lock);
-                /* Re-enable bus master arbitration */
-                if (c3_cpu_count == num_online_cpus())
-                        acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
+                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
                 c3_cpu_count--;
                 spin_unlock(&c3_lock);
         }
@@ -1525,6 +1543,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
         /* TSC could halt in idle, so notify users */
         mark_tsc_unstable("TSC halts in idle");
 #endif
+        sleep_ticks = ticks_elapsed(t1, t2);
+        /* Tell the scheduler how much we idled: */
+        sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
 
         local_irq_enable();
         current_thread_info()->status |= TS_POLLING;
@@ -1532,7 +1553,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
         cx->usage++;
 
         acpi_state_timer_broadcast(pr, cx, 0);
-        cx->time += ticks_elapsed(t1, t2);
+        cx->time += sleep_ticks;
         return ticks_elapsed_in_us(t1, t2);
 }
 
@@ -1584,12 +1605,14 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
                 case ACPI_STATE_C1:
                         state->flags |= CPUIDLE_FLAG_SHALLOW;
                         state->enter = acpi_idle_enter_c1;
+                        dev->safe_state = state;
                         break;
 
                 case ACPI_STATE_C2:
                         state->flags |= CPUIDLE_FLAG_BALANCED;
                         state->flags |= CPUIDLE_FLAG_TIME_VALID;
                         state->enter = acpi_idle_enter_simple;
+                        dev->safe_state = state;
                         break;
 
                 case ACPI_STATE_C3:
@@ -1610,14 +1633,6 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
         if (!count)
                 return -EINVAL;
 
-        /* find the deepest state that can handle active BM */
-        if (pr->flags.bm_check) {
-                for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++)
-                        if (pr->power.states[i].type == ACPI_STATE_C3)
-                                break;
-                pr->power.bm_state = &pr->power.states[i-1];
-        }
-
         return 0;
 }
 
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 278d20f..6045cdb 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -113,6 +113,7 @@ struct acpi_battery {
         u16 spec;
         u8 id;
         u8 present:1;
+        u8 have_sysfs_alarm:1;
 };
 
 #define to_acpi_battery(x) container_of(x, struct acpi_battery, bat);
@@ -808,7 +809,13 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
         }
         battery->bat.get_property = acpi_sbs_battery_get_property;
         result = power_supply_register(&sbs->device->dev, &battery->bat);
-        device_create_file(battery->bat.dev, &alarm_attr);
+        if (result)
+                goto end;
+        result = device_create_file(battery->bat.dev, &alarm_attr);
+        if (result)
+                goto end;
+        battery->have_sysfs_alarm = 1;
+      end:
         printk(KERN_INFO PREFIX "%s [%s]: Battery Slot [%s] (battery %s)\n",
                ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device),
                battery->name, sbs->battery->present ? "present" : "absent");
@@ -817,14 +824,16 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
 
 static void acpi_battery_remove(struct acpi_sbs *sbs, int id)
 {
-        if (sbs->battery[id].bat.dev)
-                device_remove_file(sbs->battery[id].bat.dev, &alarm_attr);
-        power_supply_unregister(&sbs->battery[id].bat);
-#ifdef CONFIG_ACPI_PROCFS_POWER
-        if (sbs->battery[id].proc_entry) {
-                acpi_sbs_remove_fs(&(sbs->battery[id].proc_entry),
-                                        acpi_battery_dir);
+        struct acpi_battery *battery = &sbs->battery[id];
+
+        if (battery->bat.dev) {
+                if (battery->have_sysfs_alarm)
+                        device_remove_file(battery->bat.dev, &alarm_attr);
+                power_supply_unregister(&battery->bat);
         }
+#ifdef CONFIG_ACPI_PROCFS_POWER
+        if (battery->proc_entry)
+                acpi_sbs_remove_fs(&battery->proc_entry, acpi_battery_dir);
 #endif
 }
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index cd0a204..11adab1 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -75,6 +75,7 @@ static void pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res,
 {
         int i = 0;
         int irq;
+        int p, t;
 
         if (!valid_IRQ(gsi))
                 return;
@@ -85,15 +86,22 @@ static void pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res,
         if (i >= PNP_MAX_IRQ)
                 return;
 
-#ifdef CONFIG_X86
-        if (gsi < 16 && (triggering != ACPI_EDGE_SENSITIVE ||
-                                polarity != ACPI_ACTIVE_HIGH)) {
-                pnp_warn("BIOS BUG: legacy PNP IRQ %d should be edge trigger, "
-                         "active high", gsi);
-                triggering = ACPI_EDGE_SENSITIVE;
-                polarity = ACPI_ACTIVE_HIGH;
+        /*
+         * in IO-APIC mode, use overrided attribute. Two reasons:
+         * 1. BIOS bug in DSDT
+         * 2. BIOS uses IO-APIC mode Interrupt Source Override
+         */
+        if (!acpi_get_override_irq(gsi, &t, &p)) {
+                t = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
+                p = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
+
+                if (triggering != t || polarity != p) {
+                        pnp_warn("IRQ %d override to %s, %s",
+                                gsi, t ? "edge":"level", p ? "low":"high");
+                        triggering = t;
+                        polarity = p;
+                }
         }
-#endif
 
         res->irq_resource[i].flags = IORESOURCE_IRQ;  // Also clears _UNSET flag
         res->irq_resource[i].flags |= irq_flags(triggering, polarity);
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 26d79f6..76411b1 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -78,7 +78,6 @@ struct acpi_processor_cx {
 struct acpi_processor_power {
         struct cpuidle_device dev;
         struct acpi_processor_cx *state;
-        struct acpi_processor_cx *bm_state;
         unsigned long bm_check_timestamp;
         u32 default_state;
         u32 bm_activity;
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 8ccedf7..e3c16c9 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -132,6 +132,11 @@ extern unsigned long acpi_realmode_flags;
 
 int acpi_register_gsi (u32 gsi, int triggering, int polarity);
 int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
+#ifdef CONFIG_X86_IO_APIC
+extern int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity);
+#else
+#define acpi_get_override_irq(bus, trigger, polarity) (-1)
+#endif
 /*
  * This function undoes the effect of one call to acpi_register_gsi().
  * If this matches the last registration, any IRQ resources for gsi
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 16a5154..c4e0016 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -92,6 +92,7 @@ struct cpuidle_device {
         struct kobject          kobj;
         struct completion       kobj_unregister;
         void                    *governor_data;
+        struct cpuidle_state    *safe_state;
 };
 
 DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);