Diffstat (limited to 'arch/i386')
-rw-r--r--   arch/i386/kernel/acpi/Makefile                      |   4
-rw-r--r--   arch/i386/kernel/acpi/cstate.c                      | 103
-rw-r--r--   arch/i386/kernel/acpi/wakeup.S                      |   5
-rw-r--r--   arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c   |   2
-rw-r--r--   arch/i386/pci/irq.c                                 |  16
-rw-r--r--   arch/i386/pci/visws.c                               |   2
6 files changed, 122 insertions, 10 deletions
diff --git a/arch/i386/kernel/acpi/Makefile b/arch/i386/kernel/acpi/Makefile
index ee75cb2..5e291a2 100644
--- a/arch/i386/kernel/acpi/Makefile
+++ b/arch/i386/kernel/acpi/Makefile
@@ -2,3 +2,7 @@ obj-$(CONFIG_ACPI_BOOT) := boot.o
obj-$(CONFIG_X86_IO_APIC) += earlyquirk.o
obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
+ifneq ($(CONFIG_ACPI_PROCESSOR),)
+obj-y += cstate.o
+endif
+
diff --git a/arch/i386/kernel/acpi/cstate.c b/arch/i386/kernel/acpi/cstate.c
new file mode 100644
index 0000000..4c3036b
--- /dev/null
+++ b/arch/i386/kernel/acpi/cstate.c
@@ -0,0 +1,103 @@
+/*
+ * arch/i386/kernel/acpi/cstate.c
+ *
+ * Copyright (C) 2005 Intel Corporation
+ * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ * - Added _PDC for SMP C-states on Intel CPUs
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+
+#include <acpi/processor.h>
+#include <asm/acpi.h>
+
+static void acpi_processor_power_init_intel_pdc(struct acpi_processor_power
+                                                *pow)
+{
+        struct acpi_object_list *obj_list;
+        union acpi_object *obj;
+        u32 *buf;
+
+        /* allocate and initialize pdc. It will be used later. */
+        obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
+        if (!obj_list) {
+                printk(KERN_ERR "Memory allocation error\n");
+                return;
+        }
+
+        obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
+        if (!obj) {
+                printk(KERN_ERR "Memory allocation error\n");
+                kfree(obj_list);
+                return;
+        }
+
+        buf = kmalloc(12, GFP_KERNEL);
+        if (!buf) {
+                printk(KERN_ERR "Memory allocation error\n");
+                kfree(obj);
+                kfree(obj_list);
+                return;
+        }
+
+        buf[0] = ACPI_PDC_REVISION_ID;
+        buf[1] = 1;
+        buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
+
+        obj->type = ACPI_TYPE_BUFFER;
+        obj->buffer.length = 12;
+        obj->buffer.pointer = (u8 *) buf;
+        obj_list->count = 1;
+        obj_list->pointer = obj;
+        pow->pdc = obj_list;
+
+        return;
+}
+
+/* Initialize _PDC data based on the CPU vendor */
+void acpi_processor_power_init_pdc(struct acpi_processor_power *pow,
+                                   unsigned int cpu)
+{
+        struct cpuinfo_x86 *c = cpu_data + cpu;
+
+        pow->pdc = NULL;
+        if (c->x86_vendor == X86_VENDOR_INTEL)
+                acpi_processor_power_init_intel_pdc(pow);
+
+        return;
+}
+
+EXPORT_SYMBOL(acpi_processor_power_init_pdc);
+
+/*
+ * Initialize bm_flags based on the CPU cache properties
+ * On SMP it depends on cache configuration
+ * - When cache is not shared among all CPUs, we flush cache
+ *   before entering C3.
+ * - When cache is shared among all CPUs, we use bm_check
+ *   mechanism as in UP case
+ *
+ * This routine is called only after all the CPUs are online
+ */
+void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
+                                        unsigned int cpu)
+{
+        struct cpuinfo_x86 *c = cpu_data + cpu;
+
+        flags->bm_check = 0;
+        if (num_online_cpus() == 1)
+                flags->bm_check = 1;
+        else if (c->x86_vendor == X86_VENDOR_INTEL) {
+                /*
+                 * Today all CPUs that support C3 share cache.
+                 * TBD: This needs to look at cache shared map, once
+                 * multi-core detection patch makes to the base.
+                 */
+                flags->bm_check = 1;
+        }
+}
+
+EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
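Aside: the 12-byte buffer built above is simply three u32 words -- the _PDC revision, the count of capability DWORDs, and the capability bits themselves. A standalone sketch of that layout follows; the two macro values are placeholders, not the kernel's actual definitions.

#include <stdint.h>
#include <stdio.h>

#define PDC_REVISION_ID       1        /* placeholder for ACPI_PDC_REVISION_ID */
#define PDC_C_CAPABILITY_SMP  (1 << 0) /* placeholder for ACPI_PDC_C_CAPABILITY_SMP */

int main(void)
{
        uint32_t buf[3];

        buf[0] = PDC_REVISION_ID;       /* _PDC interface revision */
        buf[1] = 1;                     /* one capability DWORD follows */
        buf[2] = PDC_C_CAPABILITY_SMP;  /* capability bits reported to the firmware */

        printf("buffer length = %zu bytes\n", sizeof(buf));    /* prints 12 */
        return 0;
}
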
diff --git a/arch/i386/kernel/acpi/wakeup.S b/arch/i386/kernel/acpi/wakeup.S
index 39d32484..44d886c 100644
--- a/arch/i386/kernel/acpi/wakeup.S
+++ b/arch/i386/kernel/acpi/wakeup.S
@@ -74,8 +74,9 @@ wakeup_code:
 	movw %ax,%fs
 	movw $0x0e00 + 'i', %fs:(0x12)

-	# need a gdt
-	lgdt real_save_gdt - wakeup_code
+	# need a gdt -- use lgdtl to force 32-bit operands, in case
+	# the GDT is located past 16 megabytes.
+	lgdtl real_save_gdt - wakeup_code

 	movl real_save_cr0 - wakeup_code, %eax
 	movl %eax, %cr0
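Aside: with a 16-bit operand size, lgdt loads only a 24-bit base address, so a GDT located above 16 MB would be truncated; lgdtl forces a 32-bit operand and loads the full base, which is why the instruction is changed above. A small sketch of the 6-byte operand both forms read (illustration only, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* The pseudo-descriptor that lgdt/lgdtl point at. */
struct gdt_ptr {
        uint16_t limit;  /* size of the GDT minus one */
        uint32_t base;   /* linear address of the GDT; 16-bit lgdt keeps only bits 0-23 */
} __attribute__((packed));

int main(void)
{
        printf("descriptor operand is %zu bytes\n", sizeof(struct gdt_ptr)); /* prints 6 */
        return 0;
}
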
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index 7dcbf70..327a55d 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -375,7 +375,7 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
 	arg0.buffer.pointer = (u8 *) arg0_buf;
 	arg0_buf[0] = ACPI_PDC_REVISION_ID;
 	arg0_buf[1] = 1;
-	arg0_buf[2] = ACPI_PDC_EST_CAPABILITY_SMP | ACPI_PDC_EST_CAPABILITY_MSR;
+	arg0_buf[2] = ACPI_PDC_EST_CAPABILITY_SMP_MSR;

 	p.pdc = &arg_list;
diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c
index 78ca1ec..766b104 100644
--- a/arch/i386/pci/irq.c
+++ b/arch/i386/pci/irq.c
@@ -1051,24 +1051,28 @@ static int __init pcibios_irq_init(void)
 subsys_initcall(pcibios_irq_init);

-static void pirq_penalize_isa_irq(int irq)
+static void pirq_penalize_isa_irq(int irq, int active)
 {
 	/*
 	 * If any ISAPnP device reports an IRQ in its list of possible
 	 * IRQ's, we try to avoid assigning it to PCI devices.
 	 */
-	if (irq < 16)
-		pirq_penalty[irq] += 100;
+	if (irq < 16) {
+		if (active)
+			pirq_penalty[irq] += 1000;
+		else
+			pirq_penalty[irq] += 100;
+	}
 }

-void pcibios_penalize_isa_irq(int irq)
+void pcibios_penalize_isa_irq(int irq, int active)
 {
 #ifdef CONFIG_ACPI_PCI
 	if (!acpi_noirq)
-		acpi_penalize_isa_irq(irq);
+		acpi_penalize_isa_irq(irq, active);
 	else
 #endif
-		pirq_penalize_isa_irq(irq);
+		pirq_penalize_isa_irq(irq, active);
 }

 static int pirq_enable_irq(struct pci_dev *dev)
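Aside: the penalty table steers PCI interrupt-link assignment away from IRQs that ISAPnP devices claim, and the new active flag penalizes an IRQ that is actually in use (+1000) more heavily than one merely listed as possible (+100). The sketch below shows how such a table could bias the final choice; it is an illustration under that assumption, not the kernel's actual router code.

#include <stdio.h>

static unsigned int pirq_penalty[16];

/* Pick the least-penalized IRQ out of a bitmask of candidates. */
static int pick_irq(unsigned int candidate_mask)
{
        int irq, best = -1;

        for (irq = 0; irq < 16; irq++) {
                if (!(candidate_mask & (1 << irq)))
                        continue;
                if (best < 0 || pirq_penalty[irq] < pirq_penalty[best])
                        best = irq;
        }
        return best;
}

int main(void)
{
        pirq_penalty[5] += 1000;        /* active ISA device on IRQ 5 */
        pirq_penalty[10] += 100;        /* IRQ 10 merely listed as possible */

        printf("chosen IRQ: %d\n", pick_irq((1 << 5) | (1 << 10) | (1 << 11)));
        return 0;       /* prints 11: the unpenalized candidate wins */
}
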
diff --git a/arch/i386/pci/visws.c b/arch/i386/pci/visws.c
index 6a92487..314c933 100644
--- a/arch/i386/pci/visws.c
+++ b/arch/i386/pci/visws.c
@@ -21,7 +21,7 @@ static int pci_visws_enable_irq(struct pci_dev *dev) { return 0; }
int (*pcibios_enable_irq)(struct pci_dev *dev) = &pci_visws_enable_irq;
-void __init pcibios_penalize_isa_irq(int irq) {}
+void __init pcibios_penalize_isa_irq(int irq, int active) {}
unsigned int pci_bus0, pci_bus1;