author     Mauro Carvalho Chehab <mchehab@infradead.org>  2006-02-06 10:43:13 -0200
committer  Mauro Carvalho Chehab <mchehab@infradead.org>  2006-02-06 10:43:13 -0200
commit     b2faf597d93bdf5e2d12d93ea0815935a73f749e (patch)
tree       1876616290ff282b8a0814e2429d23e0104f3701 /arch/i386/kernel/cpu
parent     638e174688f58200d0deb7435093435e7d737b09 (diff)
parent     410c05427a69f53851637ccb85c2212131409fbd (diff)
download   op-kernel-dev-b2faf597d93bdf5e2d12d93ea0815935a73f749e.zip
           op-kernel-dev-b2faf597d93bdf5e2d12d93ea0815935a73f749e.tar.gz
Merge branch 'origin'
Diffstat (limited to 'arch/i386/kernel/cpu')
-rw-r--r--  arch/i386/kernel/cpu/amd.c                          |  8
-rw-r--r--  arch/i386/kernel/cpu/centaur.c                      | 12
-rw-r--r--  arch/i386/kernel/cpu/common.c                       | 11
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/Kconfig                |  1
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c         | 71
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/p4-clockmod.c          |  9
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c   | 12
-rw-r--r--  arch/i386/kernel/cpu/cyrix.c                        | 18
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c              | 12
-rw-r--r--  arch/i386/kernel/cpu/mtrr/main.c                    | 15
-rw-r--r--  arch/i386/kernel/cpu/nexgen.c                       |  8
-rw-r--r--  arch/i386/kernel/cpu/rise.c                         |  8
-rw-r--r--  arch/i386/kernel/cpu/transmeta.c                    | 10
-rw-r--r--  arch/i386/kernel/cpu/umc.c                          |  8
14 files changed, 104 insertions(+), 99 deletions(-)
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index 333578a..0810f81f 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -282,3 +282,11 @@ int __init amd_init_cpu(void)
}
//early_arch_initcall(amd_init_cpu);
+
+static int __init amd_exit_cpu(void)
+{
+ cpu_devs[X86_VENDOR_AMD] = NULL;
+ return 0;
+}
+
+late_initcall(amd_exit_cpu);
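
This hunk introduces a pattern repeated in most of the vendor files below: a late_initcall that clears the vendor's slot in cpu_devs[] once boot-time CPU identification is over, presumably so that nothing is left pointing at the __initdata cpu_dev structures after init memory is freed. A minimal sketch of how the new unregistration pairs with the existing registration in amd.c (field list abbreviated; treat it as illustrative rather than a literal copy of the file):

	static struct cpu_dev amd_cpu_dev __initdata = {
		.c_vendor = "AMD",
		.c_ident  = { "AuthenticAMD" },
		.c_init   = init_amd,	/* vendor-specific setup run at identify time */
		/* ... remaining fields omitted in this sketch ... */
	};

	int __init amd_init_cpu(void)
	{
		/* register for boot-time vendor identification */
		cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev;
		return 0;
	}

	static int __init amd_exit_cpu(void)
	{
		/* amd_cpu_dev lives in __initdata; drop the pointer before that memory is reclaimed */
		cpu_devs[X86_VENDOR_AMD] = NULL;
		return 0;
	}
	late_initcall(amd_exit_cpu);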
diff --git a/arch/i386/kernel/cpu/centaur.c b/arch/i386/kernel/cpu/centaur.c
index 394814e..f52669e 100644
--- a/arch/i386/kernel/cpu/centaur.c
+++ b/arch/i386/kernel/cpu/centaur.c
@@ -405,10 +405,6 @@ static void __init init_centaur(struct cpuinfo_x86 *c)
winchip2_protect_mcr();
#endif
break;
- case 10:
- name="4";
- /* no info on the WC4 yet */
- break;
default:
name="??";
}
@@ -474,3 +470,11 @@ int __init centaur_init_cpu(void)
}
//early_arch_initcall(centaur_init_cpu);
+
+static int __init centaur_exit_cpu(void)
+{
+ cpu_devs[X86_VENDOR_CENTAUR] = NULL;
+ return 0;
+}
+
+late_initcall(centaur_exit_cpu);
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 15aee26..7eb9213 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -44,6 +44,7 @@ static void default_init(struct cpuinfo_x86 * c)
static struct cpu_dev default_cpu = {
.c_init = default_init,
+ .c_vendor = "Unknown",
};
static struct cpu_dev * this_cpu = &default_cpu;
@@ -150,6 +151,7 @@ static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
{
char *v = c->x86_vendor_id;
int i;
+ static int printed;
for (i = 0; i < X86_VENDOR_NUM; i++) {
if (cpu_devs[i]) {
@@ -159,10 +161,17 @@ static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
c->x86_vendor = i;
if (!early)
this_cpu = cpu_devs[i];
- break;
+ return;
}
}
}
+ if (!printed) {
+ printed++;
+ printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
+ printk(KERN_ERR "CPU: Your system may be unstable.\n");
+ }
+ c->x86_vendor = X86_VENDOR_UNKNOWN;
+ this_cpu = &default_cpu;
}
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
index 0f1eb50..26892d2 100644
--- a/arch/i386/kernel/cpu/cpufreq/Kconfig
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
@@ -96,6 +96,7 @@ config X86_POWERNOW_K8_ACPI
config X86_GX_SUSPMOD
tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
+ depends on PCI
help
This add the CPUFreq driver for NatSemi Geode processors which
support suspend modulation.
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 7975e79..3852d0a 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -295,68 +295,6 @@ acpi_cpufreq_guess_freq (
}
-/*
- * acpi_processor_cpu_init_pdc_est - let BIOS know about the SMP capabilities
- * of this driver
- * @perf: processor-specific acpi_io_data struct
- * @cpu: CPU being initialized
- *
- * To avoid issues with legacy OSes, some BIOSes require to be informed of
- * the SMP capabilities of OS P-state driver. Here we set the bits in _PDC
- * accordingly, for Enhanced Speedstep. Actual call to _PDC is done in
- * driver/acpi/processor.c
- */
-static void
-acpi_processor_cpu_init_pdc_est(
- struct acpi_processor_performance *perf,
- unsigned int cpu,
- struct acpi_object_list *obj_list
- )
-{
- union acpi_object *obj;
- u32 *buf;
- struct cpuinfo_x86 *c = cpu_data + cpu;
- dprintk("acpi_processor_cpu_init_pdc_est\n");
-
- if (!cpu_has(c, X86_FEATURE_EST))
- return;
-
- /* Initialize pdc. It will be used later. */
- if (!obj_list)
- return;
-
- if (!(obj_list->count && obj_list->pointer))
- return;
-
- obj = obj_list->pointer;
- if ((obj->buffer.length == 12) && obj->buffer.pointer) {
- buf = (u32 *)obj->buffer.pointer;
- buf[0] = ACPI_PDC_REVISION_ID;
- buf[1] = 1;
- buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
- perf->pdc = obj_list;
- }
- return;
-}
-
-
-/* CPU specific PDC initialization */
-static void
-acpi_processor_cpu_init_pdc(
- struct acpi_processor_performance *perf,
- unsigned int cpu,
- struct acpi_object_list *obj_list
- )
-{
- struct cpuinfo_x86 *c = cpu_data + cpu;
- dprintk("acpi_processor_cpu_init_pdc\n");
- perf->pdc = NULL;
- if (cpu_has(c, X86_FEATURE_EST))
- acpi_processor_cpu_init_pdc_est(perf, cpu, obj_list);
- return;
-}
-
-
static int
acpi_cpufreq_cpu_init (
struct cpufreq_policy *policy)
@@ -367,14 +305,7 @@ acpi_cpufreq_cpu_init (
unsigned int result = 0;
struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
- union acpi_object arg0 = {ACPI_TYPE_BUFFER};
- u32 arg0_buf[3];
- struct acpi_object_list arg_list = {1, &arg0};
-
dprintk("acpi_cpufreq_cpu_init\n");
- /* setup arg_list for _PDC settings */
- arg0.buffer.length = 12;
- arg0.buffer.pointer = (u8 *) arg0_buf;
data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
if (!data)
@@ -382,9 +313,7 @@ acpi_cpufreq_cpu_init (
acpi_io_data[cpu] = data;
- acpi_processor_cpu_init_pdc(&data->acpi_data, cpu, &arg_list);
result = acpi_processor_register_performance(&data->acpi_data, cpu);
- data->acpi_data.pdc = NULL;
if (result)
goto err_free;
diff --git a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
index 270f218..cc73a7a 100644
--- a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
@@ -52,6 +52,7 @@ enum {
static int has_N44_O17_errata[NR_CPUS];
+static int has_N60_errata[NR_CPUS];
static unsigned int stock_freq;
static struct cpufreq_driver p4clockmod_driver;
static unsigned int cpufreq_p4_get(unsigned int cpu);
@@ -226,6 +227,12 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
case 0x0f12:
has_N44_O17_errata[policy->cpu] = 1;
dprintk("has errata -- disabling low frequencies\n");
+ break;
+
+ case 0x0f29:
+ has_N60_errata[policy->cpu] = 1;
+ dprintk("has errata -- disabling frequencies lower than 2ghz\n");
+ break;
}
/* get max frequency */
@@ -237,6 +244,8 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
for (i=1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
if ((i<2) && (has_N44_O17_errata[policy->cpu]))
p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ else if (has_N60_errata[policy->cpu] && p4clockmod_table[i].frequency < 2000000)
+ p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
else
p4clockmod_table[i].frequency = (stock_freq * i)/8;
}
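
For scale, on a hypothetical 2.8 GHz Pentium 4 (stock_freq = 2800000 kHz) the modulated steps computed by the loop above work out as follows; the N60 erratum is meant to rule out the ones that land below the 2000000 kHz line:

	i=1: (2800000 * 1)/8 =  350000 kHz
	i=2: (2800000 * 2)/8 =  700000 kHz
	i=3: (2800000 * 3)/8 = 1050000 kHz
	i=4: (2800000 * 4)/8 = 1400000 kHz
	i=5: (2800000 * 5)/8 = 1750000 kHz
	i=6: (2800000 * 6)/8 = 2100000 kHz
	i=7: (2800000 * 7)/8 = 2450000 kHz

so on an affected part only the steps at or above 2 GHz are expected to remain selectable, matching the "disabling frequencies lower than 2ghz" message in the hunk above.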
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index 9a826cd..c173c0f 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -362,22 +362,10 @@ static struct acpi_processor_performance p;
*/
static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
{
- union acpi_object arg0 = {ACPI_TYPE_BUFFER};
- u32 arg0_buf[3];
- struct acpi_object_list arg_list = {1, &arg0};
unsigned long cur_freq;
int result = 0, i;
unsigned int cpu = policy->cpu;
- /* _PDC settings */
- arg0.buffer.length = 12;
- arg0.buffer.pointer = (u8 *) arg0_buf;
- arg0_buf[0] = ACPI_PDC_REVISION_ID;
- arg0_buf[1] = 1;
- arg0_buf[2] = ACPI_PDC_EST_CAPABILITY_SMP_MSR;
-
- p.pdc = &arg_list;
-
/* register with ACPI core */
if (acpi_processor_register_performance(&p, cpu)) {
dprintk(KERN_INFO PFX "obtaining ACPI data failed\n");
diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c
index 7501597..00f2e05 100644
--- a/arch/i386/kernel/cpu/cyrix.c
+++ b/arch/i386/kernel/cpu/cyrix.c
@@ -345,7 +345,7 @@ static void __init init_cyrix(struct cpuinfo_x86 *c)
/*
* Handle National Semiconductor branded processors
*/
-static void __devinit init_nsc(struct cpuinfo_x86 *c)
+static void __init init_nsc(struct cpuinfo_x86 *c)
{
/* There may be GX1 processors in the wild that are branded
* NSC and not Cyrix.
@@ -444,6 +444,14 @@ int __init cyrix_init_cpu(void)
//early_arch_initcall(cyrix_init_cpu);
+static int __init cyrix_exit_cpu(void)
+{
+ cpu_devs[X86_VENDOR_CYRIX] = NULL;
+ return 0;
+}
+
+late_initcall(cyrix_exit_cpu);
+
static struct cpu_dev nsc_cpu_dev __initdata = {
.c_vendor = "NSC",
.c_ident = { "Geode by NSC" },
@@ -458,3 +466,11 @@ int __init nsc_init_cpu(void)
}
//early_arch_initcall(nsc_init_cpu);
+
+static int __init nsc_exit_cpu(void)
+{
+ cpu_devs[X86_VENDOR_NSC] = NULL;
+ return 0;
+}
+
+late_initcall(nsc_exit_cpu);
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index fbfd374..ffe58ce 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -43,13 +43,23 @@ static struct _cache_table cache_table[] __cpuinitdata =
{ 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */
{ 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */
{ 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */
+ { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored cache, 64 byte line size */
{ 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */
{ 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */
+ { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */
+ { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
{ 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */
{ 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */
{ 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */
{ 0x44, LVL_2, 1024 }, /* 4-way set assoc, 32 byte line size */
{ 0x45, LVL_2, 2048 }, /* 4-way set assoc, 32 byte line size */
+ { 0x46, LVL_3, 4096 }, /* 4-way set assoc, 64 byte line size */
+ { 0x47, LVL_3, 8192 }, /* 8-way set assoc, 64 byte line size */
+ { 0x49, LVL_3, 4096 }, /* 16-way set assoc, 64 byte line size */
+ { 0x4a, LVL_3, 6144 }, /* 12-way set assoc, 64 byte line size */
+ { 0x4b, LVL_3, 8192 }, /* 16-way set assoc, 64 byte line size */
+ { 0x4c, LVL_3, 12288 }, /* 12-way set assoc, 64 byte line size */
+ { 0x4d, LVL_3, 16384 }, /* 16-way set assoc, 64 byte line size */
{ 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */
{ 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */
{ 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */
@@ -57,6 +67,7 @@ static struct _cache_table cache_table[] __cpuinitdata =
{ 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */
{ 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */
{ 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */
+ { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */
{ 0x78, LVL_2, 1024 }, /* 4-way set assoc, 64 byte line size */
{ 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */
{ 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */
@@ -141,6 +152,7 @@ static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_le
return 0;
}
+/* will only be called once; __init is safe here */
static int __init find_num_cache_leaves(void)
{
unsigned int eax, ebx, ecx, edx;
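
The rows added above extend the CPUID leaf 2 descriptor table with newer L2/L3 sizes and the 0x73 trace-cache entry. A simplified sketch of how one descriptor byte is looked up in that table (abridged from the driver's init path; struct layout and field names as used in this file, so treat it as illustrative):

	struct _cache_table {
		unsigned char descriptor;	/* byte reported by CPUID leaf 2 */
		char cache_type;		/* LVL_1_DATA, LVL_1_INST, LVL_2, LVL_3, LVL_TRACE */
		short size;			/* size in KB (K-uops for the trace-cache entries) */
	};

	/* e.g. desc = 0x49 from the new rows: 4096 KB of L3 */
	for (k = 0; cache_table[k].descriptor != 0; k++) {
		if (cache_table[k].descriptor != desc)
			continue;
		if (cache_table[k].cache_type == LVL_3)
			l3 += cache_table[k].size;	/* accumulate the per-level total */
		break;
	}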
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index 1e9db19..3b4618b 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -44,12 +44,10 @@
#include <asm/msr.h>
#include "mtrr.h"
-#define MTRR_VERSION "2.0 (20020519)"
-
u32 num_var_ranges = 0;
unsigned int *usage_table;
-static DECLARE_MUTEX(main_lock);
+static DECLARE_MUTEX(mtrr_sem);
u32 size_or_mask, size_and_mask;
@@ -335,7 +333,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
/* No CPU hotplug when we change MTRR entries */
lock_cpu_hotplug();
/* Search for existing MTRR */
- down(&main_lock);
+ down(&mtrr_sem);
for (i = 0; i < num_var_ranges; ++i) {
mtrr_if->get(i, &lbase, &lsize, &ltype);
if (base >= lbase + lsize)
@@ -373,7 +371,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
printk(KERN_INFO "mtrr: no more MTRRs available\n");
error = i;
out:
- up(&main_lock);
+ up(&mtrr_sem);
unlock_cpu_hotplug();
return error;
}
@@ -466,7 +464,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
max = num_var_ranges;
/* No CPU hotplug when we change MTRR entries */
lock_cpu_hotplug();
- down(&main_lock);
+ down(&mtrr_sem);
if (reg < 0) {
/* Search for existing MTRR */
for (i = 0; i < max; ++i) {
@@ -505,7 +503,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
set_mtrr(reg, 0, 0, 0);
error = reg;
out:
- up(&main_lock);
+ up(&mtrr_sem);
unlock_cpu_hotplug();
return error;
}
@@ -671,7 +669,6 @@ void __init mtrr_bp_init(void)
break;
}
}
- printk(KERN_INFO "mtrr: v%s\n",MTRR_VERSION);
if (mtrr_if) {
set_num_var_ranges();
@@ -688,7 +685,7 @@ void mtrr_ap_init(void)
if (!mtrr_if || !use_intel())
return;
/*
- * Ideally we should hold main_lock here to avoid mtrr entries changed,
+ * Ideally we should hold mtrr_sem here to avoid mtrr entries changed,
* but this routine will be called in cpu boot time, holding the lock
* breaks it. This routine is called in two cases: 1.very earily time
* of software resume, when there absolutely isn't mtrr entry changes;
diff --git a/arch/i386/kernel/cpu/nexgen.c b/arch/i386/kernel/cpu/nexgen.c
index 30898a2..ad87fa5 100644
--- a/arch/i386/kernel/cpu/nexgen.c
+++ b/arch/i386/kernel/cpu/nexgen.c
@@ -61,3 +61,11 @@ int __init nexgen_init_cpu(void)
}
//early_arch_initcall(nexgen_init_cpu);
+
+static int __init nexgen_exit_cpu(void)
+{
+ cpu_devs[X86_VENDOR_NEXGEN] = NULL;
+ return 0;
+}
+
+late_initcall(nexgen_exit_cpu);
diff --git a/arch/i386/kernel/cpu/rise.c b/arch/i386/kernel/cpu/rise.c
index 8602425..d08d5a2 100644
--- a/arch/i386/kernel/cpu/rise.c
+++ b/arch/i386/kernel/cpu/rise.c
@@ -51,3 +51,11 @@ int __init rise_init_cpu(void)
}
//early_arch_initcall(rise_init_cpu);
+
+static int __init rise_exit_cpu(void)
+{
+ cpu_devs[X86_VENDOR_RISE] = NULL;
+ return 0;
+}
+
+late_initcall(rise_exit_cpu);
diff --git a/arch/i386/kernel/cpu/transmeta.c b/arch/i386/kernel/cpu/transmeta.c
index fc42638..bdbeb77 100644
--- a/arch/i386/kernel/cpu/transmeta.c
+++ b/arch/i386/kernel/cpu/transmeta.c
@@ -84,7 +84,7 @@ static void __init init_transmeta(struct cpuinfo_x86 *c)
#endif
}
-static void transmeta_identify(struct cpuinfo_x86 * c)
+static void __init transmeta_identify(struct cpuinfo_x86 * c)
{
u32 xlvl;
generic_identify(c);
@@ -111,3 +111,11 @@ int __init transmeta_init_cpu(void)
}
//early_arch_initcall(transmeta_init_cpu);
+
+static int __init transmeta_exit_cpu(void)
+{
+ cpu_devs[X86_VENDOR_TRANSMETA] = NULL;
+ return 0;
+}
+
+late_initcall(transmeta_exit_cpu);
diff --git a/arch/i386/kernel/cpu/umc.c b/arch/i386/kernel/cpu/umc.c
index 264fcad..2cd988f 100644
--- a/arch/i386/kernel/cpu/umc.c
+++ b/arch/i386/kernel/cpu/umc.c
@@ -31,3 +31,11 @@ int __init umc_init_cpu(void)
}
//early_arch_initcall(umc_init_cpu);
+
+static int __init umc_exit_cpu(void)
+{
+ cpu_devs[X86_VENDOR_UMC] = NULL;
+ return 0;
+}
+
+late_initcall(umc_exit_cpu);