Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--  arch/i386/kernel/.gitignore           |  1 +
-rw-r--r--  arch/i386/kernel/cpu/transmeta.c      |  1 +
-rw-r--r--  arch/i386/kernel/head.S               |  4 ++++
-rw-r--r--  arch/i386/kernel/mpparse.c            |  2 +-
-rw-r--r--  arch/i386/kernel/smpboot.c            |  4 ----
-rw-r--r--  arch/i386/kernel/syscall_table.S      |  2 +-
-rw-r--r--  arch/i386/kernel/timers/timer_tsc.c   |  5 +++++
-rw-r--r--  arch/i386/kernel/vsyscall-sysenter.S  | 15 +++++++++++++++
8 files changed, 28 insertions(+), 6 deletions(-)
diff --git a/arch/i386/kernel/.gitignore b/arch/i386/kernel/.gitignore
new file mode 100644
index 0000000..40836ad
--- /dev/null
+++ b/arch/i386/kernel/.gitignore
@@ -0,0 +1 @@
+vsyscall.lds
diff --git a/arch/i386/kernel/cpu/transmeta.c b/arch/i386/kernel/cpu/transmeta.c
index bdbeb77..7214c9b 100644
--- a/arch/i386/kernel/cpu/transmeta.c
+++ b/arch/i386/kernel/cpu/transmeta.c
@@ -1,4 +1,5 @@
#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/init.h>
#include <asm/processor.h>
#include <asm/msr.h>
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S
index 5884469..2bee649 100644
--- a/arch/i386/kernel/head.S
+++ b/arch/i386/kernel/head.S
@@ -398,7 +398,11 @@ ignore_int:
pushl 32(%esp)
pushl 40(%esp)
pushl $int_msg
+#ifdef CONFIG_EARLY_PRINTK
+ call early_printk
+#else
call printk
+#endif
addl $(5*4),%esp
popl %ds
popl %es
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 0102f3d..e7609ab 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -710,7 +710,7 @@ void __init get_smp_config (void)
* Read the physical hardware table. Anything here will
* override the defaults.
*/
- if (!smp_read_mpc((void *)mpf->mpf_physptr)) {
+ if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) {
smp_found_config = 0;
printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
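The mpparse.c hunk replaces a bare cast of the MP table's physical address with
phys_to_virt(): i386 kernel code can only dereference lowmem through its
PAGE_OFFSET mapping, so the mpf_physptr value has to be offset before it can be
read. A stand-alone sketch of that address arithmetic follows; the PAGE_OFFSET
value is the common i386 default and the sample address is purely illustrative,
not taken from this patch.

#include <stdio.h>

/* Default i386 lowmem mapping offset (3G/1G split); illustration only. */
#define PAGE_OFFSET 0xC0000000UL

/* Mirrors the effect of the kernel's phys_to_virt()/__va() for lowmem:
 * a physical address becomes dereferenceable once PAGE_OFFSET is added. */
static void *example_phys_to_virt(unsigned long paddr)
{
        return (void *)(paddr + PAGE_OFFSET);
}

int main(void)
{
        unsigned long mpf_physptr = 0x0009fc00UL;  /* example value only */

        printf("physical %#lx -> kernel virtual %p\n",
               mpf_physptr, example_phys_to_virt(mpf_physptr));
        return 0;
}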
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 255adb4..fb00ab7 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -87,11 +87,7 @@ EXPORT_SYMBOL(cpu_online_map);
cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
EXPORT_SYMBOL(cpu_callout_map);
-#ifdef CONFIG_HOTPLUG_CPU
-cpumask_t cpu_possible_map = CPU_MASK_ALL;
-#else
cpumask_t cpu_possible_map;
-#endif
EXPORT_SYMBOL(cpu_possible_map);
static cpumask_t smp_commenced_mask;
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S
index 5a8b3fb..ac687d0 100644
--- a/arch/i386/kernel/syscall_table.S
+++ b/arch/i386/kernel/syscall_table.S
@@ -299,7 +299,7 @@ ENTRY(sys_call_table)
.long sys_mknodat
.long sys_fchownat
.long sys_futimesat
- .long sys_newfstatat /* 300 */
+ .long sys_fstatat64 /* 300 */
.long sys_unlinkat
.long sys_renameat
.long sys_linkat
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
index 7c86e3c..a7f5a2a 100644
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ b/arch/i386/kernel/timers/timer_tsc.c
@@ -282,6 +282,10 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
if (val != CPUFREQ_RESUMECHANGE)
write_seqlock_irq(&xtime_lock);
if (!ref_freq) {
+ if (!freq->old){
+ ref_freq = freq->new;
+ goto end;
+ }
ref_freq = freq->old;
loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
#ifndef CONFIG_SMP
@@ -307,6 +311,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
#endif
}
+end:
if (val != CPUFREQ_RESUMECHANGE)
write_sequnlock_irq(&xtime_lock);
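The timer_tsc.c hunk guards the cpufreq notifier against a first event that
reports freq->old == 0 (as can happen around suspend/resume): rather than
recording a zero reference frequency that would later be used as a divisor, it
seeds ref_freq from freq->new and jumps to the new end: label, skipping the
rescale. Below is a stand-alone sketch of that control flow, with illustrative
names and a simplified scaling helper in place of the kernel's cpufreq_scale().

#include <stdio.h>

static unsigned long ref_freq;          /* 0 until a valid reference is seen */

/* Simplified proportional scaling: val * mult / div. */
static unsigned long scale(unsigned long val, unsigned long mult,
                           unsigned long div)
{
        return (unsigned long)(((unsigned long long)val * mult) / div);
}

/* Rescale a loops-per-jiffy style value when the CPU clock changes. */
static unsigned long on_freq_change(unsigned long lpj_ref,
                                    unsigned long freq_old,
                                    unsigned long freq_new)
{
        if (!ref_freq) {
                if (!freq_old) {        /* the case the patch handles */
                        ref_freq = freq_new;
                        return lpj_ref; /* nothing meaningful to rescale yet */
                }
                ref_freq = freq_old;
        }
        return scale(lpj_ref, freq_new, ref_freq);
}

int main(void)
{
        /* First event reports an old frequency of 0: just record a reference. */
        printf("%lu\n", on_freq_change(4000000UL, 0, 1800000UL));
        /* Later event: scale relative to the recorded reference. */
        printf("%lu\n", on_freq_change(4000000UL, 1800000UL, 900000UL));
        return 0;
}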
diff --git a/arch/i386/kernel/vsyscall-sysenter.S b/arch/i386/kernel/vsyscall-sysenter.S
index 4daefb2..76b7281 100644
--- a/arch/i386/kernel/vsyscall-sysenter.S
+++ b/arch/i386/kernel/vsyscall-sysenter.S
@@ -7,6 +7,21 @@
* for details.
*/
+/*
+ * The caller puts arg2 in %ecx, which gets pushed. The kernel will use
+ * %ecx itself for arg2. The pushing is because the sysexit instruction
+ * (found in entry.S) requires that we clobber %ecx with the desired %esp.
+ * User code might expect that %ecx is unclobbered though, as it would be
+ * for returning via the iret instruction, so we must push and pop.
+ *
+ * The caller puts arg3 in %edx, which the sysexit instruction requires
+ * for %eip. Thus, exactly as for arg2, we must push and pop.
+ *
+ * Arg6 is different. The caller puts arg6 in %ebp. Since the sysenter
+ * instruction clobbers %esp, the user's %esp won't even survive entry
+ * into the kernel. We store %esp in %ebp. Code in entry.S must fetch
+ * arg6 from the stack.
+ */
.text
.globl __kernel_vsyscall
.type __kernel_vsyscall,@function
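The comment added above documents the register convention for the
sysenter-based vsyscall. For illustration only, a minimal user-space caller of
__kernel_vsyscall might look roughly like the sketch below: the syscall number
travels in %eax and arguments 1-3 in %ebx/%ecx/%edx, and %ecx/%edx survive the
call only because the stub saves and restores them as described. The entry
address is taken from the AT_SYSINFO auxiliary-vector entry; the sketch assumes
an i386 (or -m32) build on a kernel that exposes that entry, and getauxval() is
used purely for brevity.

#include <stdio.h>
#include <sys/auxv.h>      /* getauxval(), AT_SYSINFO */
#include <sys/syscall.h>   /* SYS_write */

int main(void)
{
        static const char msg[] = "hello via __kernel_vsyscall\n";
        void *vsyscall = (void *)getauxval(AT_SYSINFO);
        long ret;

        if (!vsyscall) {
                fprintf(stderr, "no AT_SYSINFO entry on this system\n");
                return 1;
        }

        /* write(1, msg, len): number in %eax, args in %ebx, %ecx, %edx.
         * The stub pushes and pops %ecx, %edx and %ebp, so from the
         * caller's point of view those registers are preserved, as the
         * comment in vsyscall-sysenter.S explains. */
        asm volatile("call *%1"
                     : "=a" (ret)
                     : "r" (vsyscall), "0" (SYS_write),
                       "b" (1), "c" (msg), "d" (sizeof(msg) - 1)
                     : "memory");

        return ret < 0 ? 1 : 0;
}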