Diffstat (limited to 'arch/x86_64')
-rw-r--r--  arch/x86_64/Kconfig.debug            |    7
-rw-r--r--  arch/x86_64/defconfig                |   53
-rw-r--r--  arch/x86_64/ia32/ia32entry.S         |    7
-rw-r--r--  arch/x86_64/ia32/sys_ia32.c          |   22
-rw-r--r--  arch/x86_64/kernel/acpi/Makefile     |    5
-rw-r--r--  arch/x86_64/kernel/acpi/processor.c  |   72
-rw-r--r--  arch/x86_64/kernel/apic.c            |   92
-rw-r--r--  arch/x86_64/kernel/entry.S           |    8
-rw-r--r--  arch/x86_64/kernel/head.S            |    7
-rw-r--r--  arch/x86_64/kernel/io_apic.c         |   34
-rw-r--r--  arch/x86_64/kernel/mce.c             |    2
-rw-r--r--  arch/x86_64/kernel/mpparse.c         |   12
-rw-r--r--  arch/x86_64/kernel/nmi.c             |   26
-rw-r--r--  arch/x86_64/kernel/pci-dma.c         |    3
-rw-r--r--  arch/x86_64/kernel/pci-gart.c        |   25
-rw-r--r--  arch/x86_64/kernel/pci-nommu.c       |    7
-rw-r--r--  arch/x86_64/kernel/pci-swiotlb.c     |    2
-rw-r--r--  arch/x86_64/kernel/pmtimer.c         |   25
-rw-r--r--  arch/x86_64/kernel/process.c         |    4
-rw-r--r--  arch/x86_64/kernel/setup.c           |   10
-rw-r--r--  arch/x86_64/kernel/smpboot.c         |    3
-rw-r--r--  arch/x86_64/kernel/time.c            |   76
-rw-r--r--  arch/x86_64/kernel/traps.c           |   39
-rw-r--r--  arch/x86_64/kernel/vmlinux.lds.S     |   10
-rw-r--r--  arch/x86_64/kernel/x8664_ksyms.c     |    5
-rw-r--r--  arch/x86_64/lib/Makefile             |    2
-rw-r--r--  arch/x86_64/lib/clear_page.S         |   38
-rw-r--r--  arch/x86_64/lib/copy_page.S          |   87
-rw-r--r--  arch/x86_64/lib/copy_user.S          |  247
-rw-r--r--  arch/x86_64/lib/iomap_copy.S         |   26
-rw-r--r--  arch/x86_64/lib/memcpy.S             |   93
-rw-r--r--  arch/x86_64/lib/memset.S             |   94
-rw-r--r--  arch/x86_64/mm/fault.c               |    3
-rw-r--r--  arch/x86_64/mm/k8topology.c          |    2
-rw-r--r--  arch/x86_64/mm/numa.c                |    2
-rw-r--r--  arch/x86_64/mm/srat.c                |   50
-rw-r--r--  arch/x86_64/pci/mmconfig.c           |   19
37 files changed, 1067 insertions(+), 152 deletions(-)
diff --git a/arch/x86_64/Kconfig.debug b/arch/x86_64/Kconfig.debug
index fcb06a5..ea31b4c 100644
--- a/arch/x86_64/Kconfig.debug
+++ b/arch/x86_64/Kconfig.debug
@@ -2,13 +2,6 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
-config INIT_DEBUG
- bool "Debug __init statements"
- depends on DEBUG_KERNEL
- help
- Fill __init and __initdata at the end of boot. This helps debugging
- illegal uses of __init and __initdata after initialization.
-
config DEBUG_RODATA
bool "Write protect kernel read-only data structures"
depends on DEBUG_KERNEL
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index 09a3eb7..ce4de61 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.15-git12
-# Mon Jan 16 13:09:08 2006
+# Linux kernel version: 2.6.16-rc3-git9
+# Sat Feb 18 00:27:03 2006
#
CONFIG_X86_64=y
CONFIG_64BIT=y
@@ -21,7 +21,6 @@ CONFIG_DMI=y
# Code maturity level options
#
CONFIG_EXPERIMENTAL=y
-CONFIG_CLEAN_COMPILE=y
CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
@@ -267,6 +266,7 @@ CONFIG_NET=y
#
# Networking options
#
+# CONFIG_NETDEBUG is not set
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
@@ -310,6 +310,11 @@ CONFIG_IPV6=y
# SCTP Configuration (EXPERIMENTAL)
#
# CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set
@@ -319,11 +324,6 @@ CONFIG_IPV6=y
# CONFIG_ATALK is not set
# CONFIG_X25 is not set
# CONFIG_LAPB is not set
-
-#
-# TIPC Configuration (EXPERIMENTAL)
-#
-# CONFIG_TIPC is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
@@ -446,7 +446,6 @@ CONFIG_BLK_DEV_PIIX=y
# CONFIG_BLK_DEV_NS87415 is not set
# CONFIG_BLK_DEV_PDC202XX_OLD is not set
CONFIG_BLK_DEV_PDC202XX_NEW=y
-# CONFIG_PDC202XX_FORCE is not set
# CONFIG_BLK_DEV_SVWKS is not set
# CONFIG_BLK_DEV_SIIMAGE is not set
# CONFIG_BLK_DEV_SIS5513 is not set
@@ -573,7 +572,33 @@ CONFIG_FUSION_MAX_SGE=128
#
# IEEE 1394 (FireWire) support
#
-# CONFIG_IEEE1394 is not set
+CONFIG_IEEE1394=y
+
+#
+# Subsystem Options
+#
+# CONFIG_IEEE1394_VERBOSEDEBUG is not set
+# CONFIG_IEEE1394_OUI_DB is not set
+# CONFIG_IEEE1394_EXTRA_CONFIG_ROMS is not set
+# CONFIG_IEEE1394_EXPORT_FULL_API is not set
+
+#
+# Device Drivers
+#
+
+#
+# Texas Instruments PCILynx requires I2C
+#
+CONFIG_IEEE1394_OHCI1394=y
+
+#
+# Protocol Drivers
+#
+# CONFIG_IEEE1394_VIDEO1394 is not set
+# CONFIG_IEEE1394_SBP2 is not set
+# CONFIG_IEEE1394_ETH1394 is not set
+# CONFIG_IEEE1394_DV1394 is not set
+CONFIG_IEEE1394_RAWIO=y
#
# I2O device support
@@ -772,6 +797,7 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=4
#
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
@@ -871,6 +897,7 @@ CONFIG_HPET_MMAP=y
#
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_F71805F is not set
# CONFIG_SENSORS_HDAPS is not set
# CONFIG_HWMON_DEBUG_CHIP is not set
@@ -1098,6 +1125,11 @@ CONFIG_USB_MON=y
#
#
+# EDAC - error detection and reporting (RAS)
+#
+# CONFIG_EDAC is not set
+
+#
# Firmware Drivers
#
# CONFIG_EDD is not set
@@ -1291,7 +1323,6 @@ CONFIG_DEBUG_FS=y
# CONFIG_FRAME_POINTER is not set
# CONFIG_FORCED_INLINING is not set
# CONFIG_RCU_TORTURE_TEST is not set
-CONFIG_INIT_DEBUG=y
# CONFIG_DEBUG_RODATA is not set
# CONFIG_IOMMU_DEBUG is not set
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index f05c2a8..00dee17 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -676,8 +676,8 @@ ia32_sys_call_table:
.quad sys_mkdirat
.quad sys_mknodat
.quad sys_fchownat
- .quad sys_futimesat
- .quad compat_sys_newfstatat /* 300 */
+ .quad compat_sys_futimesat
+ .quad sys32_fstatat /* 300 */
.quad sys_unlinkat
.quad sys_renameat
.quad sys_linkat
@@ -685,6 +685,9 @@ ia32_sys_call_table:
.quad sys_readlinkat /* 305 */
.quad sys_fchmodat
.quad sys_faccessat
+ .quad sys_ni_syscall /* pselect6 for now */
+ .quad sys_ni_syscall /* ppoll for now */
+ .quad sys_unshare /* 310 */
ia32_syscall_end:
.rept IA32_NR_syscalls-(ia32_syscall_end-ia32_sys_call_table)/8
.quad ni_syscall
diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c
index 54481af..2bc55af 100644
--- a/arch/x86_64/ia32/sys_ia32.c
+++ b/arch/x86_64/ia32/sys_ia32.c
@@ -180,6 +180,28 @@ sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
return ret;
}
+asmlinkage long
+sys32_fstatat(unsigned int dfd, char __user *filename,
+ struct stat64 __user* statbuf, int flag)
+{
+ struct kstat stat;
+ int error = -EINVAL;
+
+ if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
+ goto out;
+
+ if (flag & AT_SYMLINK_NOFOLLOW)
+ error = vfs_lstat_fd(dfd, filename, &stat);
+ else
+ error = vfs_stat_fd(dfd, filename, &stat);
+
+ if (!error)
+ error = cp_stat64(statbuf, &stat);
+
+out:
+ return error;
+}
+
/*
* Linux/i386 didn't use to be able to handle more than
* 4 system call parameters, so these system calls used a memory
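[Editor's note] For context, the new sys32_fstatat() above gives 32-bit tasks the
same semantics as the native fstatat(2). A minimal userspace sketch of the call it
services (ordinary C; the filename is hypothetical):

#include <fcntl.h>      /* AT_FDCWD, AT_SYMLINK_NOFOLLOW */
#include <sys/stat.h>
#include <stdio.h>

int main(void)
{
	struct stat st;

	/* Stat relative to the current directory without following a
	   trailing symlink -- this exercises the AT_SYMLINK_NOFOLLOW
	   branch (vfs_lstat_fd) in the handler above. */
	if (fstatat(AT_FDCWD, "some-link", &st, AT_SYMLINK_NOFOLLOW) == 0)
		printf("size: %lld\n", (long long)st.st_size);
	return 0;
}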
diff --git a/arch/x86_64/kernel/acpi/Makefile b/arch/x86_64/kernel/acpi/Makefile
index 7da9ace..4fe9707 100644
--- a/arch/x86_64/kernel/acpi/Makefile
+++ b/arch/x86_64/kernel/acpi/Makefile
@@ -1,3 +1,8 @@
obj-y := boot.o
boot-y := ../../../i386/kernel/acpi/boot.o
obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
+
+ifneq ($(CONFIG_ACPI_PROCESSOR),)
+obj-y += processor.o
+endif
+
diff --git a/arch/x86_64/kernel/acpi/processor.c b/arch/x86_64/kernel/acpi/processor.c
new file mode 100644
index 0000000..3bdc2ba
--- /dev/null
+++ b/arch/x86_64/kernel/acpi/processor.c
@@ -0,0 +1,72 @@
+/*
+ * arch/x86_64/kernel/acpi/processor.c
+ *
+ * Copyright (C) 2005 Intel Corporation
+ * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ * - Added _PDC for platforms with Intel CPUs
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+
+#include <acpi/processor.h>
+#include <asm/acpi.h>
+
+static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
+{
+ struct acpi_object_list *obj_list;
+ union acpi_object *obj;
+ u32 *buf;
+
+ /* allocate and initialize pdc. It will be used later. */
+ obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
+ if (!obj_list) {
+ printk(KERN_ERR "Memory allocation error\n");
+ return;
+ }
+
+ obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
+ if (!obj) {
+ printk(KERN_ERR "Memory allocation error\n");
+ kfree(obj_list);
+ return;
+ }
+
+ buf = kmalloc(12, GFP_KERNEL);
+ if (!buf) {
+ printk(KERN_ERR "Memory allocation error\n");
+ kfree(obj);
+ kfree(obj_list);
+ return;
+ }
+
+ buf[0] = ACPI_PDC_REVISION_ID;
+ buf[1] = 1;
+ buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
+
+ obj->type = ACPI_TYPE_BUFFER;
+ obj->buffer.length = 12;
+ obj->buffer.pointer = (u8 *) buf;
+ obj_list->count = 1;
+ obj_list->pointer = obj;
+ pr->pdc = obj_list;
+
+ return;
+}
+
+/* Initialize _PDC data based on the CPU vendor */
+void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
+{
+ unsigned int cpu = pr->id;
+ struct cpuinfo_x86 *c = cpu_data + cpu;
+
+ pr->pdc = NULL;
+ if (c->x86_vendor == X86_VENDOR_INTEL && cpu_has(c, X86_FEATURE_EST))
+ init_intel_pdc(pr, c);
+
+ return;
+}
+
+EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
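[Editor's note] A note on the magic number 12 above: the _PDC buffer is three u32
words. A more self-documenting picture of the same 12-byte layout (illustrative
struct, not kernel code):

struct pdc_buffer_words {
	u32 revision;	/* buf[0] = ACPI_PDC_REVISION_ID */
	u32 count;	/* buf[1] = 1, number of capability words that follow */
	u32 caps;	/* buf[2] = ACPI_PDC_EST_CAPABILITY_SMP */
};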
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index 5d3c5b0..e5b14c5 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -35,8 +35,12 @@
#include <asm/mach_apic.h>
#include <asm/nmi.h>
#include <asm/idle.h>
+#include <asm/proto.h>
+#include <asm/timex.h>
int apic_verbosity;
+int apic_runs_main_timer;
+int apic_calibrate_pmtmr __initdata;
int disable_apic_timer __initdata;
@@ -68,6 +72,26 @@ int get_maxlvt(void)
return maxlvt;
}
+/*
+ * 'what should we do if we get a hw irq event on an illegal vector'.
+ * each architecture has to answer this themselves.
+ */
+void ack_bad_irq(unsigned int irq)
+{
+ printk("unexpected IRQ trap at vector %02x\n", irq);
+ /*
+ * Currently unexpected vectors happen only on SMP and APIC.
+ * We _must_ ack these because every local APIC has only N
+ * irq slots per priority level, and a 'hanging, unacked' IRQ
+ * holds up an irq slot - in excessive cases (when multiple
+ * unexpected vectors occur) that might lock up the APIC
+ * completely.
+ * But don't ack when the APIC is disabled. -AK
+ */
+ if (!disable_apic)
+ ack_APIC_irq();
+}
+
void clear_local_APIC(void)
{
int maxlvt;
@@ -684,7 +708,7 @@ static void setup_APIC_timer(unsigned int clocks)
local_irq_save(flags);
/* wait for irq slice */
- if (vxtime.hpet_address) {
+ if (vxtime.hpet_address && hpet_use_timer) {
int trigger = hpet_readl(HPET_T0_CMP);
while (hpet_readl(HPET_COUNTER) >= trigger)
/* do nothing */ ;
@@ -702,9 +726,17 @@ static void setup_APIC_timer(unsigned int clocks)
c2 |= inb_p(0x40) << 8;
} while (c2 - c1 < 300);
}
-
__setup_APIC_LVTT(clocks);
-
+ /* Turn off PIT interrupt if we use APIC timer as main timer.
+ Only works with the PM timer right now
+ TBD fix it for HPET too. */
+ if (vxtime.mode == VXTIME_PMTMR &&
+ smp_processor_id() == boot_cpu_id &&
+ apic_runs_main_timer == 1 &&
+ !cpu_isset(boot_cpu_id, timer_interrupt_broadcast_ipi_mask)) {
+ stop_timer_interrupt();
+ apic_runs_main_timer++;
+ }
local_irq_restore(flags);
}
@@ -735,14 +767,27 @@ static int __init calibrate_APIC_clock(void)
__setup_APIC_LVTT(1000000000);
apic_start = apic_read(APIC_TMCCT);
- rdtscl(tsc_start);
-
- do {
+#ifdef CONFIG_X86_PM_TIMER
+ if (apic_calibrate_pmtmr && pmtmr_ioport) {
+ pmtimer_wait(5000); /* 5ms wait */
apic = apic_read(APIC_TMCCT);
- rdtscl(tsc);
- } while ((tsc - tsc_start) < TICK_COUNT && (apic - apic_start) < TICK_COUNT);
+ result = (apic_start - apic) * 1000L / 5;
+ } else
+#endif
+ {
+ rdtscl(tsc_start);
+
+ do {
+ apic = apic_read(APIC_TMCCT);
+ rdtscl(tsc);
+ } while ((tsc - tsc_start) < TICK_COUNT &&
+ (apic - apic_start) < TICK_COUNT);
+
+ result = (apic_start - apic) * 1000L * cpu_khz /
+ (tsc - tsc_start);
+ }
+ printk("result %d\n", result);
- result = (apic_start - apic) * 1000L * cpu_khz / (tsc - tsc_start);
printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
result / 1000 / 1000, result / 1000 % 1000);
@@ -872,6 +917,8 @@ void smp_local_timer_interrupt(struct pt_regs *regs)
#ifdef CONFIG_SMP
update_process_times(user_mode(regs));
#endif
+ if (apic_runs_main_timer > 1 && smp_processor_id() == boot_cpu_id)
+ main_timer_handler(regs);
/*
* We take the 'long' return path, and there every subsystem
* grabs the appropriate locks (kernel lock/ irq lock).
@@ -924,7 +971,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
* multi-chassis. Use available data to take a good guess.
* If in doubt, go HPET.
*/
-__init int oem_force_hpet_timer(void)
+__cpuinit int oem_force_hpet_timer(void)
{
int i, clusters, zeros;
unsigned id;
@@ -1081,10 +1128,35 @@ static __init int setup_nolapic(char *str)
static __init int setup_noapictimer(char *str)
{
+ if (str[0] != ' ' && str[0] != 0)
+ return -1;
disable_apic_timer = 1;
return 0;
}
+static __init int setup_apicmaintimer(char *str)
+{
+ apic_runs_main_timer = 1;
+ nohpet = 1;
+ return 0;
+}
+__setup("apicmaintimer", setup_apicmaintimer);
+
+static __init int setup_noapicmaintimer(char *str)
+{
+ apic_runs_main_timer = -1;
+ return 0;
+}
+__setup("noapicmaintimer", setup_noapicmaintimer);
+
+static __init int setup_apicpmtimer(char *s)
+{
+ apic_calibrate_pmtmr = 1;
+ notsc_setup(NULL);
+ return setup_apicmaintimer(NULL);
+}
+__setup("apicpmtimer", setup_apicpmtimer);
+
/* dummy parsing: see setup.c */
__setup("disableapic", setup_disableapic);
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index dbdba56..7c10e90 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -499,7 +499,9 @@ ENTRY(stub_rt_sigreturn)
movq %gs:pda_irqstackptr,%rax
cmoveq %rax,%rsp /*todo This needs CFI annotation! */
pushq %rdi # save old stack
+#ifndef CONFIG_DEBUG_INFO
CFI_ADJUST_CFA_OFFSET 8
+#endif
call \func
.endm
@@ -509,7 +511,9 @@ ENTRY(common_interrupt)
/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
popq %rdi
+#ifndef CONFIG_DEBUG_INFO
CFI_ADJUST_CFA_OFFSET -8
+#endif
cli
decl %gs:pda_irqcount
#ifdef CONFIG_DEBUG_INFO
@@ -550,6 +554,7 @@ iret_label:
/* running with kernel gs */
bad_iret:
movq $-9999,%rdi /* better code? */
+ sti
jmp do_exit
.previous
@@ -922,7 +927,7 @@ KPROBE_ENTRY(debug)
.previous .text
/* runs on exception stack */
-ENTRY(nmi)
+KPROBE_ENTRY(nmi)
INTR_FRAME
pushq $-1
CFI_ADJUST_CFA_OFFSET 8
@@ -969,6 +974,7 @@ paranoid_schedule:
cli
jmp paranoid_userspace
CFI_ENDPROC
+ .previous .text
KPROBE_ENTRY(int3)
INTR_FRAME
diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S
index 692c737..02fc7fa 100644
--- a/arch/x86_64/kernel/head.S
+++ b/arch/x86_64/kernel/head.S
@@ -213,6 +213,11 @@ ENTRY(early_idt_handler)
cmpl $2,early_recursion_flag(%rip)
jz 1f
call dump_stack
+#ifdef CONFIG_KALLSYMS
+ leaq early_idt_ripmsg(%rip),%rdi
+ movq 8(%rsp),%rsi # get rip again
+ call __print_symbol
+#endif
1: hlt
jmp 1b
early_recursion_flag:
@@ -220,6 +225,8 @@ early_recursion_flag:
early_idt_msg:
.asciz "PANIC: early exception rip %lx error %lx cr2 %lx\n"
+early_idt_ripmsg:
+ .asciz "RIP %s\n"
.code32
ENTRY(no_long_mode)
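[Editor's note] The new early_idt_handler lines are the assembly form of the
kallsyms helper already used elsewhere in this series (see the print_symbol()
call in kernel/time.c below). Roughly, in C (a sketch against the 2.6-era API):

#include <linux/kallsyms.h>

static void report_early_fault(unsigned long rip)
{
	/* Resolves rip to "symbol+offset/size" when CONFIG_KALLSYMS=y;
	   without kallsyms it falls back to printing the raw address. */
	print_symbol("RIP %s\n", rip);
}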
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index e8cf44e..2585c1d 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -30,6 +30,9 @@
#include <linux/mc146818rtc.h>
#include <linux/acpi.h>
#include <linux/sysdev.h>
+#ifdef CONFIG_ACPI
+#include <acpi/acpi_bus.h>
+#endif
#include <asm/io.h>
#include <asm/smp.h>
@@ -260,6 +263,8 @@ __setup("apic", enable_ioapic_setup);
And another hack to disable the IOMMU on VIA chipsets.
+ ... and others. Really should move this somewhere else.
+
Kludge-O-Rama. */
void __init check_ioapic(void)
{
@@ -304,6 +309,25 @@ void __init check_ioapic(void)
#endif
/* RED-PEN skip them on mptables too? */
return;
+ case PCI_VENDOR_ID_ATI:
+ if (apic_runs_main_timer != 0)
+ break;
+#ifdef CONFIG_ACPI
+ /* Don't do this for laptops right now because their
+ timer doesn't necessarily tick in C2/3 */
+ if (acpi_fadt.revision >= 3 &&
+ (acpi_fadt.plvl2_lat + acpi_fadt.plvl3_lat) < 1100) {
+ printk(KERN_INFO
+"ATI board detected, but seems to be a laptop. Timer might be shakey, sorry\n");
+ break;
+ }
+#endif
+ printk(KERN_INFO
+ "ATI board detected. Using APIC/PM timer.\n");
+ apic_runs_main_timer = 1;
+ nohpet = 1;
+ return;
}
/* No multi-function device? */
@@ -2027,7 +2051,7 @@ int __init io_apic_get_redir_entries (int ioapic)
}
-int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
+int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
{
struct IO_APIC_route_entry entry;
unsigned long flags;
@@ -2049,8 +2073,8 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
entry.delivery_mode = INT_DELIVERY_MODE;
entry.dest_mode = INT_DEST_MODE;
entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
- entry.trigger = edge_level;
- entry.polarity = active_high_low;
+ entry.trigger = triggering;
+ entry.polarity = polarity;
entry.mask = 1; /* Disabled (masked) */
irq = gsi_irq_sharing(irq);
@@ -2065,9 +2089,9 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
"IRQ %d Mode:%i Active:%i)\n", ioapic,
mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
- edge_level, active_high_low);
+ triggering, polarity);
- ioapic_register_intr(irq, entry.vector, edge_level);
+ ioapic_register_intr(irq, entry.vector, triggering);
if (!ioapic && (irq < 16))
disable_8259A_irq(irq);
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 13a2ead..b8b9529 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -380,7 +380,7 @@ static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
*/
void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
{
- static cpumask_t mce_cpus __initdata = CPU_MASK_NONE;
+ static cpumask_t mce_cpus = CPU_MASK_NONE;
mce_cpu_quirks(c);
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index 1105250..9013a90 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -288,9 +288,9 @@ static int __init smp_read_mpc(struct mp_config_table *mpc)
memcpy(str,mpc->mpc_productid,12);
str[12]=0;
- printk(KERN_INFO "Product ID: %s ",str);
+ printk("Product ID: %s ",str);
- printk(KERN_INFO "APIC at: 0x%X\n",mpc->mpc_lapic);
+ printk("APIC at: 0x%X\n",mpc->mpc_lapic);
/* save the local APIC address, it might be non-default */
if (!acpi_lapic)
@@ -915,7 +915,7 @@ void __init mp_config_acpi_legacy_irqs (void)
#define MAX_GSI_NUM 4096
-int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
+int mp_register_gsi(u32 gsi, int triggering, int polarity)
{
int ioapic = -1;
int ioapic_pin = 0;
@@ -964,7 +964,7 @@ int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
- if (edge_level) {
+ if (triggering == ACPI_LEVEL_SENSITIVE) {
/*
* For PCI devices assign IRQs in order, avoiding gaps
* due to unused I/O APIC pins.
@@ -986,8 +986,8 @@ int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
}
io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
- edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
- active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
+ triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
+ polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
return gsi;
}
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 5fae6f0..5bf17e4 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -24,6 +24,7 @@
#include <linux/sysdev.h>
#include <linux/nmi.h>
#include <linux/sysctl.h>
+#include <linux/kprobes.h>
#include <asm/smp.h>
#include <asm/mtrr.h>
@@ -235,6 +236,7 @@ static void enable_lapic_nmi_watchdog(void)
{
if (nmi_active < 0) {
nmi_watchdog = NMI_LOCAL_APIC;
+ touch_nmi_watchdog();
setup_apic_nmi_watchdog();
}
}
@@ -455,20 +457,22 @@ static DEFINE_PER_CPU(int, nmi_touch);
void touch_nmi_watchdog (void)
{
- int i;
+ if (nmi_watchdog > 0) {
+ unsigned cpu;
- /*
- * Tell other CPUs to reset their alert counters. We cannot
- * do it ourselves because the alert count increase is not
- * atomic.
- */
- for (i = 0; i < NR_CPUS; i++)
- per_cpu(nmi_touch, i) = 1;
+ /*
+ * Tell other CPUs to reset their alert counters. We cannot
+ * do it ourselves because the alert count increase is not
+ * atomic.
+ */
+ for_each_present_cpu (cpu)
+ per_cpu(nmi_touch, cpu) = 1;
+ }
touch_softlockup_watchdog();
}
-void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason)
+void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
{
int sum;
int touched = 0;
@@ -512,14 +516,14 @@ void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason)
}
}
-static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
+static __kprobes int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
return 0;
}
static nmi_callback_t nmi_callback = dummy_nmi_callback;
-asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
+asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
{
int cpu = safe_smp_processor_id();
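[Editor's note] All of the __kprobes annotations here serve one purpose: the NMI
path must never itself be probed (a kprobe breakpoint inside do_nmi() would
recurse). The marker works by placing the function in a section the kprobes core
refuses to instrument; per the 2.6-era <linux/kprobes.h> it is roughly (exact
header location hedged):

#define __kprobes __attribute__((__section__(".kprobes.text")))

Probe registration then rejects any address that falls inside that section's
start/end symbols.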
diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c
index 2f5d832..4ed391e 100644
--- a/arch/x86_64/kernel/pci-dma.c
+++ b/arch/x86_64/kernel/pci-dma.c
@@ -107,6 +107,9 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
goto again;
}
+ /* Let low level make its own zone decisions */
+ gfp &= ~(GFP_DMA32|GFP_DMA);
+
if (dma_ops->alloc_coherent)
return dma_ops->alloc_coherent(dev, size,
dma_handle, gfp);
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index c37fc77..dd0718d 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -310,7 +310,7 @@ void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int di
for (i = 0; i < nents; i++) {
struct scatterlist *s = &sg[i];
- if (!s->dma_length || !s->length)
+ if (!s->dma_length)
break;
dma_unmap_single(dev, s->dma_address, s->dma_length, dir);
}
@@ -364,7 +364,6 @@ static int __dma_map_cont(struct scatterlist *sg, int start, int stopat,
BUG_ON(i > start && s->offset);
if (i == start) {
- *sout = *s;
sout->dma_address = iommu_bus_base;
sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
sout->dma_length = s->length;
@@ -379,7 +378,7 @@ static int __dma_map_cont(struct scatterlist *sg, int start, int stopat,
SET_LEAK(iommu_page);
addr += PAGE_SIZE;
iommu_page++;
- }
+ }
}
BUG_ON(iommu_page - iommu_start != pages);
return 0;
@@ -391,7 +390,6 @@ static inline int dma_map_cont(struct scatterlist *sg, int start, int stopat,
{
if (!need) {
BUG_ON(stopat - start != 1);
- *sout = sg[start];
sout->dma_length = sg[start].length;
return 0;
}
@@ -457,9 +455,12 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
error:
flush_gart(NULL);
gart_unmap_sg(dev, sg, nents, dir);
- /* When it was forced try again unforced */
- if (force_iommu)
- return dma_map_sg_nonforce(dev, sg, nents, dir);
+ /* When it was forced or merged try again in a dumb way */
+ if (force_iommu || iommu_merge) {
+ out = dma_map_sg_nonforce(dev, sg, nents, dir);
+ if (out > 0)
+ return out;
+ }
if (panic_on_overflow)
panic("dma_map_sg: overflow on %lu pages\n", pages);
iommu_full(dev, pages << PAGE_SHIFT, dir);
@@ -642,9 +643,18 @@ static int __init pci_iommu_init(void)
(no_agp && init_k8_gatt(&info) < 0)) {
no_iommu = 1;
no_iommu_init();
+ printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
+ if (end_pfn > MAX_DMA32_PFN) {
+ printk(KERN_ERR "WARNING more than 4GB of memory "
+ "but IOMMU not compiled in.\n"
+ KERN_ERR "WARNING 32bit PCI may malfunction.\n"
+ KERN_ERR "You might want to enable "
+ "CONFIG_GART_IOMMU\n");
+ }
return -1;
}
+ printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
aper_size = info.aper_size * 1024 * 1024;
iommu_size = check_iommu_size(info.aper_base, aper_size);
iommu_pages = iommu_size >> PAGE_SHIFT;
@@ -718,7 +728,6 @@ static int __init pci_iommu_init(void)
flush_gart(NULL);
- printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
dma_ops = &gart_dma_ops;
return 0;
diff --git a/arch/x86_64/kernel/pci-nommu.c b/arch/x86_64/kernel/pci-nommu.c
index e415649..44adcc2 100644
--- a/arch/x86_64/kernel/pci-nommu.c
+++ b/arch/x86_64/kernel/pci-nommu.c
@@ -88,12 +88,5 @@ void __init no_iommu_init(void)
{
if (dma_ops)
return;
- printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
dma_ops = &nommu_dma_ops;
- if (end_pfn > MAX_DMA32_PFN) {
- printk(KERN_ERR
- "WARNING more than 4GB of memory but IOMMU not compiled in.\n"
- KERN_ERR "WARNING 32bit PCI may malfunction.\n"
- KERN_ERR "You might want to enable CONFIG_GART_IOMMU\n");
- }
}
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
index 3569a25..990ed67 100644
--- a/arch/x86_64/kernel/pci-swiotlb.c
+++ b/arch/x86_64/kernel/pci-swiotlb.c
@@ -35,8 +35,8 @@ void pci_swiotlb_init(void)
(end_pfn > MAX_DMA32_PFN || force_iommu))
swiotlb = 1;
if (swiotlb) {
- swiotlb_init();
printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
+ swiotlb_init();
dma_ops = &swiotlb_dma_ops;
}
}
diff --git a/arch/x86_64/kernel/pmtimer.c b/arch/x86_64/kernel/pmtimer.c
index feb5f10..5c51d10 100644
--- a/arch/x86_64/kernel/pmtimer.c
+++ b/arch/x86_64/kernel/pmtimer.c
@@ -80,6 +80,31 @@ int pmtimer_mark_offset(void)
return lost - 1;
}
+static unsigned pmtimer_wait_tick(void)
+{
+ u32 a, b;
+ for (a = b = inl(pmtmr_ioport) & ACPI_PM_MASK;
+ a == b;
+ b = inl(pmtmr_ioport) & ACPI_PM_MASK)
+ ;
+ return b;
+}
+
+/* note: wait time is rounded up to one tick */
+void pmtimer_wait(unsigned us)
+{
+ u32 a, b;
+ a = pmtimer_wait_tick();
+ do {
+ b = inl(pmtmr_ioport);
+ } while (cyc2us(b - a) < us);
+}
+
+void pmtimer_resume(void)
+{
+ last_pmtmr_tick = inl(pmtmr_ioport);
+}
+
unsigned int do_gettimeoffset_pm(void)
{
u32 now, offset, delta = 0;
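[Editor's note] The ACPI PM timer ticks at a fixed 3.579545 MHz, so pmtimer_wait()
above is just a busy loop until enough ticks have elapsed; cyc2us() converts the
tick delta to microseconds (the kernel's version uses the cheap fixed-point form
cycles*286 >> 10, about 0.024% off). A standalone sketch of the conversion:

#include <stdio.h>
#include <stdint.h>

#define PMTMR_HZ 3579545u	/* fixed ACPI PM timer frequency */

static unsigned cyc2us_exact(uint32_t cycles)
{
	return (unsigned)((uint64_t)cycles * 1000000u / PMTMR_HZ);
}

static unsigned cyc2us_approx(uint32_t cycles)
{
	return (cycles * 286u) >> 10;	/* 286/1024 ~= 1/3.579545 */
}

int main(void)
{
	/* a 5 ms wait corresponds to roughly 17898 PM timer ticks */
	printf("exact: %u us, approx: %u us\n",
	       cyc2us_exact(17898), cyc2us_approx(17898));
	return 0;
}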
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 8ded407..22a05de 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -516,8 +516,10 @@ out:
* This could still be optimized:
* - fold all the options into a flag word and test it with a single test.
* - could test fs/gs bitsliced
+ *
+ * Kprobes not supported here. Set the probe on schedule instead.
*/
-struct task_struct *
+__kprobes struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
struct thread_struct *prev = &prev_p->thread,
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 28895c0..9435ab7 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -94,7 +94,6 @@ unsigned long saved_video_mode;
/*
* Setup options
*/
-struct drive_info_struct { char dummy[32]; } drive_info;
struct screen_info screen_info;
struct sys_desc_table_struct {
unsigned short length;
@@ -572,7 +571,6 @@ void __init setup_arch(char **cmdline_p)
unsigned long kernel_end;
ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
- drive_info = DRIVE_INFO;
screen_info = SCREEN_INFO;
edid_info = EDID_INFO;
saved_video_mode = SAVED_VIDEO_MODE;
@@ -741,7 +739,7 @@ void __init setup_arch(char **cmdline_p)
e820_setup_gap();
#ifdef CONFIG_GART_IOMMU
- iommu_hole_init();
+ iommu_hole_init();
#endif
#ifdef CONFIG_VT
@@ -877,6 +875,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
static int __init init_amd(struct cpuinfo_x86 *c)
{
int r;
+ unsigned level;
#ifdef CONFIG_SMP
unsigned long value;
@@ -899,6 +898,11 @@ static int __init init_amd(struct cpuinfo_x86 *c)
3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
clear_bit(0*32+31, &c->x86_capability);
+ /* On C+ stepping K8 rep microcode works well for copy/memset */
+ level = cpuid_eax(1);
+ if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
+ set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
+
r = get_model_name(c);
if (!r) {
switch (c->x86) {
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index a28756e..67e4e28 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -59,6 +59,7 @@
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
+#include <asm/numa.h>
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
@@ -890,6 +891,7 @@ do_rest:
if (boot_error) {
cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
+ clear_node_cpumask(cpu); /* was set by numa_add_cpu */
cpu_clear(cpu, cpu_present_map);
cpu_clear(cpu, cpu_possible_map);
x86_cpu_to_apicid[cpu] = BAD_APICID;
@@ -1187,6 +1189,7 @@ void remove_cpu_from_maps(void)
cpu_clear(cpu, cpu_callout_map);
cpu_clear(cpu, cpu_callin_map);
clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
+ clear_node_cpumask(cpu);
}
int __cpu_disable(void)
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index f8c47c6..67841d1 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -51,7 +51,7 @@ extern int using_apic_timer;
DEFINE_SPINLOCK(rtc_lock);
DEFINE_SPINLOCK(i8253_lock);
-static int nohpet __initdata = 0;
+int nohpet __initdata = 0;
static int notsc __initdata = 0;
#undef HPET_HACK_ENABLE_DANGEROUS
@@ -59,7 +59,7 @@ static int notsc __initdata = 0;
unsigned int cpu_khz; /* TSC clocks / usec, not used here */
static unsigned long hpet_period; /* fsecs / HPET clock */
unsigned long hpet_tick; /* HPET clocks / interrupt */
-static int hpet_use_timer; /* Use counter of hpet for time keeping, otherwise PIT */
+int hpet_use_timer; /* Use counter of hpet for time keeping, otherwise PIT */
unsigned long vxtime_hz = PIT_TICK_RATE;
int report_lost_ticks; /* command line option */
unsigned long long monotonic_base;
@@ -326,7 +326,10 @@ static noinline void handle_lost_ticks(int lost, struct pt_regs *regs)
print_symbol("rip %s\n", regs->rip);
if (vxtime.mode == VXTIME_TSC && vxtime.hpet_address) {
printk(KERN_WARNING "Falling back to HPET\n");
- vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
+ if (hpet_use_timer)
+ vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
+ else
+ vxtime.last = hpet_readl(HPET_COUNTER);
vxtime.mode = VXTIME_HPET;
do_gettimeoffset = do_gettimeoffset_hpet;
}
@@ -345,7 +348,7 @@ static noinline void handle_lost_ticks(int lost, struct pt_regs *regs)
#endif
}
-static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+void main_timer_handler(struct pt_regs *regs)
{
static unsigned long rtc_update = 0;
unsigned long tsc;
@@ -458,12 +461,17 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
}
write_sequnlock(&xtime_lock);
+}
+static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ if (apic_runs_main_timer > 1)
+ return IRQ_HANDLED;
+ main_timer_handler(regs);
#ifdef CONFIG_X86_LOCAL_APIC
if (using_apic_timer)
smp_send_timer_broadcast_ipi();
#endif
-
return IRQ_HANDLED;
}
@@ -743,7 +751,7 @@ static __init int late_hpet_init(void)
* Timer0 and Timer1 is used by platform.
*/
hd.hd_phys_address = vxtime.hpet_address;
- hd.hd_address = (void *)fix_to_virt(FIX_HPET_BASE);
+ hd.hd_address = (void __iomem *)fix_to_virt(FIX_HPET_BASE);
hd.hd_nirqs = ntimer;
hd.hd_flags = HPET_DATA_PLATFORM;
hpet_reserve_timer(&hd, 0);
@@ -843,17 +851,43 @@ static int hpet_reenable(void)
return hpet_timer_stop_set_go(hpet_tick);
}
-void __init pit_init(void)
+#define PIT_MODE 0x43
+#define PIT_CH0 0x40
+
+static void __init __pit_init(int val, u8 mode)
{
unsigned long flags;
spin_lock_irqsave(&i8253_lock, flags);
- outb_p(0x34, 0x43); /* binary, mode 2, LSB/MSB, ch 0 */
- outb_p(LATCH & 0xff, 0x40); /* LSB */
- outb_p(LATCH >> 8, 0x40); /* MSB */
+ outb_p(mode, PIT_MODE);
+ outb_p(val & 0xff, PIT_CH0); /* LSB */
+ outb_p(val >> 8, PIT_CH0); /* MSB */
spin_unlock_irqrestore(&i8253_lock, flags);
}
+void __init pit_init(void)
+{
+ __pit_init(LATCH, 0x34); /* binary, mode 2, LSB/MSB, ch 0 */
+}
+
+void __init pit_stop_interrupt(void)
+{
+ __pit_init(0, 0x30); /* mode 0 */
+}
+
+void __init stop_timer_interrupt(void)
+{
+ char *name;
+ if (vxtime.hpet_address) {
+ name = "HPET";
+ hpet_timer_stop_set_go(0);
+ } else {
+ name = "PIT";
+ pit_stop_interrupt();
+ }
+ printk(KERN_INFO "timer: %s interrupt stopped.\n", name);
+}
+
int __init time_setup(char *str)
{
report_lost_ticks = 1;
@@ -932,7 +966,7 @@ void __init time_init(void)
* Make an educated guess if the TSC is trustworthy and synchronized
* over all CPUs.
*/
-__init int unsynchronized_tsc(void)
+__cpuinit int unsynchronized_tsc(void)
{
#ifdef CONFIG_SMP
if (oem_force_hpet_timer())
@@ -957,7 +991,10 @@ void __init time_init_gtod(void)
notsc = 1;
if (vxtime.hpet_address && notsc) {
timetype = hpet_use_timer ? "HPET" : "PIT/HPET";
- vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
+ if (hpet_use_timer)
+ vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
+ else
+ vxtime.last = hpet_readl(HPET_COUNTER);
vxtime.mode = VXTIME_HPET;
do_gettimeoffset = do_gettimeoffset_hpet;
#ifdef CONFIG_X86_PM_TIMER
@@ -1016,9 +1053,21 @@ static int timer_resume(struct sys_device *dev)
write_seqlock_irqsave(&xtime_lock,flags);
xtime.tv_sec = sec;
xtime.tv_nsec = 0;
+ if (vxtime.mode == VXTIME_HPET) {
+ if (hpet_use_timer)
+ vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
+ else
+ vxtime.last = hpet_readl(HPET_COUNTER);
+#ifdef CONFIG_X86_PM_TIMER
+ } else if (vxtime.mode == VXTIME_PMTMR) {
+ pmtimer_resume();
+#endif
+ } else
+ vxtime.last_tsc = get_cycles_sync();
write_sequnlock_irqrestore(&xtime_lock,flags);
jiffies += sleep_length;
wall_jiffies += sleep_length;
+ monotonic_base += sleep_length * (NSEC_PER_SEC/HZ);
touch_softlockup_watchdog();
return 0;
}
@@ -1278,8 +1327,7 @@ static int __init nohpet_setup(char *s)
__setup("nohpet", nohpet_setup);
-
-static int __init notsc_setup(char *s)
+int __init notsc_setup(char *s)
{
notsc = 1;
return 0;
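[Editor's note] A decoding note for the two PIT command bytes introduced by
__pit_init(): the i8254 control word is [7:6] channel, [5:4] access mode
(11 = LSB then MSB), [3:1] operating mode, [0] BCD. So 0x34 selects channel 0,
LSB/MSB, mode 2 (rate generator: periodic interrupts), while 0x30 is the same
but mode 0 (one-shot interrupt on terminal count), so periodic ticks stop --
hence pit_stop_interrupt(). A tiny decoder for illustration:

#include <stdio.h>

static void decode_pit(unsigned char cw)
{
	printf("0x%02x: channel %u, access %u, mode %u, bcd %u\n",
	       cw, cw >> 6, (cw >> 4) & 3, (cw >> 1) & 7, cw & 1);
}

int main(void)
{
	decode_pit(0x34);	/* channel 0, LSB/MSB, mode 2 */
	decode_pit(0x30);	/* channel 0, LSB/MSB, mode 0 */
	return 0;
}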
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 8bb0aed..28d50dc 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -90,6 +90,20 @@ static inline void conditional_sti(struct pt_regs *regs)
local_irq_enable();
}
+static inline void preempt_conditional_sti(struct pt_regs *regs)
+{
+ preempt_disable();
+ if (regs->eflags & X86_EFLAGS_IF)
+ local_irq_enable();
+}
+
+static inline void preempt_conditional_cli(struct pt_regs *regs)
+{
+ if (regs->eflags & X86_EFLAGS_IF)
+ local_irq_disable();
+ preempt_enable_no_resched();
+}
+
static int kstack_depth_to_print = 10;
#ifdef CONFIG_KALLSYMS
@@ -372,7 +386,7 @@ void out_of_line_bug(void)
static DEFINE_SPINLOCK(die_lock);
static int die_owner = -1;
-unsigned long oops_begin(void)
+unsigned __kprobes long oops_begin(void)
{
int cpu = safe_smp_processor_id();
unsigned long flags;
@@ -391,7 +405,7 @@ unsigned long oops_begin(void)
return flags;
}
-void oops_end(unsigned long flags)
+void __kprobes oops_end(unsigned long flags)
{
die_owner = -1;
bust_spinlocks(0);
@@ -400,7 +414,7 @@ void oops_end(unsigned long flags)
panic("Oops");
}
-void __die(const char * str, struct pt_regs * regs, long err)
+void __kprobes __die(const char * str, struct pt_regs * regs, long err)
{
static int die_counter;
printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff,++die_counter);
@@ -432,7 +446,7 @@ void die(const char * str, struct pt_regs * regs, long err)
do_exit(SIGSEGV);
}
-void die_nmi(char *str, struct pt_regs *regs)
+void __kprobes die_nmi(char *str, struct pt_regs *regs)
{
unsigned long flags = oops_begin();
@@ -575,7 +589,8 @@ asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
}
}
-static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
+static __kprobes void
+mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
printk("You probably have a hardware problem with your RAM chips\n");
@@ -585,7 +600,8 @@ static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
outb(reason, 0x61);
}
-static void io_check_error(unsigned char reason, struct pt_regs * regs)
+static __kprobes void
+io_check_error(unsigned char reason, struct pt_regs * regs)
{
printk("NMI: IOCK error (debug interrupt?)\n");
show_registers(regs);
@@ -598,7 +614,8 @@ static void io_check_error(unsigned char reason, struct pt_regs * regs)
outb(reason, 0x61);
}
-static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
+static __kprobes void
+unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
printk("Dazed and confused, but trying to continue\n");
printk("Do you have a strange power saving mode enabled?\n");
@@ -606,7 +623,7 @@ static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
/* Runs on IST stack. This code must keep interrupts off all the time.
Nested NMIs are prevented by the CPU. */
-asmlinkage void default_do_nmi(struct pt_regs *regs)
+asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
{
unsigned char reason = 0;
int cpu;
@@ -658,7 +675,7 @@ asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
/* Help handler running on IST stack to switch back to user stack
for scheduling or signal handling. The actual stack switch is done in
entry.S */
-asmlinkage struct pt_regs *sync_regs(struct pt_regs *eregs)
+asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
struct pt_regs *regs = eregs;
/* Did already sync */
@@ -690,7 +707,7 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs,
SIGTRAP) == NOTIFY_STOP)
return;
- conditional_sti(regs);
+ preempt_conditional_sti(regs);
/* Mask out spurious debug traps due to lazy DR7 setting */
if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
@@ -735,11 +752,13 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs,
clear_dr7:
set_debugreg(0UL, 7);
+ preempt_conditional_cli(regs);
return;
clear_TF_reenable:
set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
regs->eflags &= ~TF_MASK;
+ preempt_conditional_cli(regs);
}
static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
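[Editor's note] Why the new preempt_conditional_sti()/cli() pair instead of plain
conditional_sti(): do_debug() runs on an IST stack, and being preempted there
with interrupts enabled could resume on the wrong stack. The helpers therefore
bracket the IRQ toggle with preemption control, and every exit path must restore
it (sketch of the required usage pattern):

	preempt_conditional_sti(regs);	/* preempt_disable, then maybe sti */
	/* ... handle the #DB exception ... */
	preempt_conditional_cli(regs);	/* maybe cli, then preempt_enable */

Note how the patch adds the cli side to both the clear_dr7 and
clear_TF_reenable exits.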
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index b0eed1f..74db006 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -172,13 +172,15 @@ SECTIONS
. = ALIGN(4096);
__initramfs_start = .;
.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) }
- __initramfs_end = .;
- . = ALIGN(32);
+ __initramfs_end = .;
+ /* temporary here to work around NR_CPUS. If you see this comment in 2.6.17+
+ complain */
+ . = ALIGN(4096);
+ __init_end = .;
+ . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
__per_cpu_start = .;
.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) }
__per_cpu_end = .;
- . = ALIGN(4096);
- __init_end = .;
. = ALIGN(4096);
__nosave_begin = .;
diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c
index b614d54..3496abc 100644
--- a/arch/x86_64/kernel/x8664_ksyms.c
+++ b/arch/x86_64/kernel/x8664_ksyms.c
@@ -39,11 +39,6 @@ extern void __write_lock_failed(rwlock_t *rw);
extern void __read_lock_failed(rwlock_t *rw);
#endif
-#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
-extern struct drive_info_struct drive_info;
-EXPORT_SYMBOL(drive_info);
-#endif
-
/* platform dependent support */
EXPORT_SYMBOL(boot_cpu_data);
//EXPORT_SYMBOL(dump_fpu);
diff --git a/arch/x86_64/lib/Makefile b/arch/x86_64/lib/Makefile
index bba5db6..ccef6ae 100644
--- a/arch/x86_64/lib/Makefile
+++ b/arch/x86_64/lib/Makefile
@@ -4,7 +4,7 @@
CFLAGS_csum-partial.o := -funroll-loops
-obj-y := io.o
+obj-y := io.o iomap_copy.o
lib-y := csum-partial.o csum-copy.o csum-wrappers.o delay.o \
usercopy.o getuser.o putuser.o \
diff --git a/arch/x86_64/lib/clear_page.S b/arch/x86_64/lib/clear_page.S
index 43d9fa1..1f81b79 100644
--- a/arch/x86_64/lib/clear_page.S
+++ b/arch/x86_64/lib/clear_page.S
@@ -5,8 +5,46 @@
.globl clear_page
.p2align 4
clear_page:
+ xorl %eax,%eax
+ movl $4096/64,%ecx
+ .p2align 4
+.Lloop:
+ decl %ecx
+#define PUT(x) movq %rax,x*8(%rdi)
+ movq %rax,(%rdi)
+ PUT(1)
+ PUT(2)
+ PUT(3)
+ PUT(4)
+ PUT(5)
+ PUT(6)
+ PUT(7)
+ leaq 64(%rdi),%rdi
+ jnz .Lloop
+ nop
+ ret
+clear_page_end:
+
+ /* Some CPUs run faster using the string instructions.
+ It is also a lot simpler. Use this when possible */
+
+#include <asm/cpufeature.h>
+
+ .section .altinstructions,"a"
+ .align 8
+ .quad clear_page
+ .quad clear_page_c
+ .byte X86_FEATURE_REP_GOOD
+ .byte clear_page_end-clear_page
+ .byte clear_page_c_end-clear_page_c
+ .previous
+
+ .section .altinstr_replacement,"ax"
+clear_page_c:
movl $4096/8,%ecx
xorl %eax,%eax
rep
stosq
ret
+clear_page_c_end:
+ .previous
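[Editor's note] The .altinstructions/.altinstr_replacement pairing above is the
kernel's boot-time code patching: apply_alternatives() walks an array of records
and, when the running CPU sets the named feature bit (X86_FEATURE_REP_GOOD here),
overwrites the original code with the replacement. Each .quad/.quad/.byte/.byte/
.byte group emits one record of roughly this shape (2.6-era layout; field names
illustrative):

struct alt_instr {
	u8 *instr;		/* .quad clear_page: original code */
	u8 *replacement;	/* .quad clear_page_c: patch source */
	u8  cpuid;		/* .byte X86_FEATURE_REP_GOOD */
	u8  instrlen;		/* .byte clear_page_end-clear_page */
	u8  replacementlen;	/* .byte clear_page_c_end-clear_page_c, <= instrlen */
};

The same mechanism is reused below for copy_page, copy_user, memcpy, and memset.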
diff --git a/arch/x86_64/lib/copy_page.S b/arch/x86_64/lib/copy_page.S
index 621a197..8fa19d9 100644
--- a/arch/x86_64/lib/copy_page.S
+++ b/arch/x86_64/lib/copy_page.S
@@ -8,7 +8,94 @@
.globl copy_page
.p2align 4
copy_page:
+ subq $3*8,%rsp
+ movq %rbx,(%rsp)
+ movq %r12,1*8(%rsp)
+ movq %r13,2*8(%rsp)
+
+ movl $(4096/64)-5,%ecx
+ .p2align 4
+.Loop64:
+ dec %rcx
+
+ movq (%rsi), %rax
+ movq 8 (%rsi), %rbx
+ movq 16 (%rsi), %rdx
+ movq 24 (%rsi), %r8
+ movq 32 (%rsi), %r9
+ movq 40 (%rsi), %r10
+ movq 48 (%rsi), %r11
+ movq 56 (%rsi), %r12
+
+ prefetcht0 5*64(%rsi)
+
+ movq %rax, (%rdi)
+ movq %rbx, 8 (%rdi)
+ movq %rdx, 16 (%rdi)
+ movq %r8, 24 (%rdi)
+ movq %r9, 32 (%rdi)
+ movq %r10, 40 (%rdi)
+ movq %r11, 48 (%rdi)
+ movq %r12, 56 (%rdi)
+
+ leaq 64 (%rsi), %rsi
+ leaq 64 (%rdi), %rdi
+
+ jnz .Loop64
+
+ movl $5,%ecx
+ .p2align 4
+.Loop2:
+ decl %ecx
+
+ movq (%rsi), %rax
+ movq 8 (%rsi), %rbx
+ movq 16 (%rsi), %rdx
+ movq 24 (%rsi), %r8
+ movq 32 (%rsi), %r9
+ movq 40 (%rsi), %r10
+ movq 48 (%rsi), %r11
+ movq 56 (%rsi), %r12
+
+ movq %rax, (%rdi)
+ movq %rbx, 8 (%rdi)
+ movq %rdx, 16 (%rdi)
+ movq %r8, 24 (%rdi)
+ movq %r9, 32 (%rdi)
+ movq %r10, 40 (%rdi)
+ movq %r11, 48 (%rdi)
+ movq %r12, 56 (%rdi)
+
+ leaq 64(%rdi),%rdi
+ leaq 64(%rsi),%rsi
+
+ jnz .Loop2
+
+ movq (%rsp),%rbx
+ movq 1*8(%rsp),%r12
+ movq 2*8(%rsp),%r13
+ addq $3*8,%rsp
+ ret
+
+ /* Some CPUs run faster using the string copy instructions.
+ It is also a lot simpler. Use this when possible */
+
+#include <asm/cpufeature.h>
+
+ .section .altinstructions,"a"
+ .align 8
+ .quad copy_page
+ .quad copy_page_c
+ .byte X86_FEATURE_REP_GOOD
+ .byte copy_page_c_end-copy_page_c
+ .byte copy_page_c_end-copy_page_c
+ .previous
+
+ .section .altinstr_replacement,"ax"
+copy_page_c:
movl $4096/8,%ecx
rep
movsq
ret
+copy_page_c_end:
+ .previous
diff --git a/arch/x86_64/lib/copy_user.S b/arch/x86_64/lib/copy_user.S
index 79422b6..f64569b 100644
--- a/arch/x86_64/lib/copy_user.S
+++ b/arch/x86_64/lib/copy_user.S
@@ -4,9 +4,12 @@
* Functions to copy from and to user space.
*/
+#define FIX_ALIGNMENT 1
+
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
+ #include <asm/cpufeature.h>
/* Standard copy_to_user with segment limit checking */
.globl copy_to_user
@@ -18,7 +21,23 @@ copy_to_user:
jc bad_to_user
cmpq threadinfo_addr_limit(%rax),%rcx
jae bad_to_user
- jmp copy_user_generic
+2:
+ .byte 0xe9 /* 32bit jump */
+ .long .Lcug-1f
+1:
+
+ .section .altinstr_replacement,"ax"
+3: .byte 0xe9 /* replacement jmp with 32bit immediate */
+ .long copy_user_generic_c-1b /* offset */
+ .previous
+ .section .altinstructions,"a"
+ .align 8
+ .quad 2b
+ .quad 3b
+ .byte X86_FEATURE_REP_GOOD
+ .byte 5
+ .byte 5
+ .previous
/* Standard copy_from_user with segment limit checking */
.globl copy_from_user
@@ -53,44 +72,230 @@ bad_to_user:
* rsi source
* rdx count
*
- * Only 4GB of copy is supported. This shouldn't be a problem
- * because the kernel normally only writes from/to page sized chunks
- * even if user space passed a longer buffer.
- * And more would be dangerous because both Intel and AMD have
- * errata with rep movsq > 4GB. If someone feels the need to fix
- * this please consider this.
- *
* Output:
* eax uncopied bytes or 0 if successful.
*/
-
.globl copy_user_generic
+ .p2align 4
copy_user_generic:
+ .byte 0x66,0x66,0x90 /* 5 byte nop for replacement jump */
+ .byte 0x66,0x90
+1:
+ .section .altinstr_replacement,"ax"
+2: .byte 0xe9 /* near jump with 32bit immediate */
+ .long copy_user_generic_c-1b /* offset */
+ .previous
+ .section .altinstructions,"a"
+ .align 8
+ .quad copy_user_generic
+ .quad 2b
+ .byte X86_FEATURE_REP_GOOD
+ .byte 5
+ .byte 5
+ .previous
+.Lcug:
+ pushq %rbx
+ xorl %eax,%eax /*zero for the exception handler */
+
+#ifdef FIX_ALIGNMENT
+ /* check for bad alignment of destination */
+ movl %edi,%ecx
+ andl $7,%ecx
+ jnz .Lbad_alignment
+.Lafter_bad_alignment:
+#endif
+
+ movq %rdx,%rcx
+
+ movl $64,%ebx
+ shrq $6,%rdx
+ decq %rdx
+ js .Lhandle_tail
+
+ .p2align 4
+.Lloop:
+.Ls1: movq (%rsi),%r11
+.Ls2: movq 1*8(%rsi),%r8
+.Ls3: movq 2*8(%rsi),%r9
+.Ls4: movq 3*8(%rsi),%r10
+.Ld1: movq %r11,(%rdi)
+.Ld2: movq %r8,1*8(%rdi)
+.Ld3: movq %r9,2*8(%rdi)
+.Ld4: movq %r10,3*8(%rdi)
+
+.Ls5: movq 4*8(%rsi),%r11
+.Ls6: movq 5*8(%rsi),%r8
+.Ls7: movq 6*8(%rsi),%r9
+.Ls8: movq 7*8(%rsi),%r10
+.Ld5: movq %r11,4*8(%rdi)
+.Ld6: movq %r8,5*8(%rdi)
+.Ld7: movq %r9,6*8(%rdi)
+.Ld8: movq %r10,7*8(%rdi)
+
+ decq %rdx
+
+ leaq 64(%rsi),%rsi
+ leaq 64(%rdi),%rdi
+
+ jns .Lloop
+
+ .p2align 4
+.Lhandle_tail:
+ movl %ecx,%edx
+ andl $63,%ecx
+ shrl $3,%ecx
+ jz .Lhandle_7
+ movl $8,%ebx
+ .p2align 4
+.Lloop_8:
+.Ls9: movq (%rsi),%r8
+.Ld9: movq %r8,(%rdi)
+ decl %ecx
+ leaq 8(%rdi),%rdi
+ leaq 8(%rsi),%rsi
+ jnz .Lloop_8
+
+.Lhandle_7:
+ movl %edx,%ecx
+ andl $7,%ecx
+ jz .Lende
+ .p2align 4
+.Lloop_1:
+.Ls10: movb (%rsi),%bl
+.Ld10: movb %bl,(%rdi)
+ incq %rdi
+ incq %rsi
+ decl %ecx
+ jnz .Lloop_1
+
+.Lende:
+ popq %rbx
+ ret
+
+#ifdef FIX_ALIGNMENT
+ /* align destination */
+ .p2align 4
+.Lbad_alignment:
+ movl $8,%r9d
+ subl %ecx,%r9d
+ movl %r9d,%ecx
+ cmpq %r9,%rdx
+ jz .Lhandle_7
+ js .Lhandle_7
+.Lalign_1:
+.Ls11: movb (%rsi),%bl
+.Ld11: movb %bl,(%rdi)
+ incq %rsi
+ incq %rdi
+ decl %ecx
+ jnz .Lalign_1
+ subq %r9,%rdx
+ jmp .Lafter_bad_alignment
+#endif
+
+ /* table sorted by exception address */
+ .section __ex_table,"a"
+ .align 8
+ .quad .Ls1,.Ls1e
+ .quad .Ls2,.Ls2e
+ .quad .Ls3,.Ls3e
+ .quad .Ls4,.Ls4e
+ .quad .Ld1,.Ls1e
+ .quad .Ld2,.Ls2e
+ .quad .Ld3,.Ls3e
+ .quad .Ld4,.Ls4e
+ .quad .Ls5,.Ls5e
+ .quad .Ls6,.Ls6e
+ .quad .Ls7,.Ls7e
+ .quad .Ls8,.Ls8e
+ .quad .Ld5,.Ls5e
+ .quad .Ld6,.Ls6e
+ .quad .Ld7,.Ls7e
+ .quad .Ld8,.Ls8e
+ .quad .Ls9,.Le_quad
+ .quad .Ld9,.Le_quad
+ .quad .Ls10,.Le_byte
+ .quad .Ld10,.Le_byte
+#ifdef FIX_ALIGNMENT
+ .quad .Ls11,.Lzero_rest
+ .quad .Ld11,.Lzero_rest
+#endif
+ .quad .Le5,.Le_zero
+ .previous
+
+ /* compute 64-offset for main loop. 8 bytes accuracy with error on the
+ pessimistic side. this is gross. it would be better to fix the
+ interface. */
+ /* eax: zero, ebx: 64 */
+.Ls1e: addl $8,%eax
+.Ls2e: addl $8,%eax
+.Ls3e: addl $8,%eax
+.Ls4e: addl $8,%eax
+.Ls5e: addl $8,%eax
+.Ls6e: addl $8,%eax
+.Ls7e: addl $8,%eax
+.Ls8e: addl $8,%eax
+ addq %rbx,%rdi /* +64 */
+ subq %rax,%rdi /* correct destination with computed offset */
+
+ shlq $6,%rdx /* loop counter * 64 (stride length) */
+ addq %rax,%rdx /* add offset to loopcnt */
+ andl $63,%ecx /* remaining bytes */
+ addq %rcx,%rdx /* add them */
+ jmp .Lzero_rest
+
+ /* exception on quad word loop in tail handling */
+ /* ecx: loopcnt/8, %edx: length, rdi: correct */
+.Le_quad:
+ shll $3,%ecx
+ andl $7,%edx
+ addl %ecx,%edx
+ /* edx: bytes to zero, rdi: dest, eax:zero */
+.Lzero_rest:
+ movq %rdx,%rcx
+.Le_byte:
+ xorl %eax,%eax
+.Le5: rep
+ stosb
+ /* when there is another exception while zeroing the rest just return */
+.Le_zero:
+ movq %rdx,%rax
+ jmp .Lende
+
+ /* Some CPUs run faster using the string copy instructions.
+ This is also a lot simpler. Use them when possible.
+ Patch in jmps to this code instead of copying it fully
+ to avoid unwanted aliasing in the exception tables. */
+
+ /* rdi destination
+ * rsi source
+ * rdx count
+ *
+ * Output:
+ * eax uncopied bytes or 0 if successful.
+ *
+ * Only 4GB of copy is supported. This shouldn't be a problem
+ * because the kernel normally only writes from/to page sized chunks
+ * even if user space passed a longer buffer.
+ * And more would be dangerous because both Intel and AMD have
+ * errata with rep movsq > 4GB. If someone feels the need to fix
+ * this please consider this.
+ */
+copy_user_generic_c:
movl %edx,%ecx
shrl $3,%ecx
andl $7,%edx
- jz 5f
1: rep
movsq
movl %edx,%ecx
- xor %eax,%eax
2: rep
movsb
+4: movl %ecx,%eax
ret
- /* align here? */
-5: xorl %eax,%eax
-6: rep movsq
- ret
-
- .section .fixup,"ax"
3: lea (%rdx,%rcx,8),%rax
ret
-4: movl %ecx,%eax
- ret
- .previous
.section __ex_table,"a"
.quad 1b,3b
.quad 2b,4b
- .quad 6b,4b
.previous
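[Editor's note] The __ex_table entries emitted above are how the fault path
recovers mid-copy: each record pairs an instruction that may fault on a user
address with a fixup label, and on a page fault the handler looks up the saved
RIP and resumes at the paired fixup, which computes the uncopied byte count
returned in eax. On x86-64 at the time each record was simply (per
asm-x86_64/uaccess.h):

struct exception_table_entry {
	unsigned long insn;	/* address of the instruction that may fault */
	unsigned long fixup;	/* address execution resumes at */
};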
diff --git a/arch/x86_64/lib/iomap_copy.S b/arch/x86_64/lib/iomap_copy.S
new file mode 100644
index 0000000..8bbade5
--- /dev/null
+++ b/arch/x86_64/lib/iomap_copy.S
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2006 PathScale, Inc. All Rights Reserved.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/*
+ * override generic version in lib/iomap_copy.c
+ */
+ .globl __iowrite32_copy
+ .p2align 4
+__iowrite32_copy:
+ movl %edx,%ecx
+ rep movsd
+ ret
diff --git a/arch/x86_64/lib/memcpy.S b/arch/x86_64/lib/memcpy.S
index 92dd805..5554948 100644
--- a/arch/x86_64/lib/memcpy.S
+++ b/arch/x86_64/lib/memcpy.S
@@ -11,8 +11,6 @@
*
* Output:
* rax original destination
- *
- * TODO: check best memcpy for PSC
*/
.globl __memcpy
@@ -20,6 +18,95 @@
.p2align 4
__memcpy:
memcpy:
+ pushq %rbx
+ movq %rdi,%rax
+
+ movl %edx,%ecx
+ shrl $6,%ecx
+ jz .Lhandle_tail
+
+ .p2align 4
+.Lloop_64:
+ decl %ecx
+
+ movq (%rsi),%r11
+ movq 8(%rsi),%r8
+
+ movq %r11,(%rdi)
+ movq %r8,1*8(%rdi)
+
+ movq 2*8(%rsi),%r9
+ movq 3*8(%rsi),%r10
+
+ movq %r9,2*8(%rdi)
+ movq %r10,3*8(%rdi)
+
+ movq 4*8(%rsi),%r11
+ movq 5*8(%rsi),%r8
+
+ movq %r11,4*8(%rdi)
+ movq %r8,5*8(%rdi)
+
+ movq 6*8(%rsi),%r9
+ movq 7*8(%rsi),%r10
+
+ movq %r9,6*8(%rdi)
+ movq %r10,7*8(%rdi)
+
+ leaq 64(%rsi),%rsi
+ leaq 64(%rdi),%rdi
+ jnz .Lloop_64
+
+.Lhandle_tail:
+ movl %edx,%ecx
+ andl $63,%ecx
+ shrl $3,%ecx
+ jz .Lhandle_7
+ .p2align 4
+.Lloop_8:
+ decl %ecx
+ movq (%rsi),%r8
+ movq %r8,(%rdi)
+ leaq 8(%rdi),%rdi
+ leaq 8(%rsi),%rsi
+ jnz .Lloop_8
+
+.Lhandle_7:
+ movl %edx,%ecx
+ andl $7,%ecx
+ jz .Lende
+ .p2align 4
+.Lloop_1:
+ movb (%rsi),%r8b
+ movb %r8b,(%rdi)
+ incq %rdi
+ incq %rsi
+ decl %ecx
+ jnz .Lloop_1
+
+.Lende:
+ popq %rbx
+ ret
+.Lfinal:
+
+ /* Some CPUs run faster using the string copy instructions.
+ It is also a lot simpler. Use this when possible */
+
+ .section .altinstructions,"a"
+ .align 8
+ .quad memcpy
+ .quad memcpy_c
+ .byte X86_FEATURE_REP_GOOD
+ .byte .Lfinal-memcpy
+ .byte memcpy_c_end-memcpy_c
+ .previous
+
+ .section .altinstr_replacement,"ax"
+ /* rdi destination
+ * rsi source
+ * rdx count
+ */
+memcpy_c:
movq %rdi,%rax
movl %edx,%ecx
shrl $3,%ecx
@@ -30,3 +117,5 @@ memcpy:
rep
movsb
ret
+memcpy_c_end:
+ .previous
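[Editor's note] For readers who prefer C to AT&T assembly, the unrolled path
above has this shape: copy 64 bytes per iteration through eight quadword
registers, then an 8-byte tail loop, then a byte tail loop (a sketch, not the
kernel's code; strict-aliasing niceties ignored):

#include <stddef.h>
#include <stdint.h>

void *memcpy_unrolled(void *dst, const void *src, size_t n)
{
	uint64_t *d = dst;
	const uint64_t *s = src;
	size_t i;

	for (i = 0; i < n / 64; i++) {		/* .Lloop_64 */
		d[0] = s[0]; d[1] = s[1]; d[2] = s[2]; d[3] = s[3];
		d[4] = s[4]; d[5] = s[5]; d[6] = s[6]; d[7] = s[7];
		d += 8; s += 8;
	}
	for (i = 0; i < (n % 64) / 8; i++)	/* .Lloop_8 */
		*d++ = *s++;
	for (i = 0; i < n % 8; i++)		/* .Lloop_1 */
		((uint8_t *)d)[i] = ((const uint8_t *)s)[i];
	return dst;
}

On REP_GOOD CPUs the alternatives patching replaces all of this with the
three-instruction rep movsq/movsb version.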
diff --git a/arch/x86_64/lib/memset.S b/arch/x86_64/lib/memset.S
index 2aa48f2..ad397f2 100644
--- a/arch/x86_64/lib/memset.S
+++ b/arch/x86_64/lib/memset.S
@@ -13,6 +13,98 @@
.p2align 4
memset:
__memset:
+ movq %rdi,%r10
+ movq %rdx,%r11
+
+ /* expand byte value */
+ movzbl %sil,%ecx
+ movabs $0x0101010101010101,%rax
+ mul %rcx /* with rax, clobbers rdx */
+
+ /* align dst */
+ movl %edi,%r9d
+ andl $7,%r9d
+ jnz .Lbad_alignment
+.Lafter_bad_alignment:
+
+ movl %r11d,%ecx
+ shrl $6,%ecx
+ jz .Lhandle_tail
+
+ .p2align 4
+.Lloop_64:
+ decl %ecx
+ movq %rax,(%rdi)
+ movq %rax,8(%rdi)
+ movq %rax,16(%rdi)
+ movq %rax,24(%rdi)
+ movq %rax,32(%rdi)
+ movq %rax,40(%rdi)
+ movq %rax,48(%rdi)
+ movq %rax,56(%rdi)
+ leaq 64(%rdi),%rdi
+ jnz .Lloop_64
+
+ /* Handle tail in loops. The loops should be faster than hard
+ to predict jump tables. */
+ .p2align 4
+.Lhandle_tail:
+ movl %r11d,%ecx
+ andl $63&(~7),%ecx
+ jz .Lhandle_7
+ shrl $3,%ecx
+ .p2align 4
+.Lloop_8:
+ decl %ecx
+ movq %rax,(%rdi)
+ leaq 8(%rdi),%rdi
+ jnz .Lloop_8
+
+.Lhandle_7:
+ movl %r11d,%ecx
+ andl $7,%ecx
+ jz .Lende
+ .p2align 4
+.Lloop_1:
+ decl %ecx
+ movb %al,(%rdi)
+ leaq 1(%rdi),%rdi
+ jnz .Lloop_1
+
+.Lende:
+ movq %r10,%rax
+ ret
+
+.Lbad_alignment:
+ cmpq $7,%r11
+ jbe .Lhandle_7
+ movq %rax,(%rdi) /* unaligned store */
+ movq $8,%r8
+ subq %r9,%r8
+ addq %r8,%rdi
+ subq %r8,%r11
+ jmp .Lafter_bad_alignment
+
+ /* Some CPUs run faster using the string instructions.
+ It is also a lot simpler. Use this when possible */
+
+#include <asm/cpufeature.h>
+
+ .section .altinstructions,"a"
+ .align 8
+ .quad memset
+ .quad memset_c
+ .byte X86_FEATURE_REP_GOOD
+ .byte memset_c_end-memset_c
+ .byte memset_c_end-memset_c
+ .previous
+
+ .section .altinstr_replacement,"ax"
+ /* rdi destination
+ * rsi value
+ * rdx count
+ */
+memset_c:
movq %rdi,%r9
movl %edx,%r8d
andl $7,%r8d
@@ -29,3 +121,5 @@ __memset:
stosb
movq %r9,%rax
ret
+memset_c_end:
+ .previous
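[Editor's note] The "expand byte value" preamble is the broadcast-by-
multiplication idiom: a byte times 0x0101010101010101 replicates it into all
eight lanes of a 64-bit word, which the store loops then write out eight bytes
at a time. A one-line demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t c = 0xab;
	uint64_t pattern = (uint64_t)c * 0x0101010101010101ULL;

	printf("%016llx\n", (unsigned long long)pattern);	/* abababababababab */
	return 0;
}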
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 26eac19..2e7c3c8 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -33,7 +33,6 @@
#include <asm/proto.h>
#include <asm/kdebug.h>
#include <asm-generic/sections.h>
-#include <asm/kdebug.h>
/* Page fault error code bits */
#define PF_PROT (1<<0) /* or no page found */
@@ -157,8 +156,8 @@ void dump_pagetable(unsigned long address)
pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
pgd += pgd_index(address);
- printk("PGD %lx ", pgd_val(*pgd));
if (bad_address(pgd)) goto bad;
+ printk("PGD %lx ", pgd_val(*pgd));
if (!pgd_present(*pgd)) goto ret;
pud = __pud_offset_k((pud_t *)pgd_page(*pgd), address);
diff --git a/arch/x86_64/mm/k8topology.c b/arch/x86_64/mm/k8topology.c
index a5663e0b..dd60e71 100644
--- a/arch/x86_64/mm/k8topology.c
+++ b/arch/x86_64/mm/k8topology.c
@@ -155,7 +155,7 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
if (!found)
return -1;
- memnode_shift = compute_hash_shift(nodes, numnodes);
+ memnode_shift = compute_hash_shift(nodes, 8);
if (memnode_shift < 0) {
printk(KERN_ERR "No NUMA node hash function found. Contact maintainer\n");
return -1;
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index 6ef9f9a..22e51be 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -351,7 +351,7 @@ void __init init_cpu_to_node(void)
continue;
if (apicid_to_node[apicid] == NUMA_NO_NODE)
continue;
- cpu_to_node[i] = apicid_to_node[apicid];
+ numa_set_node(i,apicid_to_node[apicid]);
}
}
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 8b7f856..482c257 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -26,6 +26,10 @@ static nodemask_t nodes_found __initdata;
static struct node nodes[MAX_NUMNODES] __initdata;
static u8 pxm2node[256] = { [0 ... 255] = 0xff };
+/* Too small nodes confuse the VM badly. Usually they result
+ from BIOS bugs. */
+#define NODE_MIN_SIZE (4*1024*1024)
+
static int node_to_pxm(int n);
int pxm_to_node(int pxm)
@@ -131,7 +135,12 @@ void __init
acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
{
int pxm, node;
- if (srat_disabled() || pa->flags.enabled == 0)
+ if (srat_disabled())
+ return;
+ if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
+ bad_srat();
+ return;
+ }
+ if (pa->flags.enabled == 0)
return;
pxm = pa->proximity_domain;
node = setup_node(pxm);
@@ -155,8 +164,16 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
int node, pxm;
int i;
- if (srat_disabled() || ma->flags.enabled == 0)
+ if (srat_disabled())
+ return;
+ if (ma->header.length != sizeof(struct acpi_table_memory_affinity)) {
+ bad_srat();
return;
+ }
+ if (ma->flags.enabled == 0)
+ return;
+ start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
+ end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
pxm = ma->proximity_domain;
node = setup_node(pxm);
if (node < 0) {
@@ -164,8 +181,6 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
bad_srat();
return;
}
- start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
- end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
/* It is fine to add this area to the nodes data it will be used later*/
if (ma->flags.hot_pluggable == 1)
printk(KERN_INFO "SRAT: hot plug zone found %lx - %lx \n",
@@ -213,7 +228,8 @@ static int nodes_cover_memory(void)
}
e820ram = end_pfn - e820_hole_size(0, end_pfn);
- if (pxmram < e820ram) {
+ /* We seem to lose 3 pages somewhere. Allow a bit of slack. */
+ if ((long)(e820ram - pxmram) >= 1*1024*1024) {
printk(KERN_ERR
"SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n",
(pxmram << PAGE_SHIFT) >> 20,
@@ -223,6 +239,16 @@ static int nodes_cover_memory(void)
return 1;
}
+static void unparse_node(int node)
+{
+ int i;
+ node_clear(node, nodes_parsed);
+ for (i = 0; i < MAX_LOCAL_APIC; i++) {
+ if (apicid_to_node[i] == node)
+ apicid_to_node[i] = NUMA_NO_NODE;
+ }
+}
+
void __init acpi_numa_arch_fixup(void) {}
/* Use the information discovered above to actually set up the nodes. */
@@ -230,22 +256,22 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
{
int i;
- if (acpi_numa <= 0)
- return -1;
-
/* First clean up the node list */
- for_each_node_mask(i, nodes_parsed) {
+ for (i = 0; i < MAX_NUMNODES; i++) {
cutoff_node(i, start, end);
- if (nodes[i].start == nodes[i].end)
- node_clear(i, nodes_parsed);
+ if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE)
+ unparse_node(i);
}
+ if (acpi_numa <= 0)
+ return -1;
+
if (!nodes_cover_memory()) {
bad_srat();
return -1;
}
- memnode_shift = compute_hash_shift(nodes, nodes_weight(nodes_parsed));
+ memnode_shift = compute_hash_shift(nodes, MAX_NUMNODES);
if (memnode_shift < 0) {
printk(KERN_ERR
"SRAT: No NUMA node hash function found. Contact maintainer\n");
diff --git a/arch/x86_64/pci/mmconfig.c b/arch/x86_64/pci/mmconfig.c
index f16c0d5..18f371f 100644
--- a/arch/x86_64/pci/mmconfig.c
+++ b/arch/x86_64/pci/mmconfig.c
@@ -29,11 +29,8 @@ static char __iomem *get_virt(unsigned int seg, unsigned bus)
while (1) {
++cfg_num;
- if (cfg_num >= pci_mmcfg_config_num) {
- /* Not found - fall back to type 1. This happens
- e.g. on the internal devices of a K8 northbridge. */
- return NULL;
- }
+ if (cfg_num >= pci_mmcfg_config_num)
+ break;
cfg = pci_mmcfg_virt[cfg_num].cfg;
if (cfg->pci_segment_group_number != seg)
continue;
@@ -41,6 +38,18 @@ static char __iomem *get_virt(unsigned int seg, unsigned bus)
(cfg->end_bus_number >= bus))
return pci_mmcfg_virt[cfg_num].virt;
}
+
+ /* Handle more broken MCFG tables on Asus etc.
+ They only contain a single entry for bus 0-0. Assume
+ this applies to all busses. */
+ cfg = &pci_mmcfg_config[0];
+ if (pci_mmcfg_config_num == 1 &&
+ cfg->pci_segment_group_number == 0 &&
+ (cfg->start_bus_number | cfg->end_bus_number) == 0)
+ return pci_mmcfg_virt[0].virt;
+
+ /* Fall back to type 0 */
+ return NULL;
}
static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn)
OpenPOWER on IntegriCloud