From 7c393e7b95bceafcefd5357afdda9f5047b6aa69 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 26 Jun 2006 13:56:01 +0200 Subject: [PATCH] x86_64: Update defconfig Enable some hwmon drivers as modules, plus the tulip driver and stack unwinding. The kernel image should be somewhat bigger now because the unwind information is included, but in exchange you get exact backtraces. Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/defconfig | 159 ++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 134 insertions(+), 25 deletions(-) diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig index 69db0c0..e69d403 100644 --- a/arch/x86_64/defconfig +++ b/arch/x86_64/defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.17-rc1-git11 -# Sun Apr 16 07:22:36 2006 +# Linux kernel version: 2.6.17-git6 +# Sat Jun 24 00:52:28 2006 # CONFIG_X86_64=y CONFIG_64BIT=y @@ -42,7 +42,6 @@ CONFIG_IKCONFIG_PROC=y # CONFIG_RELAY is not set CONFIG_INITRAMFS_SOURCE="" CONFIG_UID16=y -CONFIG_VM86=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_EMBEDDED is not set CONFIG_KALLSYMS=y @@ -57,7 +56,6 @@ CONFIG_FUTEX=y CONFIG_EPOLL=y CONFIG_SHMEM=y CONFIG_SLAB=y -CONFIG_DOUBLEFAULT=y # CONFIG_TINY_SHMEM is not set CONFIG_BASE_SMALL=0 # CONFIG_SLOB is not set @@ -144,7 +142,8 @@ CONFIG_NR_CPUS=32 CONFIG_HOTPLUG_CPU=y CONFIG_HPET_TIMER=y CONFIG_HPET_EMULATE_RTC=y -CONFIG_GART_IOMMU=y +CONFIG_IOMMU=y +# CONFIG_CALGARY_IOMMU is not set CONFIG_SWIOTLB=y CONFIG_X86_MCE=y CONFIG_X86_MCE_INTEL=y @@ -158,6 +157,7 @@ CONFIG_HZ_250=y # CONFIG_HZ_1000 is not set CONFIG_HZ=250 # CONFIG_REORDER is not set +CONFIG_K8_NB=y CONFIG_GENERIC_HARDIRQS=y CONFIG_GENERIC_IRQ_PROBE=y CONFIG_ISA_DMA_API=y @@ -293,6 +293,8 @@ CONFIG_IP_PNP_DHCP=y # CONFIG_INET_IPCOMP is not set # CONFIG_INET_XFRM_TUNNEL is not set # CONFIG_INET_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set CONFIG_INET_DIAG=y CONFIG_INET_TCP_DIAG=y # CONFIG_TCP_CONG_ADVANCED is not set @@ -305,7 +307,10 @@ CONFIG_IPV6=y # CONFIG_INET6_IPCOMP is not set # CONFIG_INET6_XFRM_TUNNEL is not set # CONFIG_INET6_TUNNEL is not set +# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET6_XFRM_MODE_TUNNEL is not set # CONFIG_IPV6_TUNNEL is not set +# CONFIG_NETWORK_SECMARK is not set # CONFIG_NETFILTER is not set # @@ -344,6 +349,7 @@ CONFIG_IPV6=y # Network testing # # CONFIG_NET_PKTGEN is not set +# CONFIG_NET_TCPPROBE is not set # CONFIG_HAMRADIO is not set # CONFIG_IRDA is not set # CONFIG_BT is not set @@ -360,6 +366,7 @@ CONFIG_STANDALONE=y CONFIG_PREVENT_FIRMWARE_BUILD=y CONFIG_FW_LOADER=y # CONFIG_DEBUG_DRIVER is not set +# CONFIG_SYS_HYPERVISOR is not set # # Connector - unified userspace <-> kernelspace linker # @@ -526,6 +533,7 @@ CONFIG_SCSI_ATA_PIIX=y # CONFIG_SCSI_SATA_MV is not set CONFIG_SCSI_SATA_NV=y # CONFIG_SCSI_PDC_ADMA is not set +# CONFIG_SCSI_HPTIOP is not set # CONFIG_SCSI_SATA_QSTOR is not set # CONFIG_SCSI_SATA_PROMISE is not set # CONFIG_SCSI_SATA_SX4 is not set @@ -591,10 +599,7 @@ CONFIG_IEEE1394=y # # Device Drivers # - -# -# Texas Instruments PCILynx requires I2C -# +# CONFIG_IEEE1394_PCILYNX is not set CONFIG_IEEE1394_OHCI1394=y # @@ -645,7 +650,16 @@ CONFIG_VORTEX=y # # Tulip family network device support # -# CONFIG_NET_TULIP is not set +CONFIG_NET_TULIP=y +# CONFIG_DE2104X is not set +CONFIG_TULIP=y +# CONFIG_TULIP_MWI is not set +# CONFIG_TULIP_MMIO is not set +# CONFIG_TULIP_NAPI is not set +# CONFIG_DE4X5 is not set +# 
CONFIG_WINBOND_840 is not set +# CONFIG_DM9102 is not set +# CONFIG_ULI526X is not set # CONFIG_HP100 is not set CONFIG_NET_PCI=y # CONFIG_PCNET32 is not set @@ -697,6 +711,7 @@ CONFIG_TIGON3=y # CONFIG_IXGB is not set CONFIG_S2IO=m # CONFIG_S2IO_NAPI is not set +# CONFIG_MYRI10GE is not set # # Token Ring devices @@ -887,7 +902,56 @@ CONFIG_HPET_MMAP=y # # I2C support # -# CONFIG_I2C is not set +CONFIG_I2C=m +CONFIG_I2C_CHARDEV=m + +# +# I2C Algorithms +# +# CONFIG_I2C_ALGOBIT is not set +# CONFIG_I2C_ALGOPCF is not set +# CONFIG_I2C_ALGOPCA is not set + +# +# I2C Hardware Bus support +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_I810 is not set +# CONFIG_I2C_PIIX4 is not set +CONFIG_I2C_ISA=m +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_PROSAVAGE is not set +# CONFIG_I2C_SAVAGE4 is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set +# CONFIG_I2C_VOODOO3 is not set +# CONFIG_I2C_PCA_ISA is not set + +# +# Miscellaneous I2C Chip support +# +# CONFIG_SENSORS_DS1337 is not set +# CONFIG_SENSORS_DS1374 is not set +# CONFIG_SENSORS_EEPROM is not set +# CONFIG_SENSORS_PCF8574 is not set +# CONFIG_SENSORS_PCA9539 is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_SENSORS_MAX6875 is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_I2C_DEBUG_CHIP is not set # # SPI support @@ -898,14 +962,51 @@ CONFIG_HPET_MMAP=y # # Dallas's 1-wire bus # -# CONFIG_W1 is not set # # Hardware Monitoring support # CONFIG_HWMON=y # CONFIG_HWMON_VID is not set +# CONFIG_SENSORS_ABITUGURU is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ASB100 is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS1621 is not set # CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_FSCHER is not set +# CONFIG_SENSORS_FSCPOS is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +CONFIG_SENSORS_SMSC47B397=m +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set # CONFIG_SENSORS_HDAPS is not set # CONFIG_HWMON_DEBUG_CHIP is not set @@ -918,6 +1019,7 @@ CONFIG_HWMON=y # Multimedia devices # # CONFIG_VIDEO_DEV is not set +CONFIG_VIDEO_V4L2=y # # Digital Video Broadcasting Devices @@ 
-953,28 +1055,17 @@ CONFIG_SOUND=y # Open Sound System # CONFIG_SOUND_PRIME=y -CONFIG_OBSOLETE_OSS_DRIVER=y # CONFIG_SOUND_BT878 is not set -# CONFIG_SOUND_CMPCI is not set # CONFIG_SOUND_EMU10K1 is not set # CONFIG_SOUND_FUSION is not set -# CONFIG_SOUND_CS4281 is not set -# CONFIG_SOUND_ES1370 is not set # CONFIG_SOUND_ES1371 is not set -# CONFIG_SOUND_ESSSOLO1 is not set -# CONFIG_SOUND_MAESTRO is not set -# CONFIG_SOUND_MAESTRO3 is not set CONFIG_SOUND_ICH=y -# CONFIG_SOUND_SONICVIBES is not set # CONFIG_SOUND_TRIDENT is not set # CONFIG_SOUND_MSNDCLAS is not set # CONFIG_SOUND_MSNDPIN is not set # CONFIG_SOUND_VIA82CXXX is not set # CONFIG_SOUND_OSS is not set -# CONFIG_SOUND_ALI5455 is not set -# CONFIG_SOUND_FORTE is not set -# CONFIG_SOUND_RME96XX is not set -# CONFIG_SOUND_AD1980 is not set +# CONFIG_SOUND_TVMIXER is not set # # USB support @@ -1000,6 +1091,7 @@ CONFIG_USB_DEVICEFS=y CONFIG_USB_EHCI_HCD=y # CONFIG_USB_EHCI_SPLIT_ISO is not set # CONFIG_USB_EHCI_ROOT_HUB_TT is not set +# CONFIG_USB_EHCI_TT_NEWSCHED is not set # CONFIG_USB_ISP116X_HCD is not set CONFIG_USB_OHCI_HCD=y # CONFIG_USB_OHCI_BIG_ENDIAN is not set @@ -1089,10 +1181,12 @@ CONFIG_USB_MON=y # CONFIG_USB_LEGOTOWER is not set # CONFIG_USB_LCD is not set # CONFIG_USB_LED is not set +# CONFIG_USB_CY7C63 is not set # CONFIG_USB_CYTHERM is not set # CONFIG_USB_PHIDGETKIT is not set # CONFIG_USB_PHIDGETSERVO is not set # CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_APPLEDISPLAY is not set # CONFIG_USB_SISUSBVGA is not set # CONFIG_USB_LD is not set # CONFIG_USB_TEST is not set @@ -1141,6 +1235,19 @@ CONFIG_USB_MON=y # CONFIG_RTC_CLASS is not set # +# DMA Engine support +# +# CONFIG_DMA_ENGINE is not set + +# +# DMA Clients +# + +# +# DMA Devices +# + +# # Firmware Drivers # # CONFIG_EDD is not set @@ -1175,6 +1282,7 @@ CONFIG_FS_POSIX_ACL=y # CONFIG_MINIX_FS is not set # CONFIG_ROMFS_FS is not set CONFIG_INOTIFY=y +CONFIG_INOTIFY_USER=y # CONFIG_QUOTA is not set CONFIG_DNOTIFY=y CONFIG_AUTOFS_FS=y @@ -1331,7 +1439,8 @@ CONFIG_DETECT_SOFTLOCKUP=y CONFIG_DEBUG_FS=y # CONFIG_DEBUG_VM is not set # CONFIG_FRAME_POINTER is not set -# CONFIG_UNWIND_INFO is not set +CONFIG_UNWIND_INFO=y +CONFIG_STACK_UNWIND=y # CONFIG_FORCED_INLINING is not set # CONFIG_RCU_TORTURE_TEST is not set # CONFIG_DEBUG_RODATA is not set -- cgit v1.1 From 0f4fdb7fba0b3ec66df39d0e743e701404161fb8 Mon Sep 17 00:00:00 2001 From: ravikiran thirumalai Date: Mon, 26 Jun 2006 13:56:04 +0200 Subject: [PATCH] x86_64: Use local APIC ID from local APIC instead of CPUID vSMPowered systems use apic_cluster too. Forcing apic_physflat works on these systems too, but only if we change phys_pkg_id to use hard_smp_processor_id() instead of cpuid_ebx. I am guessing other multichassis cluster systems would need this too. 
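[For illustration only, not part of the patch: a minimal sketch of the two APIC ID sources this change switches between. The helper names are hypothetical; cpuid_ebx() is real, and hard_smp_processor_id() is assumed here to boil down to a read of the local APIC's ID register via the apic_read()/GET_APIC_ID() accessors of that era.]

/* Old behaviour: CPUID leaf 1, EBX bits 31:24 hold the *initial*
 * APIC ID, which on clustered systems such as vSMP can disagree
 * with the ID the local APIC actually uses. */
static unsigned int apicid_from_cpuid(void)
{
	return (cpuid_ebx(1) >> 24) & 0xFF;
}

/* New behaviour: ask the local APIC itself, which is roughly what
 * hard_smp_processor_id() amounts to. */
static unsigned int apicid_from_local_apic(void)
{
	return GET_APIC_ID(apic_read(APIC_ID));
}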
Signed-off-by: ravikiran thirumalai Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/genapic_flat.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/arch/x86_64/kernel/genapic_flat.c b/arch/x86_64/kernel/genapic_flat.c index 1a2ab825..c66ca7b 100644 --- a/arch/x86_64/kernel/genapic_flat.c +++ b/arch/x86_64/kernel/genapic_flat.c @@ -108,10 +108,7 @@ static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask) static unsigned int phys_pkg_id(int index_msb) { - u32 ebx; - - ebx = cpuid_ebx(1); - return ((ebx >> 24) & 0xFF) >> index_msb; + return hard_smp_processor_id() >> index_msb; } struct genapic apic_flat = { -- cgit v1.1 From 79121ea9f098934850347eeed3857ffd836810c3 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 26 Jun 2006 13:56:07 +0200 Subject: [PATCH] x86_64: Use __always_inline for __inline_memcpy Inspired by the i386 changes. Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-x86_64/string.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/asm-x86_64/string.h b/include/asm-x86_64/string.h index ee6bf27..9505d9f 100644 --- a/include/asm-x86_64/string.h +++ b/include/asm-x86_64/string.h @@ -6,7 +6,8 @@ /* Written 2002 by Andi Kleen */ /* Only used for special circumstances. Stolen from i386/string.h */ -static inline void * __inline_memcpy(void * to, const void * from, size_t n) +static __always_inline void * +__inline_memcpy(void * to, const void * from, size_t n) { unsigned long d0, d1, d2; __asm__ __volatile__( -- cgit v1.1 From faee9a5dc9d8399cc3b1b8e18b6d7ff7b17f1af1 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 26 Jun 2006 13:56:10 +0200 Subject: [PATCH] i386/x86-64: Use new official CPUID to get APICID/core split on AMD platforms Previously the apicid<->coreid split was computed based on the max number of cores. Now use a new CPUID leaf that AMD defined for this. On most systems right now it should be 0 and the old method will be used. Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/kernel/cpu/amd.c | 14 ++++++++------ arch/x86_64/kernel/setup.c | 20 +++++++++++++------- 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c index 786d1a5..b9c93d9 100644 --- a/arch/i386/kernel/cpu/amd.c +++ b/arch/i386/kernel/cpu/amd.c @@ -224,15 +224,17 @@ static void __init init_amd(struct cpuinfo_x86 *c) #ifdef CONFIG_X86_HT /* - * On a AMD dual core setup the lower bits of the APIC id - * distingush the cores. Assumes number of cores is a power - * of two. + * On a AMD multi core setup the lower bits of the APIC id + * distingush the cores. */ if (c->x86_max_cores > 1) { int cpu = smp_processor_id(); - unsigned bits = 0; - while ((1 << bits) < c->x86_max_cores) - bits++; + unsigned bits = (cpuid_ecx(0x80000008) >> 12) & 0xf; + + if (bits == 0) { + while ((1 << bits) < c->x86_max_cores) + bits++; + } cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1); phys_proc_id[cpu] >>= bits; printk(KERN_INFO "CPU %d(%d) -> Core %d\n", diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index fb850b5..1cb3e21 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -873,10 +873,18 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c) int node = 0; unsigned apicid = hard_smp_processor_id(); #endif + unsigned ecx = cpuid_ecx(0x80000008); - bits = 0; - while ((1 << bits) < c->x86_max_cores) - bits++; + c->x86_max_cores = (ecx & 0xff) + 1; + + /* CPU telling us the core id bits shift? 
*/ + bits = (ecx >> 12) & 0xF; + + /* Otherwise recompute */ + if (bits == 0) { + while ((1 << bits) < c->x86_max_cores) + bits++; + } /* Low order bits define the core id (index of core in socket) */ cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1); @@ -964,11 +972,9 @@ static int __init init_amd(struct cpuinfo_x86 *c) if (c->x86_power & (1<<8)) set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability); - if (c->extended_cpuid_level >= 0x80000008) { - c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1; - + /* Multi core CPU? */ + if (c->extended_cpuid_level >= 0x80000008) amd_detect_cmp(c); - } return r; } -- cgit v1.1 From 240cd6a80642da528bfa382ec2ae4e3cb8991ea7 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 26 Jun 2006 13:56:13 +0200 Subject: [PATCH] i386/x86-64: Emulate CPUID4 on AMD Intel systems report the cache level data from CPUID 4 in sysfs. Add a CPUID 4 emulation for AMD CPUs to report the same information for them. This allows programs to read this information in a uniform way. The AMD way to report this is less flexible so some assumptions are hardcoded (e.g. no L3) Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/kernel/cpu/amd.c | 2 + arch/i386/kernel/cpu/intel_cacheinfo.c | 113 +++++++++++++++++++++++++++++---- arch/x86_64/kernel/setup.c | 3 + include/asm-i386/processor.h | 1 + include/asm-x86_64/processor.h | 1 + 5 files changed, 107 insertions(+), 13 deletions(-) diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c index b9c93d9..fd0457c 100644 --- a/arch/i386/kernel/cpu/amd.c +++ b/arch/i386/kernel/cpu/amd.c @@ -242,6 +242,8 @@ static void __init init_amd(struct cpuinfo_x86 *c) } #endif + if (cpuid_eax(0x80000000) >= 0x80000006) + num_cache_leaves = 3; } static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size) diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c index c8547a6..6c37b4f 100644 --- a/arch/i386/kernel/cpu/intel_cacheinfo.c +++ b/arch/i386/kernel/cpu/intel_cacheinfo.c @@ -4,6 +4,7 @@ * Changes: * Venkatesh Pallipadi : Adding cache identification through cpuid(4) * Ashok Raj : Work with CPU hotplug infrastructure. + * Andi Kleen : CPUID4 emulation on AMD. */ #include @@ -130,25 +131,111 @@ struct _cpuid4_info { cpumask_t shared_cpu_map; }; -static unsigned short num_cache_leaves; +unsigned short num_cache_leaves; + +/* AMD doesn't have CPUID4. Emulate it here to report the same + information to the user. This makes some assumptions about the machine: + No L3, L2 not shared, no SMT etc. that is currently true on AMD CPUs. + + In theory the TLBs could be reported as fake type (they are in "dummy"). + Maybe later */ +union l1_cache { + struct { + unsigned line_size : 8; + unsigned lines_per_tag : 8; + unsigned assoc : 8; + unsigned size_in_kb : 8; + }; + unsigned val; +}; + +union l2_cache { + struct { + unsigned line_size : 8; + unsigned lines_per_tag : 4; + unsigned assoc : 4; + unsigned size_in_kb : 16; + }; + unsigned val; +}; + +static unsigned short assocs[] = { + [1] = 1, [2] = 2, [4] = 4, [6] = 8, + [8] = 16, + [0xf] = 0xffff // ?? 
+ }; +static unsigned char levels[] = { 1, 1, 2 }; +static unsigned char types[] = { 1, 2, 3 }; + +static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, + union _cpuid4_leaf_ebx *ebx, + union _cpuid4_leaf_ecx *ecx) +{ + unsigned dummy; + unsigned line_size, lines_per_tag, assoc, size_in_kb; + union l1_cache l1i, l1d; + union l2_cache l2; + + eax->full = 0; + ebx->full = 0; + ecx->full = 0; + + cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val); + cpuid(0x80000006, &dummy, &dummy, &l2.val, &dummy); + + if (leaf > 2 || !l1d.val || !l1i.val || !l2.val) + return; + + eax->split.is_self_initializing = 1; + eax->split.type = types[leaf]; + eax->split.level = levels[leaf]; + eax->split.num_threads_sharing = 0; + eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1; + + if (leaf <= 1) { + union l1_cache *l1 = leaf == 0 ? &l1d : &l1i; + assoc = l1->assoc; + line_size = l1->line_size; + lines_per_tag = l1->lines_per_tag; + size_in_kb = l1->size_in_kb; + } else { + assoc = l2.assoc; + line_size = l2.line_size; + lines_per_tag = l2.lines_per_tag; + /* cpu_data has errata corrections for K7 applied */ + size_in_kb = current_cpu_data.x86_cache_size; + } + + if (assoc == 0xf) + eax->split.is_fully_associative = 1; + ebx->split.coherency_line_size = line_size - 1; + ebx->split.ways_of_associativity = assocs[assoc] - 1; + ebx->split.physical_line_partition = lines_per_tag - 1; + ecx->split.number_of_sets = (size_in_kb * 1024) / line_size / + (ebx->split.ways_of_associativity + 1) - 1; +} static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) { - unsigned int eax, ebx, ecx, edx; - union _cpuid4_leaf_eax cache_eax; + union _cpuid4_leaf_eax eax; + union _cpuid4_leaf_ebx ebx; + union _cpuid4_leaf_ecx ecx; + unsigned edx; - cpuid_count(4, index, &eax, &ebx, &ecx, &edx); - cache_eax.full = eax; - if (cache_eax.split.type == CACHE_TYPE_NULL) + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + amd_cpuid4(index, &eax, &ebx, &ecx); + else + cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx); + if (eax.split.type == CACHE_TYPE_NULL) return -EIO; /* better error ? 
*/ - this_leaf->eax.full = eax; - this_leaf->ebx.full = ebx; - this_leaf->ecx.full = ecx; - this_leaf->size = (this_leaf->ecx.split.number_of_sets + 1) * - (this_leaf->ebx.split.coherency_line_size + 1) * - (this_leaf->ebx.split.physical_line_partition + 1) * - (this_leaf->ebx.split.ways_of_associativity + 1); + this_leaf->eax = eax; + this_leaf->ebx = ebx; + this_leaf->ecx = ecx; + this_leaf->size = (ecx.split.number_of_sets + 1) * + (ebx.split.coherency_line_size + 1) * + (ebx.split.physical_line_partition + 1) * + (ebx.split.ways_of_associativity + 1); return 0; } diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index 1cb3e21..4b7e022 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -976,6 +976,9 @@ static int __init init_amd(struct cpuinfo_x86 *c) if (c->extended_cpuid_level >= 0x80000008) amd_detect_cmp(c); + /* Fix cpuid4 emulation for more */ + num_cache_leaves = 3; + return r; } diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h index 0c83cf1..b796210 100644 --- a/include/asm-i386/processor.h +++ b/include/asm-i386/processor.h @@ -112,6 +112,7 @@ extern char ignore_fpu_irq; extern void identify_cpu(struct cpuinfo_x86 *); extern void print_cpu_info(struct cpuinfo_x86 *); extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); +extern unsigned short num_cache_leaves; #ifdef CONFIG_X86_HT extern void detect_ht(struct cpuinfo_x86 *c); diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h index 3061a38..e583f0d 100644 --- a/include/asm-x86_64/processor.h +++ b/include/asm-x86_64/processor.h @@ -96,6 +96,7 @@ extern char ignore_irq13; extern void identify_cpu(struct cpuinfo_x86 *); extern void print_cpu_info(struct cpuinfo_x86 *); extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); +extern unsigned short num_cache_leaves; /* * EFLAGS bits -- cgit v1.1 From d167a51877e94dda73dd656c51f363502309f713 Mon Sep 17 00:00:00 2001 From: Gerd Hoffmann Date: Mon, 26 Jun 2006 13:56:16 +0200 Subject: [PATCH] x86_64: x86_64 version of the smp alternative patch. Changes are largely identical to the i386 version: * alternative #defines are moved to the new alternative.h file. * one new elf section with pointers to the lock prefixes which can be nop'ed out for non-smp. * two new elf sections similar to the "classic" alternatives to replace SMP code with simpler UP code. * fixup headers to use alternative.h instead of defining their own LOCK / LOCK_PREFIX macros. The patch reuses the i386 version of the alternatives code to avoid code duplication. The code in alternatives.c was shuffled around a bit to reduce the number of #ifdefs needed. It also got some tweaks needed for x86_64 (vsyscall page handling) and new features (noreplacement option, which was x86_64 only up to now). Debug printks are changed from compile-time to runtime. 
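[For illustration only, not part of the patch: what a "fixed up" header looks like once its private LOCK macro is replaced by the shared LOCK_PREFIX; this sketch simply restates the atomic.h hunk below. On CONFIG_SMP kernels LOCK_PREFIX emits "lock; " and records the instruction's address in the new .smp_locks section, so the prefix can be patched to a nop when only one CPU is online; on UP kernels it expands to the empty string.]

/* From include/asm-x86_64/atomic.h after this patch: */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "incl %0"	/* "lock; incl" on SMP, patchable to plain "incl" */
		:"=m" (v->counter)
		:"m" (v->counter));
}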
Loosely based on an early version from Bastian Blank Signed-off-by: Gerd Hoffmann Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/kernel/alternative.c | 118 ++++++++++++++++++++++--------- arch/x86_64/kernel/Makefile | 4 +- arch/x86_64/kernel/module.c | 38 ++++++---- arch/x86_64/kernel/setup.c | 76 +------------------- arch/x86_64/kernel/smpboot.c | 4 ++ arch/x86_64/kernel/vmlinux.lds.S | 20 ++++++ arch/x86_64/mm/init.c | 27 ++++---- include/asm-i386/alternative.h | 2 + include/asm-x86_64/alternative.h | 146 +++++++++++++++++++++++++++++++++++++++ include/asm-x86_64/atomic.h | 42 +++++------ include/asm-x86_64/bitops.h | 7 +- include/asm-x86_64/cpufeature.h | 2 + include/asm-x86_64/mutex.h | 4 +- include/asm-x86_64/rwlock.h | 8 +-- include/asm-x86_64/semaphore.h | 8 +-- include/asm-x86_64/spinlock.h | 10 ++- include/asm-x86_64/system.h | 86 +---------------------- 17 files changed, 344 insertions(+), 258 deletions(-) create mode 100644 include/asm-x86_64/alternative.h diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c index 5cbd6f9..50eb0e0 100644 --- a/arch/i386/kernel/alternative.c +++ b/arch/i386/kernel/alternative.c @@ -4,27 +4,41 @@ #include #include -#define DEBUG 0 -#if DEBUG -# define DPRINTK(fmt, args...) printk(fmt, args) -#else -# define DPRINTK(fmt, args...) -#endif +static int no_replacement = 0; +static int smp_alt_once = 0; +static int debug_alternative = 0; + +static int __init noreplacement_setup(char *s) +{ + no_replacement = 1; + return 1; +} +static int __init bootonly(char *str) +{ + smp_alt_once = 1; + return 1; +} +static int __init debug_alt(char *str) +{ + debug_alternative = 1; + return 1; +} +__setup("noreplacement", noreplacement_setup); +__setup("smp-alt-boot", bootonly); +__setup("debug-alternative", debug_alt); + +#define DPRINTK(fmt, args...) if (debug_alternative) \ + printk(KERN_DEBUG fmt, args) + +#ifdef GENERIC_NOP1 /* Use inline assembly to define this because the nops are defined as inline assembly strings in the include files and we cannot get them easily into strings. 
*/ asm("\t.data\nintelnops: " GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 GENERIC_NOP7 GENERIC_NOP8); -asm("\t.data\nk8nops: " - K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6 - K8_NOP7 K8_NOP8); -asm("\t.data\nk7nops: " - K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6 - K7_NOP7 K7_NOP8); - -extern unsigned char intelnops[], k8nops[], k7nops[]; +extern unsigned char intelnops[]; static unsigned char *intel_nops[ASM_NOP_MAX+1] = { NULL, intelnops, @@ -36,6 +50,13 @@ static unsigned char *intel_nops[ASM_NOP_MAX+1] = { intelnops + 1 + 2 + 3 + 4 + 5 + 6, intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7, }; +#endif + +#ifdef K8_NOP1 +asm("\t.data\nk8nops: " + K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6 + K8_NOP7 K8_NOP8); +extern unsigned char k8nops[]; static unsigned char *k8_nops[ASM_NOP_MAX+1] = { NULL, k8nops, @@ -47,6 +68,13 @@ static unsigned char *k8_nops[ASM_NOP_MAX+1] = { k8nops + 1 + 2 + 3 + 4 + 5 + 6, k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, }; +#endif + +#ifdef K7_NOP1 +asm("\t.data\nk7nops: " + K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6 + K7_NOP7 K7_NOP8); +extern unsigned char k7nops[]; static unsigned char *k7_nops[ASM_NOP_MAX+1] = { NULL, k7nops, @@ -58,6 +86,18 @@ static unsigned char *k7_nops[ASM_NOP_MAX+1] = { k7nops + 1 + 2 + 3 + 4 + 5 + 6, k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, }; +#endif + +#ifdef CONFIG_X86_64 + +extern char __vsyscall_0; +static inline unsigned char** find_nop_table(void) +{ + return k8_nops; +} + +#else /* CONFIG_X86_64 */ + static struct nop { int cpuid; unsigned char **noptable; @@ -67,14 +107,6 @@ static struct nop { { -1, NULL } }; - -extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; -extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[]; -extern u8 *__smp_locks[], *__smp_locks_end[]; - -extern u8 __smp_alt_begin[], __smp_alt_end[]; - - static unsigned char** find_nop_table(void) { unsigned char **noptable = intel_nops; @@ -89,6 +121,14 @@ static unsigned char** find_nop_table(void) return noptable; } +#endif /* CONFIG_X86_64 */ + +extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; +extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[]; +extern u8 *__smp_locks[], *__smp_locks_end[]; + +extern u8 __smp_alt_begin[], __smp_alt_end[]; + /* Replace instructions with better alternatives for this CPU type. This runs before SMP is initialized to avoid SMP problems with self modifying code. This implies that assymetric systems where @@ -99,6 +139,7 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end) { unsigned char **noptable = find_nop_table(); struct alt_instr *a; + u8 *instr; int diff, i, k; DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end); @@ -106,7 +147,16 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end) BUG_ON(a->replacementlen > a->instrlen); if (!boot_cpu_has(a->cpuid)) continue; - memcpy(a->instr, a->replacement, a->replacementlen); + instr = a->instr; +#ifdef CONFIG_X86_64 + /* vsyscall code is not mapped yet. resolve it manually. 
*/ + if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) { + instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0)); + DPRINTK("%s: vsyscall fixup: %p => %p\n", + __FUNCTION__, a->instr, instr); + } +#endif + memcpy(instr, a->replacement, a->replacementlen); diff = a->instrlen - a->replacementlen; /* Pad the rest with nops */ for (i = a->replacementlen; diff > 0; diff -= k, i += k) { @@ -186,14 +236,6 @@ struct smp_alt_module { static LIST_HEAD(smp_alt_modules); static DEFINE_SPINLOCK(smp_alt); -static int smp_alt_once = 0; -static int __init bootonly(char *str) -{ - smp_alt_once = 1; - return 1; -} -__setup("smp-alt-boot", bootonly); - void alternatives_smp_module_add(struct module *mod, char *name, void *locks, void *locks_end, void *text, void *text_end) @@ -201,6 +243,9 @@ void alternatives_smp_module_add(struct module *mod, char *name, struct smp_alt_module *smp; unsigned long flags; + if (no_replacement) + return; + if (smp_alt_once) { if (boot_cpu_has(X86_FEATURE_UP)) alternatives_smp_unlock(locks, locks_end, @@ -235,7 +280,7 @@ void alternatives_smp_module_del(struct module *mod) struct smp_alt_module *item; unsigned long flags; - if (smp_alt_once) + if (no_replacement || smp_alt_once) return; spin_lock_irqsave(&smp_alt, flags); @@ -256,7 +301,7 @@ void alternatives_smp_switch(int smp) struct smp_alt_module *mod; unsigned long flags; - if (smp_alt_once) + if (no_replacement || smp_alt_once) return; BUG_ON(!smp && (num_online_cpus() > 1)); @@ -285,6 +330,13 @@ void alternatives_smp_switch(int smp) void __init alternative_instructions(void) { + if (no_replacement) { + printk(KERN_INFO "(SMP-)alternatives turned off\n"); + free_init_pages("SMP alternatives", + (unsigned long)__smp_alt_begin, + (unsigned long)__smp_alt_end); + return; + } apply_alternatives(__alt_instructions, __alt_instructions_end); /* switch to patch-once-at-boottime-only mode and free the diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile index 059c883..381bc6a 100644 --- a/arch/x86_64/kernel/Makefile +++ b/arch/x86_64/kernel/Makefile @@ -8,7 +8,7 @@ obj-y := process.o signal.o entry.o traps.o irq.o \ ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_x86_64.o \ x8664_ksyms.o i387.o syscall.o vsyscall.o \ setup64.o bootflag.o e820.o reboot.o quirks.o i8237.o \ - pci-dma.o pci-nommu.o + pci-dma.o pci-nommu.o alternative.o obj-$(CONFIG_X86_MCE) += mce.o obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o @@ -49,3 +49,5 @@ intel_cacheinfo-y += ../../i386/kernel/cpu/intel_cacheinfo.o quirks-y += ../../i386/kernel/quirks.o i8237-y += ../../i386/kernel/i8237.o msr-$(subst m,y,$(CONFIG_X86_MSR)) += ../../i386/kernel/msr.o +alternative-y += ../../i386/kernel/alternative.o + diff --git a/arch/x86_64/kernel/module.c b/arch/x86_64/kernel/module.c index bac195c..9d0958f 100644 --- a/arch/x86_64/kernel/module.c +++ b/arch/x86_64/kernel/module.c @@ -145,26 +145,38 @@ int apply_relocate(Elf_Shdr *sechdrs, return -ENOSYS; } -extern void apply_alternatives(void *start, void *end); - int module_finalize(const Elf_Ehdr *hdr, - const Elf_Shdr *sechdrs, - struct module *me) + const Elf_Shdr *sechdrs, + struct module *me) { - const Elf_Shdr *s; + const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL; char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; - /* look for .altinstructions to patch */ - for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { - void *seg; - if (strcmp(".altinstructions", secstrings + s->sh_name)) - continue; - seg = (void *)s->sh_addr; - 
apply_alternatives(seg, seg + s->sh_size); - } + for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { + if (!strcmp(".text", secstrings + s->sh_name)) + text = s; + if (!strcmp(".altinstructions", secstrings + s->sh_name)) + alt = s; + if (!strcmp(".smp_locks", secstrings + s->sh_name)) + locks= s; + } + + if (alt) { + /* patch .altinstructions */ + void *aseg = (void *)alt->sh_addr; + apply_alternatives(aseg, aseg + alt->sh_size); + } + if (locks && text) { + void *lseg = (void *)locks->sh_addr; + void *tseg = (void *)text->sh_addr; + alternatives_smp_module_add(me, me->name, + lseg, lseg + locks->sh_size, + tseg, tseg + text->sh_size); + } return 0; } void module_arch_cleanup(struct module *mod) { + alternatives_smp_module_del(mod); } diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index 4b7e022..64640c8 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -473,80 +473,6 @@ contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn) } #endif -/* Use inline assembly to define this because the nops are defined - as inline assembly strings in the include files and we cannot - get them easily into strings. */ -asm("\t.data\nk8nops: " - K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6 - K8_NOP7 K8_NOP8); - -extern unsigned char k8nops[]; -static unsigned char *k8_nops[ASM_NOP_MAX+1] = { - NULL, - k8nops, - k8nops + 1, - k8nops + 1 + 2, - k8nops + 1 + 2 + 3, - k8nops + 1 + 2 + 3 + 4, - k8nops + 1 + 2 + 3 + 4 + 5, - k8nops + 1 + 2 + 3 + 4 + 5 + 6, - k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, -}; - -extern char __vsyscall_0; - -/* Replace instructions with better alternatives for this CPU type. - - This runs before SMP is initialized to avoid SMP problems with - self modifying code. This implies that assymetric systems where - APs have less capabilities than the boot processor are not handled. - In this case boot with "noreplacement". */ -void apply_alternatives(void *start, void *end) -{ - struct alt_instr *a; - int diff, i, k; - for (a = start; (void *)a < end; a++) { - u8 *instr; - - if (!boot_cpu_has(a->cpuid)) - continue; - - BUG_ON(a->replacementlen > a->instrlen); - instr = a->instr; - /* vsyscall code is not mapped yet. resolve it manually. 
*/ - if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) - instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0)); - __inline_memcpy(instr, a->replacement, a->replacementlen); - diff = a->instrlen - a->replacementlen; - - /* Pad the rest with nops */ - for (i = a->replacementlen; diff > 0; diff -= k, i += k) { - k = diff; - if (k > ASM_NOP_MAX) - k = ASM_NOP_MAX; - __inline_memcpy(instr + i, k8_nops[k], k); - } - } -} - -static int no_replacement __initdata = 0; - -void __init alternative_instructions(void) -{ - extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; - if (no_replacement) - return; - apply_alternatives(__alt_instructions, __alt_instructions_end); -} - -static int __init noreplacement_setup(char *s) -{ - no_replacement = 1; - return 1; -} - -__setup("noreplacement", noreplacement_setup); - #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) struct edd edd; #ifdef CONFIG_EDD_MODULE @@ -1303,7 +1229,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) /* Other (Linux-defined) */ "cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL, "constant_tsc", NULL, NULL, - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + "up", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c index 71a7222..06535e7 100644 --- a/arch/x86_64/kernel/smpboot.c +++ b/arch/x86_64/kernel/smpboot.c @@ -797,6 +797,8 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid) } + alternatives_smp_switch(1); + c_idle.idle = get_idle_for_cpu(cpu); if (c_idle.idle) { @@ -1259,6 +1261,8 @@ void __cpu_die(unsigned int cpu) /* They ack this in play_dead by setting CPU_DEAD */ if (per_cpu(cpu_state, cpu) == CPU_DEAD) { printk ("CPU %d is now offline\n", cpu); + if (1 == num_online_cpus()) + alternatives_smp_switch(0); return; } msleep(100); diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S index b81f473..5968c24 100644 --- a/arch/x86_64/kernel/vmlinux.lds.S +++ b/arch/x86_64/kernel/vmlinux.lds.S @@ -131,6 +131,26 @@ SECTIONS *(.data.page_aligned) } + /* might get freed after init */ + . = ALIGN(4096); + __smp_alt_begin = .; + __smp_alt_instructions = .; + .smp_altinstructions : AT(ADDR(.smp_altinstructions) - LOAD_OFFSET) { + *(.smp_altinstructions) + } + __smp_alt_instructions_end = .; + . = ALIGN(8); + __smp_locks = .; + .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { + *(.smp_locks) + } + __smp_locks_end = .; + .smp_altinstr_replacement : AT(ADDR(.smp_altinstr_replacement) - LOAD_OFFSET) { + *(.smp_altinstr_replacement) + } + . = ALIGN(4096); + __smp_alt_end = .; + . 
= ALIGN(4096); /* Init code and data */ __init_begin = .; .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c index 4ba34e9..a70a8e3 100644 --- a/arch/x86_64/mm/init.c +++ b/arch/x86_64/mm/init.c @@ -644,20 +644,29 @@ void __init mem_init(void) #endif } -void free_initmem(void) +void free_init_pages(char *what, unsigned long begin, unsigned long end) { unsigned long addr; - addr = (unsigned long)(&__init_begin); - for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { + if (begin >= end) + return; + + printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); + for (addr = begin; addr < end; addr += PAGE_SIZE) { ClearPageReserved(virt_to_page(addr)); init_page_count(virt_to_page(addr)); memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE); free_page(addr); totalram_pages++; } +} + +void free_initmem(void) +{ memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin); - printk ("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10); + free_init_pages("unused kernel memory", + (unsigned long)(&__init_begin), + (unsigned long)(&__init_end)); } #ifdef CONFIG_DEBUG_RODATA @@ -686,15 +695,7 @@ void mark_rodata_ro(void) #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - if (start >= end) - return; - printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - totalram_pages++; - } + free_init_pages("initrd memory", start, end); } #endif diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h index d79e9ee..c61bd1a 100644 --- a/include/asm-i386/alternative.h +++ b/include/asm-i386/alternative.h @@ -5,6 +5,8 @@ #include +#include + struct alt_instr { u8 *instr; /* original instruction */ u8 *replacement; diff --git a/include/asm-x86_64/alternative.h b/include/asm-x86_64/alternative.h new file mode 100644 index 0000000..387c8f6 --- /dev/null +++ b/include/asm-x86_64/alternative.h @@ -0,0 +1,146 @@ +#ifndef _X86_64_ALTERNATIVE_H +#define _X86_64_ALTERNATIVE_H + +#ifdef __KERNEL__ + +#include + +struct alt_instr { + u8 *instr; /* original instruction */ + u8 *replacement; + u8 cpuid; /* cpuid bit set for replacement */ + u8 instrlen; /* length of original instruction */ + u8 replacementlen; /* length of new instruction, <= instrlen */ + u8 pad[5]; +}; + +extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); + +struct module; +extern void alternatives_smp_module_add(struct module *mod, char *name, + void *locks, void *locks_end, + void *text, void *text_end); +extern void alternatives_smp_module_del(struct module *mod); +extern void alternatives_smp_switch(int smp); + +#endif + +/* + * Alternative instructions for different CPU types or capabilities. + * + * This allows to use optimized instructions even on generic binary + * kernels. + * + * length of oldinstr must be longer or equal the length of newinstr + * It can be padded with nops as needed. + * + * For non barrier like inlines please define new variants + * without volatile and memory clobber. 
+ */ +#define alternative(oldinstr, newinstr, feature) \ + asm volatile ("661:\n\t" oldinstr "\n662:\n" \ + ".section .altinstructions,\"a\"\n" \ + " .align 8\n" \ + " .quad 661b\n" /* label */ \ + " .quad 663f\n" /* new instruction */ \ + " .byte %c0\n" /* feature bit */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .altinstr_replacement,\"ax\"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */ \ + ".previous" :: "i" (feature) : "memory") + +/* + * Alternative inline assembly with input. + * + * Pecularities: + * No memory clobber here. + * Argument numbers start with 1. + * Best is to use constraints that are fixed size (like (%1) ... "r") + * If you use variable sized constraints like "m" or "g" in the + * replacement make sure to pad to the worst case length. + */ +#define alternative_input(oldinstr, newinstr, feature, input...) \ + asm volatile ("661:\n\t" oldinstr "\n662:\n" \ + ".section .altinstructions,\"a\"\n" \ + " .align 8\n" \ + " .quad 661b\n" /* label */ \ + " .quad 663f\n" /* new instruction */ \ + " .byte %c0\n" /* feature bit */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .altinstr_replacement,\"ax\"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */ \ + ".previous" :: "i" (feature), ##input) + +/* Like alternative_input, but with a single output argument */ +#define alternative_io(oldinstr, newinstr, feature, output, input...) \ + asm volatile ("661:\n\t" oldinstr "\n662:\n" \ + ".section .altinstructions,\"a\"\n" \ + " .align 8\n" \ + " .quad 661b\n" /* label */ \ + " .quad 663f\n" /* new instruction */ \ + " .byte %c[feat]\n" /* feature bit */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .altinstr_replacement,\"ax\"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */ \ + ".previous" : output : [feat] "i" (feature), ##input) + +/* + * Alternative inline assembly for SMP. + * + * alternative_smp() takes two versions (SMP first, UP second) and is + * for more complex stuff such as spinlocks. + * + * The LOCK_PREFIX macro defined here replaces the LOCK and + * LOCK_PREFIX macros used everywhere in the source tree. + * + * SMP alternatives use the same data structures as the other + * alternatives and the X86_FEATURE_UP flag to indicate the case of a + * UP system running a SMP kernel. The existing apply_alternatives() + * works fine for patching a SMP kernel for UP. + * + * The SMP alternative tables can be kept after boot and contain both + * UP and SMP versions of the instructions to allow switching back to + * SMP at runtime, when hotplugging in a new CPU, which is especially + * useful in virtualized environments. + * + * The very common lock prefix is handled as special case in a + * separate table which is a pure address list without replacement ptr + * and size information. That keeps the table sizes small. + */ + +#ifdef CONFIG_SMP +#define alternative_smp(smpinstr, upinstr, args...) 
\ + asm volatile ("661:\n\t" smpinstr "\n662:\n" \ + ".section .smp_altinstructions,\"a\"\n" \ + " .align 8\n" \ + " .quad 661b\n" /* label */ \ + " .quad 663f\n" /* new instruction */ \ + " .byte 0x66\n" /* X86_FEATURE_UP */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .smp_altinstr_replacement,\"awx\"\n" \ + "663:\n\t" upinstr "\n" /* replacement */ \ + "664:\n\t.fill 662b-661b,1,0x42\n" /* space for original */ \ + ".previous" : args) + +#define LOCK_PREFIX \ + ".section .smp_locks,\"a\"\n" \ + " .align 8\n" \ + " .quad 661f\n" /* address */ \ + ".previous\n" \ + "661:\n\tlock; " + +#else /* ! CONFIG_SMP */ +#define alternative_smp(smpinstr, upinstr, args...) \ + asm volatile (upinstr : args) +#define LOCK_PREFIX "" +#endif + +#endif /* _X86_64_ALTERNATIVE_H */ diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h index bd3fa67..007e88d 100644 --- a/include/asm-x86_64/atomic.h +++ b/include/asm-x86_64/atomic.h @@ -1,7 +1,7 @@ #ifndef __ARCH_X86_64_ATOMIC__ #define __ARCH_X86_64_ATOMIC__ -#include +#include /* atomic_t should be 32 bit signed type */ @@ -52,7 +52,7 @@ typedef struct { volatile int counter; } atomic_t; static __inline__ void atomic_add(int i, atomic_t *v) { __asm__ __volatile__( - LOCK "addl %1,%0" + LOCK_PREFIX "addl %1,%0" :"=m" (v->counter) :"ir" (i), "m" (v->counter)); } @@ -67,7 +67,7 @@ static __inline__ void atomic_add(int i, atomic_t *v) static __inline__ void atomic_sub(int i, atomic_t *v) { __asm__ __volatile__( - LOCK "subl %1,%0" + LOCK_PREFIX "subl %1,%0" :"=m" (v->counter) :"ir" (i), "m" (v->counter)); } @@ -86,7 +86,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v) unsigned char c; __asm__ __volatile__( - LOCK "subl %2,%0; sete %1" + LOCK_PREFIX "subl %2,%0; sete %1" :"=m" (v->counter), "=qm" (c) :"ir" (i), "m" (v->counter) : "memory"); return c; @@ -101,7 +101,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v) static __inline__ void atomic_inc(atomic_t *v) { __asm__ __volatile__( - LOCK "incl %0" + LOCK_PREFIX "incl %0" :"=m" (v->counter) :"m" (v->counter)); } @@ -115,7 +115,7 @@ static __inline__ void atomic_inc(atomic_t *v) static __inline__ void atomic_dec(atomic_t *v) { __asm__ __volatile__( - LOCK "decl %0" + LOCK_PREFIX "decl %0" :"=m" (v->counter) :"m" (v->counter)); } @@ -133,7 +133,7 @@ static __inline__ int atomic_dec_and_test(atomic_t *v) unsigned char c; __asm__ __volatile__( - LOCK "decl %0; sete %1" + LOCK_PREFIX "decl %0; sete %1" :"=m" (v->counter), "=qm" (c) :"m" (v->counter) : "memory"); return c != 0; @@ -152,7 +152,7 @@ static __inline__ int atomic_inc_and_test(atomic_t *v) unsigned char c; __asm__ __volatile__( - LOCK "incl %0; sete %1" + LOCK_PREFIX "incl %0; sete %1" :"=m" (v->counter), "=qm" (c) :"m" (v->counter) : "memory"); return c != 0; @@ -172,7 +172,7 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v) unsigned char c; __asm__ __volatile__( - LOCK "addl %2,%0; sets %1" + LOCK_PREFIX "addl %2,%0; sets %1" :"=m" (v->counter), "=qm" (c) :"ir" (i), "m" (v->counter) : "memory"); return c; @@ -189,7 +189,7 @@ static __inline__ int atomic_add_return(int i, atomic_t *v) { int __i = i; __asm__ __volatile__( - LOCK "xaddl %0, %1;" + LOCK_PREFIX "xaddl %0, %1;" :"=r"(i) :"m"(v->counter), "0"(i)); return i + __i; @@ -237,7 +237,7 @@ typedef struct { volatile long counter; } atomic64_t; static __inline__ void atomic64_add(long i, atomic64_t *v) { __asm__ __volatile__( - LOCK "addq %1,%0" + LOCK_PREFIX 
"addq %1,%0" :"=m" (v->counter) :"ir" (i), "m" (v->counter)); } @@ -252,7 +252,7 @@ static __inline__ void atomic64_add(long i, atomic64_t *v) static __inline__ void atomic64_sub(long i, atomic64_t *v) { __asm__ __volatile__( - LOCK "subq %1,%0" + LOCK_PREFIX "subq %1,%0" :"=m" (v->counter) :"ir" (i), "m" (v->counter)); } @@ -271,7 +271,7 @@ static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v) unsigned char c; __asm__ __volatile__( - LOCK "subq %2,%0; sete %1" + LOCK_PREFIX "subq %2,%0; sete %1" :"=m" (v->counter), "=qm" (c) :"ir" (i), "m" (v->counter) : "memory"); return c; @@ -286,7 +286,7 @@ static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v) static __inline__ void atomic64_inc(atomic64_t *v) { __asm__ __volatile__( - LOCK "incq %0" + LOCK_PREFIX "incq %0" :"=m" (v->counter) :"m" (v->counter)); } @@ -300,7 +300,7 @@ static __inline__ void atomic64_inc(atomic64_t *v) static __inline__ void atomic64_dec(atomic64_t *v) { __asm__ __volatile__( - LOCK "decq %0" + LOCK_PREFIX "decq %0" :"=m" (v->counter) :"m" (v->counter)); } @@ -318,7 +318,7 @@ static __inline__ int atomic64_dec_and_test(atomic64_t *v) unsigned char c; __asm__ __volatile__( - LOCK "decq %0; sete %1" + LOCK_PREFIX "decq %0; sete %1" :"=m" (v->counter), "=qm" (c) :"m" (v->counter) : "memory"); return c != 0; @@ -337,7 +337,7 @@ static __inline__ int atomic64_inc_and_test(atomic64_t *v) unsigned char c; __asm__ __volatile__( - LOCK "incq %0; sete %1" + LOCK_PREFIX "incq %0; sete %1" :"=m" (v->counter), "=qm" (c) :"m" (v->counter) : "memory"); return c != 0; @@ -357,7 +357,7 @@ static __inline__ int atomic64_add_negative(long i, atomic64_t *v) unsigned char c; __asm__ __volatile__( - LOCK "addq %2,%0; sets %1" + LOCK_PREFIX "addq %2,%0; sets %1" :"=m" (v->counter), "=qm" (c) :"ir" (i), "m" (v->counter) : "memory"); return c; @@ -374,7 +374,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t *v) { long __i = i; __asm__ __volatile__( - LOCK "xaddq %0, %1;" + LOCK_PREFIX "xaddq %0, %1;" :"=r"(i) :"m"(v->counter), "0"(i)); return i + __i; @@ -418,11 +418,11 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v) /* These are x86-specific, used by some header files */ #define atomic_clear_mask(mask, addr) \ -__asm__ __volatile__(LOCK "andl %0,%1" \ +__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \ : : "r" (~(mask)),"m" (*addr) : "memory") #define atomic_set_mask(mask, addr) \ -__asm__ __volatile__(LOCK "orl %0,%1" \ +__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \ : : "r" ((unsigned)mask),"m" (*(addr)) : "memory") /* Atomic operations are already serializing on x86 */ diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h index e9bf933..f7ba57b 100644 --- a/include/asm-x86_64/bitops.h +++ b/include/asm-x86_64/bitops.h @@ -5,12 +5,7 @@ * Copyright 1992, Linus Torvalds. 
*/ - -#ifdef CONFIG_SMP -#define LOCK_PREFIX "lock ; " -#else -#define LOCK_PREFIX "" -#endif +#include #define ADDR (*(volatile long *) addr) diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h index 662964b..afc44e5 100644 --- a/include/asm-x86_64/cpufeature.h +++ b/include/asm-x86_64/cpufeature.h @@ -65,6 +65,8 @@ #define X86_FEATURE_CONSTANT_TSC (3*32+5) /* TSC runs at constant rate */ #define X86_FEATURE_SYNC_RDTSC (3*32+6) /* RDTSC syncs CPU core */ #define X86_FEATURE_FXSAVE_LEAK (3*32+7) /* FIP/FOP/FDP leaks through FXSAVE */ +#define X86_FEATURE_UP (3*32+8) /* SMP kernel running on UP */ + /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ diff --git a/include/asm-x86_64/mutex.h b/include/asm-x86_64/mutex.h index 11fbee2..06fab6d 100644 --- a/include/asm-x86_64/mutex.h +++ b/include/asm-x86_64/mutex.h @@ -24,7 +24,7 @@ do { \ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ \ __asm__ __volatile__( \ - LOCK " decl (%%rdi) \n" \ + LOCK_PREFIX " decl (%%rdi) \n" \ " js 2f \n" \ "1: \n" \ \ @@ -74,7 +74,7 @@ do { \ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ \ __asm__ __volatile__( \ - LOCK " incl (%%rdi) \n" \ + LOCK_PREFIX " incl (%%rdi) \n" \ " jle 2f \n" \ "1: \n" \ \ diff --git a/include/asm-x86_64/rwlock.h b/include/asm-x86_64/rwlock.h index 9942cc3..dea0e94 100644 --- a/include/asm-x86_64/rwlock.h +++ b/include/asm-x86_64/rwlock.h @@ -24,7 +24,7 @@ #define RW_LOCK_BIAS_STR "0x01000000" #define __build_read_lock_ptr(rw, helper) \ - asm volatile(LOCK "subl $1,(%0)\n\t" \ + asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t" \ "js 2f\n" \ "1:\n" \ LOCK_SECTION_START("") \ @@ -34,7 +34,7 @@ ::"a" (rw) : "memory") #define __build_read_lock_const(rw, helper) \ - asm volatile(LOCK "subl $1,%0\n\t" \ + asm volatile(LOCK_PREFIX "subl $1,%0\n\t" \ "js 2f\n" \ "1:\n" \ LOCK_SECTION_START("") \ @@ -54,7 +54,7 @@ } while (0) #define __build_write_lock_ptr(rw, helper) \ - asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \ + asm volatile(LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \ "jnz 2f\n" \ "1:\n" \ LOCK_SECTION_START("") \ @@ -64,7 +64,7 @@ ::"a" (rw) : "memory") #define __build_write_lock_const(rw, helper) \ - asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \ + asm volatile(LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \ "jnz 2f\n" \ "1:\n" \ LOCK_SECTION_START("") \ diff --git a/include/asm-x86_64/semaphore.h b/include/asm-x86_64/semaphore.h index a389aa6..064df08 100644 --- a/include/asm-x86_64/semaphore.h +++ b/include/asm-x86_64/semaphore.h @@ -106,7 +106,7 @@ static inline void down(struct semaphore * sem) __asm__ __volatile__( "# atomic down operation\n\t" - LOCK "decl %0\n\t" /* --sem->count */ + LOCK_PREFIX "decl %0\n\t" /* --sem->count */ "js 2f\n" "1:\n" LOCK_SECTION_START("") @@ -130,7 +130,7 @@ static inline int down_interruptible(struct semaphore * sem) __asm__ __volatile__( "# atomic interruptible down operation\n\t" - LOCK "decl %1\n\t" /* --sem->count */ + LOCK_PREFIX "decl %1\n\t" /* --sem->count */ "js 2f\n\t" "xorl %0,%0\n" "1:\n" @@ -154,7 +154,7 @@ static inline int down_trylock(struct semaphore * sem) __asm__ __volatile__( "# atomic interruptible down operation\n\t" - LOCK "decl %1\n\t" /* --sem->count */ + LOCK_PREFIX "decl %1\n\t" /* --sem->count */ "js 2f\n\t" "xorl %0,%0\n" "1:\n" @@ -178,7 +178,7 @@ static inline void up(struct semaphore * sem) { __asm__ __volatile__( "# atomic up operation\n\t" - LOCK "incl 
%0\n\t" /* ++sem->count */ + LOCK_PREFIX "incl %0\n\t" /* ++sem->count */ "jle 2f\n" "1:\n" LOCK_SECTION_START("") diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h index 5d8a5e3..8d34219 100644 --- a/include/asm-x86_64/spinlock.h +++ b/include/asm-x86_64/spinlock.h @@ -31,15 +31,19 @@ "jmp 1b\n" \ LOCK_SECTION_END +#define __raw_spin_lock_string_up \ + "\n\tdecl %0" + #define __raw_spin_unlock_string \ "movl $1,%0" \ :"=m" (lock->slock) : : "memory" static inline void __raw_spin_lock(raw_spinlock_t *lock) { - __asm__ __volatile__( - __raw_spin_lock_string - :"=m" (lock->slock) : : "memory"); + alternative_smp( + __raw_spin_lock_string, + __raw_spin_lock_string_up, + "=m" (lock->slock) : : "memory"); } #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h index f48e0da..68e559f 100644 --- a/include/asm-x86_64/system.h +++ b/include/asm-x86_64/system.h @@ -3,15 +3,10 @@ #include #include +#include #ifdef __KERNEL__ -#ifdef CONFIG_SMP -#define LOCK_PREFIX "lock ; " -#else -#define LOCK_PREFIX "" -#endif - #define __STR(x) #x #define STR(x) __STR(x) @@ -34,7 +29,7 @@ "thread_return:\n\t" \ "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \ "movq %P[thread_info](%%rsi),%%r8\n\t" \ - LOCK "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \ + LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \ "movq %%rax,%%rdi\n\t" \ "jc ret_from_fork\n\t" \ RESTORE_CONTEXT \ @@ -69,82 +64,6 @@ extern void load_gs_index(unsigned); ".previous" \ : :"r" (value), "r" (0)) -#ifdef __KERNEL__ -struct alt_instr { - __u8 *instr; /* original instruction */ - __u8 *replacement; - __u8 cpuid; /* cpuid bit set for replacement */ - __u8 instrlen; /* length of original instruction */ - __u8 replacementlen; /* length of new instruction, <= instrlen */ - __u8 pad[5]; -}; -#endif - -/* - * Alternative instructions for different CPU types or capabilities. - * - * This allows to use optimized instructions even on generic binary - * kernels. - * - * length of oldinstr must be longer or equal the length of newinstr - * It can be padded with nops as needed. - * - * For non barrier like inlines please define new variants - * without volatile and memory clobber. - */ -#define alternative(oldinstr, newinstr, feature) \ - asm volatile ("661:\n\t" oldinstr "\n662:\n" \ - ".section .altinstructions,\"a\"\n" \ - " .align 8\n" \ - " .quad 661b\n" /* label */ \ - " .quad 663f\n" /* new instruction */ \ - " .byte %c0\n" /* feature bit */ \ - " .byte 662b-661b\n" /* sourcelen */ \ - " .byte 664f-663f\n" /* replacementlen */ \ - ".previous\n" \ - ".section .altinstr_replacement,\"ax\"\n" \ - "663:\n\t" newinstr "\n664:\n" /* replacement */ \ - ".previous" :: "i" (feature) : "memory") - -/* - * Alternative inline assembly with input. - * - * Peculiarities: - * No memory clobber here. - * Argument numbers start with 1. - * Best is to use constraints that are fixed size (like (%1) ... "r") - * If you use variable sized constraints like "m" or "g" in the - * replacement make sure to pad to the worst case length. - */ -#define alternative_input(oldinstr, newinstr, feature, input...) 
\ - asm volatile ("661:\n\t" oldinstr "\n662:\n" \ - ".section .altinstructions,\"a\"\n" \ - " .align 8\n" \ - " .quad 661b\n" /* label */ \ - " .quad 663f\n" /* new instruction */ \ - " .byte %c0\n" /* feature bit */ \ - " .byte 662b-661b\n" /* sourcelen */ \ - " .byte 664f-663f\n" /* replacementlen */ \ - ".previous\n" \ - ".section .altinstr_replacement,\"ax\"\n" \ - "663:\n\t" newinstr "\n664:\n" /* replacement */ \ - ".previous" :: "i" (feature), ##input) - -/* Like alternative_input, but with a single output argument */ -#define alternative_io(oldinstr, newinstr, feature, output, input...) \ - asm volatile ("661:\n\t" oldinstr "\n662:\n" \ - ".section .altinstructions,\"a\"\n" \ - " .align 8\n" \ - " .quad 661b\n" /* label */ \ - " .quad 663f\n" /* new instruction */ \ - " .byte %c[feat]\n" /* feature bit */ \ - " .byte 662b-661b\n" /* sourcelen */ \ - " .byte 664f-663f\n" /* replacementlen */ \ - ".previous\n" \ - ".section .altinstr_replacement,\"ax\"\n" \ - "663:\n\t" newinstr "\n664:\n" /* replacement */ \ - ".previous" : output : [feat] "i" (feature), ##input) - /* * Clear and set 'TS' bit respectively */ @@ -366,5 +285,6 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, void cpu_idle_wait(void); extern unsigned long arch_align_stack(unsigned long sp); +extern void free_init_pages(char *what, unsigned long begin, unsigned long end); #endif -- cgit v1.1 From 9f2036f3e280ef1df072c5b2ca115a0e766c1d10 Mon Sep 17 00:00:00 2001 From: Jon Mason Date: Mon, 26 Jun 2006 13:56:19 +0200 Subject: [PATCH] x86_64: pci-dma.c clean-up - trivial Replace hard coded DMA masks with #defines from include/linux/dma-mapping.h Signed-off-by: Jon Mason Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/pci-dma.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c index a9275c95..117bceb 100644 --- a/arch/x86_64/kernel/pci-dma.c +++ b/arch/x86_64/kernel/pci-dma.c @@ -38,7 +38,7 @@ int force_iommu __read_mostly= 0; to i386. */ struct device fallback_dev = { .bus_id = "fallback device", - .coherent_dma_mask = 0xffffffff, + .coherent_dma_mask = DMA_32BIT_MASK, .dma_mask = &fallback_dev.coherent_dma_mask, }; @@ -77,7 +77,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, dev = &fallback_dev; dma_mask = dev->coherent_dma_mask; if (dma_mask == 0) - dma_mask = 0xffffffff; + dma_mask = DMA_32BIT_MASK; /* Don't invoke OOM killer */ gfp |= __GFP_NORETRY; @@ -90,7 +90,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, larger than 16MB and in this case we have a chance of finding fitting memory in the next higher zone first. If not retry with true GFP_DMA. -AK */ - if (dma_mask <= 0xffffffff) + if (dma_mask <= DMA_32BIT_MASK) gfp |= GFP_DMA32; again: @@ -111,7 +111,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, /* Don't use the 16MB ZONE_DMA unless absolutely needed. It's better to use remapping first. */ - if (dma_mask < 0xffffffff && !(gfp & GFP_DMA)) { + if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) { gfp = (gfp & ~GFP_DMA32) | GFP_DMA; goto again; } @@ -174,7 +174,7 @@ int dma_supported(struct device *dev, u64 mask) /* Copied from i386. Doesn't make much sense, because it will only work for pci_alloc_coherent. The caller just has to use GFP_DMA in this case. 
*/ - if (mask < 0x00ffffff) + if (mask < DMA_24BIT_MASK) return 0; /* Tell the device to use SAC when IOMMU force is on. This @@ -189,7 +189,7 @@ int dma_supported(struct device *dev, u64 mask) SAC for these. Assume all masks <= 40 bits are of this type. Normally this doesn't make any difference, but gives more gentle handling of IOMMU overflow. */ - if (iommu_sac_force && (mask >= 0xffffffffffULL)) { + if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) { printk(KERN_INFO "%s: Force SAC with mask %Lx\n", dev->bus_id,mask); return 0; } -- cgit v1.1 From 0c90bb87730613709e65c03c86d614e31a675d4f Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 26 Jun 2006 13:56:22 +0200 Subject: [PATCH] x86_64: Remove most of ia32_unistd.h It's only needed for three system calls, no need to maintain a full list forever. Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-x86_64/ia32_unistd.h | 308 +-------------------------------------- 1 file changed, 3 insertions(+), 305 deletions(-) diff --git a/include/asm-x86_64/ia32_unistd.h b/include/asm-x86_64/ia32_unistd.h index b4f4b17..5b52ce5 100644 --- a/include/asm-x86_64/ia32_unistd.h +++ b/include/asm-x86_64/ia32_unistd.h @@ -4,317 +4,15 @@ /* * This file contains the system call numbers of the ia32 port, * this is for the kernel only. + * Only add syscalls here where some part of the kernel needs to know + * the number. This should be otherwise in sync with asm-i386/unistd.h. -AK */ #define __NR_ia32_restart_syscall 0 #define __NR_ia32_exit 1 -#define __NR_ia32_fork 2 #define __NR_ia32_read 3 #define __NR_ia32_write 4 -#define __NR_ia32_open 5 -#define __NR_ia32_close 6 -#define __NR_ia32_waitpid 7 -#define __NR_ia32_creat 8 -#define __NR_ia32_link 9 -#define __NR_ia32_unlink 10 -#define __NR_ia32_execve 11 -#define __NR_ia32_chdir 12 -#define __NR_ia32_time 13 -#define __NR_ia32_mknod 14 -#define __NR_ia32_chmod 15 -#define __NR_ia32_lchown 16 -#define __NR_ia32_break 17 -#define __NR_ia32_oldstat 18 -#define __NR_ia32_lseek 19 -#define __NR_ia32_getpid 20 -#define __NR_ia32_mount 21 -#define __NR_ia32_umount 22 -#define __NR_ia32_setuid 23 -#define __NR_ia32_getuid 24 -#define __NR_ia32_stime 25 -#define __NR_ia32_ptrace 26 -#define __NR_ia32_alarm 27 -#define __NR_ia32_oldfstat 28 -#define __NR_ia32_pause 29 -#define __NR_ia32_utime 30 -#define __NR_ia32_stty 31 -#define __NR_ia32_gtty 32 -#define __NR_ia32_access 33 -#define __NR_ia32_nice 34 -#define __NR_ia32_ftime 35 -#define __NR_ia32_sync 36 -#define __NR_ia32_kill 37 -#define __NR_ia32_rename 38 -#define __NR_ia32_mkdir 39 -#define __NR_ia32_rmdir 40 -#define __NR_ia32_dup 41 -#define __NR_ia32_pipe 42 -#define __NR_ia32_times 43 -#define __NR_ia32_prof 44 -#define __NR_ia32_brk 45 -#define __NR_ia32_setgid 46 -#define __NR_ia32_getgid 47 -#define __NR_ia32_signal 48 -#define __NR_ia32_geteuid 49 -#define __NR_ia32_getegid 50 -#define __NR_ia32_acct 51 -#define __NR_ia32_umount2 52 -#define __NR_ia32_lock 53 -#define __NR_ia32_ioctl 54 -#define __NR_ia32_fcntl 55 -#define __NR_ia32_mpx 56 -#define __NR_ia32_setpgid 57 -#define __NR_ia32_ulimit 58 -#define __NR_ia32_oldolduname 59 -#define __NR_ia32_umask 60 -#define __NR_ia32_chroot 61 -#define __NR_ia32_ustat 62 -#define __NR_ia32_dup2 63 -#define __NR_ia32_getppid 64 -#define __NR_ia32_getpgrp 65 -#define __NR_ia32_setsid 66 -#define __NR_ia32_sigaction 67 -#define __NR_ia32_sgetmask 68 -#define __NR_ia32_ssetmask 69 -#define __NR_ia32_setreuid 70 -#define __NR_ia32_setregid 71 -#define __NR_ia32_sigsuspend 72 -#define 
__NR_ia32_sigpending 73 -#define __NR_ia32_sethostname 74 -#define __NR_ia32_setrlimit 75 -#define __NR_ia32_getrlimit 76 /* Back compatible 2Gig limited rlimit */ -#define __NR_ia32_getrusage 77 -#define __NR_ia32_gettimeofday 78 -#define __NR_ia32_settimeofday 79 -#define __NR_ia32_getgroups 80 -#define __NR_ia32_setgroups 81 -#define __NR_ia32_select 82 -#define __NR_ia32_symlink 83 -#define __NR_ia32_oldlstat 84 -#define __NR_ia32_readlink 85 -#define __NR_ia32_uselib 86 -#define __NR_ia32_swapon 87 -#define __NR_ia32_reboot 88 -#define __NR_ia32_readdir 89 -#define __NR_ia32_mmap 90 -#define __NR_ia32_munmap 91 -#define __NR_ia32_truncate 92 -#define __NR_ia32_ftruncate 93 -#define __NR_ia32_fchmod 94 -#define __NR_ia32_fchown 95 -#define __NR_ia32_getpriority 96 -#define __NR_ia32_setpriority 97 -#define __NR_ia32_profil 98 -#define __NR_ia32_statfs 99 -#define __NR_ia32_fstatfs 100 -#define __NR_ia32_ioperm 101 -#define __NR_ia32_socketcall 102 -#define __NR_ia32_syslog 103 -#define __NR_ia32_setitimer 104 -#define __NR_ia32_getitimer 105 -#define __NR_ia32_stat 106 -#define __NR_ia32_lstat 107 -#define __NR_ia32_fstat 108 -#define __NR_ia32_olduname 109 -#define __NR_ia32_iopl 110 -#define __NR_ia32_vhangup 111 -#define __NR_ia32_idle 112 -#define __NR_ia32_vm86old 113 -#define __NR_ia32_wait4 114 -#define __NR_ia32_swapoff 115 -#define __NR_ia32_sysinfo 116 -#define __NR_ia32_ipc 117 -#define __NR_ia32_fsync 118 -#define __NR_ia32_sigreturn 119 -#define __NR_ia32_clone 120 -#define __NR_ia32_setdomainname 121 -#define __NR_ia32_uname 122 -#define __NR_ia32_modify_ldt 123 -#define __NR_ia32_adjtimex 124 -#define __NR_ia32_mprotect 125 -#define __NR_ia32_sigprocmask 126 -#define __NR_ia32_create_module 127 -#define __NR_ia32_init_module 128 -#define __NR_ia32_delete_module 129 -#define __NR_ia32_get_kernel_syms 130 -#define __NR_ia32_quotactl 131 -#define __NR_ia32_getpgid 132 -#define __NR_ia32_fchdir 133 -#define __NR_ia32_bdflush 134 -#define __NR_ia32_sysfs 135 -#define __NR_ia32_personality 136 -#define __NR_ia32_afs_syscall 137 /* Syscall for Andrew File System */ -#define __NR_ia32_setfsuid 138 -#define __NR_ia32_setfsgid 139 -#define __NR_ia32__llseek 140 -#define __NR_ia32_getdents 141 -#define __NR_ia32__newselect 142 -#define __NR_ia32_flock 143 -#define __NR_ia32_msync 144 -#define __NR_ia32_readv 145 -#define __NR_ia32_writev 146 -#define __NR_ia32_getsid 147 -#define __NR_ia32_fdatasync 148 -#define __NR_ia32__sysctl 149 -#define __NR_ia32_mlock 150 -#define __NR_ia32_munlock 151 -#define __NR_ia32_mlockall 152 -#define __NR_ia32_munlockall 153 -#define __NR_ia32_sched_setparam 154 -#define __NR_ia32_sched_getparam 155 -#define __NR_ia32_sched_setscheduler 156 -#define __NR_ia32_sched_getscheduler 157 -#define __NR_ia32_sched_yield 158 -#define __NR_ia32_sched_get_priority_max 159 -#define __NR_ia32_sched_get_priority_min 160 -#define __NR_ia32_sched_rr_get_interval 161 -#define __NR_ia32_nanosleep 162 -#define __NR_ia32_mremap 163 -#define __NR_ia32_setresuid 164 -#define __NR_ia32_getresuid 165 -#define __NR_ia32_vm86 166 -#define __NR_ia32_query_module 167 -#define __NR_ia32_poll 168 -#define __NR_ia32_nfsservctl 169 -#define __NR_ia32_setresgid 170 -#define __NR_ia32_getresgid 171 -#define __NR_ia32_prctl 172 +#define __NR_ia32_sigreturn 119 #define __NR_ia32_rt_sigreturn 173 -#define __NR_ia32_rt_sigaction 174 -#define __NR_ia32_rt_sigprocmask 175 -#define __NR_ia32_rt_sigpending 176 -#define __NR_ia32_rt_sigtimedwait 177 -#define __NR_ia32_rt_sigqueueinfo 178 
-#define __NR_ia32_rt_sigsuspend 179 -#define __NR_ia32_pread 180 -#define __NR_ia32_pwrite 181 -#define __NR_ia32_chown 182 -#define __NR_ia32_getcwd 183 -#define __NR_ia32_capget 184 -#define __NR_ia32_capset 185 -#define __NR_ia32_sigaltstack 186 -#define __NR_ia32_sendfile 187 -#define __NR_ia32_getpmsg 188 /* some people actually want streams */ -#define __NR_ia32_putpmsg 189 /* some people actually want streams */ -#define __NR_ia32_vfork 190 -#define __NR_ia32_ugetrlimit 191 /* SuS compliant getrlimit */ -#define __NR_ia32_mmap2 192 -#define __NR_ia32_truncate64 193 -#define __NR_ia32_ftruncate64 194 -#define __NR_ia32_stat64 195 -#define __NR_ia32_lstat64 196 -#define __NR_ia32_fstat64 197 -#define __NR_ia32_lchown32 198 -#define __NR_ia32_getuid32 199 -#define __NR_ia32_getgid32 200 -#define __NR_ia32_geteuid32 201 -#define __NR_ia32_getegid32 202 -#define __NR_ia32_setreuid32 203 -#define __NR_ia32_setregid32 204 -#define __NR_ia32_getgroups32 205 -#define __NR_ia32_setgroups32 206 -#define __NR_ia32_fchown32 207 -#define __NR_ia32_setresuid32 208 -#define __NR_ia32_getresuid32 209 -#define __NR_ia32_setresgid32 210 -#define __NR_ia32_getresgid32 211 -#define __NR_ia32_chown32 212 -#define __NR_ia32_setuid32 213 -#define __NR_ia32_setgid32 214 -#define __NR_ia32_setfsuid32 215 -#define __NR_ia32_setfsgid32 216 -#define __NR_ia32_pivot_root 217 -#define __NR_ia32_mincore 218 -#define __NR_ia32_madvise 219 -#define __NR_ia32_madvise1 219 /* delete when C lib stub is removed */ -#define __NR_ia32_getdents64 220 -#define __NR_ia32_fcntl64 221 -#define __NR_ia32_tuxcall 222 -#define __NR_ia32_security 223 -#define __NR_ia32_gettid 224 -#define __NR_ia32_readahead 225 -#define __NR_ia32_setxattr 226 -#define __NR_ia32_lsetxattr 227 -#define __NR_ia32_fsetxattr 228 -#define __NR_ia32_getxattr 229 -#define __NR_ia32_lgetxattr 230 -#define __NR_ia32_fgetxattr 231 -#define __NR_ia32_listxattr 232 -#define __NR_ia32_llistxattr 233 -#define __NR_ia32_flistxattr 234 -#define __NR_ia32_removexattr 235 -#define __NR_ia32_lremovexattr 236 -#define __NR_ia32_fremovexattr 237 -#define __NR_ia32_tkill 238 -#define __NR_ia32_sendfile64 239 -#define __NR_ia32_futex 240 -#define __NR_ia32_sched_setaffinity 241 -#define __NR_ia32_sched_getaffinity 242 -#define __NR_ia32_set_thread_area 243 -#define __NR_ia32_get_thread_area 244 -#define __NR_ia32_io_setup 245 -#define __NR_ia32_io_destroy 246 -#define __NR_ia32_io_getevents 247 -#define __NR_ia32_io_submit 248 -#define __NR_ia32_io_cancel 249 -#define __NR_ia32_exit_group 252 -#define __NR_ia32_lookup_dcookie 253 -#define __NR_ia32_sys_epoll_create 254 -#define __NR_ia32_sys_epoll_ctl 255 -#define __NR_ia32_sys_epoll_wait 256 -#define __NR_ia32_remap_file_pages 257 -#define __NR_ia32_set_tid_address 258 -#define __NR_ia32_timer_create 259 -#define __NR_ia32_timer_settime (__NR_ia32_timer_create+1) -#define __NR_ia32_timer_gettime (__NR_ia32_timer_create+2) -#define __NR_ia32_timer_getoverrun (__NR_ia32_timer_create+3) -#define __NR_ia32_timer_delete (__NR_ia32_timer_create+4) -#define __NR_ia32_clock_settime (__NR_ia32_timer_create+5) -#define __NR_ia32_clock_gettime (__NR_ia32_timer_create+6) -#define __NR_ia32_clock_getres (__NR_ia32_timer_create+7) -#define __NR_ia32_clock_nanosleep (__NR_ia32_timer_create+8) -#define __NR_ia32_statfs64 268 -#define __NR_ia32_fstatfs64 269 -#define __NR_ia32_tgkill 270 -#define __NR_ia32_utimes 271 -#define __NR_ia32_fadvise64_64 272 -#define __NR_ia32_vserver 273 -#define __NR_ia32_mbind 274 -#define 
__NR_ia32_get_mempolicy 275 -#define __NR_ia32_set_mempolicy 276 -#define __NR_ia32_mq_open 277 -#define __NR_ia32_mq_unlink (__NR_ia32_mq_open+1) -#define __NR_ia32_mq_timedsend (__NR_ia32_mq_open+2) -#define __NR_ia32_mq_timedreceive (__NR_ia32_mq_open+3) -#define __NR_ia32_mq_notify (__NR_ia32_mq_open+4) -#define __NR_ia32_mq_getsetattr (__NR_ia32_mq_open+5) -#define __NR_ia32_kexec 283 -#define __NR_ia32_waitid 284 -/* #define __NR_sys_setaltroot 285 */ -#define __NR_ia32_add_key 286 -#define __NR_ia32_request_key 287 -#define __NR_ia32_keyctl 288 -#define __NR_ia32_ioprio_set 289 -#define __NR_ia32_ioprio_get 290 -#define __NR_ia32_inotify_init 291 -#define __NR_ia32_inotify_add_watch 292 -#define __NR_ia32_inotify_rm_watch 293 -#define __NR_ia32_migrate_pages 294 -#define __NR_ia32_openat 295 -#define __NR_ia32_mkdirat 296 -#define __NR_ia32_mknodat 297 -#define __NR_ia32_fchownat 298 -#define __NR_ia32_futimesat 299 -#define __NR_ia32_fstatat64 300 -#define __NR_ia32_unlinkat 301 -#define __NR_ia32_renameat 302 -#define __NR_ia32_linkat 303 -#define __NR_ia32_symlinkat 304 -#define __NR_ia32_readlinkat 305 -#define __NR_ia32_fchmodat 306 -#define __NR_ia32_faccessat 307 -#define __NR_ia32_pselect6 308 -#define __NR_ia32_ppoll 309 -#define __NR_ia32_unshare 310 #endif /* _ASM_X86_64_IA32_UNISTD_H_ */ -- cgit v1.1 From 3c5846470c30580bbcb4d5f2339f75a2c88cfe6e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 26 Jun 2006 13:56:25 +0200 Subject: [PATCH] x86_64: x86_64-enable-large-bzImage.patch enable large bzImages on x86_64. (fix is from x86's build.c) Using this patch i have successfully built and booted an allyesconfig 13MB+ bzImage on x86_64 too: $ size64 vmlinux text data bss dec hex filename 23444831 8202642 3439360 35086833 21761f1 vmlinux -rw-rw-r-- 1 mingo mingo 13121740 Apr 19 09:32 arch/x86_64/boot/bzImage Signed-off-by: Ingo Molnar Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/boot/tools/build.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/x86_64/boot/tools/build.c b/arch/x86_64/boot/tools/build.c index c44f5e2..eae8669 100644 --- a/arch/x86_64/boot/tools/build.c +++ b/arch/x86_64/boot/tools/build.c @@ -149,10 +149,8 @@ int main(int argc, char ** argv) sz = sb.st_size; fprintf (stderr, "System is %d kB\n", sz/1024); sys_size = (sz + 15) / 16; - /* 0x40000*16 = 4.0 MB, reasonable estimate for the current maximum */ - if (sys_size > (is_big_kernel ? 0x40000 : DEF_SYSSIZE)) - die("System is too big. Try using %smodules.", - is_big_kernel ? "" : "bzImage or "); + if (!is_big_kernel && sys_size > DEF_SYSSIZE) + die("System is too big. 
Try using bzImage or modules."); while (sz > 0) { int l, n; -- cgit v1.1 From ed0a893fc8866baf116323acbcd883a3cc4a36a3 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 26 Jun 2006 13:56:28 +0200 Subject: [PATCH] x86_64: Remove bogus comment in topology.h Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-x86_64/topology.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h index 80c4e44..d4009f8 100644 --- a/include/asm-x86_64/topology.h +++ b/include/asm-x86_64/topology.h @@ -7,8 +7,6 @@ #include #include -/* Map the K8 CPU local memory controllers to a simple 1:1 CPU:NODE topology */ - extern cpumask_t cpu_online_map; extern unsigned char cpu_to_node[]; -- cgit v1.1 From f5adc9c79d3a15478c0028139c54453ff3900488 Mon Sep 17 00:00:00 2001 From: Mike Waychison Date: Mon, 26 Jun 2006 13:56:31 +0200 Subject: [PATCH] x86_64: iommu_gart_bitmap search to cross next_bit Allow the search for a contiguous block of iommu space to cross the next_bit marker if we have already committed ourselves to flushing the gart. There shouldn't be any reason why we'd restrict the search. Signed-off-by: Mike Waychison Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/pci-gart.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c index 82a7c9b..e0198da 100644 --- a/arch/x86_64/kernel/pci-gart.c +++ b/arch/x86_64/kernel/pci-gart.c @@ -93,7 +93,7 @@ static unsigned long alloc_iommu(int size) offset = find_next_zero_string(iommu_gart_bitmap,next_bit,iommu_pages,size); if (offset == -1) { need_flush = 1; - offset = find_next_zero_string(iommu_gart_bitmap,0,next_bit,size); + offset = find_next_zero_string(iommu_gart_bitmap,0,iommu_pages,size); } if (offset != -1) { set_bit_string(iommu_gart_bitmap, offset, size); -- cgit v1.1 From f0f2d6536e3515b5b1b7ae97dc8f176860c8c2ce Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 26 Jun 2006 13:56:34 +0200 Subject: [PATCH] x86_64: Implement compat functions for PTRACE_SETSIGINFO/GETSIGINFO Previously we would just silently provide 64 bit services for this to 32bit processes. I also added all the other cases explicitly to the ptrace compat wrapper to make sure this doesn't happen again. And removed one bogus check in the wrapper. Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/ia32/ptrace32.c | 41 +++++++++++++++++++++++++++++++++++------ 1 file changed, 35 insertions(+), 6 deletions(-) diff --git a/arch/x86_64/ia32/ptrace32.c b/arch/x86_64/ia32/ptrace32.c index 23a4515..4defe63 100644 --- a/arch/x86_64/ia32/ptrace32.c +++ b/arch/x86_64/ia32/ptrace32.c @@ -27,6 +27,7 @@ #include #include #include +#include /* * Determines which flags the user has access to [1 = access, 0 = no access].
@@ -199,6 +200,24 @@ static int getreg32(struct task_struct *child, unsigned regno, u32 *val) #undef R32 +static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data) +{ + int ret; + compat_siginfo_t *si32 = (compat_siginfo_t *)compat_ptr(data); + siginfo_t *si = compat_alloc_user_space(sizeof(siginfo_t)); + if (request == PTRACE_SETSIGINFO) { + ret = copy_siginfo_from_user32(si, si32); + if (ret) + return ret; + } + ret = sys_ptrace(request, pid, addr, (unsigned long)si); + if (ret) + return ret; + if (request == PTRACE_GETSIGINFO) + ret = copy_siginfo_to_user32(si32, si); + return ret; +} + asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data) { struct task_struct *child; @@ -208,9 +227,19 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data) __u32 val; switch (request) { - default: + case PTRACE_TRACEME: + case PTRACE_ATTACH: + case PTRACE_KILL: + case PTRACE_CONT: + case PTRACE_SINGLESTEP: + case PTRACE_DETACH: + case PTRACE_SYSCALL: + case PTRACE_SETOPTIONS: return sys_ptrace(request, pid, addr, data); + default: + return -EINVAL; + case PTRACE_PEEKTEXT: case PTRACE_PEEKDATA: case PTRACE_POKEDATA: @@ -225,10 +254,11 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data) case PTRACE_GETFPXREGS: case PTRACE_GETEVENTMSG: break; - } - if (request == PTRACE_TRACEME) - return ptrace_traceme(); + case PTRACE_SETSIGINFO: + case PTRACE_GETSIGINFO: + return ptrace32_siginfo(request, pid, addr, data); + } child = ptrace_get_task_struct(pid); if (IS_ERR(child)) @@ -349,8 +379,7 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data) break; default: - ret = -EINVAL; - break; + BUG(); } out: -- cgit v1.1 From 7c2d9cd218916276e52a5dae827b84a159fe5c96 Mon Sep 17 00:00:00 2001 From: Jon Mason Date: Mon, 26 Jun 2006 13:56:37 +0200 Subject: [PATCH] x86_64: trivial gart clean-up A trivial change to have gart_unmap_sg call gart_unmap_single directly, instead of bouncing through the dma_unmap_single wrapper in dma-mapping.h. This change required moving gart_unmap_single above gart_unmap_sg, and under gart_map_single (which seems a more logical place than its current location IMHO). Signed-off-by: Jon Mason Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/pci-gart.c | 46 +++++++++++++++++++++---------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c index e0198da..ea8f404 100644 --- a/arch/x86_64/kernel/pci-gart.c +++ b/arch/x86_64/kernel/pci-gart.c @@ -289,6 +289,28 @@ dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir) } /* + * Free a DMA mapping. + */ +void gart_unmap_single(struct device *dev, dma_addr_t dma_addr, + size_t size, int direction) +{ + unsigned long iommu_page; + int npages; + int i; + + if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE || + dma_addr >= iommu_bus_base + iommu_size) + return; + iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT; + npages = to_pages(dma_addr, size); + for (i = 0; i < npages; i++) { + iommu_gatt_base[iommu_page + i] = gart_unmapped_entry; + CLEAR_LEAK(iommu_page + i); + } + free_iommu(iommu_page, npages); +} + +/* + * Wrapper for pci_unmap_single working with scatterlists.
*/ void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) @@ -299,7 +321,7 @@ void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int di struct scatterlist *s = &sg[i]; if (!s->dma_length || !s->length) break; - dma_unmap_single(dev, s->dma_address, s->dma_length, dir); + gart_unmap_single(dev, s->dma_address, s->dma_length, dir); } } @@ -458,28 +480,6 @@ error: return 0; } -/* - * Free a DMA mapping. - */ -void gart_unmap_single(struct device *dev, dma_addr_t dma_addr, - size_t size, int direction) -{ - unsigned long iommu_page; - int npages; - int i; - - if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE || - dma_addr >= iommu_bus_base + iommu_size) - return; - iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT; - npages = to_pages(dma_addr, size); - for (i = 0; i < npages; i++) { - iommu_gatt_base[iommu_page + i] = gart_unmapped_entry; - CLEAR_LEAK(iommu_page + i); - } - free_iommu(iommu_page, npages); -} - static int no_agp; static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size) -- cgit v1.1 From a32073bffc656ca4bde6002b6cf7c1a8e0e22712 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 26 Jun 2006 13:56:40 +0200 Subject: [PATCH] x86_64: Clean up and enhance K8 northbridge access code - Factor out the duplicated access/cache code into a single file * Shared between i386/x86-64. - Share flush code between AGP and IOMMU * Fix a bug: AGP didn't wait for the end of the flush before continuing - Drop 8 northbridges limit and allocate dynamically - Add lock to serialize AGP and IOMMU GART flushes - Add PCI ID for next AMD northbridge - Random related cleanups The old K8 NUMA discovery code is unchanged. New systems should all use SRAT for this. Cc: "Navin Boppuri" Cc: Dave Jones Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/Kconfig | 4 ++ arch/i386/kernel/Makefile | 4 ++ arch/x86_64/Kconfig | 4 ++ arch/x86_64/kernel/Makefile | 1 + arch/x86_64/kernel/aperture.c | 24 ++++----- arch/x86_64/kernel/k8.c | 118 ++++++++++++++++++++++++++++++++++++++++++ arch/x86_64/kernel/pci-gart.c | 93 ++++++++------------------------- arch/x86_64/pci/k8-bus.c | 10 ++-- drivers/char/agp/amd64-agp.c | 77 +++++++++------------------ include/asm-i386/k8.h | 1 + include/asm-x86_64/k8.h | 14 +++++ 11 files changed, 209 insertions(+), 141 deletions(-) create mode 100644 arch/x86_64/kernel/k8.c create mode 100644 include/asm-i386/k8.h create mode 100644 include/asm-x86_64/k8.h diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig index 1596101..2206bf6 100644 --- a/arch/i386/Kconfig +++ b/arch/i386/Kconfig @@ -1054,6 +1054,10 @@ config SCx200 This support is also available as a module. If compiled as a module, it will be called scx200.
+config K8_NB + def_bool y + depends on AGP_AMD64 + source "drivers/pcmcia/Kconfig" source "drivers/pci/hotplug/Kconfig" diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile index 96fb8a0..28b14d6 100644 --- a/arch/i386/kernel/Makefile +++ b/arch/i386/kernel/Makefile @@ -37,6 +37,7 @@ obj-$(CONFIG_EFI) += efi.o efi_stub.o obj-$(CONFIG_DOUBLEFAULT) += doublefault.o obj-$(CONFIG_VM86) += vm86.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o +obj-$(CONFIG_K8_NB) += k8.o EXTRA_AFLAGS := -traditional @@ -76,3 +77,6 @@ SYSCFLAGS_vsyscall-syms.o = -r $(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \ $(obj)/vsyscall-sysenter.o $(obj)/vsyscall-note.o FORCE $(call if_changed,syscall) + +k8-y += ../../x86_64/kernel/k8.o + diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig index af44130..fc75275 100644 --- a/arch/x86_64/Kconfig +++ b/arch/x86_64/Kconfig @@ -501,6 +501,10 @@ config REORDER optimal TLB usage. If you have pretty much any version of binutils, this can increase your kernel build time by roughly one minute. +config K8_NB + def_bool y + depends on AGP_AMD64 || GART_IOMMU || (PCI && NUMA) + endmenu # diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile index 381bc6a..f927d11 100644 --- a/arch/x86_64/kernel/Makefile +++ b/arch/x86_64/kernel/Makefile @@ -33,6 +33,7 @@ obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_X86_PM_TIMER) += pmtimer.o obj-$(CONFIG_X86_VSMP) += vsmp.o +obj-$(CONFIG_K8_NB) += k8.o obj-$(CONFIG_MODULES) += module.o diff --git a/arch/x86_64/kernel/aperture.c b/arch/x86_64/kernel/aperture.c index 70b9d21..a7ad03e 100644 --- a/arch/x86_64/kernel/aperture.c +++ b/arch/x86_64/kernel/aperture.c @@ -24,6 +24,7 @@ #include #include #include +#include int iommu_aperture; int iommu_aperture_disabled __initdata = 0; @@ -37,8 +38,6 @@ int fix_aperture __initdata = 1; /* This code runs before the PCI subsystem is initialized, so just access the northbridge directly. */ -#define NB_ID_3 (PCI_VENDOR_ID_AMD | (0x1103<<16)) - static u32 __init allocate_aperture(void) { pg_data_t *nd0 = NODE_DATA(0); @@ -68,20 +67,20 @@ static u32 __init allocate_aperture(void) return (u32)__pa(p); } -static int __init aperture_valid(char *name, u64 aper_base, u32 aper_size) +static int __init aperture_valid(u64 aper_base, u32 aper_size) { if (!aper_base) return 0; if (aper_size < 64*1024*1024) { - printk("Aperture from %s too small (%d MB)\n", name, aper_size>>20); + printk("Aperture too small (%d MB)\n", aper_size>>20); return 0; } if (aper_base + aper_size >= 0xffffffff) { - printk("Aperture from %s beyond 4GB. Ignoring.\n",name); + printk("Aperture beyond 4GB. Ignoring.\n"); return 0; } if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) { - printk("Aperture from %s pointing to e820 RAM. Ignoring.\n",name); + printk("Aperture pointing to e820 RAM. 
Ignoring.\n"); return 0; } return 1; @@ -140,7 +139,7 @@ static __u32 __init read_agp(int num, int slot, int func, int cap, u32 *order) printk("Aperture from AGP @ %Lx size %u MB (APSIZE %x)\n", aper, 32 << *order, apsizereg); - if (!aperture_valid("AGP bridge", aper, (32*1024*1024) << *order)) + if (!aperture_valid(aper, (32*1024*1024) << *order)) return 0; return (u32)aper; } @@ -208,9 +207,8 @@ void __init iommu_hole_init(void) fix = 0; for (num = 24; num < 32; num++) { - char name[30]; - if (read_pci_config(0, num, 3, 0x00) != NB_ID_3) - continue; + if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00))) + continue; iommu_aperture = 1; @@ -222,9 +220,7 @@ void __init iommu_hole_init(void) printk("CPU %d: aperture @ %Lx size %u MB\n", num-24, aper_base, aper_size>>20); - sprintf(name, "northbridge cpu %d", num-24); - - if (!aperture_valid(name, aper_base, aper_size)) { + if (!aperture_valid(aper_base, aper_size)) { fix = 1; break; } @@ -273,7 +269,7 @@ void __init iommu_hole_init(void) /* Fix up the north bridges */ for (num = 24; num < 32; num++) { - if (read_pci_config(0, num, 3, 0x00) != NB_ID_3) + if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00))) continue; /* Don't enable translation yet. That is done later. diff --git a/arch/x86_64/kernel/k8.c b/arch/x86_64/kernel/k8.c new file mode 100644 index 0000000..6416682 --- /dev/null +++ b/arch/x86_64/kernel/k8.c @@ -0,0 +1,118 @@ +/* + * Shared support code for AMD K8 northbridges and derivates. + * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2. + */ +#include +#include +#include +#include +#include +#include +#include + +int num_k8_northbridges; +EXPORT_SYMBOL(num_k8_northbridges); + +static u32 *flush_words; + +struct pci_device_id k8_nb_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) }, + {} +}; +EXPORT_SYMBOL(k8_nb_ids); + +struct pci_dev **k8_northbridges; +EXPORT_SYMBOL(k8_northbridges); + +static struct pci_dev *next_k8_northbridge(struct pci_dev *dev) +{ + do { + dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); + if (!dev) + break; + } while (!pci_match_id(&k8_nb_ids[0], dev)); + return dev; +} + +int cache_k8_northbridges(void) +{ + int i; + struct pci_dev *dev; + if (num_k8_northbridges) + return 0; + + num_k8_northbridges = 0; + dev = NULL; + while ((dev = next_k8_northbridge(dev)) != NULL) + num_k8_northbridges++; + + k8_northbridges = kmalloc((num_k8_northbridges + 1) * sizeof(void *), + GFP_KERNEL); + if (!k8_northbridges) + return -ENOMEM; + + flush_words = kmalloc(num_k8_northbridges * sizeof(u32), GFP_KERNEL); + if (!flush_words) { + kfree(k8_northbridges); + return -ENOMEM; + } + + dev = NULL; + i = 0; + while ((dev = next_k8_northbridge(dev)) != NULL) { + k8_northbridges[i++] = dev; + pci_read_config_dword(dev, 0x9c, &flush_words[i]); + } + k8_northbridges[i] = NULL; + return 0; +} +EXPORT_SYMBOL_GPL(cache_k8_northbridges); + +/* Ignores subdevice/subvendor but as far as I can figure out + they're useless anyways */ +int __init early_is_k8_nb(u32 device) +{ + struct pci_device_id *id; + u32 vendor = device & 0xffff; + device >>= 16; + for (id = k8_nb_ids; id->vendor; id++) + if (vendor == id->vendor && device == id->device) + return 1; + return 0; +} + +void k8_flush_garts(void) +{ + int flushed, i; + unsigned long flags; + static DEFINE_SPINLOCK(gart_lock); + + /* Avoid races between AGP and IOMMU. In theory it's not needed + but I'm not sure if the hardware won't lose flush requests + when another is pending. 
This whole thing is so expensive anyways + that it doesn't matter to serialize more. -AK */ + spin_lock_irqsave(&gart_lock, flags); + flushed = 0; + for (i = 0; i < num_k8_northbridges; i++) { + pci_write_config_dword(k8_northbridges[i], 0x9c, + flush_words[i]|1); + flushed++; + } + for (i = 0; i < num_k8_northbridges; i++) { + u32 w; + /* Make sure the hardware actually executed the flush*/ + for (;;) { + pci_read_config_dword(k8_northbridges[i], + 0x9c, &w); + if (!(w & 1)) + break; + cpu_relax(); + } + } + spin_unlock_irqrestore(&gart_lock, flags); + if (!flushed) + printk("nothing to flush?\n"); +} +EXPORT_SYMBOL_GPL(k8_flush_garts); + diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c index ea8f404..ded3af3 100644 --- a/arch/x86_64/kernel/pci-gart.c +++ b/arch/x86_64/kernel/pci-gart.c @@ -32,6 +32,7 @@ #include #include #include +#include unsigned long iommu_bus_base; /* GART remapping area (physical) */ static unsigned long iommu_size; /* size of remapping area bytes */ @@ -46,8 +47,6 @@ u32 *iommu_gatt_base; /* Remapping table */ also seen with Qlogic at least). */ int iommu_fullflush = 1; -#define MAX_NB 8 - /* Allocation bitmap for the remapping area */ static DEFINE_SPINLOCK(iommu_bitmap_lock); static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */ @@ -63,13 +62,6 @@ static u32 gart_unmapped_entry; #define to_pages(addr,size) \ (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT) -#define for_all_nb(dev) \ - dev = NULL; \ - while ((dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1103, dev))!=NULL) - -static struct pci_dev *northbridges[MAX_NB]; -static u32 northbridge_flush_word[MAX_NB]; - #define EMERGENCY_PAGES 32 /* = 128KB */ #ifdef CONFIG_AGP @@ -120,44 +112,17 @@ static void free_iommu(unsigned long offset, int size) /* * Use global flush state to avoid races with multiple flushers. */ -static void flush_gart(struct device *dev) +static void flush_gart(void) { unsigned long flags; - int flushed = 0; - int i, max; - spin_lock_irqsave(&iommu_bitmap_lock, flags); - if (need_flush) { - max = 0; - for (i = 0; i < MAX_NB; i++) { - if (!northbridges[i]) - continue; - pci_write_config_dword(northbridges[i], 0x9c, - northbridge_flush_word[i] | 1); - flushed++; - max = i; - } - for (i = 0; i <= max; i++) { - u32 w; - if (!northbridges[i]) - continue; - /* Make sure the hardware actually executed the flush. 
*/ - for (;;) { - pci_read_config_dword(northbridges[i], 0x9c, &w); - if (!(w & 1)) - break; - cpu_relax(); - } - } - if (!flushed) - printk("nothing to flush?\n"); + if (need_flush) { + k8_flush_garts(); need_flush = 0; } spin_unlock_irqrestore(&iommu_bitmap_lock, flags); } - - #ifdef CONFIG_IOMMU_LEAK #define SET_LEAK(x) if (iommu_leak_tab) \ @@ -266,7 +231,7 @@ static dma_addr_t gart_map_simple(struct device *dev, char *buf, size_t size, int dir) { dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir); - flush_gart(dev); + flush_gart(); return map; } @@ -351,7 +316,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg, s->dma_address = addr; s->dma_length = s->length; } - flush_gart(dev); + flush_gart(); return nents; } @@ -458,13 +423,13 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0) goto error; out++; - flush_gart(dev); + flush_gart(); if (out < nents) sg[out].dma_length = 0; return out; error: - flush_gart(NULL); + flush_gart(); gart_unmap_sg(dev, sg, nents, dir); /* When it was forced or merged try again in a dumb way */ if (force_iommu || iommu_merge) { @@ -532,10 +497,13 @@ static __init int init_k8_gatt(struct agp_kern_info *info) void *gatt; unsigned aper_base, new_aper_base; unsigned aper_size, gatt_size, new_aper_size; - + int i; + printk(KERN_INFO "PCI-DMA: Disabling AGP.\n"); aper_size = aper_base = info->aper_size = 0; - for_all_nb(dev) { + dev = NULL; + for (i = 0; i < num_k8_northbridges; i++) { + dev = k8_northbridges[i]; new_aper_base = read_aperture(dev, &new_aper_size); if (!new_aper_base) goto nommu; @@ -558,11 +526,12 @@ static __init int init_k8_gatt(struct agp_kern_info *info) panic("Cannot allocate GATT table"); memset(gatt, 0, gatt_size); agp_gatt_table = gatt; - - for_all_nb(dev) { + + for (i = 0; i < num_k8_northbridges; i++) { u32 ctl; u32 gatt_reg; + dev = k8_northbridges[i]; gatt_reg = __pa(gatt) >> 12; gatt_reg <<= 4; pci_write_config_dword(dev, 0x98, gatt_reg); @@ -573,7 +542,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info) pci_write_config_dword(dev, 0x90, ctl); } - flush_gart(NULL); + flush_gart(); printk("PCI-DMA: aperture base @ %x size %u KB\n",aper_base, aper_size>>10); return 0; @@ -607,10 +576,14 @@ static int __init pci_iommu_init(void) struct agp_kern_info info; unsigned long aper_size; unsigned long iommu_start; - struct pci_dev *dev; unsigned long scratch; long i; + if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) { + printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n"); + return -1; + } + #ifndef CONFIG_AGP_AMD64 no_agp = 1; #else @@ -637,14 +610,6 @@ static int __init pci_iommu_init(void) return -1; } - i = 0; - for_all_nb(dev) - i++; - if (i > MAX_NB) { - printk(KERN_ERR "PCI-GART: Too many northbridges (%ld). 
Disabled\n", i); - return -1; - } - printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n"); aper_size = info.aper_size * 1024 * 1024; iommu_size = check_iommu_size(info.aper_base, aper_size); @@ -707,20 +672,8 @@ static int __init pci_iommu_init(void) for (i = EMERGENCY_PAGES; i < iommu_pages; i++) iommu_gatt_base[i] = gart_unmapped_entry; - for_all_nb(dev) { - u32 flag; - int cpu = PCI_SLOT(dev->devfn) - 24; - if (cpu >= MAX_NB) - continue; - northbridges[cpu] = dev; - pci_read_config_dword(dev, 0x9c, &flag); /* cache flush word */ - northbridge_flush_word[cpu] = flag; - } - - flush_gart(NULL); - + flush_gart(); dma_ops = &gart_dma_ops; - return 0; } diff --git a/arch/x86_64/pci/k8-bus.c b/arch/x86_64/pci/k8-bus.c index 3acf60d..b50a7c7 100644 --- a/arch/x86_64/pci/k8-bus.c +++ b/arch/x86_64/pci/k8-bus.c @@ -2,6 +2,7 @@ #include #include #include +#include /* * This discovers the pcibus <-> node mapping on AMD K8. @@ -18,7 +19,6 @@ #define NR_LDT_BUS_NUMBER_REGISTERS 3 #define SECONDARY_LDT_BUS_NUMBER(dword) ((dword >> 8) & 0xFF) #define SUBORDINATE_LDT_BUS_NUMBER(dword) ((dword >> 16) & 0xFF) -#define PCI_DEVICE_ID_K8HTCONFIG 0x1100 /** * fill_mp_bus_to_cpumask() @@ -28,8 +28,7 @@ __init static int fill_mp_bus_to_cpumask(void) { - struct pci_dev *nb_dev = NULL; - int i, j; + int i, j, k; u32 ldtbus, nid; static int lbnr[3] = { LDT_BUS_NUMBER_REGISTER_0, @@ -37,8 +36,9 @@ fill_mp_bus_to_cpumask(void) LDT_BUS_NUMBER_REGISTER_2 }; - while ((nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, - PCI_DEVICE_ID_K8HTCONFIG, nb_dev))) { + cache_k8_northbridges(); + for (k = 0; k < num_k8_northbridges; k++) { + struct pci_dev *nb_dev = k8_northbridges[k]; pci_read_config_dword(nb_dev, NODE_ID_REGISTER, &nid); for (i = 0; i < NR_LDT_BUS_NUMBER_REGISTERS; i++) { diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index ac3c33a..229d015 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -15,11 +15,9 @@ #include #include #include /* PAGE_SIZE */ +#include #include "agp.h" -/* Will need to be increased if AMD64 ever goes >8-way. */ -#define MAX_HAMMER_GARTS 8 - /* PTE bits. */ #define GPTE_VALID 1 #define GPTE_COHERENT 2 @@ -53,28 +51,12 @@ #define ULI_X86_64_HTT_FEA_REG 0x50 #define ULI_X86_64_ENU_SCR_REG 0x54 -static int nr_garts; -static struct pci_dev * hammers[MAX_HAMMER_GARTS]; - static struct resource *aperture_resource; static int __initdata agp_try_unsupported = 1; -#define for_each_nb() for(gart_iterator=0;gart_iteratorgatt_table_real); - int gart_iterator; + int i; /* Configure AGP regs in each x86-64 host bridge. 
*/ - for_each_nb() { + for (i = 0; i < num_k8_northbridges; i++) { agp_bridge->gart_bus_addr = - amd64_configure(hammers[gart_iterator],gatt_bus); + amd64_configure(k8_northbridges[i], gatt_bus); } + k8_flush_garts(); return 0; } @@ -236,12 +216,13 @@ static int amd_8151_configure(void) static void amd64_cleanup(void) { u32 tmp; - int gart_iterator; - for_each_nb() { + int i; + for (i = 0; i < num_k8_northbridges; i++) { + struct pci_dev *dev = k8_northbridges[i]; /* disable gart translation */ - pci_read_config_dword (hammers[gart_iterator], AMD64_GARTAPERTURECTL, &tmp); + pci_read_config_dword (dev, AMD64_GARTAPERTURECTL, &tmp); tmp &= ~AMD64_GARTEN; - pci_write_config_dword (hammers[gart_iterator], AMD64_GARTAPERTURECTL, tmp); + pci_write_config_dword (dev, AMD64_GARTAPERTURECTL, tmp); } } @@ -361,17 +342,15 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr) { - struct pci_dev *loop_dev = NULL; - int i = 0; - - /* cache pci_devs of northbridges. */ - while ((loop_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1103, loop_dev)) - != NULL) { - if (i == MAX_HAMMER_GARTS) { - printk(KERN_ERR PFX "Too many northbridges for AGP\n"); - return -1; - } - if (fix_northbridge(loop_dev, pdev, cap_ptr) < 0) { + int i; + + if (cache_k8_northbridges() < 0) + return -ENODEV; + + i = 0; + for (i = 0; i < num_k8_northbridges; i++) { + struct pci_dev *dev = k8_northbridges[i]; + if (fix_northbridge(dev, pdev, cap_ptr) < 0) { printk(KERN_ERR PFX "No usable aperture found.\n"); #ifdef __x86_64__ /* should port this to i386 */ @@ -379,10 +358,8 @@ static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr) #endif return -1; } - hammers[i++] = loop_dev; } - nr_garts = i; - return i == 0 ? 
-1 : 0; + return 0; } /* Handle AMD 8151 quirks */ @@ -450,7 +427,7 @@ static int __devinit uli_agp_init(struct pci_dev *pdev) } /* shadow x86-64 registers into ULi registers */ - pci_read_config_dword (hammers[0], AMD64_GARTAPERTUREBASE, &httfea); + pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea); /* if x86-64 aperture base is beyond 4G, exit here */ if ((httfea & 0x7fff) >> (32 - 25)) @@ -513,7 +490,7 @@ static int __devinit nforce3_agp_init(struct pci_dev *pdev) pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp); /* shadow x86-64 registers into NVIDIA registers */ - pci_read_config_dword (hammers[0], AMD64_GARTAPERTUREBASE, &apbase); + pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &apbase); /* if x86-64 aperture base is beyond 4G, exit here */ if ( (apbase & 0x7fff) >> (32 - 25) ) { @@ -754,10 +731,6 @@ static struct pci_driver agp_amd64_pci_driver = { int __init agp_amd64_init(void) { int err = 0; - static struct pci_device_id amd64nb[] = { - { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) }, - { }, - }; if (agp_off) return -EINVAL; @@ -774,7 +747,7 @@ int __init agp_amd64_init(void) } /* First check that we have at least one AMD64 NB */ - if (!pci_dev_present(amd64nb)) + if (!pci_dev_present(k8_nb_ids)) return -ENODEV; /* Look for any AGP bridge */ diff --git a/include/asm-i386/k8.h b/include/asm-i386/k8.h new file mode 100644 index 0000000..dfd88a6 --- /dev/null +++ b/include/asm-i386/k8.h @@ -0,0 +1 @@ +#include diff --git a/include/asm-x86_64/k8.h b/include/asm-x86_64/k8.h new file mode 100644 index 0000000..699dd69 --- /dev/null +++ b/include/asm-x86_64/k8.h @@ -0,0 +1,14 @@ +#ifndef _ASM_K8_H +#define _ASM_K8_H 1 + +#include + +extern struct pci_device_id k8_nb_ids[]; + +extern int early_is_k8_nb(u32 value); +extern struct pci_dev **k8_northbridges; +extern int num_k8_northbridges; +extern int cache_k8_northbridges(void); +extern void k8_flush_garts(void); + +#endif -- cgit v1.1 From 0a1ad60d7a7eb433095bc1b2c8b475f3f278f61d Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 26 Jun 2006 13:56:43 +0200 Subject: [PATCH] x86_64: serialize assign_irq_vector() use of static variables Since assign_irq_vector() can be called at runtime, its access of static variables should be protected by a lock. 
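For illustration, a minimal sketch of the locking pattern the fix applies (hypothetical names; not part of the patch): every read-modify-write of the function-static state happens under one spinlock, and the result is copied into a local before the lock is dropped, which is why the patch below returns a local vector instead of rereading current_vector after unlocking.

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock */

	static int alloc_slot(void)
	{
		static int next_free;		/* shared static state */
		int slot;

		spin_lock(&demo_lock);
		slot = next_free++;		/* read-modify-write under the lock */
		spin_unlock(&demo_lock);

		return slot;			/* local copy: safe after unlock */
	}
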
Signed-off-by: Jan Beulich Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/kernel/io_apic.c | 24 ++++++++++++++++++------ arch/x86_64/kernel/io_apic.c | 18 ++++++++++++++---- 2 files changed, 32 insertions(+), 10 deletions(-) diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c index a62df3e..43ffdd0 100644 --- a/arch/i386/kernel/io_apic.c +++ b/arch/i386/kernel/io_apic.c @@ -50,6 +50,7 @@ atomic_t irq_mis_count; static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; static DEFINE_SPINLOCK(ioapic_lock); +static DEFINE_SPINLOCK(vector_lock); int timer_over_8254 __initdata = 1; @@ -1161,10 +1162,16 @@ u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 }; int assign_irq_vector(int irq) { static int current_vector = FIRST_DEVICE_VECTOR, offset = 0; + int vector; + + BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS); + + spin_lock(&vector_lock); - BUG_ON(irq >= NR_IRQ_VECTORS); - if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) + if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) { + spin_unlock(&vector_lock); return IO_APIC_VECTOR(irq); + } next: current_vector += 8; if (current_vector == SYSCALL_VECTOR) @@ -1172,16 +1179,21 @@ next: if (current_vector >= FIRST_SYSTEM_VECTOR) { offset++; - if (!(offset%8)) + if (!(offset%8)) { + spin_unlock(&vector_lock); return -ENOSPC; + } current_vector = FIRST_DEVICE_VECTOR + offset; } - vector_irq[current_vector] = irq; + vector = current_vector; + vector_irq[vector] = irq; if (irq != AUTO_ASSIGN) - IO_APIC_VECTOR(irq) = current_vector; + IO_APIC_VECTOR(irq) = vector; + + spin_unlock(&vector_lock); - return current_vector; + return vector; } static struct hw_interrupt_type ioapic_level_type; diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c index 9cc7031..062b10d 100644 --- a/arch/x86_64/kernel/io_apic.c +++ b/arch/x86_64/kernel/io_apic.c @@ -56,6 +56,7 @@ int timer_over_8254 __initdata = 0; static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; static DEFINE_SPINLOCK(ioapic_lock); +static DEFINE_SPINLOCK(vector_lock); /* * # of IRQ routing registers @@ -834,10 +835,16 @@ u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 }; int assign_irq_vector(int irq) { static int current_vector = FIRST_DEVICE_VECTOR, offset = 0; + int vector; BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS); - if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) + + spin_lock(&vector_lock); + + if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) { + spin_unlock(&vector_lock); return IO_APIC_VECTOR(irq); + } next: current_vector += 8; if (current_vector == IA32_SYSCALL_VECTOR) @@ -849,11 +856,14 @@ next: current_vector = FIRST_DEVICE_VECTOR + offset; } - vector_irq[current_vector] = irq; + vector = current_vector; + vector_irq[vector] = irq; if (irq != AUTO_ASSIGN) - IO_APIC_VECTOR(irq) = current_vector; + IO_APIC_VECTOR(irq) = vector; + + spin_unlock(&vector_lock); - return current_vector; + return vector; } extern void (*interrupt[NR_IRQS])(void); -- cgit v1.1 From 6ebcc00e95fa78218a048a88d8d5ee491aa7d6d0 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 26 Jun 2006 13:56:46 +0200 Subject: [PATCH] i386/x86-64: simplify ioapic_register_intr() Simplify (remove duplication of) code in ioapic_register_intr(). 
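A minimal sketch of the shape of this simplification, with hypothetical names (not part of the patch): when two branches differ only in which index they use, hoist the index choice into one variable and keep a single copy of the body.

	enum { EDGE, LEVEL };

	static void set_trigger(int modes[], int use_alt, int alt, int irq, int level)
	{
		int idx = use_alt ? alt : irq;		/* the only difference between the old branches */

		modes[idx] = level ? LEVEL : EDGE;	/* body now written once */
	}
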
Signed-off-by: Jan Beulich Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/kernel/io_apic.c | 23 ++++++++--------------- arch/x86_64/kernel/io_apic.c | 23 ++++++++--------------- 2 files changed, 16 insertions(+), 30 deletions(-) diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c index 43ffdd0..5a1a541 100644 --- a/arch/i386/kernel/io_apic.c +++ b/arch/i386/kernel/io_apic.c @@ -1205,21 +1205,14 @@ static struct hw_interrupt_type ioapic_edge_type; static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger) { - if (use_pci_vector() && !platform_legacy_irq(irq)) { - if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || - trigger == IOAPIC_LEVEL) - irq_desc[vector].handler = &ioapic_level_type; - else - irq_desc[vector].handler = &ioapic_edge_type; - set_intr_gate(vector, interrupt[vector]); - } else { - if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || - trigger == IOAPIC_LEVEL) - irq_desc[irq].handler = &ioapic_level_type; - else - irq_desc[irq].handler = &ioapic_edge_type; - set_intr_gate(vector, interrupt[irq]); - } + unsigned idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq; + + if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || + trigger == IOAPIC_LEVEL) + irq_desc[idx].handler = &ioapic_level_type; + else + irq_desc[idx].handler = &ioapic_edge_type; + set_intr_gate(vector, interrupt[idx]); } static void __init setup_IO_APIC_irqs(void) diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c index 062b10d..15b2f69 100644 --- a/arch/x86_64/kernel/io_apic.c +++ b/arch/x86_64/kernel/io_apic.c @@ -876,21 +876,14 @@ static struct hw_interrupt_type ioapic_edge_type; static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger) { - if (use_pci_vector() && !platform_legacy_irq(irq)) { - if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || - trigger == IOAPIC_LEVEL) - irq_desc[vector].handler = &ioapic_level_type; - else - irq_desc[vector].handler = &ioapic_edge_type; - set_intr_gate(vector, interrupt[vector]); - } else { - if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || - trigger == IOAPIC_LEVEL) - irq_desc[irq].handler = &ioapic_level_type; - else - irq_desc[irq].handler = &ioapic_edge_type; - set_intr_gate(vector, interrupt[irq]); - } + unsigned idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq; + + if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || + trigger == IOAPIC_LEVEL) + irq_desc[idx].handler = &ioapic_level_type; + else + irq_desc[idx].handler = &ioapic_edge_type; + set_intr_gate(vector, interrupt[idx]); } static void __init setup_IO_APIC_irqs(void) -- cgit v1.1 From f201611fcecdfa825471dc425ee007997228fae4 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 26 Jun 2006 13:56:49 +0200 Subject: [PATCH] x86_64: Use -ENODEV in IOMMU initialization Fix initcall at 0xffffffff806c5b89: pci_iommu_init+0x0/0x53c(): returned with error code -1 Return -ENODEV instead when the IOMMU is not used. 
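For illustration, a minimal sketch of the convention (hypothetical names; not part of the patch): an initcall that finds its hardware absent should return -ENODEV, which the initcall machinery treats as an expected "no such device" result, rather than a bare -1, which gets reported as in the message quoted above.

	#include <linux/init.h>
	#include <linux/errno.h>

	static int demo_hw_present(void)
	{
		return 0;			/* stand-in probe: pretend the device is absent */
	}

	static int __init demo_init(void)
	{
		if (!demo_hw_present())
			return -ENODEV;		/* quiet "not applicable" exit */
		return 0;
	}
	device_initcall(demo_init);
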
Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/pci-gart.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c index ded3af3..82a346e 100644 --- a/arch/x86_64/kernel/pci-gart.c +++ b/arch/x86_64/kernel/pci-gart.c @@ -581,7 +581,7 @@ static int __init pci_iommu_init(void) if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) { printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n"); - return -1; + return -ENODEV; } #ifndef CONFIG_AGP_AMD64 @@ -595,7 +595,7 @@ static int __init pci_iommu_init(void) #endif if (swiotlb) - return -1; + return -ENODEV; if (no_iommu || (!force_iommu && end_pfn <= MAX_DMA32_PFN) || @@ -607,7 +607,7 @@ static int __init pci_iommu_init(void) "but IOMMU not available.\n" KERN_ERR "WARNING 32bit PCI may malfunction.\n"); } - return -1; + return -ENODEV; } printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n"); -- cgit v1.1 From bebfa1013eee1d91b3242e5801cc8fbdfaf148ec Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 26 Jun 2006 13:56:52 +0200 Subject: [PATCH] x86_64: Add compat_printk and sysctl to turn off compat layer warnings Sometimes e.g. with crashme the compat layer warnings can be noisy. Add a way to turn them off by gating all output through compat_printk that checks a global sysctl. The default is not changed. Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/ia32/sys_ia32.c | 4 ++-- fs/compat.c | 16 +++++++++++++++- include/linux/compat.h | 2 ++ include/linux/sysctl.h | 2 ++ kernel/sysctl.c | 11 +++++++++++ 5 files changed, 32 insertions(+), 3 deletions(-) diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c index f182b20..ee30557 100644 --- a/arch/x86_64/ia32/sys_ia32.c +++ b/arch/x86_64/ia32/sys_ia32.c @@ -514,7 +514,7 @@ int sys32_ni_syscall(int call) static char lastcomm[sizeof(me->comm)]; if (strncmp(lastcomm, me->comm, sizeof(lastcomm))) { - printk(KERN_INFO "IA32 syscall %d from %s not implemented\n", + compat_printk(KERN_INFO "IA32 syscall %d from %s not implemented\n", call, me->comm); strncpy(lastcomm, me->comm, sizeof(lastcomm)); } @@ -916,7 +916,7 @@ long sys32_vm86_warning(void) struct task_struct *me = current; static char lastcomm[sizeof(me->comm)]; if (strncmp(lastcomm, me->comm, sizeof(lastcomm))) { - printk(KERN_INFO "%s: vm86 mode not supported on 64 bit kernel\n", + compat_printk(KERN_INFO "%s: vm86 mode not supported on 64 bit kernel\n", me->comm); strncpy(lastcomm, me->comm, sizeof(lastcomm)); } diff --git a/fs/compat.c b/fs/compat.c index 7e7e5bc..e31e9cf 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -55,6 +55,20 @@ extern void sigset_from_compat(sigset_t *set, compat_sigset_t *compat); +int compat_log = 1; + +int compat_printk(const char *fmt, ...) +{ + va_list ap; + int ret; + if (!compat_log) + return 0; + va_start(ap, fmt); + ret = vprintk(fmt, ap); + va_end(ap); + return ret; +} + /* * Not all architectures have sys_utime, so implement this in terms * of sys_utimes. 
@@ -359,7 +373,7 @@ static void compat_ioctl_error(struct file *filp, unsigned int fd, sprintf(buf,"'%c'", (cmd>>24) & 0x3f); if (!isprint(buf[1])) sprintf(buf, "%02x", buf[1]); - printk("ioctl32(%s:%d): Unknown cmd fd(%d) " + compat_printk("ioctl32(%s:%d): Unknown cmd fd(%d) " "cmd(%08x){%s} arg(%08x) on %s\n", current->comm, current->pid, (int)fd, (unsigned int)cmd, buf, diff --git a/include/linux/compat.h b/include/linux/compat.h index dda1697..9760753 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -226,5 +226,7 @@ static inline int compat_timespec_compare(struct compat_timespec *lhs, asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); +extern int compat_printk(const char *fmt, ...); + #endif /* CONFIG_COMPAT */ #endif /* _LINUX_COMPAT_H */ diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 6a60770..349ef90 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -148,9 +148,11 @@ enum KERN_SPIN_RETRY=70, /* int: number of spinlock retries */ KERN_ACPI_VIDEO_FLAGS=71, /* int: flags for setting up video after ACPI sleep */ KERN_IA64_UNALIGNED=72, /* int: ia64 unaligned userland trap enable */ + KERN_COMPAT_LOG=73, /* int: print compat layer messages */ }; + /* CTL_VM names: */ enum { diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 2c0e658..f1a4eb1 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -73,6 +73,7 @@ extern int printk_ratelimit_burst; extern int pid_max_min, pid_max_max; extern int sysctl_drop_caches; extern int percpu_pagelist_fraction; +extern int compat_log; #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) int unknown_nmi_panic; @@ -677,6 +678,16 @@ static ctl_table kern_table[] = { .proc_handler = &proc_dointvec, }, #endif +#ifdef CONFIG_COMPAT + { + .ctl_name = KERN_COMPAT_LOG, + .procname = "compat-log", + .data = &compat_log, + .maxlen = sizeof (int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, +#endif { .ctl_name = 0 } }; -- cgit v1.1 From 4b787e0b831c71c6b09902b66575dadb2260a7c8 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 26 Jun 2006 13:56:55 +0200 Subject: [PATCH] x86_64: add END()/ENDPROC() annotations to entry.S Since END()/ENDPROC() are now available, add respective annotations to x86_64's entry.S. This should help debugging activities. Signed-off-by: Jan Beulich Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/ia32/ia32entry.S | 4 ++++ arch/x86_64/kernel/entry.S | 45 +++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 48 insertions(+), 1 deletion(-) diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S index 4ec594a..8b502eb 100644 --- a/arch/x86_64/ia32/ia32entry.S +++ b/arch/x86_64/ia32/ia32entry.S @@ -155,6 +155,7 @@ sysenter_tracesys: .previous jmp sysenter_do_call CFI_ENDPROC +ENDPROC(ia32_sysenter_target) /* * 32bit SYSCALL instruction entry. 
@@ -249,6 +250,7 @@ cstar_tracesys: .quad 1b,ia32_badarg .previous jmp cstar_do_call +END(ia32_cstar_target) ia32_badarg: movq $-EFAULT,%rax @@ -314,6 +316,7 @@ ia32_tracesys: LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */ RESTORE_REST jmp ia32_do_syscall +END(ia32_syscall) ia32_badsys: movq $0,ORIG_RAX-ARGOFFSET(%rsp) @@ -370,6 +373,7 @@ ENTRY(ia32_ptregs_common) RESTORE_REST jmp ia32_sysret /* misbalances the return cache */ CFI_ENDPROC +END(ia32_ptregs_common) .section .rodata,"a" .align 8 diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S index 586b34c..9999d70 100644 --- a/arch/x86_64/kernel/entry.S +++ b/arch/x86_64/kernel/entry.S @@ -154,6 +154,7 @@ rff_trace: GET_THREAD_INFO(%rcx) jmp rff_action CFI_ENDPROC +END(ret_from_fork) /* * System call entry. Upto 6 arguments in registers are supported. @@ -285,6 +286,7 @@ tracesys: /* Use IRET because user could have changed frame */ jmp int_ret_from_sys_call CFI_ENDPROC +END(system_call) /* * Syscall return path ending with IRET. @@ -364,6 +366,7 @@ int_restore_rest: cli jmp int_with_check CFI_ENDPROC +END(int_ret_from_sys_call) /* * Certain special system calls that need to save a complete full stack frame. @@ -375,6 +378,7 @@ int_restore_rest: leaq \func(%rip),%rax leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */ jmp ptregscall_common +END(\label) .endm CFI_STARTPROC @@ -404,6 +408,7 @@ ENTRY(ptregscall_common) CFI_REL_OFFSET rip, 0 ret CFI_ENDPROC +END(ptregscall_common) ENTRY(stub_execve) CFI_STARTPROC @@ -418,6 +423,7 @@ ENTRY(stub_execve) RESTORE_REST jmp int_ret_from_sys_call CFI_ENDPROC +END(stub_execve) /* * sigreturn is special because it needs to restore all registers on return. @@ -435,6 +441,7 @@ ENTRY(stub_rt_sigreturn) RESTORE_REST jmp int_ret_from_sys_call CFI_ENDPROC +END(stub_rt_sigreturn) /* * initial frame state for interrupts and exceptions @@ -589,7 +596,9 @@ retint_kernel: call preempt_schedule_irq jmp exit_intr #endif + CFI_ENDPROC +END(common_interrupt) /* * APIC interrupts. 
@@ -605,17 +614,21 @@ retint_kernel: ENTRY(thermal_interrupt) apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt +END(thermal_interrupt) ENTRY(threshold_interrupt) apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt +END(threshold_interrupt) #ifdef CONFIG_SMP ENTRY(reschedule_interrupt) apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt +END(reschedule_interrupt) .macro INVALIDATE_ENTRY num ENTRY(invalidate_interrupt\num) apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt +END(invalidate_interrupt\num) .endm INVALIDATE_ENTRY 0 @@ -629,17 +642,21 @@ ENTRY(invalidate_interrupt\num) ENTRY(call_function_interrupt) apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt +END(call_function_interrupt) #endif #ifdef CONFIG_X86_LOCAL_APIC ENTRY(apic_timer_interrupt) apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt +END(apic_timer_interrupt) ENTRY(error_interrupt) apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt +END(error_interrupt) ENTRY(spurious_interrupt) apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt +END(spurious_interrupt) #endif /* @@ -777,6 +794,7 @@ error_kernelspace: cmpq $gs_change,RIP(%rsp) je error_swapgs jmp error_sti +END(error_entry) /* Reload gs selector with exception handling */ /* edi: new selector */ @@ -794,6 +812,7 @@ gs_change: CFI_ADJUST_CFA_OFFSET -8 ret CFI_ENDPROC +ENDPROC(load_gs_index) .section __ex_table,"a" .align 8 @@ -847,7 +866,7 @@ ENTRY(kernel_thread) UNFAKE_STACK_FRAME ret CFI_ENDPROC - +ENDPROC(kernel_thread) child_rip: /* @@ -860,6 +879,7 @@ child_rip: # exit xorl %edi, %edi call do_exit +ENDPROC(child_rip) /* * execve(). This function needs to use IRET, not SYSRET, to set up all state properly. @@ -889,19 +909,24 @@ ENTRY(execve) UNFAKE_STACK_FRAME ret CFI_ENDPROC +ENDPROC(execve) KPROBE_ENTRY(page_fault) errorentry do_page_fault +END(page_fault) .previous .text ENTRY(coprocessor_error) zeroentry do_coprocessor_error +END(coprocessor_error) ENTRY(simd_coprocessor_error) zeroentry do_simd_coprocessor_error +END(simd_coprocessor_error) ENTRY(device_not_available) zeroentry math_state_restore +END(device_not_available) /* runs on exception stack */ KPROBE_ENTRY(debug) @@ -911,6 +936,7 @@ KPROBE_ENTRY(debug) paranoidentry do_debug, DEBUG_STACK jmp paranoid_exit CFI_ENDPROC +END(debug) .previous .text /* runs on exception stack */ @@ -961,6 +987,7 @@ paranoid_schedule: cli jmp paranoid_userspace CFI_ENDPROC +END(nmi) .previous .text KPROBE_ENTRY(int3) @@ -970,22 +997,28 @@ KPROBE_ENTRY(int3) paranoidentry do_int3, DEBUG_STACK jmp paranoid_exit CFI_ENDPROC +END(int3) .previous .text ENTRY(overflow) zeroentry do_overflow +END(overflow) ENTRY(bounds) zeroentry do_bounds +END(bounds) ENTRY(invalid_op) zeroentry do_invalid_op +END(invalid_op) ENTRY(coprocessor_segment_overrun) zeroentry do_coprocessor_segment_overrun +END(coprocessor_segment_overrun) ENTRY(reserved) zeroentry do_reserved +END(reserved) /* runs on exception stack */ ENTRY(double_fault) @@ -993,12 +1026,15 @@ ENTRY(double_fault) paranoidentry do_double_fault jmp paranoid_exit CFI_ENDPROC +END(double_fault) ENTRY(invalid_TSS) errorentry do_invalid_TSS +END(invalid_TSS) ENTRY(segment_not_present) errorentry do_segment_not_present +END(segment_not_present) /* runs on exception stack */ ENTRY(stack_segment) @@ -1006,19 +1042,24 @@ ENTRY(stack_segment) paranoidentry do_stack_segment jmp paranoid_exit CFI_ENDPROC +END(stack_segment) KPROBE_ENTRY(general_protection) errorentry do_general_protection +END(general_protection) .previous .text 
ENTRY(alignment_check) errorentry do_alignment_check +END(alignment_check) ENTRY(divide_error) zeroentry do_divide_error +END(divide_error) ENTRY(spurious_interrupt_bug) zeroentry do_spurious_interrupt_bug +END(spurious_interrupt_bug) #ifdef CONFIG_X86_MCE /* runs on exception stack */ @@ -1029,6 +1070,7 @@ ENTRY(machine_check) paranoidentry do_machine_check jmp paranoid_exit CFI_ENDPROC +END(machine_check) #endif ENTRY(call_softirq) @@ -1046,3 +1088,4 @@ ENTRY(call_softirq) decl %gs:pda_irqcount ret CFI_ENDPROC +ENDPROC(call_softirq) -- cgit v1.1 From 14118c3cdd46d72e503ee2f727b11d881f72f755 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 26 Jun 2006 13:56:58 +0200 Subject: [PATCH] x86_64: fix unlikely profiling & vsyscalls on x86_64 fix unlikely profiling in vsyscalls ... Signed-off-by: Ingo Molnar Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/vsyscall.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c index 9468fb2..f603037 100644 --- a/arch/x86_64/kernel/vsyscall.c +++ b/arch/x86_64/kernel/vsyscall.c @@ -107,7 +107,7 @@ static __always_inline long time_syscall(long *t) int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz) { - if (unlikely(!__sysctl_vsyscall)) + if (!__sysctl_vsyscall) return gettimeofday(tv,tz); if (tv) do_vgettimeofday(tv); @@ -120,7 +120,7 @@ int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz) * unlikely */ time_t __vsyscall(1) vtime(time_t *t) { - if (unlikely(!__sysctl_vsyscall)) + if (!__sysctl_vsyscall) return time_syscall(t); else if (t) *t = __xtime.tv_sec; -- cgit v1.1 From 3e4ff115740c28dea463561aa1405a3c0de0d2d0 Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Mon, 26 Jun 2006 13:57:01 +0200 Subject: [PATCH] x86_64: nmi watchdog header cleanup Misc header cleanup for nmi watchdog. 
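For reference, the nmi.h interface that these watchdog declarations move next to is small. A minimal usage sketch (illustrative only, not part of this patch; the module wrapper, the counter, and all names here are made up for the example, while set_nmi_callback/unset_nmi_callback and the nmi_callback_t signature are taken from the header shown below):

#include <linux/module.h>
#include <asm/nmi.h>

static unsigned long nmi_count;		/* hypothetical counter */

/* nmi_callback_t handler; return 1 if the NMI was handled here */
static int count_nmi(struct pt_regs *regs, int cpu)
{
	nmi_count++;
	return 0;	/* not consumed, fall through to default handling */
}

static int __init nmi_count_init(void)
{
	set_nmi_callback(count_nmi);	/* only one callback may be set */
	return 0;
}

static void __exit nmi_count_exit(void)
{
	unset_nmi_callback();
}

module_init(nmi_count_init);
module_exit(nmi_count_exit);
MODULE_LICENSE("GPL");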
Signed-off-by: Don Zickus Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/kernel/apic.c | 1 + arch/i386/kernel/io_apic.c | 1 + arch/i386/kernel/nmi.c | 7 +------ arch/i386/kernel/smpboot.c | 1 + arch/i386/oprofile/op_model_athlon.c | 1 + arch/i386/oprofile/op_model_p4.c | 1 + arch/i386/oprofile/op_model_ppro.c | 1 + arch/x86_64/kernel/io_apic.c | 1 + arch/x86_64/kernel/nmi.c | 8 -------- include/asm-i386/apic.h | 12 ------------ include/asm-i386/nmi.h | 28 +++++++++++++++++++++------- include/asm-x86_64/apic.h | 16 ---------------- include/asm-x86_64/nmi.h | 30 ++++++++++++++++++++++++------ 13 files changed, 53 insertions(+), 55 deletions(-) diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c index 5ab59c1..5f197e1 100644 --- a/arch/i386/kernel/apic.c +++ b/arch/i386/kernel/apic.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c index 5a1a541..61317f4 100644 --- a/arch/i386/kernel/io_apic.c +++ b/arch/i386/kernel/io_apic.c @@ -38,6 +38,7 @@ #include #include #include +#include #include diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c index d43b498..bd38754 100644 --- a/arch/i386/kernel/nmi.c +++ b/arch/i386/kernel/nmi.c @@ -14,20 +14,15 @@ */ #include -#include #include -#include -#include #include -#include -#include #include #include #include #include +#include #include -#include #include #include "mach_traps.h" diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c index bd0ca5c..bce5470 100644 --- a/arch/i386/kernel/smpboot.c +++ b/arch/i386/kernel/smpboot.c @@ -52,6 +52,7 @@ #include #include #include +#include #include #include diff --git a/arch/i386/oprofile/op_model_athlon.c b/arch/i386/oprofile/op_model_athlon.c index 3ad9a72..693bdea 100644 --- a/arch/i386/oprofile/op_model_athlon.c +++ b/arch/i386/oprofile/op_model_athlon.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "op_x86_model.h" #include "op_counter.h" diff --git a/arch/i386/oprofile/op_model_p4.c b/arch/i386/oprofile/op_model_p4.c index ac8a066..7c61d35 100644 --- a/arch/i386/oprofile/op_model_p4.c +++ b/arch/i386/oprofile/op_model_p4.c @@ -14,6 +14,7 @@ #include #include #include +#include #include "op_x86_model.h" #include "op_counter.h" diff --git a/arch/i386/oprofile/op_model_ppro.c b/arch/i386/oprofile/op_model_ppro.c index d719015..5c3ab4b 100644 --- a/arch/i386/oprofile/op_model_ppro.c +++ b/arch/i386/oprofile/op_model_ppro.c @@ -14,6 +14,7 @@ #include #include #include +#include #include "op_x86_model.h" #include "op_counter.h" diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c index 15b2f69..38a3ff3 100644 --- a/arch/x86_64/kernel/io_apic.c +++ b/arch/x86_64/kernel/io_apic.c @@ -41,6 +41,7 @@ #include #include #include +#include #define __apicdebuginit __init diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c index 4e6357f..ab421e2 100644 --- a/arch/x86_64/kernel/nmi.c +++ b/arch/x86_64/kernel/nmi.c @@ -15,11 +15,7 @@ #include #include #include -#include -#include #include -#include -#include #include #include #include @@ -27,13 +23,9 @@ #include #include -#include -#include #include -#include #include #include -#include #include /* diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h index 1d8362c..2c1e371 100644 --- a/include/asm-i386/apic.h +++ b/include/asm-i386/apic.h @@ -111,24 +111,12 @@ extern void init_apic_mappings (void); extern void smp_local_timer_interrupt (struct pt_regs * regs); 
extern void setup_boot_APIC_clock (void); extern void setup_secondary_APIC_clock (void); -extern void setup_apic_nmi_watchdog (void); -extern int reserve_lapic_nmi(void); -extern void release_lapic_nmi(void); -extern void disable_timer_nmi_watchdog(void); -extern void enable_timer_nmi_watchdog(void); -extern void nmi_watchdog_tick (struct pt_regs * regs); extern int APIC_init_uniprocessor (void); extern void disable_APIC_timer(void); extern void enable_APIC_timer(void); extern void enable_NMI_through_LVT0 (void * dummy); -extern unsigned int nmi_watchdog; -#define NMI_NONE 0 -#define NMI_IO_APIC 1 -#define NMI_LOCAL_APIC 2 -#define NMI_INVALID 3 - extern int disable_timer_pin_1; void smp_send_timer_broadcast_ipi(struct pt_regs *regs); diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h index 21f1663..67d9947 100644 --- a/include/asm-i386/nmi.h +++ b/include/asm-i386/nmi.h @@ -5,24 +5,38 @@ #define ASM_NMI_H #include - + struct pt_regs; - + typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu); - -/** + +/** * set_nmi_callback * * Set a handler for an NMI. Only one handler may be * set. Return 1 if the NMI was handled. */ void set_nmi_callback(nmi_callback_t callback); - -/** + +/** * unset_nmi_callback * * Remove the handler previously set. */ void unset_nmi_callback(void); - + +extern void setup_apic_nmi_watchdog (void); +extern int reserve_lapic_nmi(void); +extern void release_lapic_nmi(void); +extern void disable_timer_nmi_watchdog(void); +extern void enable_timer_nmi_watchdog(void); +extern void nmi_watchdog_tick (struct pt_regs * regs); + +extern unsigned int nmi_watchdog; +#define NMI_DEFAULT -1 +#define NMI_NONE 0 +#define NMI_IO_APIC 1 +#define NMI_LOCAL_APIC 2 +#define NMI_INVALID 3 + #endif /* ASM_NMI_H */ diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h index a731be2..c9e6c25 100644 --- a/include/asm-x86_64/apic.h +++ b/include/asm-x86_64/apic.h @@ -79,27 +79,11 @@ extern void init_apic_mappings (void); extern void smp_local_timer_interrupt (struct pt_regs * regs); extern void setup_boot_APIC_clock (void); extern void setup_secondary_APIC_clock (void); -extern void setup_apic_nmi_watchdog (void); -extern int reserve_lapic_nmi(void); -extern void release_lapic_nmi(void); -extern void disable_timer_nmi_watchdog(void); -extern void enable_timer_nmi_watchdog(void); -extern void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason); extern int APIC_init_uniprocessor (void); extern void disable_APIC_timer(void); extern void enable_APIC_timer(void); extern void clustered_apic_check(void); -extern void nmi_watchdog_default(void); -extern int setup_nmi_watchdog(char *); - -extern unsigned int nmi_watchdog; -#define NMI_DEFAULT -1 -#define NMI_NONE 0 -#define NMI_IO_APIC 1 -#define NMI_LOCAL_APIC 2 -#define NMI_INVALID 3 - extern int disable_timer_pin_1; extern void setup_threshold_lvt(unsigned long lvt_off); diff --git a/include/asm-x86_64/nmi.h b/include/asm-x86_64/nmi.h index d3abfc6..efb45c8 100644 --- a/include/asm-x86_64/nmi.h +++ b/include/asm-x86_64/nmi.h @@ -5,26 +5,27 @@ #define ASM_NMI_H #include +#include struct pt_regs; - + typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu); - -/** + +/** * set_nmi_callback * * Set a handler for an NMI. Only one handler may be * set. Return 1 if the NMI was handled. */ void set_nmi_callback(nmi_callback_t callback); - -/** + +/** * unset_nmi_callback * * Remove the handler previously set. */ void unset_nmi_callback(void); - + #ifdef CONFIG_PM /** Replace the PM callback routine for NMI. 
*/ @@ -56,4 +57,21 @@ extern int unknown_nmi_panic; extern int check_nmi_watchdog(void); +extern void setup_apic_nmi_watchdog (void); +extern int reserve_lapic_nmi(void); +extern void release_lapic_nmi(void); +extern void disable_timer_nmi_watchdog(void); +extern void enable_timer_nmi_watchdog(void); +extern void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason); + +extern void nmi_watchdog_default(void); +extern int setup_nmi_watchdog(char *); + +extern unsigned int nmi_watchdog; +#define NMI_DEFAULT -1 +#define NMI_NONE 0 +#define NMI_IO_APIC 1 +#define NMI_LOCAL_APIC 2 +#define NMI_INVALID 3 + #endif /* ASM_NMI_H */ -- cgit v1.1 From 5c0f80fab3724aa44b3352d88155fe0eaae0e54a Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 26 Jun 2006 13:57:04 +0200 Subject: [PATCH] x86_64: Remove long obsolete CVS Early development of x86-64 Linux was in CVS, but that hasn't been the case for a long time now. Remove the obsolete $Id$s. Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/ia32/fpu32.c | 1 - arch/x86_64/ia32/ia32_signal.c | 2 -- arch/x86_64/ia32/ptrace32.c | 2 -- arch/x86_64/ia32/sys_ia32.c | 8 -------- arch/x86_64/kernel/aperture.c | 1 - arch/x86_64/kernel/e820.c | 1 - arch/x86_64/kernel/head64.c | 2 -- arch/x86_64/kernel/process.c | 1 - arch/x86_64/kernel/setup.c | 2 -- arch/x86_64/kernel/setup64.c | 1 - arch/x86_64/kernel/signal.c | 2 -- arch/x86_64/kernel/traps.c | 2 -- include/asm-x86_64/dma.h | 2 +- include/asm-x86_64/hw_irq.h | 2 -- 14 files changed, 1 insertion(+), 28 deletions(-) diff --git a/arch/x86_64/ia32/fpu32.c b/arch/x86_64/ia32/fpu32.c index 1c23095..2c8209a 100644 --- a/arch/x86_64/ia32/fpu32.c +++ b/arch/x86_64/ia32/fpu32.c @@ -2,7 +2,6 @@ * Copyright 2002 Andi Kleen, SuSE Labs. * FXSAVE<->i387 conversion support. Based on code by Gareth Hughes. * This is used for ptrace, signals and coredumps in 32bit emulation. - * $Id: fpu32.c,v 1.1 2002/03/21 14:16:32 ak Exp $ */ #include diff --git a/arch/x86_64/ia32/ia32_signal.c b/arch/x86_64/ia32/ia32_signal.c index e0a9243..25e5ca2 100644 --- a/arch/x86_64/ia32/ia32_signal.c +++ b/arch/x86_64/ia32/ia32_signal.c @@ -6,8 +6,6 @@ * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes * 2000-12-* x86-64 compatibility mode signal handling by Andi Kleen - * - * $Id: ia32_signal.c,v 1.22 2002/07/29 10:34:03 ak Exp $ */ #include diff --git a/arch/x86_64/ia32/ptrace32.c b/arch/x86_64/ia32/ptrace32.c index 4defe63..a590b7a 100644 --- a/arch/x86_64/ia32/ptrace32.c +++ b/arch/x86_64/ia32/ptrace32.c @@ -7,8 +7,6 @@ * * This allows to access 64bit processes too; but there is no way to see the extended * register contents. 
- * - * $Id: ptrace32.c,v 1.16 2003/03/14 16:06:35 ak Exp $ */ #include diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c index ee30557..afbebde 100644 --- a/arch/x86_64/ia32/sys_ia32.c +++ b/arch/x86_64/ia32/sys_ia32.c @@ -929,13 +929,5 @@ long sys32_lookup_dcookie(u32 addr_low, u32 addr_high, return sys_lookup_dcookie(((u64)addr_high << 32) | addr_low, buf, len); } -static int __init ia32_init (void) -{ - printk("IA32 emulation $Id: sys_ia32.c,v 1.32 2002/03/24 13:02:28 ak Exp $\n"); - return 0; -} - -__initcall(ia32_init); - extern unsigned long ia32_sys_call_table[]; EXPORT_SYMBOL(ia32_sys_call_table); diff --git a/arch/x86_64/kernel/aperture.c b/arch/x86_64/kernel/aperture.c index a7ad03e..9ea723b 100644 --- a/arch/x86_64/kernel/aperture.c +++ b/arch/x86_64/kernel/aperture.c @@ -8,7 +8,6 @@ * because only the bootmem allocator can allocate 32+MB. * * Copyright 2002 Andi Kleen, SuSE Labs. - * $Id: aperture.c,v 1.7 2003/08/01 03:36:18 ak Exp $ */ #include #include diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c index 1ef6028f..d8ea250 100644 --- a/arch/x86_64/kernel/e820.c +++ b/arch/x86_64/kernel/e820.c @@ -1,7 +1,6 @@ /* * Handle the memory map. * The functions here do the job until bootmem takes over. - * $Id: e820.c,v 1.4 2002/09/19 19:25:32 ak Exp $ * * Getting sanitize_e820_map() in sync with i386 version by applying change: * - Provisions for empty E820 memory regions (reported by certain BIOSes). diff --git a/arch/x86_64/kernel/head64.c b/arch/x86_64/kernel/head64.c index cea20a6..e6a71c9 100644 --- a/arch/x86_64/kernel/head64.c +++ b/arch/x86_64/kernel/head64.c @@ -2,8 +2,6 @@ * linux/arch/x86_64/kernel/head64.c -- prepare to run common code * * Copyright (C) 2000 Andrea Arcangeli SuSE - * - * $Id: head64.c,v 1.22 2001/07/06 14:28:20 ak Exp $ */ #include diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c index fb903e6..52c03f6 100644 --- a/arch/x86_64/kernel/process.c +++ b/arch/x86_64/kernel/process.c @@ -10,7 +10,6 @@ * Andi Kleen. * * CPU hotplug support - ashok.raj@intel.com - * $Id: process.c,v 1.38 2002/01/15 10:08:03 ak Exp $ */ /* diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index 64640c8..a91f1d9 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -5,8 +5,6 @@ * * Nov 2001 Dave Jones * Forked from i386 setup code. - * - * $Id$ */ /* diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c index 8a691fa..e5bf22a 100644 --- a/arch/x86_64/kernel/setup64.c +++ b/arch/x86_64/kernel/setup64.c @@ -3,7 +3,6 @@ * Copyright (C) 1995 Linus Torvalds * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen. * See setup.c for older changelog. 
- * $Id: setup64.c,v 1.12 2002/03/21 10:09:17 ak Exp $ */ #include #include diff --git a/arch/x86_64/kernel/signal.c b/arch/x86_64/kernel/signal.c index e5f5ce79..94b4b11 100644 --- a/arch/x86_64/kernel/signal.c +++ b/arch/x86_64/kernel/signal.c @@ -7,8 +7,6 @@ * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes * 2000-2002 x86-64 support by Andi Kleen - * - * $Id: signal.c,v 1.18 2001/10/17 22:30:37 ak Exp $ */ #include diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c index cea335e..9f8f1ef 100644 --- a/arch/x86_64/kernel/traps.c +++ b/arch/x86_64/kernel/traps.c @@ -6,8 +6,6 @@ * * Pentium III FXSR, SSE support * Gareth Hughes , May 2000 - * - * $Id: traps.c,v 1.36 2002/03/24 11:09:10 ak Exp $ */ /* diff --git a/include/asm-x86_64/dma.h b/include/asm-x86_64/dma.h index c556208..a37c16f 100644 --- a/include/asm-x86_64/dma.h +++ b/include/asm-x86_64/dma.h @@ -1,4 +1,4 @@ -/* $Id: dma.h,v 1.1.1.1 2001/04/19 20:00:38 ak Exp $ +/* * linux/include/asm/dma.h: Defines for using and allocating dma channels. * Written by Hennus Bergman, 1992. * High DMA channel support & info by Hannu Savolainen diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h index 3de96fd..1b2ac55 100644 --- a/include/asm-x86_64/hw_irq.h +++ b/include/asm-x86_64/hw_irq.h @@ -12,8 +12,6 @@ * * * hacked by Andi Kleen for x86-64. - * - * $Id: hw_irq.h,v 1.24 2001/09/14 20:55:03 vojtech Exp $ */ #ifndef __ASSEMBLY__ -- cgit v1.1 From 5282aab87a8be3f5e6c36a4c2ee4b71852def1bb Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 26 Jun 2006 13:57:07 +0200 Subject: [PATCH] x86_64: Remove ia32_sys_call_table export It was originally added for 2.4 oprofile, but 2.6 oprofile doesn't need it anymore. There should be no in-tree users left, and it doesn't make much sense to export the ia32 syscall table when the main syscall table is not exported. I think Adrian Bunk asked for its removal several times. Also included a hunk from Adrian to remove the .globl ia32_sys_call_table Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/ia32/ia32entry.S | 1 - arch/x86_64/ia32/sys_ia32.c | 2 -- 2 files changed, 3 deletions(-) diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S index 8b502eb..d073ca3 100644 --- a/arch/x86_64/ia32/ia32entry.S +++ b/arch/x86_64/ia32/ia32entry.S @@ -377,7 +377,6 @@ END(ia32_ptregs_common) .section .rodata,"a" .align 8 - .globl ia32_sys_call_table ia32_sys_call_table: .quad sys_restart_syscall .quad sys_exit diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c index afbebde..a1adaa4 100644 --- a/arch/x86_64/ia32/sys_ia32.c +++ b/arch/x86_64/ia32/sys_ia32.c @@ -929,5 +929,3 @@ long sys32_lookup_dcookie(u32 addr_low, u32 addr_high, return sys_lookup_dcookie(((u64)addr_high << 32) | addr_low, buf, len); } -extern unsigned long ia32_sys_call_table[]; -EXPORT_SYMBOL(ia32_sys_call_table); -- cgit v1.1 From 08cd36570e47176c7b6bd3e80125aa46c4638097 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 26 Jun 2006 13:57:10 +0200 Subject: [PATCH] x86_64: Optimize bitmap_weight for small bitmaps Use inline code for bitmaps <= BITS_PER_LONG in bitmap_weight. This gives _much_ better code.
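As a standalone illustration of the fast path (a userspace sketch; BITS_PER_LONG, BITMAP_LAST_WORD_MASK and hweight_long are re-created here to mirror the kernel definitions, with __builtin_popcountl standing in for the kernel's hweight implementation):

#include <stdio.h>

#define BITS_PER_LONG ((int)(8 * sizeof(unsigned long)))
/* mask covering the valid bits of the last word, as in the kernel macro */
#define BITMAP_LAST_WORD_MASK(nbits) \
	(((nbits) % BITS_PER_LONG) ? (1UL << ((nbits) % BITS_PER_LONG)) - 1 : ~0UL)

static inline int hweight_long(unsigned long w)
{
	return __builtin_popcountl(w);
}

static inline int bitmap_weight(const unsigned long *src, int nbits)
{
	/* with a compile-time constant nbits the mask folds to a constant,
	   so the whole call reduces to a single population count */
	if (nbits <= BITS_PER_LONG)
		return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
	return -1;	/* multi-word case elided in this sketch */
}

int main(void)
{
	unsigned long map = 0xf0f0UL;
	printf("%d\n", bitmap_weight(&map, 16));	/* prints 8 */
	return 0;
}

With nbits known at compile time the mask is evaluated by the compiler and the out-of-line __bitmap_weight() call disappears, which is where the better code comes from.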
Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/linux/bitmap.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index d9ed279..dcc5de7 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -24,6 +24,9 @@ * The available bitmap operations and their rough meaning in the * case that the bitmap is a single unsigned long are thus: * + * Note that nbits should be always a compile time evaluable constant. + * Otherwise many inlines will generate horrible code. + * * bitmap_zero(dst, nbits) *dst = 0UL * bitmap_fill(dst, nbits) *dst = ~0UL * bitmap_copy(dst, src, nbits) *dst = *src @@ -244,6 +247,8 @@ static inline int bitmap_full(const unsigned long *src, int nbits) static inline int bitmap_weight(const unsigned long *src, int nbits) { + if (nbits <= BITS_PER_LONG) + return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits)); return __bitmap_weight(src, nbits); } -- cgit v1.1 From 357c2b9056df447390b7df3e49960a4c609a89a9 Mon Sep 17 00:00:00 2001 From: Jon Mason Date: Mon, 26 Jun 2006 13:57:13 +0200 Subject: [PATCH] x86_64: remove unused gart header file include/asm-x86_64/gart-mapping.h is only ever used in arch/x86_64/kernel/setup.c and none of its contents are referenced. Looks to be leftover cruft not removed in the dma_ops patch. Signed-off-by: Jon Mason Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/setup.c | 1 - include/asm-x86_64/gart-mapping.h | 16 ---------------- 2 files changed, 17 deletions(-) delete mode 100644 include/asm-x86_64/gart-mapping.h diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index a91f1d9..9dadb9a 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -65,7 +65,6 @@ #include #include #include -#include #include /* diff --git a/include/asm-x86_64/gart-mapping.h b/include/asm-x86_64/gart-mapping.h deleted file mode 100644 index ada497b..0000000 --- a/include/asm-x86_64/gart-mapping.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef _X8664_GART_MAPPING_H -#define _X8664_GART_MAPPING_H 1 - -#include -#include - -struct device; - -extern void* -gart_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp); - -extern int -gart_dma_supported(struct device *hwdev, u64 mask); - -#endif /* _X8664_GART_MAPPING_H */ -- cgit v1.1 From 26a3c49cec96ffb9cfcc30dfa0cd05ccc25dcb3a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 26 Jun 2006 13:57:16 +0200 Subject: [PATCH] x86_64: fix vector_lock deadlock in io_apic.c Fix a potential deadlock scenario introduced by io_apic.c's new vector_lock on i386 and x86_64. Found by the locking correctness validator. The patch was boot-tested on x86. For details of the deadlock scenario, see the validator output: ====================================================== [ BUG: hard-safe -> hard-unsafe lock order detected! ] ------------------------------------------------------ idle/1 [HC0[0]:SC0[0]:HE0:SE1] is trying to acquire: (msi_lock){....}, at: [] startup_msi_irq_wo_maskbit+0x10/0x35 and this task is already holding: (&irq_desc[i].lock){++..}, at: [] probe_irq_on+0x36/0x107 which would create a new lock dependency: (&irq_desc[i].lock){++..} -> (msi_lock){....} but this new dependency connects a hard-irq-safe lock: (&irq_desc[i].lock){++..} ... which became hard-irq-safe at: [] lockdep_acquire+0x68/0x84 [] _spin_lock+0x21/0x2f [] __do_IRQ+0x3d/0x113 [] do_IRQ+0x8c/0xad to a hard-irq-unsafe lock: (vector_lock){--..} ... which became hard-irq-unsafe at: ... 
[] lockdep_acquire+0x68/0x84 [] _spin_lock+0x21/0x2f [] assign_irq_vector+0x34/0xc8 [] setup_IO_APIC+0x45a/0xcff [] smp_prepare_cpus+0x5ea/0x8aa [] init+0x32/0x2cb [] kernel_thread_helper+0x5/0xb which could potentially lead to deadlocks! other info that might help us debug this: 3 locks held by idle/1: #0: (port_mutex){--..}, at: [] uart_add_one_port+0x61/0x289 #1: (&state->mutex){--..}, at: [] uart_add_one_port+0x73/0x289 #2: (&irq_desc[i].lock){++..}, at: [] probe_irq_on+0x36/0x107 the hard-irq-safe lock's dependencies: -> (&irq_desc[i].lock){++..} ops: 9861 { initial-use at: [] lockdep_acquire+0x68/0x84 [] _spin_lock_irqsave+0x2a/0x3a [] setup_irq+0x9b/0x14d [] time_init_hook+0xf/0x11 [] time_init+0x44/0x46 [] start_kernel+0x191/0x38f [] 0xc0100210 in-hardirq-W at: [] lockdep_acquire+0x68/0x84 [] _spin_lock+0x21/0x2f [] __do_IRQ+0x3d/0x113 [] do_IRQ+0x8c/0xad in-softirq-W at: [] lockdep_acquire+0x68/0x84 [] _spin_lock+0x21/0x2f [] __do_IRQ+0x3d/0x113 [] do_IRQ+0x8c/0xad } ... key at: [] irq_desc_lock_type+0x0/0x20 -> (i8259A_lock){++..} ops: 5149 { initial-use at: [] lockdep_acquire+0x68/0x84 [] _spin_lock_irqsave+0x2a/0x3a [] init_8259A+0x11/0x8f [] init_ISA_irqs+0x12/0x4d [] pre_intr_init_hook+0x8/0xa [] init_IRQ+0xe/0x65 [] start_kernel+0x178/0x38f [] 0xc0100210 in-hardirq-W at: [] lockdep_acquire+0x68/0x84 [] _spin_lock_irqsave+0x2a/0x3a [] mask_and_ack_8259A+0x1b/0xcc [] __do_IRQ+0x4f/0x113 [] do_IRQ+0x8c/0xad in-softirq-W at: [] lockdep_acquire+0x68/0x84 [] _spin_lock_irqsave+0x2a/0x3a [] mask_and_ack_8259A+0x1b/0xcc [] __do_IRQ+0x4f/0x113 [] do_IRQ+0x8c/0xad } ... key at: [] i8259A_lock+0x14/0x40 ... acquired at: [] lockdep_acquire+0x68/0x84 [] _spin_lock_irqsave+0x2a/0x3a [] enable_8259A_irq+0x10/0x47 [] startup_8259A_irq+0x8/0xc [] setup_irq+0xe4/0x14d [] time_init_hook+0xf/0x11 [] time_init+0x44/0x46 [] start_kernel+0x191/0x38f [] 0xc0100210 -> (ioapic_lock){+...} ops: 122 { initial-use at: [] lockdep_acquire+0x68/0x84 [] _spin_lock_irqsave+0x2a/0x3a [] io_apic_get_version+0x16/0x55 [] mp_register_ioapic+0xc6/0x127 [] acpi_parse_ioapic+0x2d/0x39 [] acpi_table_parse_madt_family+0xb4/0x100 [] acpi_table_parse_madt+0x16/0x18 [] acpi_boot_init+0x132/0x251 [] setup_arch+0xd36/0xe37 [] start_kernel+0x66/0x38f [] 0xc0100210 in-hardirq-W at: [] lockdep_acquire+0x68/0x84 [] _spin_lock_irqsave+0x2a/0x3a [] mask_IO_APIC_irq+0x11/0x31 [] ack_edge_ioapic_vector+0x31/0x41 [] __do_IRQ+0x4f/0x113 [] do_IRQ+0x8c/0xad } ... key at: [] ioapic_lock+0x14/0x3c -> (i8259A_lock){++..} ops: 5149 { initial-use at: [] lockdep_acquire+0x68/0x84 [] _spin_lock_irqsave+0x2a/0x3a [] init_8259A+0x11/0x8f [] init_ISA_irqs+0x12/0x4d [] pre_intr_init_hook+0x8/0xa [] init_IRQ+0xe/0x65 [] start_kernel+0x178/0x38f [] 0xc0100210 in-hardirq-W at: [] lockdep_acquire+0x68/0x84 [] _spin_lock_irqsave+0x2a/0x3a [] mask_and_ack_8259A+0x1b/0xcc [] __do_IRQ+0x4f/0x113 [] do_IRQ+0x8c/0xad in-softirq-W at: [] lockdep_acquire+0x68/0x84 [] _spin_lock_irqsave+0x2a/0x3a [] mask_and_ack_8259A+0x1b/0xcc [] __do_IRQ+0x4f/0x113 [] do_IRQ+0x8c/0xad } ... key at: [] i8259A_lock+0x14/0x40 ... acquired at: [] lockdep_acquire+0x68/0x84 [] _spin_lock_irqsave+0x2a/0x3a [] disable_8259A_irq+0x10/0x47 [] startup_edge_ioapic_vector+0x31/0x58 [] setup_irq+0xe4/0x14d [] request_irq+0xda/0xf9 [] rtc_init+0x6a/0x1a7 [] init+0x14a/0x2cb [] kernel_thread_helper+0x5/0xb ... 
acquired at: [] lockdep_acquire+0x68/0x84 [] _spin_lock_irqsave+0x2a/0x3a [] mask_IO_APIC_irq+0x11/0x31 [] ack_edge_ioapic_vector+0x31/0x41 [] __do_IRQ+0x4f/0x113 [] do_IRQ+0x8c/0xad the hard-irq-unsafe lock's dependencies: -> (vector_lock){--..} ops: 31 { initial-use at: [] lockdep_acquire+0x68/0x84 [] _spin_lock+0x21/0x2f [] assign_irq_vector+0x34/0xc8 [] setup_IO_APIC+0x45a/0xcff [] smp_prepare_cpus+0x5ea/0x8aa [] init+0x32/0x2cb [] kernel_thread_helper+0x5/0xb softirq-on-W at: [] lockdep_acquire+0x68/0x84 [] _spin_lock+0x21/0x2f [] assign_irq_vector+0x34/0xc8 [] setup_IO_APIC+0x45a/0xcff [] smp_prepare_cpus+0x5ea/0x8aa [] init+0x32/0x2cb [] kernel_thread_helper+0x5/0xb hardirq-on-W at: [] lockdep_acquire+0x68/0x84 [] _spin_lock+0x21/0x2f [] assign_irq_vector+0x34/0xc8 [] setup_IO_APIC+0x45a/0xcff [] smp_prepare_cpus+0x5ea/0x8aa [] init+0x32/0x2cb [] kernel_thread_helper+0x5/0xb } ... key at: [] vector_lock+0x14/0x3c stack backtrace: [] show_trace+0xd/0xf [] dump_stack+0x17/0x19 [] check_usage+0x1f6/0x203 [] __lockdep_acquire+0x8c2/0xaa5 [] lockdep_acquire+0x68/0x84 [] _spin_lock_irqsave+0x2a/0x3a [] startup_msi_irq_wo_maskbit+0x10/0x35 [] probe_irq_on+0x44/0x107 [] serial8250_config_port+0x84b/0x986 [] uart_add_one_port+0x105/0x289 [] serial8250_init+0xc3/0x10a [] init+0x14a/0x2cb [] kernel_thread_helper+0x5/0xb Signed-off-by: Ingo Molnar Cc: Jan Beulich Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/kernel/io_apic.c | 9 +++++---- arch/x86_64/kernel/io_apic.c | 7 ++++--- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c index 61317f4..72ae414 100644 --- a/arch/i386/kernel/io_apic.c +++ b/arch/i386/kernel/io_apic.c @@ -1163,14 +1163,15 @@ u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 }; int assign_irq_vector(int irq) { static int current_vector = FIRST_DEVICE_VECTOR, offset = 0; + unsigned long flags; int vector; BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS); - spin_lock(&vector_lock); + spin_lock_irqsave(&vector_lock, flags); if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) { - spin_unlock(&vector_lock); + spin_unlock_irqrestore(&vector_lock, flags); return IO_APIC_VECTOR(irq); } next: @@ -1181,7 +1182,7 @@ next: if (current_vector >= FIRST_SYSTEM_VECTOR) { offset++; if (!(offset%8)) { - spin_unlock(&vector_lock); + spin_unlock_irqrestore(&vector_lock, flags); return -ENOSPC; } current_vector = FIRST_DEVICE_VECTOR + offset; @@ -1192,7 +1193,7 @@ next: if (irq != AUTO_ASSIGN) IO_APIC_VECTOR(irq) = vector; - spin_unlock(&vector_lock); + spin_unlock_irqrestore(&vector_lock, flags); return vector; } diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c index 38a3ff3..519cd4e 100644 --- a/arch/x86_64/kernel/io_apic.c +++ b/arch/x86_64/kernel/io_apic.c @@ -836,14 +836,15 @@ u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 }; int assign_irq_vector(int irq) { static int current_vector = FIRST_DEVICE_VECTOR, offset = 0; + unsigned long flags; int vector; BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS); - spin_lock(&vector_lock); + spin_lock_irqsave(&vector_lock, flags); if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) { - spin_unlock(&vector_lock); + spin_unlock_irqrestore(&vector_lock, flags); return IO_APIC_VECTOR(irq); } next: @@ -862,7 +863,7 @@ next: if (irq != AUTO_ASSIGN) IO_APIC_VECTOR(irq) = vector; - spin_unlock(&vector_lock); + 
spin_unlock_irqrestore(&vector_lock, flags); return vector; } -- cgit v1.1 From 4d9bc79cd28b779610d9590b3a96a28a0f64a25a Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 26 Jun 2006 13:57:19 +0200 Subject: [PATCH] x86_64: Make sure is_compat_task works early Previously it would only work in the first 32bit system call, not during early process setup. Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/process.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c index 52c03f6..dcb77b4 100644 --- a/arch/x86_64/kernel/process.c +++ b/arch/x86_64/kernel/process.c @@ -364,8 +364,11 @@ void flush_thread(void) struct task_struct *tsk = current; struct thread_info *t = current_thread_info(); - if (t->flags & _TIF_ABI_PENDING) + if (t->flags & _TIF_ABI_PENDING) { t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32); + if (t->flags & _TIF_IA32) + current_thread_info()->status |= TS_COMPAT; + } tsk->thread.debugreg0 = 0; tsk->thread.debugreg1 = 0; -- cgit v1.1 From a813ce432f27c4f5011c7b5ac9d2bbbfeb41d9a7 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 26 Jun 2006 13:57:22 +0200 Subject: [PATCH] x86_64: Rename IOMMU option, fix help and mark option embedded. - Rename the GART_IOMMU option to IOMMU to make clear it's not just for AMD - Rewrite the help text to better emphasise this fact - Make it an embedded option because too many people get it wrong. To my astonishment I discovered the aacraid driver tests this symbol directly. This looks quite broken to me - it's an internal implementation detail of the PCI DMA API. Can the maintainer please clarify what this test was intended to do? Cc: linux-scsi@vger.kernel.org Cc: alan@redhat.com Cc: markh@osdl.org Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/Kconfig | 30 ++++++++++++++++-------------- arch/x86_64/Kconfig.debug | 2 +- arch/x86_64/kernel/Makefile | 2 +- arch/x86_64/kernel/io_apic.c | 2 +- arch/x86_64/kernel/pci-dma.c | 2 +- arch/x86_64/kernel/setup.c | 2 +- drivers/char/agp/Kconfig | 4 ++-- drivers/char/agp/amd64-agp.c | 4 ++-- drivers/scsi/aacraid/comminit.c | 5 ++++- include/asm-x86_64/pci.h | 2 +- include/asm-x86_64/proto.h | 2 +- 11 files changed, 31 insertions(+), 26 deletions(-) diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig index fc75275..e8c52a1 100644 --- a/arch/x86_64/Kconfig +++ b/arch/x86_64/Kconfig @@ -386,24 +386,26 @@ config HPET_EMULATE_RTC bool "Provide RTC interrupt" depends on HPET_TIMER && RTC=y -config GART_IOMMU - bool "K8 GART IOMMU support" +# Mark as embedded because too many people got it wrong. +# The code disables itself when not needed. +config IOMMU + bool "IOMMU support" if EMBEDDED default y select SWIOTLB select AGP depends on PCI help - Support for hardware IOMMU in AMD's Opteron/Athlon64 Processors - and for the bounce buffering software IOMMU. - Needed to run systems with more than 3GB of memory properly with - 32-bit PCI devices that do not support DAC (Double Address Cycle). - The IOMMU can be turned off at runtime with the iommu=off parameter. - Normally the kernel will take the right choice by itself. - This option includes a driver for the AMD Opteron/Athlon64 IOMMU - northbridge and a software emulation used on other systems without - hardware IOMMU. If unsure, say Y. - -# need this always selected by GART_IOMMU for the VIA workaround + Support for full DMA access of devices with 32bit memory access only + on systems with more than 3GB.
This is usually needed for USB, + sound, many IDE/SATA chipsets and some other devices. + Provides a driver for the AMD Athlon64/Opteron/Turion/Sempron GART + based IOMMU and a software bounce buffer based IOMMU used on Intel + systems and as fallback. + The code is only active when needed (enough memory and limited + device) unless CONFIG_IOMMU_DEBUG or iommu=force is specified + too. + +# need this always selected by IOMMU for the VIA workaround config SWIOTLB bool @@ -503,7 +505,7 @@ config REORDER config K8_NB def_bool y - depends on AGP_AMD64 || GART_IOMMU || (PCI && NUMA) + depends on AGP_AMD64 || IOMMU || (PCI && NUMA) endmenu diff --git a/arch/x86_64/Kconfig.debug b/arch/x86_64/Kconfig.debug index ea31b4c..0226f08 100644 --- a/arch/x86_64/Kconfig.debug +++ b/arch/x86_64/Kconfig.debug @@ -13,7 +13,7 @@ config DEBUG_RODATA If in doubt, say "N". config IOMMU_DEBUG - depends on GART_IOMMU && DEBUG_KERNEL + depends on IOMMU && DEBUG_KERNEL bool "Enable IOMMU debugging" help Force the IOMMU to on even when you have less than 4GB of diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile index f927d11..fd106bd 100644 --- a/arch/x86_64/kernel/Makefile +++ b/arch/x86_64/kernel/Makefile @@ -28,7 +28,7 @@ obj-$(CONFIG_PM) += suspend.o obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend_asm.o obj-$(CONFIG_CPU_FREQ) += cpufreq/ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o -obj-$(CONFIG_GART_IOMMU) += pci-gart.o aperture.o +obj-$(CONFIG_IOMMU) += pci-gart.o aperture.o obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_X86_PM_TIMER) += pmtimer.o diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c index 519cd4e..c768d8a 100644 --- a/arch/x86_64/kernel/io_apic.c +++ b/arch/x86_64/kernel/io_apic.c @@ -319,7 +319,7 @@ void __init check_ioapic(void) vendor &= 0xffff; switch (vendor) { case PCI_VENDOR_ID_VIA: -#ifdef CONFIG_GART_IOMMU +#ifdef CONFIG_IOMMU if ((end_pfn > MAX_DMA32_PFN || force_iommu) && !iommu_aperture_allowed) { diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c index 117bceb..33926c3 100644 --- a/arch/x86_64/kernel/pci-dma.c +++ b/arch/x86_64/kernel/pci-dma.c @@ -266,7 +266,7 @@ __init int iommu_setup(char *p) swiotlb = 1; #endif -#ifdef CONFIG_GART_IOMMU +#ifdef CONFIG_IOMMU gart_parse_options(p); #endif diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index 9dadb9a..816203d 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -702,7 +702,7 @@ void __init setup_arch(char **cmdline_p) e820_setup_gap(); -#ifdef CONFIG_GART_IOMMU +#ifdef CONFIG_IOMMU iommu_hole_init(); #endif diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig index 46685a5..9826a39 100644 --- a/drivers/char/agp/Kconfig +++ b/drivers/char/agp/Kconfig @@ -55,9 +55,9 @@ config AGP_AMD X on AMD Irongate, 761, and 762 chipsets. config AGP_AMD64 - tristate "AMD Opteron/Athlon64 on-CPU GART support" if !GART_IOMMU + tristate "AMD Opteron/Athlon64 on-CPU GART support" if !IOMMU depends on AGP && X86 - default y if GART_IOMMU + default y if IOMMU help This option gives you AGP support for the GLX component of X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs. 
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index 229d015..f690ee8 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -292,7 +292,7 @@ static int __devinit aperture_valid(u64 aper, u32 size) /* * W*s centric BIOS sometimes only set up the aperture in the AGP * bridge, not the northbridge. On AMD64 this is handled early - * in aperture.c, but when GART_IOMMU is not enabled or we run + * in aperture.c, but when IOMMU is not enabled or we run * on a 32bit kernel this needs to be redone. * Unfortunately it is impossible to fix the aperture here because it's too late * to allocate that much memory. But at least error out cleanly instead of @@ -775,7 +775,7 @@ static void __exit agp_amd64_cleanup(void) /* On AMD64 the PCI driver needs to initialize this driver early for the IOMMU, so it has to be called via a backdoor. */ -#ifndef CONFIG_GART_IOMMU +#ifndef CONFIG_IOMMU module_init(agp_amd64_init); module_exit(agp_amd64_cleanup); #endif diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 35b0a6e..7cea514 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -104,8 +104,11 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co * always true on real computers. It also has some slight problems * with the GART on x86-64. I've btw never tried DMA from PCI space * on this platform but don't be surprised if its problematic. + * [AK: something is very very wrong when a driver tests this symbol. + * Someone should figure out what the comment writer really meant here and fix + * the code. Or just remove that bad code. ] */ -#ifndef CONFIG_GART_IOMMU +#ifndef CONFIG_IOMMU if ((num_physpages << (PAGE_SHIFT - 12)) <= AAC_MAX_HOSTPHYSMEMPAGES) { init->HostPhysMemPages = cpu_to_le32(num_physpages << (PAGE_SHIFT-12)); diff --git a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h index 2db0620..3374d34 100644 --- a/include/asm-x86_64/pci.h +++ b/include/asm-x86_64/pci.h @@ -52,7 +52,7 @@ extern int iommu_setup(char *opt); */ #define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) -#ifdef CONFIG_GART_IOMMU +#ifdef CONFIG_IOMMU /* * x86-64 always supports DAC, but sometimes it is useful to force diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h index 8abf2a4..1064533 100644 --- a/include/asm-x86_64/proto.h +++ b/include/asm-x86_64/proto.h @@ -116,7 +116,7 @@ extern int skip_ioapic_setup; extern int acpi_ht; extern int acpi_disabled; -#ifdef CONFIG_GART_IOMMU +#ifdef CONFIG_IOMMU extern int fallback_aper_order; extern int fallback_aper_force; extern int iommu_aperture; -- cgit v1.1 From 2b28592b07223d7fc0691ce3fe57d495dc9cbe3a Mon Sep 17 00:00:00 2001 From: "bibo,mao" Date: Mon, 26 Jun 2006 13:57:25 +0200 Subject: [PATCH] x86_64: MSI misses one entry handler On x86_64, if a device driver using MSI is assigned the 0xee vector by assign_irq_vector(), the system will crash when that interrupt arrives, because the 0xee interrupt entry is empty. This patch fixes that. This patch is based on 2.6.17-rc6.
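The arithmetic behind the fix, for clarity: device vectors are handed out below FIRST_SYSTEM_VECTOR (0xef at this point in time; that value is assumed from the 2.6.17 headers and is not shown in the hunk), so the highest vector assign_irq_vector() can return is 0xee. The 0xe? block therefore needs stubs for 0xe0 through 0xee, which is 15 entries; the old BUILD_14_IRQS only generated 0xe0 through 0xed and left 0xee without an entry point, as the i8259.c hunk below corrects. A quick sanity check of the count:

#include <stdio.h>

int main(void)
{
	int first_system_vector = 0xef;	/* assumed 2.6.17 value, not in this hunk */
	int first = 0xe0;
	int last = first_system_vector - 1;	/* highest assignable vector: 0xee */

	/* 0xee - 0xe0 + 1 == 15 stub entries needed in the 0xe block */
	printf("stubs needed: %d (0x%x..0x%x)\n",
	       last - first + 1, first, last);
	return 0;
}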
Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/i8259.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c index 5ecd34a..9b1a4e1 100644 --- a/arch/x86_64/kernel/i8259.c +++ b/arch/x86_64/kernel/i8259.c @@ -44,11 +44,11 @@ BI(x,8) BI(x,9) BI(x,a) BI(x,b) \ BI(x,c) BI(x,d) BI(x,e) BI(x,f) -#define BUILD_14_IRQS(x) \ +#define BUILD_15_IRQS(x) \ BI(x,0) BI(x,1) BI(x,2) BI(x,3) \ BI(x,4) BI(x,5) BI(x,6) BI(x,7) \ BI(x,8) BI(x,9) BI(x,a) BI(x,b) \ - BI(x,c) BI(x,d) + BI(x,c) BI(x,d) BI(x,e) /* * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts: @@ -73,13 +73,13 @@ BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb) BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd) #ifdef CONFIG_PCI_MSI - BUILD_14_IRQS(0xe) + BUILD_15_IRQS(0xe) #endif #endif #undef BUILD_16_IRQS -#undef BUILD_14_IRQS +#undef BUILD_15_IRQS #undef BI @@ -92,11 +92,11 @@ BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd) IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \ IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f) -#define IRQLIST_14(x) \ +#define IRQLIST_15(x) \ IRQ(x,0), IRQ(x,1), IRQ(x,2), IRQ(x,3), \ IRQ(x,4), IRQ(x,5), IRQ(x,6), IRQ(x,7), \ IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \ - IRQ(x,c), IRQ(x,d) + IRQ(x,c), IRQ(x,d), IRQ(x,e) void (*interrupt[NR_IRQS])(void) = { IRQLIST_16(0x0), @@ -108,7 +108,7 @@ void (*interrupt[NR_IRQS])(void) = { IRQLIST_16(0xc), IRQLIST_16(0xd) #ifdef CONFIG_PCI_MSI - , IRQLIST_14(0xe) + , IRQLIST_15(0xe) #endif #endif -- cgit v1.1 From 4552d5dc08b79868829b4be8951b29b07284753f Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 26 Jun 2006 13:57:28 +0200 Subject: [PATCH] x86_64: reliable stack trace support These are the generic bits needed to enable reliable stack traces based on Dwarf2-like (.eh_frame) unwind information. Subsequent patches will enable x86-64 and i386 to make use of this. Thanks to Andi Kleen and Ingo Molnar, who pointed out several possibilities for improvement. Signed-off-by: Jan Beulich Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 7 + include/linux/module.h | 3 + include/linux/unwind.h | 119 +++++++ init/main.c | 2 + kernel/Makefile | 1 + kernel/module.c | 16 +- kernel/unwind.c | 915 +++++++++++++++++++++++++++++++++++++++++++++++++ lib/Kconfig.debug | 12 +- 8 files changed, 1072 insertions(+), 3 deletions(-) create mode 100644 include/linux/unwind.h create mode 100644 kernel/unwind.c diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 3c5e4c2..5c1ec1f 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -32,6 +32,7 @@ extern const char linux_banner[]; #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #define ALIGN(x,a) (((x)+(a)-1)&~((a)-1)) +#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) #define KERN_EMERG "<0>" /* system is unusable */ #define KERN_ALERT "<1>" /* action must be taken immediately */ @@ -336,6 +337,12 @@ struct sysinfo { /* Force a compilation error if condition is true */ #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) +/* Force a compilation error if condition is true, but also produce a + result (of value 0 and type size_t), so the expression can be used + e.g. in a structure initializer (or where-ever else comma expressions + aren't permitted). 
*/ +#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1) + /* Trap pasters of __FUNCTION__ at compile-time */ #define __FUNCTION__ (__func__) diff --git a/include/linux/module.h b/include/linux/module.h index 2d36609..9ebbb74 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -285,6 +285,9 @@ struct module /* The size of the executable code in each section. */ unsigned long init_text_size, core_text_size; + /* The handle returned from unwind_add_table. */ + void *unwind_info; + /* Arch-specific module values */ struct mod_arch_specific arch; diff --git a/include/linux/unwind.h b/include/linux/unwind.h new file mode 100644 index 0000000..0295aa7 --- /dev/null +++ b/include/linux/unwind.h @@ -0,0 +1,119 @@ +#ifndef _LINUX_UNWIND_H +#define _LINUX_UNWIND_H + +/* + * Copyright (C) 2002-2006 Novell, Inc. + * Jan Beulich + * This code is released under version 2 of the GNU GPL. + * + * A simple API for unwinding kernel stacks. This is used for + * debugging and error reporting purposes. The kernel doesn't need + * full-blown stack unwinding with all the bells and whistles, so there + * is not much point in implementing the full Dwarf2 unwind API. + */ + +#include + +struct module; + +#ifdef CONFIG_STACK_UNWIND + +#include + +#ifndef ARCH_UNWIND_SECTION_NAME +#define ARCH_UNWIND_SECTION_NAME ".eh_frame" +#endif + +/* + * Initialize unwind support. + */ +extern void unwind_init(void); + +extern void *unwind_add_table(struct module *, + const void *table_start, + unsigned long table_size); + +extern void unwind_remove_table(void *handle, int init_only); + +extern int unwind_init_frame_info(struct unwind_frame_info *, + struct task_struct *, + /*const*/ struct pt_regs *); + +/* + * Prepare to unwind a blocked task. + */ +extern int unwind_init_blocked(struct unwind_frame_info *, + struct task_struct *); + +/* + * Prepare to unwind the currently running thread. + */ +extern int unwind_init_running(struct unwind_frame_info *, + asmlinkage void (*callback)(struct unwind_frame_info *, + void *arg), + void *arg); + +/* + * Unwind to previous to frame. Returns 0 if successful, negative + * number in case of an error. + */ +extern int unwind(struct unwind_frame_info *); + +/* + * Unwind until the return pointer is in user-land (or until an error + * occurs). Returns 0 if successful, negative number in case of + * error. 
+ */ +extern int unwind_to_user(struct unwind_frame_info *); + +#else + +struct unwind_frame_info {}; + +static inline void unwind_init(void) {} + +static inline void *unwind_add_table(struct module *mod, + const void *table_start, + unsigned long table_size) +{ + return NULL; +} + +static inline void unwind_remove_table(void *handle, int init_only) +{ +} + +static inline int unwind_init_frame_info(struct unwind_frame_info *info, + struct task_struct *tsk, + const struct pt_regs *regs) +{ + return -ENOSYS; +} + +static inline int unwind_init_blocked(struct unwind_frame_info *info, + struct task_struct *tsk) +{ + return -ENOSYS; +} + +static inline int unwind_init_running(struct unwind_frame_info *info, + asmlinkage void (*cb)(struct unwind_frame_info *, + void *arg), + void *arg) +{ + return -ENOSYS; +} + +static inline int unwind(struct unwind_frame_info *info) +{ + return -ENOSYS; +} + +static inline int unwind_to_user(struct unwind_frame_info *info) +{ + return -ENOSYS; +} + +#endif + +#endif /* _LINUX_UNWIND_H */ diff --git a/init/main.c b/init/main.c index f715b9b..f556fd0 100644 --- a/init/main.c +++ b/init/main.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include @@ -482,6 +483,7 @@ asmlinkage void __init start_kernel(void) __stop___param - __start___param, &unknown_bootoption); sort_main_extable(); + unwind_init(); trap_init(); rcu_init(); init_IRQ(); diff --git a/kernel/Makefile b/kernel/Makefile index f6ef00f..a31276e 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o obj-$(CONFIG_UID16) += uid16.o obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_KALLSYMS) += kallsyms.o +obj-$(CONFIG_STACK_UNWIND) += unwind.o obj-$(CONFIG_PM) += power/ obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o obj-$(CONFIG_KEXEC) += kexec.o diff --git a/kernel/module.c b/kernel/module.c index d75275d..08811e2 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include #include @@ -1051,6 +1052,8 @@ static void free_module(struct module *mod) remove_sect_attrs(mod); mod_kobject_remove(mod); + unwind_remove_table(mod->unwind_info, 0); + /* Arch-specific cleanup. */ module_arch_cleanup(mod); @@ -1412,7 +1415,7 @@ static struct module *load_module(void __user *umod, unsigned int i, symindex = 0, strindex = 0, setupindex, exindex, exportindex, modindex, obsparmindex, infoindex, gplindex, crcindex, gplcrcindex, versindex, pcpuindex, gplfutureindex, - gplfuturecrcindex; + gplfuturecrcindex, unwindex = 0; struct module *mod; long err = 0; void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */ @@ -1502,6 +1505,9 @@ static struct module *load_module(void __user *umod, versindex = find_sec(hdr, sechdrs, secstrings, "__versions"); infoindex = find_sec(hdr, sechdrs, secstrings, ".modinfo"); pcpuindex = find_pcpusec(hdr, sechdrs, secstrings); +#ifdef ARCH_UNWIND_SECTION_NAME + unwindex = find_sec(hdr, sechdrs, secstrings, ARCH_UNWIND_SECTION_NAME); +#endif /* Don't keep modinfo section */ sechdrs[infoindex].sh_flags &= ~(unsigned long)SHF_ALLOC; @@ -1510,6 +1516,8 @@ static struct module *load_module(void __user *umod, sechdrs[symindex].sh_flags |= SHF_ALLOC; sechdrs[strindex].sh_flags |= SHF_ALLOC; #endif + if (unwindex) + sechdrs[unwindex].sh_flags |= SHF_ALLOC; /* Check module struct version now, before we try to use module. 
*/ if (!check_modstruct_version(sechdrs, versindex, mod)) { @@ -1738,6 +1746,11 @@ static struct module *load_module(void __user *umod, goto arch_cleanup; add_sect_attrs(mod, hdr->e_shnum, secstrings, sechdrs); + /* Size of section 0 is 0, so this works well if no unwind info. */ + mod->unwind_info = unwind_add_table(mod, + (void *)sechdrs[unwindex].sh_addr, + sechdrs[unwindex].sh_size); + /* Get rid of temporary copy */ vfree(hdr); @@ -1836,6 +1849,7 @@ sys_init_module(void __user *umod, mod->state = MODULE_STATE_LIVE; /* Drop initial reference. */ module_put(mod); + unwind_remove_table(mod->unwind_info, 1); module_free(mod, mod->module_init); mod->module_init = NULL; mod->init_size = 0; diff --git a/kernel/unwind.c b/kernel/unwind.c new file mode 100644 index 0000000..d36bcd3 --- /dev/null +++ b/kernel/unwind.c @@ -0,0 +1,915 @@ +/* + * Copyright (C) 2002-2006 Novell, Inc. + * Jan Beulich + * This code is released under version 2 of the GNU GPL. + * + * A simple API for unwinding kernel stacks. This is used for + * debugging and error reporting purposes. The kernel doesn't need + * full-blown stack unwinding with all the bells and whistles, so there + * is not much point in implementing the full Dwarf2 unwind API. + */ + +#include +#include +#include +#include +#include +#include +#include + +extern char __start_unwind[], __end_unwind[]; + +#define MAX_STACK_DEPTH 8 + +#define EXTRA_INFO(f) { \ + BUILD_BUG_ON_ZERO(offsetof(struct unwind_frame_info, f) \ + % FIELD_SIZEOF(struct unwind_frame_info, f)) \ + + offsetof(struct unwind_frame_info, f) \ + / FIELD_SIZEOF(struct unwind_frame_info, f), \ + FIELD_SIZEOF(struct unwind_frame_info, f) \ + } +#define PTREGS_INFO(f) EXTRA_INFO(regs.f) + +static const struct { + unsigned offs:BITS_PER_LONG / 2; + unsigned width:BITS_PER_LONG / 2; +} reg_info[] = { + UNW_REGISTER_INFO +}; + +#undef PTREGS_INFO +#undef EXTRA_INFO + +#ifndef REG_INVALID +#define REG_INVALID(r) (reg_info[r].width == 0) +#endif + +#define DW_CFA_nop 0x00 +#define DW_CFA_set_loc 0x01 +#define DW_CFA_advance_loc1 0x02 +#define DW_CFA_advance_loc2 0x03 +#define DW_CFA_advance_loc4 0x04 +#define DW_CFA_offset_extended 0x05 +#define DW_CFA_restore_extended 0x06 +#define DW_CFA_undefined 0x07 +#define DW_CFA_same_value 0x08 +#define DW_CFA_register 0x09 +#define DW_CFA_remember_state 0x0a +#define DW_CFA_restore_state 0x0b +#define DW_CFA_def_cfa 0x0c +#define DW_CFA_def_cfa_register 0x0d +#define DW_CFA_def_cfa_offset 0x0e +#define DW_CFA_def_cfa_expression 0x0f +#define DW_CFA_expression 0x10 +#define DW_CFA_offset_extended_sf 0x11 +#define DW_CFA_def_cfa_sf 0x12 +#define DW_CFA_def_cfa_offset_sf 0x13 +#define DW_CFA_val_offset 0x14 +#define DW_CFA_val_offset_sf 0x15 +#define DW_CFA_val_expression 0x16 +#define DW_CFA_lo_user 0x1c +#define DW_CFA_GNU_window_save 0x2d +#define DW_CFA_GNU_args_size 0x2e +#define DW_CFA_GNU_negative_offset_extended 0x2f +#define DW_CFA_hi_user 0x3f + +#define DW_EH_PE_FORM 0x07 +#define DW_EH_PE_native 0x00 +#define DW_EH_PE_leb128 0x01 +#define DW_EH_PE_data2 0x02 +#define DW_EH_PE_data4 0x03 +#define DW_EH_PE_data8 0x04 +#define DW_EH_PE_signed 0x08 +#define DW_EH_PE_ADJUST 0x70 +#define DW_EH_PE_abs 0x00 +#define DW_EH_PE_pcrel 0x10 +#define DW_EH_PE_textrel 0x20 +#define DW_EH_PE_datarel 0x30 +#define DW_EH_PE_funcrel 0x40 +#define DW_EH_PE_aligned 0x50 +#define DW_EH_PE_indirect 0x80 +#define DW_EH_PE_omit 0xff + +typedef unsigned long uleb128_t; +typedef signed long sleb128_t; + +static struct unwind_table { + struct { + unsigned long pc; + 
unsigned long range; + } core, init; + const void *address; + unsigned long size; + struct unwind_table *link; + const char *name; +} root_table, *last_table; + +struct unwind_item { + enum item_location { + Nowhere, + Memory, + Register, + Value + } where; + uleb128_t value; +}; + +struct unwind_state { + uleb128_t loc, org; + const u8 *cieStart, *cieEnd; + uleb128_t codeAlign; + sleb128_t dataAlign; + struct cfa { + uleb128_t reg, offs; + } cfa; + struct unwind_item regs[ARRAY_SIZE(reg_info)]; + unsigned stackDepth:8; + unsigned version:8; + const u8 *label; + const u8 *stack[MAX_STACK_DEPTH]; +}; + +static const struct cfa badCFA = { ARRAY_SIZE(reg_info), 1 }; + +static struct unwind_table *find_table(unsigned long pc) +{ + struct unwind_table *table; + + for (table = &root_table; table; table = table->link) + if ((pc >= table->core.pc + && pc < table->core.pc + table->core.range) + || (pc >= table->init.pc + && pc < table->init.pc + table->init.range)) + break; + + return table; +} + +static void init_unwind_table(struct unwind_table *table, + const char *name, + const void *core_start, + unsigned long core_size, + const void *init_start, + unsigned long init_size, + const void *table_start, + unsigned long table_size) +{ + table->core.pc = (unsigned long)core_start; + table->core.range = core_size; + table->init.pc = (unsigned long)init_start; + table->init.range = init_size; + table->address = table_start; + table->size = table_size; + table->link = NULL; + table->name = name; +} + +void __init unwind_init(void) +{ + init_unwind_table(&root_table, "kernel", + _text, _end - _text, + NULL, 0, + __start_unwind, __end_unwind - __start_unwind); +} + +/* Must be called with module_mutex held. */ +void *unwind_add_table(struct module *module, + const void *table_start, + unsigned long table_size) +{ + struct unwind_table *table; + + if (table_size <= 0) + return NULL; + + table = kmalloc(sizeof(*table), GFP_KERNEL); + if (!table) + return NULL; + + init_unwind_table(table, module->name, + module->module_core, module->core_size, + module->module_init, module->init_size, + table_start, table_size); + + if (last_table) + last_table->link = table; + else + root_table.link = table; + last_table = table; + + return table; +} + +struct unlink_table_info +{ + struct unwind_table *table; + int init_only; +}; + +static int unlink_table(void *arg) +{ + struct unlink_table_info *info = arg; + struct unwind_table *table = info->table, *prev; + + for (prev = &root_table; prev->link && prev->link != table; prev = prev->link) + ; + + if (prev->link) { + if (info->init_only) { + table->init.pc = 0; + table->init.range = 0; + info->table = NULL; + } else { + prev->link = table->link; + if (!prev->link) + last_table = prev; + } + } else + info->table = NULL; + + return 0; +} + +/* Must be called with module_mutex held. 
*/ +void unwind_remove_table(void *handle, int init_only) +{ + struct unwind_table *table = handle; + struct unlink_table_info info; + + if (!table || table == &root_table) + return; + + if (init_only && table == last_table) { + table->init.pc = 0; + table->init.range = 0; + return; + } + + info.table = table; + info.init_only = init_only; + stop_machine_run(unlink_table, &info, NR_CPUS); + + if (info.table) + kfree(table); +} + +static uleb128_t get_uleb128(const u8 **pcur, const u8 *end) +{ + const u8 *cur = *pcur; + uleb128_t value; + unsigned shift; + + for (shift = 0, value = 0; cur < end; shift += 7) { + if (shift + 7 > 8 * sizeof(value) + && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) { + cur = end + 1; + break; + } + value |= (uleb128_t)(*cur & 0x7f) << shift; + if (!(*cur++ & 0x80)) + break; + } + *pcur = cur; + + return value; +} + +static sleb128_t get_sleb128(const u8 **pcur, const u8 *end) +{ + const u8 *cur = *pcur; + sleb128_t value; + unsigned shift; + + for (shift = 0, value = 0; cur < end; shift += 7) { + if (shift + 7 > 8 * sizeof(value) + && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) { + cur = end + 1; + break; + } + value |= (sleb128_t)(*cur & 0x7f) << shift; + if (!(*cur & 0x80)) { + value |= -(*cur++ & 0x40) << shift; + break; + } + } + *pcur = cur; + + return value; +} + +static unsigned long read_pointer(const u8 **pLoc, + const void *end, + signed ptrType) +{ + unsigned long value = 0; + union { + const u8 *p8; + const u16 *p16u; + const s16 *p16s; + const u32 *p32u; + const s32 *p32s; + const unsigned long *pul; + } ptr; + + if (ptrType < 0 || ptrType == DW_EH_PE_omit) + return 0; + ptr.p8 = *pLoc; + switch(ptrType & DW_EH_PE_FORM) { + case DW_EH_PE_data2: + if (end < (const void *)(ptr.p16u + 1)) + return 0; + if(ptrType & DW_EH_PE_signed) + value = get_unaligned(ptr.p16s++); + else + value = get_unaligned(ptr.p16u++); + break; + case DW_EH_PE_data4: +#ifdef CONFIG_64BIT + if (end < (const void *)(ptr.p32u + 1)) + return 0; + if(ptrType & DW_EH_PE_signed) + value = get_unaligned(ptr.p32s++); + else + value = get_unaligned(ptr.p32u++); + break; + case DW_EH_PE_data8: + BUILD_BUG_ON(sizeof(u64) != sizeof(value)); +#else + BUILD_BUG_ON(sizeof(u32) != sizeof(value)); +#endif + case DW_EH_PE_native: + if (end < (const void *)(ptr.pul + 1)) + return 0; + value = get_unaligned(ptr.pul++); + break; + case DW_EH_PE_leb128: + BUILD_BUG_ON(sizeof(uleb128_t) > sizeof(value)); + value = ptrType & DW_EH_PE_signed + ? 
get_sleb128(&ptr.p8, end) + : get_uleb128(&ptr.p8, end); + if ((const void *)ptr.p8 > end) + return 0; + break; + default: + return 0; + } + switch(ptrType & DW_EH_PE_ADJUST) { + case DW_EH_PE_abs: + break; + case DW_EH_PE_pcrel: + value += (unsigned long)*pLoc; + break; + default: + return 0; + } + if ((ptrType & DW_EH_PE_indirect) + && __get_user(value, (unsigned long *)value)) + return 0; + *pLoc = ptr.p8; + + return value; +} + +static signed fde_pointer_type(const u32 *cie) +{ + const u8 *ptr = (const u8 *)(cie + 2); + unsigned version = *ptr; + + if (version != 1) + return -1; /* unsupported */ + if (*++ptr) { + const char *aug; + const u8 *end = (const u8 *)(cie + 1) + *cie; + uleb128_t len; + + /* check if augmentation size is first (and thus present) */ + if (*ptr != 'z') + return -1; + /* check if augmentation string is nul-terminated */ + if ((ptr = memchr(aug = (const void *)ptr, 0, end - ptr)) == NULL) + return -1; + ++ptr; /* skip terminator */ + get_uleb128(&ptr, end); /* skip code alignment */ + get_sleb128(&ptr, end); /* skip data alignment */ + /* skip return address column */ + version <= 1 ? (void)++ptr : (void)get_uleb128(&ptr, end); + len = get_uleb128(&ptr, end); /* augmentation length */ + if (ptr + len < ptr || ptr + len > end) + return -1; + end = ptr + len; + while (*++aug) { + if (ptr >= end) + return -1; + switch(*aug) { + case 'L': + ++ptr; + break; + case 'P': { + signed ptrType = *ptr++; + + if (!read_pointer(&ptr, end, ptrType) || ptr > end) + return -1; + } + break; + case 'R': + return *ptr; + default: + return -1; + } + } + } + return DW_EH_PE_native|DW_EH_PE_abs; +} + +static int advance_loc(unsigned long delta, struct unwind_state *state) +{ + state->loc += delta * state->codeAlign; + + return delta > 0; +} + +static void set_rule(uleb128_t reg, + enum item_location where, + uleb128_t value, + struct unwind_state *state) +{ + if (reg < ARRAY_SIZE(state->regs)) { + state->regs[reg].where = where; + state->regs[reg].value = value; + } +} + +static int processCFI(const u8 *start, + const u8 *end, + unsigned long targetLoc, + signed ptrType, + struct unwind_state *state) +{ + union { + const u8 *p8; + const u16 *p16; + const u32 *p32; + } ptr; + int result = 1; + + if (start != state->cieStart) { + state->loc = state->org; + result = processCFI(state->cieStart, state->cieEnd, 0, ptrType, state); + if (targetLoc == 0 && state->label == NULL) + return result; + } + for (ptr.p8 = start; result && ptr.p8 < end; ) { + switch(*ptr.p8 >> 6) { + uleb128_t value; + + case 0: + switch(*ptr.p8++) { + case DW_CFA_nop: + break; + case DW_CFA_set_loc: + if ((state->loc = read_pointer(&ptr.p8, end, ptrType)) == 0) + result = 0; + break; + case DW_CFA_advance_loc1: + result = ptr.p8 < end && advance_loc(*ptr.p8++, state); + break; + case DW_CFA_advance_loc2: + result = ptr.p8 <= end + 2 + && advance_loc(*ptr.p16++, state); + break; + case DW_CFA_advance_loc4: + result = ptr.p8 <= end + 4 + && advance_loc(*ptr.p32++, state); + break; + case DW_CFA_offset_extended: + value = get_uleb128(&ptr.p8, end); + set_rule(value, Memory, get_uleb128(&ptr.p8, end), state); + break; + case DW_CFA_val_offset: + value = get_uleb128(&ptr.p8, end); + set_rule(value, Value, get_uleb128(&ptr.p8, end), state); + break; + case DW_CFA_offset_extended_sf: + value = get_uleb128(&ptr.p8, end); + set_rule(value, Memory, get_sleb128(&ptr.p8, end), state); + break; + case DW_CFA_val_offset_sf: + value = get_uleb128(&ptr.p8, end); + set_rule(value, Value, get_sleb128(&ptr.p8, end), state); + break; + 
case DW_CFA_restore_extended: + case DW_CFA_undefined: + case DW_CFA_same_value: + set_rule(get_uleb128(&ptr.p8, end), Nowhere, 0, state); + break; + case DW_CFA_register: + value = get_uleb128(&ptr.p8, end); + set_rule(value, + Register, + get_uleb128(&ptr.p8, end), state); + break; + case DW_CFA_remember_state: + if (ptr.p8 == state->label) { + state->label = NULL; + return 1; + } + if (state->stackDepth >= MAX_STACK_DEPTH) + return 0; + state->stack[state->stackDepth++] = ptr.p8; + break; + case DW_CFA_restore_state: + if (state->stackDepth) { + const uleb128_t loc = state->loc; + const u8 *label = state->label; + + state->label = state->stack[state->stackDepth - 1]; + memcpy(&state->cfa, &badCFA, sizeof(state->cfa)); + memset(state->regs, 0, sizeof(state->regs)); + state->stackDepth = 0; + result = processCFI(start, end, 0, ptrType, state); + state->loc = loc; + state->label = label; + } else + return 0; + break; + case DW_CFA_def_cfa: + state->cfa.reg = get_uleb128(&ptr.p8, end); + /*nobreak*/ + case DW_CFA_def_cfa_offset: + state->cfa.offs = get_uleb128(&ptr.p8, end); + break; + case DW_CFA_def_cfa_sf: + state->cfa.reg = get_uleb128(&ptr.p8, end); + /*nobreak*/ + case DW_CFA_def_cfa_offset_sf: + state->cfa.offs = get_sleb128(&ptr.p8, end) + * state->dataAlign; + break; + case DW_CFA_def_cfa_register: + state->cfa.reg = get_uleb128(&ptr.p8, end); + break; + /*todo case DW_CFA_def_cfa_expression: */ + /*todo case DW_CFA_expression: */ + /*todo case DW_CFA_val_expression: */ + case DW_CFA_GNU_args_size: + get_uleb128(&ptr.p8, end); + break; + case DW_CFA_GNU_negative_offset_extended: + value = get_uleb128(&ptr.p8, end); + set_rule(value, + Memory, + (uleb128_t)0 - get_uleb128(&ptr.p8, end), state); + break; + case DW_CFA_GNU_window_save: + default: + result = 0; + break; + } + break; + case 1: + result = advance_loc(*ptr.p8++ & 0x3f, state); + break; + case 2: + value = *ptr.p8++ & 0x3f; + set_rule(value, Memory, get_uleb128(&ptr.p8, end), state); + break; + case 3: + set_rule(*ptr.p8++ & 0x3f, Nowhere, 0, state); + break; + } + if (ptr.p8 > end) + result = 0; + if (result && targetLoc != 0 && targetLoc < state->loc) + return 1; + } + + return result + && ptr.p8 == end + && (targetLoc == 0 + || (/*todo While in theory this should apply, gcc in practice omits + everything past the function prolog, and hence the location + never reaches the end of the function. + targetLoc < state->loc &&*/ state->label == NULL)); +} + +/* Unwind to previous to frame. Returns 0 if successful, negative + * number in case of an error. 
*/ +int unwind(struct unwind_frame_info *frame) +{ +#define FRAME_REG(r, t) (((t *)frame)[reg_info[r].offs]) + const u32 *fde = NULL, *cie = NULL; + const u8 *ptr = NULL, *end = NULL; + unsigned long startLoc = 0, endLoc = 0, cfa; + unsigned i; + signed ptrType = -1; + uleb128_t retAddrReg = 0; + struct unwind_table *table; + struct unwind_state state; + + if (UNW_PC(frame) == 0) + return -EINVAL; + if ((table = find_table(UNW_PC(frame))) != NULL + && !(table->size & (sizeof(*fde) - 1))) { + unsigned long tableSize = table->size; + + for (fde = table->address; + tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde; + tableSize -= sizeof(*fde) + *fde, + fde += 1 + *fde / sizeof(*fde)) { + if (!*fde || (*fde & (sizeof(*fde) - 1))) + break; + if (!fde[1]) + continue; /* this is a CIE */ + if ((fde[1] & (sizeof(*fde) - 1)) + || fde[1] > (unsigned long)(fde + 1) + - (unsigned long)table->address) + continue; /* this is not a valid FDE */ + cie = fde + 1 - fde[1] / sizeof(*fde); + if (*cie <= sizeof(*cie) + 4 + || *cie >= fde[1] - sizeof(*fde) + || (*cie & (sizeof(*cie) - 1)) + || cie[1] + || (ptrType = fde_pointer_type(cie)) < 0) { + cie = NULL; /* this is not a (valid) CIE */ + continue; + } + ptr = (const u8 *)(fde + 2); + startLoc = read_pointer(&ptr, + (const u8 *)(fde + 1) + *fde, + ptrType); + endLoc = startLoc + + read_pointer(&ptr, + (const u8 *)(fde + 1) + *fde, + ptrType & DW_EH_PE_indirect + ? ptrType + : ptrType & (DW_EH_PE_FORM|DW_EH_PE_signed)); + if (UNW_PC(frame) >= startLoc && UNW_PC(frame) < endLoc) + break; + cie = NULL; + } + } + if (cie != NULL) { + memset(&state, 0, sizeof(state)); + state.cieEnd = ptr; /* keep here temporarily */ + ptr = (const u8 *)(cie + 2); + end = (const u8 *)(cie + 1) + *cie; + if ((state.version = *ptr) != 1) + cie = NULL; /* unsupported version */ + else if (*++ptr) { + /* check if augmentation size is first (and thus present) */ + if (*ptr == 'z') { + /* check for ignorable (or already handled) + * nul-terminated augmentation string */ + while (++ptr < end && *ptr) + if (strchr("LPR", *ptr) == NULL) + break; + } + if (ptr >= end || *ptr) + cie = NULL; + } + ++ptr; + } + if (cie != NULL) { + /* get code aligment factor */ + state.codeAlign = get_uleb128(&ptr, end); + /* get data aligment factor */ + state.dataAlign = get_sleb128(&ptr, end); + if (state.codeAlign == 0 || state.dataAlign == 0 || ptr >= end) + cie = NULL; + else { + retAddrReg = state.version <= 1 ? 
*ptr++ : get_uleb128(&ptr, end); + /* skip augmentation */ + if (((const char *)(cie + 2))[1] == 'z') + ptr += get_uleb128(&ptr, end); + if (ptr > end + || retAddrReg >= ARRAY_SIZE(reg_info) + || REG_INVALID(retAddrReg) + || reg_info[retAddrReg].width != sizeof(unsigned long)) + cie = NULL; + } + } + if (cie != NULL) { + state.cieStart = ptr; + ptr = state.cieEnd; + state.cieEnd = end; + end = (const u8 *)(fde + 1) + *fde; + /* skip augmentation */ + if (((const char *)(cie + 2))[1] == 'z') { + uleb128_t augSize = get_uleb128(&ptr, end); + + if ((ptr += augSize) > end) + fde = NULL; + } + } + if (cie == NULL || fde == NULL) { +#ifdef CONFIG_FRAME_POINTER + unsigned long top, bottom; +#endif + +#ifdef CONFIG_FRAME_POINTER + top = STACK_TOP(frame->task); + bottom = STACK_BOTTOM(frame->task); +# if FRAME_RETADDR_OFFSET < 0 + if (UNW_SP(frame) < top + && UNW_FP(frame) <= UNW_SP(frame) + && bottom < UNW_FP(frame) +# else + if (UNW_SP(frame) > top + && UNW_FP(frame) >= UNW_SP(frame) + && bottom > UNW_FP(frame) +# endif + && !((UNW_SP(frame) | UNW_FP(frame)) + & (sizeof(unsigned long) - 1))) { + unsigned long link; + + if (!__get_user(link, + (unsigned long *)(UNW_FP(frame) + + FRAME_LINK_OFFSET)) +# if FRAME_RETADDR_OFFSET < 0 + && link > bottom && link < UNW_FP(frame) +# else + && link > UNW_FP(frame) && link < bottom +# endif + && !(link & (sizeof(link) - 1)) + && !__get_user(UNW_PC(frame), + (unsigned long *)(UNW_FP(frame) + + FRAME_RETADDR_OFFSET))) { + UNW_SP(frame) = UNW_FP(frame) + FRAME_RETADDR_OFFSET +# if FRAME_RETADDR_OFFSET < 0 + - +# else + + +# endif + sizeof(UNW_PC(frame)); + UNW_FP(frame) = link; + return 0; + } + } +#endif + return -ENXIO; + } + state.org = startLoc; + memcpy(&state.cfa, &badCFA, sizeof(state.cfa)); + /* process instructions */ + if (!processCFI(ptr, end, UNW_PC(frame), ptrType, &state) + || state.loc > endLoc + || state.regs[retAddrReg].where == Nowhere + || state.cfa.reg >= ARRAY_SIZE(reg_info) + || reg_info[state.cfa.reg].width != sizeof(unsigned long) + || state.cfa.offs % sizeof(unsigned long)) + return -EIO; + /* update frame */ + cfa = FRAME_REG(state.cfa.reg, unsigned long) + state.cfa.offs; + startLoc = min((unsigned long)UNW_SP(frame), cfa); + endLoc = max((unsigned long)UNW_SP(frame), cfa); + if (STACK_LIMIT(startLoc) != STACK_LIMIT(endLoc)) { + startLoc = min(STACK_LIMIT(cfa), cfa); + endLoc = max(STACK_LIMIT(cfa), cfa); + } +#ifndef CONFIG_64BIT +# define CASES CASE(8); CASE(16); CASE(32) +#else +# define CASES CASE(8); CASE(16); CASE(32); CASE(64) +#endif + for (i = 0; i < ARRAY_SIZE(state.regs); ++i) { + if (REG_INVALID(i)) { + if (state.regs[i].where == Nowhere) + continue; + return -EIO; + } + switch(state.regs[i].where) { + default: + break; + case Register: + if (state.regs[i].value >= ARRAY_SIZE(reg_info) + || REG_INVALID(state.regs[i].value) + || reg_info[i].width > reg_info[state.regs[i].value].width) + return -EIO; + switch(reg_info[state.regs[i].value].width) { +#define CASE(n) \ + case sizeof(u##n): \ + state.regs[i].value = FRAME_REG(state.regs[i].value, \ + const u##n); \ + break + CASES; +#undef CASE + default: + return -EIO; + } + break; + } + } + for (i = 0; i < ARRAY_SIZE(state.regs); ++i) { + if (REG_INVALID(i)) + continue; + switch(state.regs[i].where) { + case Nowhere: + if (reg_info[i].width != sizeof(UNW_SP(frame)) + || &FRAME_REG(i, __typeof__(UNW_SP(frame))) + != &UNW_SP(frame)) + continue; + UNW_SP(frame) = cfa; + break; + case Register: + switch(reg_info[i].width) { +#define CASE(n) case sizeof(u##n): \ + FRAME_REG(i, 
u##n) = state.regs[i].value; \ + break + CASES; +#undef CASE + default: + return -EIO; + } + break; + case Value: + if (reg_info[i].width != sizeof(unsigned long)) + return -EIO; + FRAME_REG(i, unsigned long) = cfa + state.regs[i].value + * state.dataAlign; + break; + case Memory: { + unsigned long addr = cfa + state.regs[i].value + * state.dataAlign; + + if ((state.regs[i].value * state.dataAlign) + % sizeof(unsigned long) + || addr < startLoc + || addr + sizeof(unsigned long) < addr + || addr + sizeof(unsigned long) > endLoc) + return -EIO; + switch(reg_info[i].width) { +#define CASE(n) case sizeof(u##n): \ + __get_user(FRAME_REG(i, u##n), (u##n *)addr); \ + break + CASES; +#undef CASE + default: + return -EIO; + } + } + break; + } + } + + return 0; +#undef CASES +#undef FRAME_REG +} +EXPORT_SYMBOL(unwind); + +int unwind_init_frame_info(struct unwind_frame_info *info, + struct task_struct *tsk, + /*const*/ struct pt_regs *regs) +{ + info->task = tsk; + arch_unw_init_frame_info(info, regs); + + return 0; +} +EXPORT_SYMBOL(unwind_init_frame_info); + +/* + * Prepare to unwind a blocked task. + */ +int unwind_init_blocked(struct unwind_frame_info *info, + struct task_struct *tsk) +{ + info->task = tsk; + arch_unw_init_blocked(info); + + return 0; +} +EXPORT_SYMBOL(unwind_init_blocked); + +/* + * Prepare to unwind the currently running thread. + */ +int unwind_init_running(struct unwind_frame_info *info, + asmlinkage void (*callback)(struct unwind_frame_info *, + void *arg), + void *arg) +{ + info->task = current; + arch_unwind_init_running(info, callback, arg); + + return 0; +} +EXPORT_SYMBOL(unwind_init_running); + +/* + * Unwind until the return pointer is in user-land (or until an error + * occurs). Returns 0 if successful, negative number in case of + * error. + */ +int unwind_to_user(struct unwind_frame_info *info) +{ + while (!arch_unw_user_mode(info)) { + int err = unwind(info); + + if (err < 0) + return err; + } + + return 0; +} +EXPORT_SYMBOL(unwind_to_user); diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index ccb0c1f..256b3b8 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -188,14 +188,22 @@ config FRAME_POINTER config UNWIND_INFO bool "Compile the kernel with frame unwind information" - depends on !IA64 - depends on !MODULES || !(MIPS || PARISC || PPC || SUPERH || V850) + depends on !IA64 && !PARISC + depends on !MODULES || !(MIPS || PPC || SUPERH || V850) help If you say Y here the resulting kernel image will be slightly larger but not slower, and it will give very useful debugging information. If you don't debug the kernel, you can say N, but we may not be able to solve problems without frame unwind information or frame pointers. +config STACK_UNWIND + bool "Stack unwind support" + depends on UNWIND_INFO + depends on n + help + This enables more precise stack traces, omitting all unrelated + occurrences of pointers into kernel code from the dump. + config FORCED_INLINING bool "Force gcc to inline functions marked 'inline'" depends on DEBUG_KERNEL -- cgit v1.1 From b538ed278b80641ee6d7b75497d5e14de1bb1137 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 26 Jun 2006 13:57:32 +0200 Subject: [PATCH] x86_64: reliable stack trace support (x86-64) These are the x86_64-specific pieces to enable reliable stack traces. The only restriction with this is that it currently cannot unwind across the interrupt->normal stack boundary, as that transition is lacking proper annotation. 
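For reference, the interface these arch patches hook up is driven as in the following minimal sketch, modeled on the show_trace_unwind() helper added to traps.c below. The print_backtrace() name, the header choices and the printk() format are illustrative only, not part of the patch; line-wrapping logic is elided.

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/unwind.h>

/* Walk and print the call chain of a task that trapped with register
 * state 'regs'; stops on an unwind error, a zero PC, or the first
 * user-mode frame. */
static void print_backtrace(struct task_struct *tsk, struct pt_regs *regs)
{
	struct unwind_frame_info info;

	if (unwind_init_frame_info(&info, tsk, regs) != 0)
		return;			/* caller falls back to stack scanning */
	while (unwind(&info) == 0 && UNW_PC(&info)) {
		printk(" [<%016lx>]", UNW_PC(&info));
		if (arch_unw_user_mode(&info))
			break;		/* remaining frames are user-land */
	}
	printk("\n");
}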
Signed-off-by: Jan Beulich Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/entry.S | 33 ++++++++++++ arch/x86_64/kernel/process.c | 2 +- arch/x86_64/kernel/traps.c | 54 +++++++++++++++++--- arch/x86_64/kernel/vmlinux.lds.S | 9 ++++ include/asm-x86_64/proto.h | 2 +- include/asm-x86_64/unwind.h | 106 +++++++++++++++++++++++++++++++++++++++ lib/Kconfig.debug | 2 +- 7 files changed, 199 insertions(+), 9 deletions(-) create mode 100644 include/asm-x86_64/unwind.h diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S index 9999d70..6c68bee 100644 --- a/arch/x86_64/kernel/entry.S +++ b/arch/x86_64/kernel/entry.S @@ -1089,3 +1089,36 @@ ENTRY(call_softirq) ret CFI_ENDPROC ENDPROC(call_softirq) + +#ifdef CONFIG_STACK_UNWIND +ENTRY(arch_unwind_init_running) + CFI_STARTPROC + movq %r15, R15(%rdi) + movq %r14, R14(%rdi) + xchgq %rsi, %rdx + movq %r13, R13(%rdi) + movq %r12, R12(%rdi) + xorl %eax, %eax + movq %rbp, RBP(%rdi) + movq %rbx, RBX(%rdi) + movq (%rsp), %rcx + movq %rax, R11(%rdi) + movq %rax, R10(%rdi) + movq %rax, R9(%rdi) + movq %rax, R8(%rdi) + movq %rax, RAX(%rdi) + movq %rax, RCX(%rdi) + movq %rax, RDX(%rdi) + movq %rax, RSI(%rdi) + movq %rax, RDI(%rdi) + movq %rax, ORIG_RAX(%rdi) + movq %rcx, RIP(%rdi) + leaq 8(%rsp), %rcx + movq $__KERNEL_CS, CS(%rdi) + movq %rax, EFLAGS(%rdi) + movq %rcx, RSP(%rdi) + movq $__KERNEL_DS, SS(%rdi) + jmpq *%rdx + CFI_ENDPROC +ENDPROC(arch_unwind_init_running) +#endif diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c index dcb77b4..d6fa414 100644 --- a/arch/x86_64/kernel/process.c +++ b/arch/x86_64/kernel/process.c @@ -334,7 +334,7 @@ void show_regs(struct pt_regs *regs) { printk("CPU %d:", smp_processor_id()); __show_regs(regs); - show_trace(®s->rsp); + show_trace(NULL, regs, (void *)(regs + 1)); } /* diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c index 9f8f1ef..eb1534f 100644 --- a/arch/x86_64/kernel/traps.c +++ b/arch/x86_64/kernel/traps.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -39,7 +40,7 @@ #include #include #include - +#include #include #include #include @@ -189,6 +190,23 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, return NULL; } +static void show_trace_unwind(struct unwind_frame_info *info, void *context) +{ + int i = 11; + + while (unwind(info) == 0 && UNW_PC(info)) { + if (i > 50) { + printk("\n "); + i = 7; + } else + i += printk(" "); + i += printk_address(UNW_PC(info)); + if (arch_unw_user_mode(info)) + break; + } + printk("\n"); +} + /* * x86-64 can have upto three kernel stacks: * process stack @@ -196,15 +214,34 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack */ -void show_trace(unsigned long *stack) +void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * stack) { const unsigned cpu = safe_smp_processor_id(); unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr; int i; unsigned used = 0; + struct unwind_frame_info info; printk("\nCall Trace:"); + if (!tsk) + tsk = current; + + if (regs) { + if (unwind_init_frame_info(&info, tsk, regs) == 0) { + show_trace_unwind(&info, NULL); + return; + } + } else if (tsk == current) { + if (unwind_init_running(&info, show_trace_unwind, NULL) == 0) + return; + } else { + if (unwind_init_blocked(&info, tsk) == 0) { + show_trace_unwind(&info, NULL); + return; + } + } + #define HANDLE_STACK(cond) \ 
do while (cond) { \ unsigned long addr = *stack++; \ @@ -262,7 +299,7 @@ void show_trace(unsigned long *stack) printk("\n"); } -void show_stack(struct task_struct *tsk, unsigned long * rsp) +static void _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long * rsp) { unsigned long *stack; int i; @@ -296,7 +333,12 @@ void show_stack(struct task_struct *tsk, unsigned long * rsp) printk("%016lx ", *stack++); touch_nmi_watchdog(); } - show_trace((unsigned long *)rsp); + show_trace(tsk, regs, rsp); +} + +void show_stack(struct task_struct *tsk, unsigned long * rsp) +{ + _show_stack(tsk, NULL, rsp); } /* @@ -305,7 +347,7 @@ void show_stack(struct task_struct *tsk, unsigned long * rsp) void dump_stack(void) { unsigned long dummy; - show_trace(&dummy); + show_trace(NULL, NULL, &dummy); } EXPORT_SYMBOL(dump_stack); @@ -332,7 +374,7 @@ void show_registers(struct pt_regs *regs) if (in_kernel) { printk("Stack: "); - show_stack(NULL, (unsigned long*)rsp); + _show_stack(NULL, regs, (unsigned long*)rsp); printk("\nCode: "); if (regs->rip < PAGE_OFFSET) diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S index 5968c24..1c6a5f3 100644 --- a/arch/x86_64/kernel/vmlinux.lds.S +++ b/arch/x86_64/kernel/vmlinux.lds.S @@ -45,6 +45,15 @@ SECTIONS RODATA +#ifdef CONFIG_STACK_UNWIND + . = ALIGN(8); + .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) { + __start_unwind = .; + *(.eh_frame) + __end_unwind = .; + } +#endif + /* Data */ .data : AT(ADDR(.data) - LOAD_OFFSET) { *(.data) diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h index 1064533..105476f 100644 --- a/include/asm-x86_64/proto.h +++ b/include/asm-x86_64/proto.h @@ -75,7 +75,7 @@ extern void main_timer_handler(struct pt_regs *regs); extern unsigned long end_pfn_map; -extern void show_trace(unsigned long * rsp); +extern void show_trace(struct task_struct *, struct pt_regs *, unsigned long * rsp); extern void show_registers(struct pt_regs *regs); extern void exception_table_check(void); diff --git a/include/asm-x86_64/unwind.h b/include/asm-x86_64/unwind.h new file mode 100644 index 0000000..4f61de2 --- /dev/null +++ b/include/asm-x86_64/unwind.h @@ -0,0 +1,106 @@ +#ifndef _ASM_X86_64_UNWIND_H +#define _ASM_X86_64_UNWIND_H + +/* + * Copyright (C) 2002-2006 Novell, Inc. + * Jan Beulich + * This code is released under version 2 of the GNU GPL. + */ + +#ifdef CONFIG_STACK_UNWIND + +#include +#include +#include +#include + +struct unwind_frame_info +{ + struct pt_regs regs; + struct task_struct *task; +}; + +#define UNW_PC(frame) (frame)->regs.rip +#define UNW_SP(frame) (frame)->regs.rsp +#ifdef CONFIG_FRAME_POINTER +#define UNW_FP(frame) (frame)->regs.rbp +#define FRAME_RETADDR_OFFSET 8 +#define FRAME_LINK_OFFSET 0 +#define STACK_BOTTOM(tsk) (((tsk)->thread.rsp0 - 1) & ~(THREAD_SIZE - 1)) +#define STACK_TOP(tsk) ((tsk)->thread.rsp0) +#endif +/* Might need to account for the special exception and interrupt handling + stacks here, since normally + EXCEPTION_STACK_ORDER < THREAD_ORDER < IRQSTACK_ORDER, + but the construct is needed only for getting across the stack switch to + the interrupt stack - thus considering the IRQ stack itself is unnecessary, + and the overhead of comparing against all exception handling stacks seems + not desirable. 
*/ +#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1)) + +#define UNW_REGISTER_INFO \ + PTREGS_INFO(rax), \ + PTREGS_INFO(rdx), \ + PTREGS_INFO(rcx), \ + PTREGS_INFO(rbx), \ + PTREGS_INFO(rsi), \ + PTREGS_INFO(rdi), \ + PTREGS_INFO(rbp), \ + PTREGS_INFO(rsp), \ + PTREGS_INFO(r8), \ + PTREGS_INFO(r9), \ + PTREGS_INFO(r10), \ + PTREGS_INFO(r11), \ + PTREGS_INFO(r12), \ + PTREGS_INFO(r13), \ + PTREGS_INFO(r14), \ + PTREGS_INFO(r15), \ + PTREGS_INFO(rip) + +static inline void arch_unw_init_frame_info(struct unwind_frame_info *info, + /*const*/ struct pt_regs *regs) +{ + info->regs = *regs; +} + +static inline void arch_unw_init_blocked(struct unwind_frame_info *info) +{ + extern const char thread_return[]; + + memset(&info->regs, 0, sizeof(info->regs)); + info->regs.rip = (unsigned long)thread_return; + info->regs.cs = __KERNEL_CS; + __get_user(info->regs.rbp, (unsigned long *)info->task->thread.rsp); + info->regs.rsp = info->task->thread.rsp; + info->regs.ss = __KERNEL_DS; +} + +extern void arch_unwind_init_running(struct unwind_frame_info *, + void (*callback)(struct unwind_frame_info *, + void *arg), + void *arg); + +static inline int arch_unw_user_mode(const struct unwind_frame_info *info) +{ +#if 0 /* This can only work when selector register saves/restores + are properly annotated (and tracked in UNW_REGISTER_INFO). */ + return user_mode(&info->regs); +#else + return (long)info->regs.rip >= 0 + || (info->regs.rip >= VSYSCALL_START && info->regs.rip < VSYSCALL_END) + || (long)info->regs.rsp >= 0; +#endif +} + +#else + +#define UNW_PC(frame) ((void)(frame), 0) + +static inline int arch_unw_user_mode(const void *info) +{ + return 0; +} + +#endif + +#endif /* _ASM_X86_64_UNWIND_H */ diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 256b3b8..bbaed84 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -199,7 +199,7 @@ config UNWIND_INFO config STACK_UNWIND bool "Stack unwind support" depends on UNWIND_INFO - depends on n + depends on X86_64 help This enables more precise stack traces, omitting all unrelated occurrences of pointers into kernel code from the dump. -- cgit v1.1 From 1de9c3f67ec6fddfb85325bab551f4094f55f410 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 26 Jun 2006 13:57:35 +0200 Subject: [PATCH] x86_64: reliable stack trace support (x86-64 IRQ stack Change the switching to/from the IRQ stack so that unwind annotations can be added for it without requiring CFA expressions. AK: I cleaned it up a bit, making it unconditional and removing the obsolete DEBUG_INFO full frame code. Signed-off-by: Jan Beulich Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/entry.S | 33 ++++++++------------------------- 1 file changed, 8 insertions(+), 25 deletions(-) diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S index 6c68bee..4f407ef 100644 --- a/arch/x86_64/kernel/entry.S +++ b/arch/x86_64/kernel/entry.S @@ -473,29 +473,18 @@ END(stub_rt_sigreturn) /* 0(%rsp): interrupt number */ .macro interrupt func cld -#ifdef CONFIG_DEBUG_INFO - SAVE_ALL - movq %rsp,%rdi - /* - * Setup a stack frame pointer. This allows gdb to trace - * back to the original stack. 
- */ - movq %rsp,%rbp - CFI_DEF_CFA_REGISTER rbp -#else SAVE_ARGS leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler -#endif + pushq %rbp + CFI_ADJUST_CFA_OFFSET 8 + CFI_REL_OFFSET rbp, 0 + movq %rsp,%rbp + CFI_DEF_CFA_REGISTER rbp testl $3,CS(%rdi) je 1f swapgs 1: incl %gs:pda_irqcount # RED-PEN should check preempt count - movq %gs:pda_irqstackptr,%rax - cmoveq %rax,%rsp /*todo This needs CFI annotation! */ - pushq %rdi # save old stack -#ifndef CONFIG_DEBUG_INFO - CFI_ADJUST_CFA_OFFSET 8 -#endif + cmoveq %gs:pda_irqstackptr,%rsp call \func .endm @@ -504,17 +493,11 @@ ENTRY(common_interrupt) interrupt do_IRQ /* 0(%rsp): oldrsp-ARGOFFSET */ ret_from_intr: - popq %rdi -#ifndef CONFIG_DEBUG_INFO - CFI_ADJUST_CFA_OFFSET -8 -#endif cli decl %gs:pda_irqcount -#ifdef CONFIG_DEBUG_INFO - movq RBP(%rdi),%rbp + leaveq CFI_DEF_CFA_REGISTER rsp -#endif - leaq ARGOFFSET(%rdi),%rsp /*todo This needs CFI annotation! */ + CFI_ADJUST_CFA_OFFSET -8 exit_intr: GET_THREAD_INFO(%rcx) testl $3,CS-ARGOFFSET(%rsp) -- cgit v1.1 From dffead4e421e289c8434351400d24fd35723e874 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 26 Jun 2006 13:57:38 +0200 Subject: [PATCH] x86_64: reliable stack trace support (x86-64 syscall Adjust the CFA offset for 64- and 32-bit syscall entries so that the five slots pre-subtracted from the stack pointer do not appear to reside outside of the current frame. Signed-off-by: Jan Beulich Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/ia32/ia32entry.S | 2 +- arch/x86_64/kernel/entry.S | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S index d073ca3..222777b 100644 --- a/arch/x86_64/ia32/ia32entry.S +++ b/arch/x86_64/ia32/ia32entry.S @@ -179,7 +179,7 @@ ENDPROC(ia32_sysenter_target) */ ENTRY(ia32_cstar_target) CFI_STARTPROC32 simple - CFI_DEF_CFA rsp,0 + CFI_DEF_CFA rsp,PDA_STACKOFFSET CFI_REGISTER rip,rcx /*CFI_REGISTER rflags,r11*/ swapgs diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S index 4f407ef..7290e72 100644 --- a/arch/x86_64/kernel/entry.S +++ b/arch/x86_64/kernel/entry.S @@ -189,7 +189,7 @@ END(ret_from_fork) ENTRY(system_call) CFI_STARTPROC simple - CFI_DEF_CFA rsp,0 + CFI_DEF_CFA rsp,PDA_STACKOFFSET CFI_REGISTER rip,rcx /*CFI_REGISTER rflags,r11*/ swapgs -- cgit v1.1 From 176a2718f408ce92788b29127050b04dfd6e4f68 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 26 Jun 2006 13:57:41 +0200 Subject: [PATCH] i386: reliable stack trace support (i386) These are the i386-specific pieces to enable reliable stack traces. This is going to be even more useful once CFI annotations get added to the assembly code, namely to entry.S.
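The i386 glue mirrors the x86-64 version: the UNW_REGISTER_INFO list added below in include/asm-i386/unwind.h tells the arch-independent unwinder where each DWARF-numbered register lives inside struct unwind_frame_info. A simplified sketch of the consuming side, consistent with the FRAME_REG() accessor in unwind() earlier; the exact reg_info definition lives in kernel/unwind.c and is not part of this excerpt:

#include <linux/stddef.h>	/* offsetof() */
#include <asm/unwind.h>		/* UNW_REGISTER_INFO, unwind_frame_info */

/* Each PTREGS_INFO(reg) entry becomes an { offset, width } pair;
 * FRAME_REG(r, t) in unwind() then reads DWARF register r as
 * ((t *)frame)[reg_info[r].offs]. */
#define PTREGS_INFO(f) {						\
	offsetof(struct unwind_frame_info, regs.f) / sizeof(long),	\
	sizeof(((struct pt_regs *)0)->f)				\
}

static const struct {
	unsigned offs;		/* slot index within the frame info */
	unsigned width;		/* register width in bytes */
} reg_info[] = {
	UNW_REGISTER_INFO	/* eax, ecx, edx, ebx, esp, ebp, esi, edi, eip */
};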
Signed-off-by: Jan Beulich Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/kernel/entry.S | 29 +++++++++++++ arch/i386/kernel/process.c | 2 +- arch/i386/kernel/traps.c | 50 ++++++++++++++++----- arch/i386/kernel/vmlinux.lds.S | 9 ++++ include/asm-i386/processor.h | 2 +- include/asm-i386/unwind.h | 98 ++++++++++++++++++++++++++++++++++++++++++ lib/Kconfig.debug | 2 +- 7 files changed, 179 insertions(+), 13 deletions(-) create mode 100644 include/asm-i386/unwind.h diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S index cfc683f..e802f3c 100644 --- a/arch/i386/kernel/entry.S +++ b/arch/i386/kernel/entry.S @@ -663,6 +663,35 @@ ENTRY(spurious_interrupt_bug) pushl $do_spurious_interrupt_bug jmp error_code +#ifdef CONFIG_STACK_UNWIND +ENTRY(arch_unwind_init_running) + movl 4(%esp), %edx + movl (%esp), %ecx + leal 4(%esp), %eax + movl %ebx, EBX(%edx) + xorl %ebx, %ebx + movl %ebx, ECX(%edx) + movl %ebx, EDX(%edx) + movl %esi, ESI(%edx) + movl %edi, EDI(%edx) + movl %ebp, EBP(%edx) + movl %ebx, EAX(%edx) + movl $__USER_DS, DS(%edx) + movl $__USER_DS, ES(%edx) + movl %ebx, ORIG_EAX(%edx) + movl %ecx, EIP(%edx) + movl 12(%esp), %ecx + movl $__KERNEL_CS, CS(%edx) + movl %ebx, EFLAGS(%edx) + movl %eax, OLDESP(%edx) + movl 8(%esp), %eax + movl %ecx, 8(%esp) + movl EBX(%edx), %ebx + movl $__KERNEL_DS, OLDSS(%edx) + jmpl *%eax +ENDPROC(arch_unwind_init_running) +#endif + .section .rodata,"a" #include "syscall_table.S" diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c index 6259afe..525432e 100644 --- a/arch/i386/kernel/process.c +++ b/arch/i386/kernel/process.c @@ -312,7 +312,7 @@ void show_regs(struct pt_regs * regs) cr3 = read_cr3(); cr4 = read_cr4_safe(); printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4); - show_trace(NULL, ®s->esp); + show_trace(NULL, regs, ®s->esp); } /* diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c index dcc1447..2865846 100644 --- a/arch/i386/kernel/traps.c +++ b/arch/i386/kernel/traps.c @@ -28,6 +28,7 @@ #include #include #include +#include #ifdef CONFIG_EISA #include @@ -47,7 +48,7 @@ #include #include #include - +#include #include #include #include @@ -170,14 +171,43 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo, return ebp; } -static void show_trace_log_lvl(struct task_struct *task, +static asmlinkage void show_trace_unwind(struct unwind_frame_info *info, void *log_lvl) +{ + int printed = 0; /* nr of entries already printed on current line */ + + while (unwind(info) == 0 && UNW_PC(info)) { + printed = print_addr_and_symbol(UNW_PC(info), log_lvl, printed); + if (arch_unw_user_mode(info)) + break; + } + if (printed) + printk("\n"); +} + +static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, char *log_lvl) { unsigned long ebp; + struct unwind_frame_info info; if (!task) task = current; + if (regs) { + if (unwind_init_frame_info(&info, task, regs) == 0) { + show_trace_unwind(&info, log_lvl); + return; + } + } else if (task == current) { + if (unwind_init_running(&info, show_trace_unwind, log_lvl) == 0) + return; + } else { + if (unwind_init_blocked(&info, task) == 0) { + show_trace_unwind(&info, log_lvl); + return; + } + } + if (task == current) { /* Grab ebp right from our regs */ asm ("movl %%ebp, %0" : "=r" (ebp) : ); @@ -198,13 +228,13 @@ static void show_trace_log_lvl(struct task_struct *task, } } -void show_trace(struct task_struct *task, unsigned long * stack) +void show_trace(struct task_struct 
*task, struct pt_regs *regs, unsigned long * stack) { - show_trace_log_lvl(task, stack, ""); + show_trace_log_lvl(task, regs, stack, ""); } -static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp, - char *log_lvl) +static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, + unsigned long *esp, char *log_lvl) { unsigned long *stack; int i; @@ -225,13 +255,13 @@ static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp, printk("%08lx ", *stack++); } printk("\n%sCall Trace:\n", log_lvl); - show_trace_log_lvl(task, esp, log_lvl); + show_trace_log_lvl(task, regs, esp, log_lvl); } void show_stack(struct task_struct *task, unsigned long *esp) { printk(" "); - show_stack_log_lvl(task, esp, ""); + show_stack_log_lvl(task, NULL, esp, ""); } /* @@ -241,7 +271,7 @@ void dump_stack(void) { unsigned long stack; - show_trace(current, &stack); + show_trace(current, NULL, &stack); } EXPORT_SYMBOL(dump_stack); @@ -285,7 +315,7 @@ void show_registers(struct pt_regs *regs) u8 __user *eip; printk("\n" KERN_EMERG "Stack: "); - show_stack_log_lvl(NULL, (unsigned long *)esp, KERN_EMERG); + show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG); printk(KERN_EMERG "Code: "); diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S index 7512f39..2d4f138 100644 --- a/arch/i386/kernel/vmlinux.lds.S +++ b/arch/i386/kernel/vmlinux.lds.S @@ -71,6 +71,15 @@ SECTIONS .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) { *(.data.read_mostly) } _edata = .; /* End of data section */ +#ifdef CONFIG_STACK_UNWIND + . = ALIGN(4); + .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) { + __start_unwind = .; + *(.eh_frame) + __end_unwind = .; + } +#endif + . = ALIGN(THREAD_SIZE); /* init_task */ .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) { *(.data.init_task) diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h index b796210..55ea992 100644 --- a/include/asm-i386/processor.h +++ b/include/asm-i386/processor.h @@ -555,7 +555,7 @@ extern void prepare_to_copy(struct task_struct *tsk); extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); extern unsigned long thread_saved_pc(struct task_struct *tsk); -void show_trace(struct task_struct *task, unsigned long *stack); +void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack); unsigned long get_wchan(struct task_struct *p); diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h new file mode 100644 index 0000000..1c07689 --- /dev/null +++ b/include/asm-i386/unwind.h @@ -0,0 +1,98 @@ +#ifndef _ASM_I386_UNWIND_H +#define _ASM_I386_UNWIND_H + +/* + * Copyright (C) 2002-2006 Novell, Inc. + * Jan Beulich + * This code is released under version 2 of the GNU GPL. 
+ */ + +#ifdef CONFIG_STACK_UNWIND + +#include +#include +#include +#include + +struct unwind_frame_info +{ + struct pt_regs regs; + struct task_struct *task; +}; + +#define UNW_PC(frame) (frame)->regs.eip +#define UNW_SP(frame) (frame)->regs.esp +#ifdef CONFIG_FRAME_POINTER +#define UNW_FP(frame) (frame)->regs.ebp +#define FRAME_RETADDR_OFFSET 4 +#define FRAME_LINK_OFFSET 0 +#define STACK_BOTTOM(tsk) STACK_LIMIT((tsk)->thread.esp0) +#define STACK_TOP(tsk) ((tsk)->thread.esp0) +#endif +#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1)) + +#define UNW_REGISTER_INFO \ + PTREGS_INFO(eax), \ + PTREGS_INFO(ecx), \ + PTREGS_INFO(edx), \ + PTREGS_INFO(ebx), \ + PTREGS_INFO(esp), \ + PTREGS_INFO(ebp), \ + PTREGS_INFO(esi), \ + PTREGS_INFO(edi), \ + PTREGS_INFO(eip) + +static inline void arch_unw_init_frame_info(struct unwind_frame_info *info, + /*const*/ struct pt_regs *regs) +{ + if (user_mode_vm(regs)) + info->regs = *regs; + else { + memcpy(&info->regs, regs, offsetof(struct pt_regs, esp)); + info->regs.esp = (unsigned long)®s->esp; + info->regs.xss = __KERNEL_DS; + } +} + +static inline void arch_unw_init_blocked(struct unwind_frame_info *info) +{ + memset(&info->regs, 0, sizeof(info->regs)); + info->regs.eip = info->task->thread.eip; + info->regs.xcs = __KERNEL_CS; + __get_user(info->regs.ebp, (long *)info->task->thread.esp); + info->regs.esp = info->task->thread.esp; + info->regs.xss = __KERNEL_DS; + info->regs.xds = __USER_DS; + info->regs.xes = __USER_DS; +} + +extern asmlinkage void arch_unwind_init_running(struct unwind_frame_info *, + asmlinkage void (*callback)(struct unwind_frame_info *, + void *arg), + void *arg); + +static inline int arch_unw_user_mode(const struct unwind_frame_info *info) +{ +#if 0 /* This can only work when selector register and EFLAGS saves/restores + are properly annotated (and tracked in UNW_REGISTER_INFO). */ + return user_mode_vm(&info->regs); +#else + return info->regs.eip < PAGE_OFFSET + || (info->regs.eip >= __fix_to_virt(FIX_VSYSCALL) + && info->regs.eip < __fix_to_virt(FIX_VSYSCALL) + PAGE_SIZE) + || info->regs.esp < PAGE_OFFSET; +#endif +} + +#else + +#define UNW_PC(frame) ((void)(frame), 0) + +static inline int arch_unw_user_mode(const void *info) +{ + return 0; +} + +#endif + +#endif /* _ASM_I386_UNWIND_H */ diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index bbaed84..8bab010 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -199,7 +199,7 @@ config UNWIND_INFO config STACK_UNWIND bool "Stack unwind support" depends on UNWIND_INFO - depends on X86_64 + depends on X86 help This enables more precise stack traces, omitting all unrelated occurrences of pointers into kernel code from the dump. -- cgit v1.1 From fe7cacc1c25e286872b878c5d46880b620cd1e2d Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 26 Jun 2006 13:57:44 +0200 Subject: [PATCH] i386: reliable stack trace support i386 entry.S To increase the usefulness of reliable stack unwinding, this adds CFI unwind annotations to many low-level i386 routines. 
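A way to read the RING0_INT_FRAME arithmetic used throughout this diff (a C sanity sketch, not code from the patch): on a ring-0 exception without an error code the CPU pushes eflags, cs and eip, so the canonical frame address, i.e. the pre-exception %esp, sits 3*4 bytes above the handler's %esp; that is exactly what CFI_DEF_CFA esp, 3*4 plus CFI_OFFSET eip, -3*4 record, and RING0_EC_FRAME merely adds a fourth slot for the error code.

/* Stack layout assumed by RING0_INT_FRAME, lowest address first;
 * %esp points at 'eip' on entry to the handler stub, and the CFA is
 * the %esp value from before the exception. */
struct ring0_int_frame {
	unsigned long eip;	/* CFA - 3*4: CFI_OFFSET eip, -3*4 */
	unsigned long cs;	/* CFA - 2*4: selector, not tracked yet */
	unsigned long eflags;	/* CFA - 1*4 */
};				/* 12 bytes, hence CFI_DEF_CFA esp, 3*4 */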
Signed-off-by: Jan Beulich Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/kernel/entry.S | 234 +++++++++++++++++++++++++++++++++++++++++++--- include/asm-i386/dwarf2.h | 54 +++++++++++ 2 files changed, 277 insertions(+), 11 deletions(-) create mode 100644 include/asm-i386/dwarf2.h diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S index e802f3c..e6e4506 100644 --- a/arch/i386/kernel/entry.S +++ b/arch/i386/kernel/entry.S @@ -48,6 +48,7 @@ #include #include #include +#include #include "irq_vectors.h" #define nr_syscalls ((syscall_table_size)/4) @@ -85,31 +86,67 @@ VM_MASK = 0x00020000 #define SAVE_ALL \ cld; \ pushl %es; \ + CFI_ADJUST_CFA_OFFSET 4;\ + /*CFI_REL_OFFSET es, 0;*/\ pushl %ds; \ + CFI_ADJUST_CFA_OFFSET 4;\ + /*CFI_REL_OFFSET ds, 0;*/\ pushl %eax; \ + CFI_ADJUST_CFA_OFFSET 4;\ + CFI_REL_OFFSET eax, 0;\ pushl %ebp; \ + CFI_ADJUST_CFA_OFFSET 4;\ + CFI_REL_OFFSET ebp, 0;\ pushl %edi; \ + CFI_ADJUST_CFA_OFFSET 4;\ + CFI_REL_OFFSET edi, 0;\ pushl %esi; \ + CFI_ADJUST_CFA_OFFSET 4;\ + CFI_REL_OFFSET esi, 0;\ pushl %edx; \ + CFI_ADJUST_CFA_OFFSET 4;\ + CFI_REL_OFFSET edx, 0;\ pushl %ecx; \ + CFI_ADJUST_CFA_OFFSET 4;\ + CFI_REL_OFFSET ecx, 0;\ pushl %ebx; \ + CFI_ADJUST_CFA_OFFSET 4;\ + CFI_REL_OFFSET ebx, 0;\ movl $(__USER_DS), %edx; \ movl %edx, %ds; \ movl %edx, %es; #define RESTORE_INT_REGS \ popl %ebx; \ + CFI_ADJUST_CFA_OFFSET -4;\ + CFI_RESTORE ebx;\ popl %ecx; \ + CFI_ADJUST_CFA_OFFSET -4;\ + CFI_RESTORE ecx;\ popl %edx; \ + CFI_ADJUST_CFA_OFFSET -4;\ + CFI_RESTORE edx;\ popl %esi; \ + CFI_ADJUST_CFA_OFFSET -4;\ + CFI_RESTORE esi;\ popl %edi; \ + CFI_ADJUST_CFA_OFFSET -4;\ + CFI_RESTORE edi;\ popl %ebp; \ - popl %eax + CFI_ADJUST_CFA_OFFSET -4;\ + CFI_RESTORE ebp;\ + popl %eax; \ + CFI_ADJUST_CFA_OFFSET -4;\ + CFI_RESTORE eax #define RESTORE_REGS \ RESTORE_INT_REGS; \ 1: popl %ds; \ + CFI_ADJUST_CFA_OFFSET -4;\ + /*CFI_RESTORE ds;*/\ 2: popl %es; \ + CFI_ADJUST_CFA_OFFSET -4;\ + /*CFI_RESTORE es;*/\ .section .fixup,"ax"; \ 3: movl $0,(%esp); \ jmp 1b; \ @@ -122,13 +159,43 @@ VM_MASK = 0x00020000 .long 2b,4b; \ .previous +#define RING0_INT_FRAME \ + CFI_STARTPROC simple;\ + CFI_DEF_CFA esp, 3*4;\ + /*CFI_OFFSET cs, -2*4;*/\ + CFI_OFFSET eip, -3*4 + +#define RING0_EC_FRAME \ + CFI_STARTPROC simple;\ + CFI_DEF_CFA esp, 4*4;\ + /*CFI_OFFSET cs, -2*4;*/\ + CFI_OFFSET eip, -3*4 + +#define RING0_PTREGS_FRAME \ + CFI_STARTPROC simple;\ + CFI_DEF_CFA esp, OLDESP-EBX;\ + /*CFI_OFFSET cs, CS-OLDESP;*/\ + CFI_OFFSET eip, EIP-OLDESP;\ + /*CFI_OFFSET es, ES-OLDESP;*/\ + /*CFI_OFFSET ds, DS-OLDESP;*/\ + CFI_OFFSET eax, EAX-OLDESP;\ + CFI_OFFSET ebp, EBP-OLDESP;\ + CFI_OFFSET edi, EDI-OLDESP;\ + CFI_OFFSET esi, ESI-OLDESP;\ + CFI_OFFSET edx, EDX-OLDESP;\ + CFI_OFFSET ecx, ECX-OLDESP;\ + CFI_OFFSET ebx, EBX-OLDESP ENTRY(ret_from_fork) + CFI_STARTPROC pushl %eax + CFI_ADJUST_CFA_OFFSET -4 call schedule_tail GET_THREAD_INFO(%ebp) popl %eax + CFI_ADJUST_CFA_OFFSET -4 jmp syscall_exit + CFI_ENDPROC /* * Return to user mode is not as complex as all this looks, @@ -139,6 +206,7 @@ ENTRY(ret_from_fork) # userspace resumption stub bypassing syscall exit tracing ALIGN + RING0_PTREGS_FRAME ret_from_exception: preempt_stop ret_from_intr: @@ -171,20 +239,33 @@ need_resched: call preempt_schedule_irq jmp need_resched #endif + CFI_ENDPROC /* SYSENTER_RETURN points to after the "sysenter" instruction in the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. 
*/ # sysenter call handler stub ENTRY(sysenter_entry) + CFI_STARTPROC simple + CFI_DEF_CFA esp, 0 + CFI_REGISTER esp, ebp movl TSS_sysenter_esp0(%esp),%esp sysenter_past_esp: sti pushl $(__USER_DS) + CFI_ADJUST_CFA_OFFSET 4 + /*CFI_REL_OFFSET ss, 0*/ pushl %ebp + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET esp, 0 pushfl + CFI_ADJUST_CFA_OFFSET 4 pushl $(__USER_CS) + CFI_ADJUST_CFA_OFFSET 4 + /*CFI_REL_OFFSET cs, 0*/ pushl $SYSENTER_RETURN + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET eip, 0 /* * Load the potential sixth argument from user stack. @@ -199,6 +280,7 @@ sysenter_past_esp: .previous pushl %eax + CFI_ADJUST_CFA_OFFSET 4 SAVE_ALL GET_THREAD_INFO(%ebp) @@ -219,11 +301,14 @@ sysenter_past_esp: xorl %ebp,%ebp sti sysexit + CFI_ENDPROC # system call handler stub ENTRY(system_call) + RING0_INT_FRAME # can't unwind into user space anyway pushl %eax # save orig_eax + CFI_ADJUST_CFA_OFFSET 4 SAVE_ALL GET_THREAD_INFO(%ebp) testl $TF_MASK,EFLAGS(%esp) @@ -256,10 +341,12 @@ restore_all: movb CS(%esp), %al andl $(VM_MASK | (4 << 8) | 3), %eax cmpl $((4 << 8) | 3), %eax + CFI_REMEMBER_STATE je ldt_ss # returning to user-space with LDT SS restore_nocheck: RESTORE_REGS addl $4, %esp + CFI_ADJUST_CFA_OFFSET -4 1: iret .section .fixup,"ax" iret_exc: @@ -273,6 +360,7 @@ iret_exc: .long 1b,iret_exc .previous + CFI_RESTORE_STATE ldt_ss: larl OLDSS(%esp), %eax jnz restore_nocheck @@ -285,11 +373,13 @@ ldt_ss: * CPUs, which we can try to work around to make * dosemu and wine happy. */ subl $8, %esp # reserve space for switch16 pointer + CFI_ADJUST_CFA_OFFSET 8 cli movl %esp, %eax /* Set up the 16bit stack frame with switch32 pointer on top, * and a switch16 pointer on top of the current frame. */ call setup_x86_bogus_stack + CFI_ADJUST_CFA_OFFSET -8 # frame has moved RESTORE_REGS lss 20+4(%esp), %esp # switch to 16bit stack 1: iret @@ -297,9 +387,11 @@ ldt_ss: .align 4 .long 1b,iret_exc .previous + CFI_ENDPROC # perform work that needs to be done immediately before resumption ALIGN + RING0_PTREGS_FRAME # can't unwind into user space anyway work_pending: testb $_TIF_NEED_RESCHED, %cl jz work_notifysig @@ -329,8 +421,10 @@ work_notifysig: # deal with pending signals and work_notifysig_v86: #ifdef CONFIG_VM86 pushl %ecx # save ti_flags for do_notify_resume + CFI_ADJUST_CFA_OFFSET 4 call save_v86_state # %eax contains pt_regs pointer popl %ecx + CFI_ADJUST_CFA_OFFSET -4 movl %eax, %esp xorl %edx, %edx call do_notify_resume @@ -363,19 +457,21 @@ syscall_exit_work: movl $1, %edx call do_syscall_trace jmp resume_userspace + CFI_ENDPROC - ALIGN + RING0_INT_FRAME # can't unwind into user space anyway syscall_fault: pushl %eax # save orig_eax + CFI_ADJUST_CFA_OFFSET 4 SAVE_ALL GET_THREAD_INFO(%ebp) movl $-EFAULT,EAX(%esp) jmp resume_userspace - ALIGN syscall_badsys: movl $-ENOSYS,EAX(%esp) jmp resume_userspace + CFI_ENDPROC #define FIXUP_ESPFIX_STACK \ movl %esp, %eax; \ @@ -387,16 +483,21 @@ syscall_badsys: movl %eax, %esp; #define UNWIND_ESPFIX_STACK \ pushl %eax; \ + CFI_ADJUST_CFA_OFFSET 4; \ movl %ss, %eax; \ /* see if on 16bit stack */ \ cmpw $__ESPFIX_SS, %ax; \ - jne 28f; \ - movl $__KERNEL_DS, %edx; \ - movl %edx, %ds; \ - movl %edx, %es; \ + je 28f; \ +27: popl %eax; \ + CFI_ADJUST_CFA_OFFSET -4; \ +.section .fixup,"ax"; \ +28: movl $__KERNEL_DS, %eax; \ + movl %eax, %ds; \ + movl %eax, %es; \ /* switch to 32bit stack */ \ - FIXUP_ESPFIX_STACK \ -28: popl %eax; + FIXUP_ESPFIX_STACK; \ + jmp 27b; \ +.previous /* * Build the entry stubs and pointer table with @@ -408,9 +509,14 @@ ENTRY(interrupt) vector=0 
ENTRY(irq_entries_start) + RING0_INT_FRAME .rept NR_IRQS ALIGN + .if vector + CFI_ADJUST_CFA_OFFSET -4 + .endif 1: pushl $vector-256 + CFI_ADJUST_CFA_OFFSET 4 jmp common_interrupt .data .long 1b @@ -424,60 +530,99 @@ common_interrupt: movl %esp,%eax call do_IRQ jmp ret_from_intr + CFI_ENDPROC #define BUILD_INTERRUPT(name, nr) \ ENTRY(name) \ + RING0_INT_FRAME; \ pushl $nr-256; \ - SAVE_ALL \ + CFI_ADJUST_CFA_OFFSET 4; \ + SAVE_ALL; \ movl %esp,%eax; \ call smp_/**/name; \ - jmp ret_from_intr; + jmp ret_from_intr; \ + CFI_ENDPROC /* The include is where all of the SMP etc. interrupts come from */ #include "entry_arch.h" ENTRY(divide_error) + RING0_INT_FRAME pushl $0 # no error code + CFI_ADJUST_CFA_OFFSET 4 pushl $do_divide_error + CFI_ADJUST_CFA_OFFSET 4 ALIGN error_code: pushl %ds + CFI_ADJUST_CFA_OFFSET 4 + /*CFI_REL_OFFSET ds, 0*/ pushl %eax + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET eax, 0 xorl %eax, %eax pushl %ebp + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET ebp, 0 pushl %edi + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET edi, 0 pushl %esi + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET esi, 0 pushl %edx + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET edx, 0 decl %eax # eax = -1 pushl %ecx + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET ecx, 0 pushl %ebx + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET ebx, 0 cld pushl %es + CFI_ADJUST_CFA_OFFSET 4 + /*CFI_REL_OFFSET es, 0*/ UNWIND_ESPFIX_STACK popl %ecx + CFI_ADJUST_CFA_OFFSET -4 + /*CFI_REGISTER es, ecx*/ movl ES(%esp), %edi # get the function address movl ORIG_EAX(%esp), %edx # get the error code movl %eax, ORIG_EAX(%esp) movl %ecx, ES(%esp) + /*CFI_REL_OFFSET es, ES*/ movl $(__USER_DS), %ecx movl %ecx, %ds movl %ecx, %es movl %esp,%eax # pt_regs pointer call *%edi jmp ret_from_exception + CFI_ENDPROC ENTRY(coprocessor_error) + RING0_INT_FRAME pushl $0 + CFI_ADJUST_CFA_OFFSET 4 pushl $do_coprocessor_error + CFI_ADJUST_CFA_OFFSET 4 jmp error_code + CFI_ENDPROC ENTRY(simd_coprocessor_error) + RING0_INT_FRAME pushl $0 + CFI_ADJUST_CFA_OFFSET 4 pushl $do_simd_coprocessor_error + CFI_ADJUST_CFA_OFFSET 4 jmp error_code + CFI_ENDPROC ENTRY(device_not_available) + RING0_INT_FRAME pushl $-1 # mark this as an int + CFI_ADJUST_CFA_OFFSET 4 SAVE_ALL movl %cr0, %eax testl $0x4, %eax # EM (math emulation bit) @@ -487,9 +632,12 @@ ENTRY(device_not_available) jmp ret_from_exception device_not_available_emulate: pushl $0 # temporary storage for ORIG_EIP + CFI_ADJUST_CFA_OFFSET 4 call math_emulate addl $4, %esp + CFI_ADJUST_CFA_OFFSET -4 jmp ret_from_exception + CFI_ENDPROC /* * Debug traps and NMI can happen at the one SYSENTER instruction @@ -514,16 +662,19 @@ label: \ pushl $sysenter_past_esp KPROBE_ENTRY(debug) + RING0_INT_FRAME cmpl $sysenter_entry,(%esp) jne debug_stack_correct FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn) debug_stack_correct: pushl $-1 # mark this as an int + CFI_ADJUST_CFA_OFFSET 4 SAVE_ALL xorl %edx,%edx # error code 0 movl %esp,%eax # pt_regs pointer call do_debug jmp ret_from_exception + CFI_ENDPROC .previous .text /* * NMI is doubly nasty. It can happen _while_ we're handling @@ -534,14 +685,18 @@ debug_stack_correct: * fault happened on the sysenter path. */ ENTRY(nmi) + RING0_INT_FRAME pushl %eax + CFI_ADJUST_CFA_OFFSET 4 movl %ss, %eax cmpw $__ESPFIX_SS, %ax popl %eax + CFI_ADJUST_CFA_OFFSET -4 je nmi_16bit_stack cmpl $sysenter_entry,(%esp) je nmi_stack_fixup pushl %eax + CFI_ADJUST_CFA_OFFSET 4 movl %esp,%eax /* Do not access memory above the end of our stack page, * it might not exist. 
@@ -549,16 +704,19 @@ ENTRY(nmi) andl $(THREAD_SIZE-1),%eax cmpl $(THREAD_SIZE-20),%eax popl %eax + CFI_ADJUST_CFA_OFFSET -4 jae nmi_stack_correct cmpl $sysenter_entry,12(%esp) je nmi_debug_stack_check nmi_stack_correct: pushl %eax + CFI_ADJUST_CFA_OFFSET 4 SAVE_ALL xorl %edx,%edx # zero error code movl %esp,%eax # pt_regs pointer call do_nmi jmp restore_all + CFI_ENDPROC nmi_stack_fixup: FIX_STACK(12,nmi_stack_correct, 1) @@ -574,97 +732,150 @@ nmi_debug_stack_check: jmp nmi_stack_correct nmi_16bit_stack: + RING0_INT_FRAME /* create the pointer to lss back */ pushl %ss + CFI_ADJUST_CFA_OFFSET 4 pushl %esp + CFI_ADJUST_CFA_OFFSET 4 movzwl %sp, %esp addw $4, (%esp) /* copy the iret frame of 12 bytes */ .rept 3 pushl 16(%esp) + CFI_ADJUST_CFA_OFFSET 4 .endr pushl %eax + CFI_ADJUST_CFA_OFFSET 4 SAVE_ALL FIXUP_ESPFIX_STACK # %eax == %esp + CFI_ADJUST_CFA_OFFSET -20 # the frame has now moved xorl %edx,%edx # zero error code call do_nmi RESTORE_REGS lss 12+4(%esp), %esp # back to 16bit stack 1: iret + CFI_ENDPROC .section __ex_table,"a" .align 4 .long 1b,iret_exc .previous KPROBE_ENTRY(int3) + RING0_INT_FRAME pushl $-1 # mark this as an int + CFI_ADJUST_CFA_OFFSET 4 SAVE_ALL xorl %edx,%edx # zero error code movl %esp,%eax # pt_regs pointer call do_int3 jmp ret_from_exception + CFI_ENDPROC .previous .text ENTRY(overflow) + RING0_INT_FRAME pushl $0 + CFI_ADJUST_CFA_OFFSET 4 pushl $do_overflow + CFI_ADJUST_CFA_OFFSET 4 jmp error_code + CFI_ENDPROC ENTRY(bounds) + RING0_INT_FRAME pushl $0 + CFI_ADJUST_CFA_OFFSET 4 pushl $do_bounds + CFI_ADJUST_CFA_OFFSET 4 jmp error_code + CFI_ENDPROC ENTRY(invalid_op) + RING0_INT_FRAME pushl $0 + CFI_ADJUST_CFA_OFFSET 4 pushl $do_invalid_op + CFI_ADJUST_CFA_OFFSET 4 jmp error_code + CFI_ENDPROC ENTRY(coprocessor_segment_overrun) + RING0_INT_FRAME pushl $0 + CFI_ADJUST_CFA_OFFSET 4 pushl $do_coprocessor_segment_overrun + CFI_ADJUST_CFA_OFFSET 4 jmp error_code + CFI_ENDPROC ENTRY(invalid_TSS) + RING0_EC_FRAME pushl $do_invalid_TSS + CFI_ADJUST_CFA_OFFSET 4 jmp error_code + CFI_ENDPROC ENTRY(segment_not_present) + RING0_EC_FRAME pushl $do_segment_not_present + CFI_ADJUST_CFA_OFFSET 4 jmp error_code + CFI_ENDPROC ENTRY(stack_segment) + RING0_EC_FRAME pushl $do_stack_segment + CFI_ADJUST_CFA_OFFSET 4 jmp error_code + CFI_ENDPROC KPROBE_ENTRY(general_protection) + RING0_EC_FRAME pushl $do_general_protection + CFI_ADJUST_CFA_OFFSET 4 jmp error_code + CFI_ENDPROC .previous .text ENTRY(alignment_check) + RING0_EC_FRAME pushl $do_alignment_check + CFI_ADJUST_CFA_OFFSET 4 jmp error_code + CFI_ENDPROC KPROBE_ENTRY(page_fault) + RING0_EC_FRAME pushl $do_page_fault + CFI_ADJUST_CFA_OFFSET 4 jmp error_code + CFI_ENDPROC .previous .text #ifdef CONFIG_X86_MCE ENTRY(machine_check) + RING0_INT_FRAME pushl $0 + CFI_ADJUST_CFA_OFFSET 4 pushl machine_check_vector + CFI_ADJUST_CFA_OFFSET 4 jmp error_code + CFI_ENDPROC #endif ENTRY(spurious_interrupt_bug) + RING0_INT_FRAME pushl $0 + CFI_ADJUST_CFA_OFFSET 4 pushl $do_spurious_interrupt_bug + CFI_ADJUST_CFA_OFFSET 4 jmp error_code + CFI_ENDPROC #ifdef CONFIG_STACK_UNWIND ENTRY(arch_unwind_init_running) + CFI_STARTPROC movl 4(%esp), %edx movl (%esp), %ecx leal 4(%esp), %eax @@ -689,6 +900,7 @@ ENTRY(arch_unwind_init_running) movl EBX(%edx), %ebx movl $__KERNEL_DS, OLDSS(%edx) jmpl *%eax + CFI_ENDPROC ENDPROC(arch_unwind_init_running) #endif diff --git a/include/asm-i386/dwarf2.h b/include/asm-i386/dwarf2.h new file mode 100644 index 0000000..2280f62 --- /dev/null +++ b/include/asm-i386/dwarf2.h @@ -0,0 +1,54 @@ +#ifndef _DWARF2_H +#define 
_DWARF2_H + +#include + +#ifndef __ASSEMBLY__ +#warning "asm/dwarf2.h should be only included in pure assembly files" +#endif + +/* + Macros for dwarf2 CFI unwind table entries. + See "as.info" for details on these pseudo ops. Unfortunately + they are only supported in very new binutils, so define them + away for older versions. + */ + +#ifdef CONFIG_UNWIND_INFO + +#define CFI_STARTPROC .cfi_startproc +#define CFI_ENDPROC .cfi_endproc +#define CFI_DEF_CFA .cfi_def_cfa +#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register +#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset +#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset +#define CFI_OFFSET .cfi_offset +#define CFI_REL_OFFSET .cfi_rel_offset +#define CFI_REGISTER .cfi_register +#define CFI_RESTORE .cfi_restore +#define CFI_REMEMBER_STATE .cfi_remember_state +#define CFI_RESTORE_STATE .cfi_restore_state + +#else + +/* Due to the structure of pre-existing code, don't use assembler line + comment character # to ignore the arguments. Instead, use a dummy macro. */ +.macro ignore a=0, b=0, c=0, d=0 +.endm + +#define CFI_STARTPROC ignore +#define CFI_ENDPROC ignore +#define CFI_DEF_CFA ignore +#define CFI_DEF_CFA_REGISTER ignore +#define CFI_DEF_CFA_OFFSET ignore +#define CFI_ADJUST_CFA_OFFSET ignore +#define CFI_OFFSET ignore +#define CFI_REL_OFFSET ignore +#define CFI_REGISTER ignore +#define CFI_RESTORE ignore +#define CFI_REMEMBER_STATE ignore +#define CFI_RESTORE_STATE ignore + +#endif + +#endif -- cgit v1.1 From c33bd9aac0597eeedaaa01ea5aafe456894b2f2b Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 26 Jun 2006 13:57:47 +0200 Subject: [PATCH] i386/x86-64: fall back to old-style call trace if no unwinding If no unwinding is possible at all for a certain exception instance, fall back to the old style call trace instead of not showing any trace at all. Also, allow setting the stack trace mode at the command line.
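Besides the fallback itself, the call_trace_setup() handlers added below let the trace flavour be chosen at boot. The boot-loader line is only an example; the parameter semantics follow directly from the show_trace() / show_trace_log_lvl() changes in this patch:

# call_trace=old    legacy address-scanning trace only
# call_trace=both   exact unwinder trace, then the legacy trace
# call_trace=new    exact unwinder trace, legacy only if it yields
#                   nothing (the default)
kernel /boot/vmlinuz-2.6.17 root=/dev/sda1 ro call_trace=both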
Signed-off-by: Jan Beulich Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/kernel/traps.c | 46 ++++++++++++++++++++++++++++------------ arch/x86_64/kernel/traps.c | 51 +++++++++++++++++++++++++++++++-------------- include/asm-i386/unwind.h | 8 +++---- include/asm-x86_64/unwind.h | 8 +++---- include/linux/unwind.h | 8 +++---- kernel/unwind.c | 7 +++---- 6 files changed, 83 insertions(+), 45 deletions(-) diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c index 2865846..7846409 100644 --- a/arch/i386/kernel/traps.c +++ b/arch/i386/kernel/traps.c @@ -93,6 +93,7 @@ asmlinkage void spurious_interrupt_bug(void); asmlinkage void machine_check(void); static int kstack_depth_to_print = 24; +static int call_trace = 1; ATOMIC_NOTIFIER_HEAD(i386die_chain); int register_die_notifier(struct notifier_block *nb) @@ -171,40 +172,47 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo, return ebp; } -static asmlinkage void show_trace_unwind(struct unwind_frame_info *info, void *log_lvl) +static asmlinkage int show_trace_unwind(struct unwind_frame_info *info, void *log_lvl) { + int n = 0; int printed = 0; /* nr of entries already printed on current line */ while (unwind(info) == 0 && UNW_PC(info)) { + ++n; printed = print_addr_and_symbol(UNW_PC(info), log_lvl, printed); if (arch_unw_user_mode(info)) break; } if (printed) printk("\n"); + return n; } static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, char *log_lvl) { unsigned long ebp; - struct unwind_frame_info info; if (!task) task = current; - if (regs) { - if (unwind_init_frame_info(&info, task, regs) == 0) { - show_trace_unwind(&info, log_lvl); - return; + if (call_trace >= 0) { + int unw_ret = 0; + struct unwind_frame_info info; + + if (regs) { + if (unwind_init_frame_info(&info, task, regs) == 0) + unw_ret = show_trace_unwind(&info, log_lvl); + } else if (task == current) + unw_ret = unwind_init_running(&info, show_trace_unwind, log_lvl); + else { + if (unwind_init_blocked(&info, task) == 0) + unw_ret = show_trace_unwind(&info, log_lvl); } - } else if (task == current) { - if (unwind_init_running(&info, show_trace_unwind, log_lvl) == 0) - return; - } else { - if (unwind_init_blocked(&info, task) == 0) { - show_trace_unwind(&info, log_lvl); - return; + if (unw_ret > 0) { + if (call_trace > 0) + return; + printk("%sLegacy call trace:\n", log_lvl); } } @@ -1245,3 +1253,15 @@ static int __init kstack_setup(char *s) return 1; } __setup("kstack=", kstack_setup); + +static int __init call_trace_setup(char *s) +{ + if (strcmp(s, "old") == 0) + call_trace = -1; + else if (strcmp(s, "both") == 0) + call_trace = 0; + else if (strcmp(s, "new") == 0) + call_trace = 1; + return 1; +} +__setup("call_trace=", call_trace_setup); diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c index eb1534f..bd0891f 100644 --- a/arch/x86_64/kernel/traps.c +++ b/arch/x86_64/kernel/traps.c @@ -107,6 +107,7 @@ static inline void preempt_conditional_cli(struct pt_regs *regs) } static int kstack_depth_to_print = 10; +static int call_trace = 1; #ifdef CONFIG_KALLSYMS #include @@ -190,11 +191,12 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, return NULL; } -static void show_trace_unwind(struct unwind_frame_info *info, void *context) +static int show_trace_unwind(struct unwind_frame_info *info, void *context) { - int i = 11; + int i = 11, n = 0; while (unwind(info) == 0 && UNW_PC(info)) { + ++n; if (i > 50) { printk("\n "); i = 7; @@ 
-205,6 +207,7 @@ static void show_trace_unwind(struct unwind_frame_info *info, void *context) break; } printk("\n"); + return n; } /* @@ -218,27 +221,32 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s { const unsigned cpu = safe_smp_processor_id(); unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr; - int i; + int i = 11; unsigned used = 0; - struct unwind_frame_info info; printk("\nCall Trace:"); if (!tsk) tsk = current; - if (regs) { - if (unwind_init_frame_info(&info, tsk, regs) == 0) { - show_trace_unwind(&info, NULL); - return; + if (call_trace >= 0) { + int unw_ret = 0; + struct unwind_frame_info info; + + if (regs) { + if (unwind_init_frame_info(&info, tsk, regs) == 0) + unw_ret = show_trace_unwind(&info, NULL); + } else if (tsk == current) + unw_ret = unwind_init_running(&info, show_trace_unwind, NULL); + else { + if (unwind_init_blocked(&info, tsk) == 0) + unw_ret = show_trace_unwind(&info, NULL); } - } else if (tsk == current) { - if (unwind_init_running(&info, show_trace_unwind, NULL) == 0) - return; - } else { - if (unwind_init_blocked(&info, tsk) == 0) { - show_trace_unwind(&info, NULL); - return; + if (unw_ret > 0) { + if (call_trace > 0) + return; + printk("Legacy call trace:"); + i = 18; } } @@ -264,7 +272,7 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s } \ } while (0) - for(i = 11; ; ) { + for(; ; ) { const char *id; unsigned long *estack_end; estack_end = in_exception_stack(cpu, (unsigned long)stack, @@ -1052,3 +1060,14 @@ static int __init kstack_setup(char *s) } __setup("kstack=", kstack_setup); +static int __init call_trace_setup(char *s) +{ + if (strcmp(s, "old") == 0) + call_trace = -1; + else if (strcmp(s, "both") == 0) + call_trace = 0; + else if (strcmp(s, "new") == 0) + call_trace = 1; + return 1; +} +__setup("call_trace=", call_trace_setup); diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h index 1c07689..d480f2e 100644 --- a/include/asm-i386/unwind.h +++ b/include/asm-i386/unwind.h @@ -66,10 +66,10 @@ static inline void arch_unw_init_blocked(struct unwind_frame_info *info) info->regs.xes = __USER_DS; } -extern asmlinkage void arch_unwind_init_running(struct unwind_frame_info *, - asmlinkage void (*callback)(struct unwind_frame_info *, - void *arg), - void *arg); +extern asmlinkage int arch_unwind_init_running(struct unwind_frame_info *, + asmlinkage int (*callback)(struct unwind_frame_info *, + void *arg), + void *arg); static inline int arch_unw_user_mode(const struct unwind_frame_info *info) { diff --git a/include/asm-x86_64/unwind.h b/include/asm-x86_64/unwind.h index 4f61de2..f3e7124 100644 --- a/include/asm-x86_64/unwind.h +++ b/include/asm-x86_64/unwind.h @@ -75,10 +75,10 @@ static inline void arch_unw_init_blocked(struct unwind_frame_info *info) info->regs.ss = __KERNEL_DS; } -extern void arch_unwind_init_running(struct unwind_frame_info *, - void (*callback)(struct unwind_frame_info *, - void *arg), - void *arg); +extern int arch_unwind_init_running(struct unwind_frame_info *, + int (*callback)(struct unwind_frame_info *, + void *arg), + void *arg); static inline int arch_unw_user_mode(const struct unwind_frame_info *info) { diff --git a/include/linux/unwind.h b/include/linux/unwind.h index 0295aa7..13c7b2c 100644 --- a/include/linux/unwind.h +++ b/include/linux/unwind.h @@ -49,8 +49,8 @@ extern int unwind_init_blocked(struct unwind_frame_info *, * Prepare to unwind the currently running thread. 
*/ extern int unwind_init_running(struct unwind_frame_info *, - asmlinkage void (*callback)(struct unwind_frame_info *, - void *arg), + asmlinkage int (*callback)(struct unwind_frame_info *, + void *arg), void *arg); /* @@ -97,8 +97,8 @@ static inline int unwind_init_blocked(struct unwind_frame_info *info, } static inline int unwind_init_running(struct unwind_frame_info *info, - asmlinkage void (*cb)(struct unwind_frame_info *, - void *arg), + asmlinkage int (*cb)(struct unwind_frame_info *, + void *arg), void *arg) { return -ENOSYS; diff --git a/kernel/unwind.c b/kernel/unwind.c index d36bcd3..0421035 100644 --- a/kernel/unwind.c +++ b/kernel/unwind.c @@ -885,14 +885,13 @@ EXPORT_SYMBOL(unwind_init_blocked); * Prepare to unwind the currently running thread. */ int unwind_init_running(struct unwind_frame_info *info, - asmlinkage void (*callback)(struct unwind_frame_info *, - void *arg), + asmlinkage int (*callback)(struct unwind_frame_info *, + void *arg), void *arg) { info->task = current; - arch_unwind_init_running(info, callback, arg); - return 0; + return arch_unwind_init_running(info, callback, arg); } EXPORT_SYMBOL(unwind_init_running); -- cgit v1.1 From 83f4fcce7fdd213bd570b899862c3838871f8cf7 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 26 Jun 2006 13:57:50 +0200 Subject: [PATCH] x86_64: allow unwinder to build without module support Add proper conditionals to be able to build with CONFIG_MODULES=n. Signed-off-by: Jan Beulich Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/linux/unwind.h | 8 ++++++++ kernel/unwind.c | 4 ++++ 2 files changed, 12 insertions(+) diff --git a/include/linux/unwind.h b/include/linux/unwind.h index 13c7b2c..ce48e2c 100644 --- a/include/linux/unwind.h +++ b/include/linux/unwind.h @@ -29,12 +29,16 @@ struct module; */ extern void unwind_init(void); +#ifdef CONFIG_MODULES + extern void *unwind_add_table(struct module *, const void *table_start, unsigned long table_size); extern void unwind_remove_table(void *handle, int init_only); +#endif + extern int unwind_init_frame_info(struct unwind_frame_info *, struct task_struct *, /*const*/ struct pt_regs *); @@ -72,6 +76,8 @@ struct unwind_frame_info {}; static inline void unwind_init(void) {} +#ifdef CONFIG_MODULES + static inline void *unwind_add_table(struct module *mod, const void *table_start, unsigned long table_size) @@ -79,6 +85,8 @@ static inline void *unwind_add_table(struct module *mod, return NULL; } +#endif + static inline void unwind_remove_table(void *handle, int init_only) { } diff --git a/kernel/unwind.c b/kernel/unwind.c index 0421035..f69c804 100644 --- a/kernel/unwind.c +++ b/kernel/unwind.c @@ -172,6 +172,8 @@ void __init unwind_init(void) __start_unwind, __end_unwind - __start_unwind); } +#ifdef CONFIG_MODULES + /* Must be called with module_mutex held. */ void *unwind_add_table(struct module *module, const void *table_start, @@ -253,6 +255,8 @@ void unwind_remove_table(void *handle, int init_only) kfree(table); } +#endif /* CONFIG_MODULES */ + static uleb128_t get_uleb128(const u8 **pcur, const u8 *end) { const u8 *cur = *pcur; -- cgit v1.1 From b79c4df714ff190f7149d3d5a86278bdc25a90ec Mon Sep 17 00:00:00 2001 From: Carl-Daniel Hailfinger Date: Mon, 26 Jun 2006 13:57:53 +0200 Subject: [PATCH] i386/x86-64: Consolidate arch/{i386,x86_64}/boot/compressed/misc.c Clean up arch/{i386,x86_64}/boot/compressed/misc.c a bit to reduce their differences. Should have zero effect on code generation. 
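[Note: for context, both decompressors carry their own trivial memset()/memcpy() because the boot stub is freestanding (there is no libc at this stage) and the inflate code's memzero() macro expands to memset(). A minimal sketch of the shared shape after this cleanup, simplified from the diffs below rather than copied verbatim:

	static void *memset(void *s, int c, unsigned n)
	{
		unsigned i;
		char *ss = (char *)s;

		for (i = 0; i < n; i++)
			ss[i] = c;
		return s;
	}

	static void *memcpy(void *dest, const void *src, unsigned n)
	{
		unsigned i;
		char *d = (char *)dest;
		const char *s = (const char *)src;

		for (i = 0; i < n; i++)
			d[i] = s[i];
		return dest;
	}

Both files also now read the boot loader's parameter block through identically named RM_* accessors (RM_SCREEN_INFO, RM_EXT_MEM_K, RM_ALT_MEM_K) rather than two divergent naming schemes.]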
Signed-off-by: Carl-Daniel Hailfinger Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/boot/compressed/misc.c | 32 ++++++++++---------------- arch/x86_64/boot/compressed/misc.c | 46 +++++++++++++++++++------------------- 2 files changed, 35 insertions(+), 43 deletions(-) diff --git a/arch/i386/boot/compressed/misc.c b/arch/i386/boot/compressed/misc.c index f19f3a7..b2ccd54 100644 --- a/arch/i386/boot/compressed/misc.c +++ b/arch/i386/boot/compressed/misc.c @@ -24,14 +24,6 @@ #undef memset #undef memcpy - -/* - * Why do we do this? Don't ask me.. - * - * Incomprehensible are the ways of bootloaders. - */ -static void* memset(void *, int, size_t); -static void* memcpy(void *, __const void *, size_t); #define memzero(s, n) memset ((s), 0, (n)) typedef unsigned char uch; @@ -93,7 +85,7 @@ static unsigned char *real_mode; /* Pointer to real-mode data */ #endif #define RM_SCREEN_INFO (*(struct screen_info *)(real_mode+0)) -extern char input_data[]; +extern unsigned char input_data[]; extern int input_len; static long bytes_out = 0; @@ -103,6 +95,9 @@ static unsigned long output_ptr = 0; static void *malloc(int size); static void free(void *where); +static void *memset(void *s, int c, unsigned n); +static void *memcpy(void *dest, const void *src, unsigned n); + static void putstr(const char *); extern int end; @@ -205,7 +200,7 @@ static void putstr(const char *s) outb_p(0xff & (pos >> 1), vidport+1); } -static void* memset(void* s, int c, size_t n) +static void* memset(void* s, int c, unsigned n) { int i; char *ss = (char*)s; @@ -214,14 +209,13 @@ static void* memset(void* s, int c, size_t n) return s; } -static void* memcpy(void* __dest, __const void* __src, - size_t __n) +static void* memcpy(void* dest, const void* src, unsigned n) { int i; - char *d = (char *)__dest, *s = (char *)__src; + char *d = (char *)dest, *s = (char *)src; - for (i=0;i<__n;i++) d[i] = s[i]; - return __dest; + for (i=0;i RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < 1024) error("Less than 2MB of memory"); #endif - output_data = (char *)__PHYSICAL_START; /* Normally Points to 1M */ + output_data = (unsigned char *)__PHYSICAL_START; /* Normally Points to 1M */ free_mem_end_ptr = (long)real_mode; } @@ -324,11 +318,9 @@ static void setup_output_buffer_if_we_run_high(struct moveparams *mv) #ifdef STANDARD_MEMORY_BIOS_CALL if (RM_EXT_MEM_K < (3*1024)) error("Less than 4MB of memory"); #else - if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < - (3*1024)) - error("Less than 4MB of memory"); + if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory"); #endif - mv->low_buffer_start = output_data = (char *)LOW_BUFFER_START; + mv->low_buffer_start = output_data = (unsigned char *)LOW_BUFFER_START; low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX ? 
LOW_BUFFER_MAX : (unsigned int)real_mode) & ~0xfff; low_buffer_size = low_buffer_end - LOW_BUFFER_START; diff --git a/arch/x86_64/boot/compressed/misc.c b/arch/x86_64/boot/compressed/misc.c index cf4b88c..3755b2e 100644 --- a/arch/x86_64/boot/compressed/misc.c +++ b/arch/x86_64/boot/compressed/misc.c @@ -77,11 +77,11 @@ static void gzip_release(void **); */ static unsigned char *real_mode; /* Pointer to real-mode data */ -#define EXT_MEM_K (*(unsigned short *)(real_mode + 0x2)) +#define RM_EXT_MEM_K (*(unsigned short *)(real_mode + 0x2)) #ifndef STANDARD_MEMORY_BIOS_CALL -#define ALT_MEM_K (*(unsigned long *)(real_mode + 0x1e0)) +#define RM_ALT_MEM_K (*(unsigned long *)(real_mode + 0x1e0)) #endif -#define SCREEN_INFO (*(struct screen_info *)(real_mode+0)) +#define RM_SCREEN_INFO (*(struct screen_info *)(real_mode+0)) extern unsigned char input_data[]; extern int input_len; @@ -92,9 +92,9 @@ static unsigned long output_ptr = 0; static void *malloc(int size); static void free(void *where); - -void* memset(void* s, int c, unsigned n); -void* memcpy(void* dest, const void* src, unsigned n); + +static void *memset(void *s, int c, unsigned n); +static void *memcpy(void *dest, const void *src, unsigned n); static void putstr(const char *); @@ -162,8 +162,8 @@ static void putstr(const char *s) int x,y,pos; char c; - x = SCREEN_INFO.orig_x; - y = SCREEN_INFO.orig_y; + x = RM_SCREEN_INFO.orig_x; + y = RM_SCREEN_INFO.orig_y; while ( ( c = *s++ ) != '\0' ) { if ( c == '\n' ) { @@ -184,8 +184,8 @@ static void putstr(const char *s) } } - SCREEN_INFO.orig_x = x; - SCREEN_INFO.orig_y = y; + RM_SCREEN_INFO.orig_x = x; + RM_SCREEN_INFO.orig_y = y; pos = (x + cols * y) * 2; /* Update cursor position */ outb_p(14, vidport); @@ -194,7 +194,7 @@ static void putstr(const char *s) outb_p(0xff & (pos >> 1), vidport+1); } -void* memset(void* s, int c, unsigned n) +static void* memset(void* s, int c, unsigned n) { int i; char *ss = (char*)s; @@ -203,7 +203,7 @@ void* memset(void* s, int c, unsigned n) return s; } -void* memcpy(void* dest, const void* src, unsigned n) +static void* memcpy(void* dest, const void* src, unsigned n) { int i; char *d = (char *)dest, *s = (char *)src; @@ -278,15 +278,15 @@ static void error(char *x) putstr(x); putstr("\n\n -- System halted"); - while(1); + while(1); /* Halt */ } -void setup_normal_output_buffer(void) +static void setup_normal_output_buffer(void) { #ifdef STANDARD_MEMORY_BIOS_CALL - if (EXT_MEM_K < 1024) error("Less than 2MB of memory"); + if (RM_EXT_MEM_K < 1024) error("Less than 2MB of memory"); #else - if ((ALT_MEM_K > EXT_MEM_K ? ALT_MEM_K : EXT_MEM_K) < 1024) error("Less than 2MB of memory"); + if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < 1024) error("Less than 2MB of memory"); #endif output_data = (unsigned char *)__PHYSICAL_START; /* Normally Points to 1M */ free_mem_end_ptr = (long)real_mode; @@ -297,13 +297,13 @@ struct moveparams { uch *high_buffer_start; int hcount; }; -void setup_output_buffer_if_we_run_high(struct moveparams *mv) +static void setup_output_buffer_if_we_run_high(struct moveparams *mv) { high_buffer_start = (uch *)(((ulg)&end) + HEAP_SIZE); #ifdef STANDARD_MEMORY_BIOS_CALL - if (EXT_MEM_K < (3*1024)) error("Less than 4MB of memory"); + if (RM_EXT_MEM_K < (3*1024)) error("Less than 4MB of memory"); #else - if ((ALT_MEM_K > EXT_MEM_K ? ALT_MEM_K : EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory"); + if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? 
RM_ALT_MEM_K : RM_EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory"); #endif mv->low_buffer_start = output_data = (unsigned char *)LOW_BUFFER_START; low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX @@ -319,7 +319,7 @@ void setup_output_buffer_if_we_run_high(struct moveparams *mv) mv->high_buffer_start = high_buffer_start; } -void close_output_buffer_if_we_run_high(struct moveparams *mv) +static void close_output_buffer_if_we_run_high(struct moveparams *mv) { if (bytes_out > low_buffer_size) { mv->lcount = low_buffer_size; @@ -335,7 +335,7 @@ int decompress_kernel(struct moveparams *mv, void *rmode) { real_mode = rmode; - if (SCREEN_INFO.orig_video_mode == 7) { + if (RM_SCREEN_INFO.orig_video_mode == 7) { vidmem = (char *) 0xb0000; vidport = 0x3b4; } else { @@ -343,8 +343,8 @@ int decompress_kernel(struct moveparams *mv, void *rmode) vidport = 0x3d4; } - lines = SCREEN_INFO.orig_video_lines; - cols = SCREEN_INFO.orig_video_cols; + lines = RM_SCREEN_INFO.orig_video_lines; + cols = RM_SCREEN_INFO.orig_video_cols; if (free_mem_ptr < 0x100000) setup_normal_output_buffer(); else setup_output_buffer_if_we_run_high(mv); -- cgit v1.1 From d2ae5b5f6afd3c4caaf82444102a4372135cb994 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 26 Jun 2006 13:57:56 +0200 Subject: [PATCH] x86_64: Get rid of pud_offset_k / __pud_offset_k pud_offset_k() is equivalent to pud_offset() now. Pointed out by Jan Beulich. Similar for __pud_offset_k, which needs a small change in the callers. Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/mm/fault.c | 2 +- arch/x86_64/mm/init.c | 2 +- include/asm-x86_64/pgtable.h | 6 ------ 3 files changed, 2 insertions(+), 8 deletions(-) diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c index 55250593..6472889 100644 --- a/arch/x86_64/mm/fault.c +++ b/arch/x86_64/mm/fault.c @@ -160,7 +160,7 @@ void dump_pagetable(unsigned long address) printk("PGD %lx ", pgd_val(*pgd)); if (!pgd_present(*pgd)) goto ret; - pud = __pud_offset_k((pud_t *)pgd_page(*pgd), address); + pud = pud_offset(pgd, address); if (bad_address(pud)) goto bad; printk("PUD %lx ", pud_val(*pud)); if (!pud_present(*pud)) goto ret; diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c index a70a8e3..b83645a 100644 --- a/arch/x86_64/mm/init.c +++ b/arch/x86_64/mm/init.c @@ -372,7 +372,7 @@ void __meminit init_memory_mapping(unsigned long start, unsigned long end) pud_t *pud; if (after_bootmem) - pud = pud_offset_k(pgd, start & PGDIR_MASK); + pud = pud_offset(pgd, start & PGDIR_MASK); else pud = alloc_low_page(&map, &pud_phys); diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h index 31e83c3..a31ab4e 100644 --- a/include/asm-x86_64/pgtable.h +++ b/include/asm-x86_64/pgtable.h @@ -337,14 +337,8 @@ static inline int pmd_large(pmd_t pte) { /* to find an entry in a page-table-directory.
*/ #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) #define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address)) -#define pud_offset_k(pgd, addr) pud_offset(pgd, addr) #define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT) -static inline pud_t *__pud_offset_k(pud_t *pud, unsigned long address) -{ - return pud + pud_index(address); -} - /* PMD - Level 2 access */ #define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK)) #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) -- cgit v1.1 From 46d13a384bc695ec61458e5dcbac1eee6d623a9b Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 26 Jun 2006 13:57:59 +0200 Subject: [PATCH] x86_64: use halt() instead of raw inline assembly Use abstractions whenever possible. Signed-off-by: Jan Beulich Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/crash.c | 2 +- arch/x86_64/kernel/smp.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86_64/kernel/crash.c b/arch/x86_64/kernel/crash.c index 4e6c3b7..ec1c743 100644 --- a/arch/x86_64/kernel/crash.c +++ b/arch/x86_64/kernel/crash.c @@ -111,7 +111,7 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu) atomic_dec(&waiting_for_crash_ipi); /* Assume hlt works */ for(;;) - asm("hlt"); + halt(); return 1; } diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c index 4a6628b..fe906ba 100644 --- a/arch/x86_64/kernel/smp.c +++ b/arch/x86_64/kernel/smp.c @@ -460,7 +460,7 @@ static void smp_really_stop_cpu(void *dummy) { smp_stop_cpu(); for (;;) - asm("hlt"); + halt(); } void smp_send_stop(void) -- cgit v1.1 From 2bbc419f9d51b44f3fdeea12c5b786bdace82b8e Mon Sep 17 00:00:00 2001 From: Rohit Seth Date: Mon, 26 Jun 2006 13:58:02 +0200 Subject: [PATCH] x86_64: Change assembly to use regular cpuid_count macro Minor cleanup patch: Replacing the asm statement with cpuid_count macro(which already provides the same functionality). Signed-off-by: Rohit Seth Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/setup.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index 816203d..a9de8f0 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -957,15 +957,12 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c) */ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) { - unsigned int eax; + unsigned int eax, t; if (c->cpuid_level < 4) return 1; - __asm__("cpuid" - : "=a" (eax) - : "0" (4), "c" (0) - : "bx", "dx"); + cpuid_count(4, 0, &eax, &t, &t, &t); if (eax & 0x1f) return ((eax >> 26) + 1); -- cgit v1.1 From 8d4f6b93a4aaa6b56b600cd1165c971f4395e4b3 Mon Sep 17 00:00:00 2001 From: Jon Mason Date: Mon, 26 Jun 2006 13:58:05 +0200 Subject: [PATCH] x86_64: Calgary IOMMU - introduce iommu_detected swiotlb relies on the gart specific iommu_aperture variable to know if we discovered a hardware IOMMU before swiotlb initialization. Introduce iommu_detected to do the same thing, but in a HW IOMMU neutral manner, in preparation for adding the Calgary HW IOMMU. 
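[Note: the handshake introduced here is deliberately minimal: every hardware IOMMU probe sets one flag, and swiotlb volunteers only when nothing else has claimed the job. A sketch of both sides, condensed from the pci-dma.c and pci-swiotlb.c hunks below:

	/* set to 1 by any HW IOMMU detection code (GART today, Calgary later) */
	int iommu_detected __read_mostly = 0;

	void __init pci_swiotlb_init(void)
	{
		/* bounce buffers only if no HW IOMMU was found, iommu=off
		   was not given, and there is memory above 4GB (or
		   iommu=force was given) */
		if (!iommu_detected && !no_iommu &&
		    (end_pfn > MAX_DMA32_PFN || force_iommu))
			swiotlb = 1;
	}
]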
Signed-Off-By: Muli Ben-Yehuda Signed-Off-By: Jon Mason Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/aperture.c | 1 + arch/x86_64/kernel/pci-dma.c | 3 +++ arch/x86_64/kernel/pci-gart.c | 4 ++++ arch/x86_64/kernel/pci-swiotlb.c | 2 +- include/asm-x86_64/proto.h | 1 + 5 files changed, 10 insertions(+), 1 deletion(-) diff --git a/arch/x86_64/kernel/aperture.c b/arch/x86_64/kernel/aperture.c index 9ea723b..a195ef0 100644 --- a/arch/x86_64/kernel/aperture.c +++ b/arch/x86_64/kernel/aperture.c @@ -209,6 +209,7 @@ void __init iommu_hole_init(void) if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00))) continue; + iommu_detected = 1; iommu_aperture = 1; aper_order = (read_pci_config(0, num, 3, 0x90) >> 1) & 7; diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c index 33926c3..7edd1a4 100644 --- a/arch/x86_64/kernel/pci-dma.c +++ b/arch/x86_64/kernel/pci-dma.c @@ -33,6 +33,9 @@ int panic_on_overflow __read_mostly = 0; int force_iommu __read_mostly= 0; #endif +/* Set this to 1 if there is a HW IOMMU in the system */ +int iommu_detected __read_mostly = 0; + /* Dummy device used for NULL arguments (normally ISA). Better would be probably a smaller DMA mask, but this is bug-to-bug compatible to i386. */ diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c index 82a346e..4f67957 100644 --- a/arch/x86_64/kernel/pci-gart.c +++ b/arch/x86_64/kernel/pci-gart.c @@ -597,6 +597,10 @@ static int __init pci_iommu_init(void) if (swiotlb) return -ENODEV; + /* Did we detect a different HW IOMMU? */ + if (iommu_detected && !iommu_aperture) + return -1; + if (no_iommu || (!force_iommu && end_pfn <= MAX_DMA32_PFN) || !iommu_aperture || diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c index 990ed67..ebdb77f 100644 --- a/arch/x86_64/kernel/pci-swiotlb.c +++ b/arch/x86_64/kernel/pci-swiotlb.c @@ -31,7 +31,7 @@ struct dma_mapping_ops swiotlb_dma_ops = { void pci_swiotlb_init(void) { /* don't initialize swiotlb if iommu=off (no_iommu=1) */ - if (!iommu_aperture && !no_iommu && + if (!iommu_detected && !no_iommu && (end_pfn > MAX_DMA32_PFN || force_iommu)) swiotlb = 1; if (swiotlb) { diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h index 105476f..9d3335b 100644 --- a/include/asm-x86_64/proto.h +++ b/include/asm-x86_64/proto.h @@ -116,6 +116,7 @@ extern int skip_ioapic_setup; extern int acpi_ht; extern int acpi_disabled; +extern int iommu_detected; #ifdef CONFIG_IOMMU extern int fallback_aper_order; extern int fallback_aper_force; -- cgit v1.1 From a3c042a0f022dade8e02bf6c9be5d2379d7e133c Mon Sep 17 00:00:00 2001 From: Jon Mason Date: Mon, 26 Jun 2006 13:58:08 +0200 Subject: [PATCH] x86_64: Calgary IOMMU - move valid_dma_direction into the callers Based on Andi Kleen's comments on the original Calgary patch, move valid_dma_direction into the calling functions. 
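[Note: the practical effect is that a driver passing a bogus direction now dies loudly at the mapping call itself instead of corrupting state further down. A hypothetical caller fragment (pdev, buf and len are invented for illustration; the wrapper names are from the diff below):

	/* map buf for CPU-to-device traffic; any direction other than
	   DMA_BIDIRECTIONAL/DMA_TO_DEVICE/DMA_FROM_DEVICE trips
	   BUG_ON(!valid_dma_direction(direction)) inside the wrapper */
	dma_addr_t handle = dma_map_single(&pdev->dev, buf, len,
					   DMA_TO_DEVICE);
	if (dma_mapping_error(handle))
		goto out_free;
]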
Signed-off-by: Muli Ben-Yehuda Signed-off-by: Jon Mason Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-x86_64/dma-mapping.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h index 498f66d..b6da83d 100644 --- a/include/asm-x86_64/dma-mapping.h +++ b/include/asm-x86_64/dma-mapping.h @@ -55,6 +55,13 @@ extern dma_addr_t bad_dma_address; extern struct dma_mapping_ops* dma_ops; extern int iommu_merge; +static inline int valid_dma_direction(int dma_direction) +{ + return ((dma_direction == DMA_BIDIRECTIONAL) || + (dma_direction == DMA_TO_DEVICE) || + (dma_direction == DMA_FROM_DEVICE)); +} + static inline int dma_mapping_error(dma_addr_t dma_addr) { if (dma_ops->mapping_error) @@ -72,6 +79,7 @@ static inline dma_addr_t dma_map_single(struct device *hwdev, void *ptr, size_t size, int direction) { + BUG_ON(!valid_dma_direction(direction)); return dma_ops->map_single(hwdev, ptr, size, direction); } @@ -79,6 +87,7 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size, int direction) { + BUG_ON(!valid_dma_direction(direction)); dma_ops->unmap_single(dev, addr, size, direction); } @@ -91,6 +100,7 @@ static inline void dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, size_t size, int direction) { + BUG_ON(!valid_dma_direction(direction)); if (dma_ops->sync_single_for_cpu) dma_ops->sync_single_for_cpu(hwdev, dma_handle, size, direction); @@ -101,6 +111,7 @@ static inline void dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, size_t size, int direction) { + BUG_ON(!valid_dma_direction(direction)); if (dma_ops->sync_single_for_device) dma_ops->sync_single_for_device(hwdev, dma_handle, size, direction); @@ -111,6 +122,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, unsigned long offset, size_t size, int direction) { + BUG_ON(!valid_dma_direction(direction)); if (dma_ops->sync_single_range_for_cpu) { dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction); } @@ -122,6 +134,7 @@ static inline void dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle, unsigned long offset, size_t size, int direction) { + BUG_ON(!valid_dma_direction(direction)); if (dma_ops->sync_single_range_for_device) dma_ops->sync_single_range_for_device(hwdev, dma_handle, offset, size, direction); @@ -133,6 +146,7 @@ static inline void dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, int nelems, int direction) { + BUG_ON(!valid_dma_direction(direction)); if (dma_ops->sync_sg_for_cpu) dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction); flush_write_buffers(); @@ -142,6 +156,7 @@ static inline void dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, int nelems, int direction) { + BUG_ON(!valid_dma_direction(direction)); if (dma_ops->sync_sg_for_device) { dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction); } @@ -152,6 +167,7 @@ dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, static inline int dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction) { + BUG_ON(!valid_dma_direction(direction)); return dma_ops->map_sg(hwdev, sg, nents, direction); } @@ -159,6 +175,7 @@ static inline void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction) { + BUG_ON(!valid_dma_direction(direction)); dma_ops->unmap_sg(hwdev, sg, nents, direction); } -- cgit 
v1.1 From 0dc243ae10c8309c170a3af9f1adad1924a9f217 Mon Sep 17 00:00:00 2001 From: Jon Mason Date: Mon, 26 Jun 2006 13:58:11 +0200 Subject: [PATCH] x86_64: Calgary IOMMU - IOMMU abstractions This patch creates a new interface for IOMMUs by adding a centralized location for IOMMU allocation (for translation tables/apertures) and IOMMU initialization. In creating these, code was moved around for abstraction, uniformity, and conciseness. Take note of the move of the iommu_setup bootarg parsing code to __setup. This is enabled by moving back the location of the aperture allocation/detection to mem init (which, while ugly, was already the location of the swiotlb_init). While a slight departure from the previous patch, I believe this provides the true intention of the previous versions of the patch which changed this code. It also makes the addition of the upcoming Calgary code much cleaner than previous patches. [AK: Removed one broken change. iommu_setup still has to be called early] Signed-off-by: Muli Ben-Yehuda Signed-off-by: Jon Mason Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/pci-dma.c | 29 +++++++++++++++++++++++++++++ arch/x86_64/kernel/pci-gart.c | 13 ++++++------- arch/x86_64/kernel/setup.c | 5 ----- arch/x86_64/mm/init.c | 7 +------ include/asm-x86_64/pci.h | 2 +- include/asm-x86_64/proto.h | 10 +++++----- 6 files changed, 42 insertions(+), 24 deletions(-) diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c index 7edd1a4..a45844c 100644 --- a/arch/x86_64/kernel/pci-dma.c +++ b/arch/x86_64/kernel/pci-dma.c @@ -279,3 +279,32 @@ __init int iommu_setup(char *p) } return 1; } +__setup("iommu=", iommu_setup); + +void __init pci_iommu_alloc(void) +{ + /* + * The order of these functions is important for + * fall-back/fail-over reasons + */ +#ifdef CONFIG_IOMMU + iommu_hole_init(); +#endif + +#ifdef CONFIG_SWIOTLB + pci_swiotlb_init(); +#endif +} + +static int __init pci_iommu_init(void) +{ +#ifdef CONFIG_IOMMU + gart_iommu_init(); +#endif + + no_iommu_init(); + return 0; +} + +/* Must execute after PCI subsystem */ +fs_initcall(pci_iommu_init); diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c index 4f67957..9a93954 100644 --- a/arch/x86_64/kernel/pci-gart.c +++ b/arch/x86_64/kernel/pci-gart.c @@ -571,7 +571,7 @@ static struct dma_mapping_ops gart_dma_ops = { .unmap_sg = gart_unmap_sg, }; -static int __init pci_iommu_init(void) +void __init gart_iommu_init(void) { struct agp_kern_info info; unsigned long aper_size; @@ -581,7 +581,7 @@ static int __init pci_iommu_init(void) if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) { printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n"); - return -ENODEV; + return; } #ifndef CONFIG_AGP_AMD64 @@ -595,11 +595,11 @@ static int __init pci_iommu_init(void) #endif if (swiotlb) - return -ENODEV; + return; /* Did we detect a different HW IOMMU?
*/ if (iommu_detected && !iommu_aperture) - return -1; + return; if (no_iommu || (!force_iommu && end_pfn <= MAX_DMA32_PFN) || @@ -611,7 +611,7 @@ static int __init pci_iommu_init(void) "but IOMMU not available.\n" KERN_ERR "WARNING 32bit PCI may malfunction.\n"); } - return -ENODEV; + return; } printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n"); @@ -678,11 +678,10 @@ static int __init pci_iommu_init(void) flush_gart(); dma_ops = &gart_dma_ops; - return 0; } /* Must execute after PCI subsystem */ -fs_initcall(pci_iommu_init); +fs_initcall(gart_iommu_init); void gart_parse_options(char *p) { diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index a9de8f0..04b2d7b 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -63,7 +63,6 @@ #include #include #include -#include #include #include @@ -702,10 +701,6 @@ void __init setup_arch(char **cmdline_p) e820_setup_gap(); -#ifdef CONFIG_IOMMU - iommu_hole_init(); -#endif - #ifdef CONFIG_VT #if defined(CONFIG_VGA_CONSOLE) conswitchp = &vga_con; diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c index b83645a..2f5f5b1 100644 --- a/arch/x86_64/mm/init.c +++ b/arch/x86_64/mm/init.c @@ -41,8 +41,6 @@ #include #include #include -#include -#include #ifndef Dprintk #define Dprintk(x...) @@ -587,10 +585,7 @@ void __init mem_init(void) { long codesize, reservedpages, datasize, initsize; -#ifdef CONFIG_SWIOTLB - pci_swiotlb_init(); -#endif - no_iommu_init(); + pci_iommu_alloc(); /* How many end-of-memory variables you have, grandma! */ max_low_pfn = end_pfn; diff --git a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h index 3374d34..4dbc07c 100644 --- a/include/asm-x86_64/pci.h +++ b/include/asm-x86_64/pci.h @@ -39,8 +39,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq); #include #include #include -#include /* for have_iommu */ +extern void pci_iommu_alloc(void); extern int iommu_setup(char *opt); /* The PCI address space does equal the physical memory diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h index 9d3335b..038fe1f4 100644 --- a/include/asm-x86_64/proto.h +++ b/include/asm-x86_64/proto.h @@ -37,7 +37,6 @@ extern void ia32_sysenter_target(void); extern void config_acpi_tables(void); extern void ia32_syscall(void); -extern void iommu_hole_init(void); extern int pmtimer_mark_offset(void); extern void pmtimer_resume(void); @@ -101,13 +100,9 @@ extern int unsynchronized_tsc(void); extern void select_idle_routine(const struct cpuinfo_x86 *c); -extern void gart_parse_options(char *); -extern void __init no_iommu_init(void); - extern unsigned long table_start, table_end; extern int exception_trace; -extern int force_iommu, no_iommu; extern int using_apic_timer; extern int disable_apic; extern unsigned cpu_khz; @@ -116,8 +111,13 @@ extern int skip_ioapic_setup; extern int acpi_ht; extern int acpi_disabled; +extern void no_iommu_init(void); +extern int force_iommu, no_iommu; extern int iommu_detected; #ifdef CONFIG_IOMMU +extern void gart_iommu_init(void); +extern void gart_parse_options(char *); +extern void iommu_hole_init(void); extern int fallback_aper_order; extern int fallback_aper_force; extern int iommu_aperture; -- cgit v1.1 From e465058d55a88feb4c7ecabe63eea7ea7147e206 Mon Sep 17 00:00:00 2001 From: Jon Mason Date: Mon, 26 Jun 2006 13:58:14 +0200 Subject: [PATCH] x86_64: Calgary IOMMU - Calgary specific bits This patch hooks Calgary into the build, the x86-64 IOMMU initialization paths, and introduces the Calgary specific bits. 
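[Note: before the Calgary-specific bits, here is where Calgary lands in the initialization paths just mentioned. This is a sketch of the resulting ordering, assembled from the pci-dma.c hunks later in this patch plus the previous abstraction patch, not a verbatim copy:

	void __init pci_iommu_alloc(void)
	{
		/* early, from mem_init(), while bootmem can still be used */
	#ifdef CONFIG_IOMMU
		iommu_hole_init();	/* GART aperture detection */
	#endif
	#ifdef CONFIG_CALGARY_IOMMU
		detect_calgary();	/* sets iommu_detected, reserves TCE tables */
	#endif
	#ifdef CONFIG_SWIOTLB
		pci_swiotlb_init();	/* software fallback, checked last */
	#endif
	}

	static int __init pci_iommu_init(void)	/* fs_initcall, after PCI is up */
	{
	#ifdef CONFIG_CALGARY_IOMMU
		calgary_iommu_init();
	#endif
	#ifdef CONFIG_IOMMU
		gart_iommu_init();
	#endif
		no_iommu_init();
		return 0;
	}
]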
The implementation draws inspiration from both PPC (which has support for the same chip but requires firmware support which we don't have on x86-64) and gart. Calgary is different from gart in that it supports a translation table per PHB, as opposed to the single gart aperture. Changes from previous version: * Addition of boot-time disablement for bus-level translation/isolation (e.g., enable userspace DMA for things like X) * Usage of newer IOMMU abstraction functions Signed-off-by: Muli Ben-Yehuda Signed-off-by: Jon Mason Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- Documentation/x86_64/boot-options.txt | 21 + arch/x86_64/Kconfig | 19 + arch/x86_64/kernel/Makefile | 1 + arch/x86_64/kernel/pci-calgary.c | 1018 +++++++++++++++++++++++++++++++++ arch/x86_64/kernel/pci-dma.c | 9 + arch/x86_64/kernel/tce.c | 202 +++++++ include/asm-x86_64/calgary.h | 66 +++ include/asm-x86_64/pci.h | 2 +- include/asm-x86_64/tce.h | 47 ++ 9 files changed, 1384 insertions(+), 1 deletion(-) create mode 100644 arch/x86_64/kernel/pci-calgary.c create mode 100644 arch/x86_64/kernel/tce.c create mode 100644 include/asm-x86_64/calgary.h create mode 100644 include/asm-x86_64/tce.h diff --git a/Documentation/x86_64/boot-options.txt b/Documentation/x86_64/boot-options.txt index f2cd6ef..6887d44 100644 --- a/Documentation/x86_64/boot-options.txt +++ b/Documentation/x86_64/boot-options.txt @@ -205,6 +205,27 @@ IOMMU pages Prereserve that many 128K pages for the software IO bounce buffering. force Force all IO through the software TLB. + calgary=[64k,128k,256k,512k,1M,2M,4M,8M] + calgary=[translate_empty_slots] + calgary=[disable=<PCI bus number>] + + 64k,...,8M - Set the size of each PCI slot's translation table + when using the Calgary IOMMU. This is the size of the translation + table itself in main memory. The smallest table, 64k, covers an IO + space of 32MB; the largest, 8MB table, can cover an IO space of + 4GB. Normally the kernel will make the right choice by itself. + + translate_empty_slots - Enable translation even on slots that have + no devices attached to them, in case a device will be hotplugged + in the future. + + disable=<PCI bus number> - Disable translation on a given PHB. For + example, the built-in graphics adapter resides on the first bridge + (PCI bus number 0); if translation (isolation) is enabled on this + bridge, X servers that access the hardware directly from user + space might stop working. Use this option if you have devices that + are accessed from userspace directly on some PCI host bridge. + Debugging oops=panic Always panic on oopses. Default is to just kill the process, diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig index e8c52a1..ccc4a7f 100644 --- a/arch/x86_64/Kconfig +++ b/arch/x86_64/Kconfig @@ -405,6 +405,25 @@ config IOMMU device) unless CONFIG_IOMMU_DEBUG or iommu=force is specified too. +config CALGARY_IOMMU + bool "IBM Calgary IOMMU support" + default y + select SWIOTLB + depends on PCI && EXPERIMENTAL + help + Support for hardware IOMMUs in IBM's xSeries x366 and x460 + systems. Needed to run systems with more than 3GB of memory + properly with 32-bit PCI devices that do not support DAC + (Double Address Cycle). Calgary also supports bus level + isolation, where all DMAs pass through the IOMMU. This + prevents them from going anywhere except their intended + destination. This catches hard-to-find kernel bugs and + mis-behaving drivers and devices that do not use the DMA-API + properly to set up their DMA buffers. The IOMMU can be + turned off at boot time with the iommu=off parameter.
+ Normally the kernel will make the right choice by itself. + If unsure, say Y. + # need this always selected by IOMMU for the VIA workaround config SWIOTLB bool diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile index fd106bd..aeb9c56 100644 --- a/arch/x86_64/kernel/Makefile +++ b/arch/x86_64/kernel/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend_asm.o obj-$(CONFIG_CPU_FREQ) += cpufreq/ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_IOMMU) += pci-gart.o aperture.o +obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary.o tce.o obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_X86_PM_TIMER) += pmtimer.o diff --git a/arch/x86_64/kernel/pci-calgary.c b/arch/x86_64/kernel/pci-calgary.c new file mode 100644 index 0000000..d91cb843 --- /dev/null +++ b/arch/x86_64/kernel/pci-calgary.c @@ -0,0 +1,1018 @@ +/* + * Derived from arch/powerpc/kernel/iommu.c + * + * Copyright (C) 2006 Jon Mason , IBM Corporation + * Copyright (C) 2006 Muli Ben-Yehuda , IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PCI_DEVICE_ID_IBM_CALGARY 0x02a1 +#define PCI_VENDOR_DEVICE_ID_CALGARY \ + (PCI_VENDOR_ID_IBM | PCI_DEVICE_ID_IBM_CALGARY << 16) + +/* we need these for register space address calculation */ +#define START_ADDRESS 0xfe000000 +#define CHASSIS_BASE 0 +#define ONE_BASED_CHASSIS_NUM 1 + +/* register offsets inside the host bridge space */ +#define PHB_CSR_OFFSET 0x0110 +#define PHB_PLSSR_OFFSET 0x0120 +#define PHB_CONFIG_RW_OFFSET 0x0160 +#define PHB_IOBASE_BAR_LOW 0x0170 +#define PHB_IOBASE_BAR_HIGH 0x0180 +#define PHB_MEM_1_LOW 0x0190 +#define PHB_MEM_1_HIGH 0x01A0 +#define PHB_IO_ADDR_SIZE 0x01B0 +#define PHB_MEM_1_SIZE 0x01C0 +#define PHB_MEM_ST_OFFSET 0x01D0 +#define PHB_AER_OFFSET 0x0200 +#define PHB_CONFIG_0_HIGH 0x0220 +#define PHB_CONFIG_0_LOW 0x0230 +#define PHB_CONFIG_0_END 0x0240 +#define PHB_MEM_2_LOW 0x02B0 +#define PHB_MEM_2_HIGH 0x02C0 +#define PHB_MEM_2_SIZE_HIGH 0x02D0 +#define PHB_MEM_2_SIZE_LOW 0x02E0 +#define PHB_DOSHOLE_OFFSET 0x08E0 + +/* PHB_CONFIG_RW */ +#define PHB_TCE_ENABLE 0x20000000 +#define PHB_SLOT_DISABLE 0x1C000000 +#define PHB_DAC_DISABLE 0x01000000 +#define PHB_MEM2_ENABLE 0x00400000 +#define PHB_MCSR_ENABLE 0x00100000 +/* TAR (Table Address Register) */ +#define TAR_SW_BITS 0x0000ffffffff800fUL +#define TAR_VALID 0x0000000000000008UL +/* CSR (Channel/DMA Status Register) */ +#define CSR_AGENT_MASK 0xffe0ffff + +#define MAX_NUM_OF_PHBS 8 /* how many PHBs in total? 
*/ +#define MAX_PHB_BUS_NUM (MAX_NUM_OF_PHBS * 2) /* max dev->bus->number */ +#define PHBS_PER_CALGARY 4 + +/* register offsets in Calgary's internal register space */ +static const unsigned long tar_offsets[] = { + 0x0580 /* TAR0 */, + 0x0588 /* TAR1 */, + 0x0590 /* TAR2 */, + 0x0598 /* TAR3 */ +}; + +static const unsigned long split_queue_offsets[] = { + 0x4870 /* SPLIT QUEUE 0 */, + 0x5870 /* SPLIT QUEUE 1 */, + 0x6870 /* SPLIT QUEUE 2 */, + 0x7870 /* SPLIT QUEUE 3 */ +}; + +static const unsigned long phb_offsets[] = { + 0x8000 /* PHB0 */, + 0x9000 /* PHB1 */, + 0xA000 /* PHB2 */, + 0xB000 /* PHB3 */ +}; + +void* tce_table_kva[MAX_NUM_OF_PHBS * MAX_NUMNODES]; +unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED; +static int translate_empty_slots __read_mostly = 0; +static int calgary_detected __read_mostly = 0; + +/* + * the bitmap of PHBs the user requested that we disable + * translation on. + */ +static DECLARE_BITMAP(translation_disabled, MAX_NUMNODES * MAX_PHB_BUS_NUM); + +static void tce_cache_blast(struct iommu_table *tbl); + +/* enable this to stress test the chip's TCE cache */ +#ifdef CONFIG_IOMMU_DEBUG +static inline void tce_cache_blast_stress(struct iommu_table *tbl) +{ + tce_cache_blast(tbl); +} +#else +static inline void tce_cache_blast_stress(struct iommu_table *tbl) +{ +} +#endif /* BLAST_TCE_CACHE_ON_UNMAP */ + +static inline unsigned int num_dma_pages(unsigned long dma, unsigned int dmalen) +{ + unsigned int npages; + + npages = PAGE_ALIGN(dma + dmalen) - (dma & PAGE_MASK); + npages >>= PAGE_SHIFT; + + return npages; +} + +static inline int translate_phb(struct pci_dev* dev) +{ + int disabled = test_bit(dev->bus->number, translation_disabled); + return !disabled; +} + +static void iommu_range_reserve(struct iommu_table *tbl, + unsigned long start_addr, unsigned int npages) +{ + unsigned long index; + unsigned long end; + + index = start_addr >> PAGE_SHIFT; + + /* bail out if we're asked to reserve a region we don't cover */ + if (index >= tbl->it_size) + return; + + end = index + npages; + if (end > tbl->it_size) /* don't go off the table */ + end = tbl->it_size; + + while (index < end) { + if (test_bit(index, tbl->it_map)) + printk(KERN_ERR "Calgary: entry already allocated at " + "0x%lx tbl %p dma 0x%lx npages %u\n", + index, tbl, start_addr, npages); + ++index; + } + set_bit_string(tbl->it_map, start_addr >> PAGE_SHIFT, npages); +} + +static unsigned long iommu_range_alloc(struct iommu_table *tbl, + unsigned int npages) +{ + unsigned long offset; + + BUG_ON(npages == 0); + + offset = find_next_zero_string(tbl->it_map, tbl->it_hint, + tbl->it_size, npages); + if (offset == ~0UL) { + tce_cache_blast(tbl); + offset = find_next_zero_string(tbl->it_map, 0, + tbl->it_size, npages); + if (offset == ~0UL) { + printk(KERN_WARNING "Calgary: IOMMU full.\n"); + if (panic_on_overflow) + panic("Calgary: fix the allocator.\n"); + else + return bad_dma_address; + } + } + + set_bit_string(tbl->it_map, offset, npages); + tbl->it_hint = offset + npages; + BUG_ON(tbl->it_hint > tbl->it_size); + + return offset; +} + +static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *vaddr, + unsigned int npages, int direction) +{ + unsigned long entry, flags; + dma_addr_t ret = bad_dma_address; + + spin_lock_irqsave(&tbl->it_lock, flags); + + entry = iommu_range_alloc(tbl, npages); + + if (unlikely(entry == bad_dma_address)) + goto error; + + /* set the return dma address */ + ret = (entry << PAGE_SHIFT) | ((unsigned long)vaddr & ~PAGE_MASK); + + /* put the TCEs in the HW table 
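+ * (illustrative note, not in the original patch: tce_build() writes
+ * one TCE per 4KB page, so the DMA address computed above, i.e. the
+ * table index shifted by PAGE_SHIFT plus vaddr's sub-page offset,
+ * gives the device a contiguous IO view of the buffer)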
*/ + tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK, + direction); + + spin_unlock_irqrestore(&tbl->it_lock, flags); + + return ret; + +error: + spin_unlock_irqrestore(&tbl->it_lock, flags); + printk(KERN_WARNING "Calgary: failed to allocate %u pages in " + "iommu %p\n", npages, tbl); + return bad_dma_address; +} + +static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, + unsigned int npages) +{ + unsigned long entry; + unsigned long i; + + entry = dma_addr >> PAGE_SHIFT; + + BUG_ON(entry + npages > tbl->it_size); + + tce_free(tbl, entry, npages); + + for (i = 0; i < npages; ++i) { + if (!test_bit(entry + i, tbl->it_map)) + printk(KERN_ERR "Calgary: bit is off at 0x%lx " + "tbl %p dma 0x%Lx entry 0x%lx npages %u\n", + entry + i, tbl, dma_addr, entry, npages); + } + + __clear_bit_string(tbl->it_map, entry, npages); + + tce_cache_blast_stress(tbl); +} + +static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, + unsigned int npages) +{ + unsigned long flags; + + spin_lock_irqsave(&tbl->it_lock, flags); + + __iommu_free(tbl, dma_addr, npages); + + spin_unlock_irqrestore(&tbl->it_lock, flags); +} + +static void __calgary_unmap_sg(struct iommu_table *tbl, + struct scatterlist *sglist, int nelems, int direction) +{ + while (nelems--) { + unsigned int npages; + dma_addr_t dma = sglist->dma_address; + unsigned int dmalen = sglist->dma_length; + + if (dmalen == 0) + break; + + npages = num_dma_pages(dma, dmalen); + __iommu_free(tbl, dma, npages); + sglist++; + } +} + +void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist, + int nelems, int direction) +{ + unsigned long flags; + struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata; + + if (!translate_phb(to_pci_dev(dev))) + return; + + spin_lock_irqsave(&tbl->it_lock, flags); + + __calgary_unmap_sg(tbl, sglist, nelems, direction); + + spin_unlock_irqrestore(&tbl->it_lock, flags); +} + +static int calgary_nontranslate_map_sg(struct device* dev, + struct scatterlist *sg, int nelems, int direction) +{ + int i; + + for (i = 0; i < nelems; i++ ) { + struct scatterlist *s = &sg[i]; + BUG_ON(!s->page); + s->dma_address = virt_to_bus(page_address(s->page) +s->offset); + s->dma_length = s->length; + } + return nelems; +} + +int calgary_map_sg(struct device *dev, struct scatterlist *sg, + int nelems, int direction) +{ + struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata; + unsigned long flags; + unsigned long vaddr; + unsigned int npages; + unsigned long entry; + int i; + + if (!translate_phb(to_pci_dev(dev))) + return calgary_nontranslate_map_sg(dev, sg, nelems, direction); + + spin_lock_irqsave(&tbl->it_lock, flags); + + for (i = 0; i < nelems; i++ ) { + struct scatterlist *s = &sg[i]; + BUG_ON(!s->page); + + vaddr = (unsigned long)page_address(s->page) + s->offset; + npages = num_dma_pages(vaddr, s->length); + + entry = iommu_range_alloc(tbl, npages); + if (entry == bad_dma_address) { + /* makes sure unmap knows to stop */ + s->dma_length = 0; + goto error; + } + + s->dma_address = (entry << PAGE_SHIFT) | s->offset; + + /* insert into HW table */ + tce_build(tbl, entry, npages, vaddr & PAGE_MASK, + direction); + + s->dma_length = s->length; + } + + spin_unlock_irqrestore(&tbl->it_lock, flags); + + return nelems; +error: + __calgary_unmap_sg(tbl, sg, nelems, direction); + for (i = 0; i < nelems; i++) { + sg[i].dma_address = bad_dma_address; + sg[i].dma_length = 0; + } + spin_unlock_irqrestore(&tbl->it_lock, flags); + return 0; +} + +dma_addr_t calgary_map_single(struct device 
*dev, void *vaddr, + size_t size, int direction) +{ + dma_addr_t dma_handle = bad_dma_address; + unsigned long uaddr; + unsigned int npages; + struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata; + + uaddr = (unsigned long)vaddr; + npages = num_dma_pages(uaddr, size); + + if (translate_phb(to_pci_dev(dev))) + dma_handle = iommu_alloc(tbl, vaddr, npages, direction); + else + dma_handle = virt_to_bus(vaddr); + + return dma_handle; +} + +void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle, + size_t size, int direction) +{ + struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata; + unsigned int npages; + + if (!translate_phb(to_pci_dev(dev))) + return; + + npages = num_dma_pages(dma_handle, size); + iommu_free(tbl, dma_handle, npages); +} + +void* calgary_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + void *ret = NULL; + dma_addr_t mapping; + unsigned int npages, order; + struct iommu_table *tbl; + + tbl = to_pci_dev(dev)->bus->self->sysdata; + + size = PAGE_ALIGN(size); /* size rounded up to full pages */ + npages = size >> PAGE_SHIFT; + order = get_order(size); + + /* alloc enough pages (and possibly more) */ + ret = (void *)__get_free_pages(flag, order); + if (!ret) + goto error; + memset(ret, 0, size); + + if (translate_phb(to_pci_dev(dev))) { + /* set up tces to cover the allocated range */ + mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL); + if (mapping == bad_dma_address) + goto free; + + *dma_handle = mapping; + } else /* non translated slot */ + *dma_handle = virt_to_bus(ret); + + return ret; + +free: + free_pages((unsigned long)ret, get_order(size)); + ret = NULL; +error: + return ret; +} + +static struct dma_mapping_ops calgary_dma_ops = { + .alloc_coherent = calgary_alloc_coherent, + .map_single = calgary_map_single, + .unmap_single = calgary_unmap_single, + .map_sg = calgary_map_sg, + .unmap_sg = calgary_unmap_sg, +}; + +static inline int busno_to_phbid(unsigned char num) +{ + return bus_to_phb(num) % PHBS_PER_CALGARY; +} + +static inline unsigned long split_queue_offset(unsigned char num) +{ + size_t idx = busno_to_phbid(num); + + return split_queue_offsets[idx]; +} + +static inline unsigned long tar_offset(unsigned char num) +{ + size_t idx = busno_to_phbid(num); + + return tar_offsets[idx]; +} + +static inline unsigned long phb_offset(unsigned char num) +{ + size_t idx = busno_to_phbid(num); + + return phb_offsets[idx]; +} + +static inline void __iomem* calgary_reg(void __iomem *bar, unsigned long offset) +{ + unsigned long target = ((unsigned long)bar) | offset; + return (void __iomem*)target; +} + +static void tce_cache_blast(struct iommu_table *tbl) +{ + u64 val; + u32 aer; + int i = 0; + void __iomem *bbar = tbl->bbar; + void __iomem *target; + + /* disable arbitration on the bus */ + target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_AER_OFFSET); + aer = readl(target); + writel(0, target); + + /* read plssr to ensure it got there */ + target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_PLSSR_OFFSET); + val = readl(target); + + /* poll split queues until all DMA activity is done */ + target = calgary_reg(bbar, split_queue_offset(tbl->it_busno)); + do { + val = readq(target); + i++; + } while ((val & 0xff) != 0xff && i < 100); + if (i == 100) + printk(KERN_WARNING "Calgary: PCI bus not quiesced, " + "continuing anyway\n"); + + /* invalidate TCE cache */ + target = calgary_reg(bbar, tar_offset(tbl->it_busno)); + writeq(tbl->tar_val, target); + + /* enable arbitration */ + 
target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_AER_OFFSET); + writel(aer, target); + (void)readl(target); /* flush */ +} + +static void __init calgary_reserve_mem_region(struct pci_dev *dev, u64 start, + u64 limit) +{ + unsigned int numpages; + + limit = limit | 0xfffff; + limit++; + + numpages = ((limit - start) >> PAGE_SHIFT); + iommu_range_reserve(dev->sysdata, start, numpages); +} + +static void __init calgary_reserve_peripheral_mem_1(struct pci_dev *dev) +{ + void __iomem *target; + u64 low, high, sizelow; + u64 start, limit; + struct iommu_table *tbl = dev->sysdata; + unsigned char busnum = dev->bus->number; + void __iomem *bbar = tbl->bbar; + + /* peripheral MEM_1 region */ + target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_LOW); + low = be32_to_cpu(readl(target)); + target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_HIGH); + high = be32_to_cpu(readl(target)); + target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_SIZE); + sizelow = be32_to_cpu(readl(target)); + + start = (high << 32) | low; + limit = sizelow; + + calgary_reserve_mem_region(dev, start, limit); +} + +static void __init calgary_reserve_peripheral_mem_2(struct pci_dev *dev) +{ + void __iomem *target; + u32 val32; + u64 low, high, sizelow, sizehigh; + u64 start, limit; + struct iommu_table *tbl = dev->sysdata; + unsigned char busnum = dev->bus->number; + void __iomem *bbar = tbl->bbar; + + /* is it enabled? */ + target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET); + val32 = be32_to_cpu(readl(target)); + if (!(val32 & PHB_MEM2_ENABLE)) + return; + + target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_LOW); + low = be32_to_cpu(readl(target)); + target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_HIGH); + high = be32_to_cpu(readl(target)); + target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_SIZE_LOW); + sizelow = be32_to_cpu(readl(target)); + target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_SIZE_HIGH); + sizehigh = be32_to_cpu(readl(target)); + + start = (high << 32) | low; + limit = (sizehigh << 32) | sizelow; + + calgary_reserve_mem_region(dev, start, limit); +} + +/* + * some regions of the IO address space do not get translated, so we + * must not give devices IO addresses in those regions. The regions + * are the 640KB-1MB region and the two PCI peripheral memory holes. + * Reserve all of them in the IOMMU bitmap to avoid giving them out + * later. 
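+ * (illustrative note, not in the original patch: the 640KB-1MB hole
+ * is the legacy VGA/BIOS window at 0xA0000-0xFFFFF, matching the
+ * start = 640 * 1024 reservation below; bad_dma_address is reserved
+ * as well so the error cookie can never collide with a real mapping)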
+ */ +static void __init calgary_reserve_regions(struct pci_dev *dev) +{ + unsigned int npages; + void __iomem *bbar; + unsigned char busnum; + u64 start; + struct iommu_table *tbl = dev->sysdata; + + bbar = tbl->bbar; + busnum = dev->bus->number; + + /* reserve bad_dma_address in case it's a legal address */ + iommu_range_reserve(tbl, bad_dma_address, 1); + + /* avoid the BIOS/VGA first 640KB-1MB region */ + start = (640 * 1024); + npages = ((1024 - 640) * 1024) >> PAGE_SHIFT; + iommu_range_reserve(tbl, start, npages); + + /* reserve the two PCI peripheral memory regions in IO space */ + calgary_reserve_peripheral_mem_1(dev); + calgary_reserve_peripheral_mem_2(dev); +} + +static int __init calgary_setup_tar(struct pci_dev *dev, void __iomem *bbar) +{ + u64 val64; + u64 table_phys; + void __iomem *target; + int ret; + struct iommu_table *tbl; + + /* build TCE tables for each PHB */ + ret = build_tce_table(dev, bbar); + if (ret) + return ret; + + calgary_reserve_regions(dev); + + /* set TARs for each PHB */ + target = calgary_reg(bbar, tar_offset(dev->bus->number)); + val64 = be64_to_cpu(readq(target)); + + /* zero out all TAR bits under sw control */ + val64 &= ~TAR_SW_BITS; + + tbl = dev->sysdata; + table_phys = (u64)__pa(tbl->it_base); + val64 |= table_phys; + + BUG_ON(specified_table_size > TCE_TABLE_SIZE_8M); + val64 |= (u64) specified_table_size; + + tbl->tar_val = cpu_to_be64(val64); + writeq(tbl->tar_val, target); + readq(target); /* flush */ + + return 0; +} + +static void __init calgary_free_tar(struct pci_dev *dev) +{ + u64 val64; + struct iommu_table *tbl = dev->sysdata; + void __iomem *target; + + target = calgary_reg(tbl->bbar, tar_offset(dev->bus->number)); + val64 = be64_to_cpu(readq(target)); + val64 &= ~TAR_SW_BITS; + writeq(cpu_to_be64(val64), target); + readq(target); /* flush */ + + kfree(tbl); + dev->sysdata = NULL; +} + +static void calgary_watchdog(unsigned long data) +{ + struct pci_dev *dev = (struct pci_dev *)data; + struct iommu_table *tbl = dev->sysdata; + void __iomem *bbar = tbl->bbar; + u32 val32; + void __iomem *target; + + target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_CSR_OFFSET); + val32 = be32_to_cpu(readl(target)); + + /* If no error, the agent ID in the CSR is not valid */ + if (val32 & CSR_AGENT_MASK) { + printk(KERN_EMERG "calgary_watchdog: DMA error on bus %d, " + "CSR = %#x\n", dev->bus->number, val32); + writel(0, target); + + /* Disable bus that caused the error */ + target = calgary_reg(bbar, phb_offset(tbl->it_busno) | + PHB_CONFIG_RW_OFFSET); + val32 = be32_to_cpu(readl(target)); + val32 |= PHB_SLOT_DISABLE; + writel(cpu_to_be32(val32), target); + readl(target); /* flush */ + } else { + /* Reset the timer */ + mod_timer(&tbl->watchdog_timer, jiffies + 2 * HZ); + } +} + +static void __init calgary_enable_translation(struct pci_dev *dev) +{ + u32 val32; + unsigned char busnum; + void __iomem *target; + void __iomem *bbar; + struct iommu_table *tbl; + + busnum = dev->bus->number; + tbl = dev->sysdata; + bbar = tbl->bbar; + + /* enable TCE in PHB Config Register */ + target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET); + val32 = be32_to_cpu(readl(target)); + val32 |= PHB_TCE_ENABLE | PHB_DAC_DISABLE | PHB_MCSR_ENABLE; + + printk(KERN_INFO "Calgary: enabling translation on PHB %d\n", busnum); + printk(KERN_INFO "Calgary: errant DMAs will now be prevented on this " + "bus.\n"); + + writel(cpu_to_be32(val32), target); + readl(target); /* flush */ + + init_timer(&tbl->watchdog_timer); + tbl->watchdog_timer.function = 
&calgary_watchdog; + tbl->watchdog_timer.data = (unsigned long)dev; + mod_timer(&tbl->watchdog_timer, jiffies); +} + +static void __init calgary_disable_translation(struct pci_dev *dev) +{ + u32 val32; + unsigned char busnum; + void __iomem *target; + void __iomem *bbar; + struct iommu_table *tbl; + + busnum = dev->bus->number; + tbl = dev->sysdata; + bbar = tbl->bbar; + + /* disable TCE in PHB Config Register */ + target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET); + val32 = be32_to_cpu(readl(target)); + val32 &= ~(PHB_TCE_ENABLE | PHB_DAC_DISABLE | PHB_MCSR_ENABLE); + + printk(KERN_INFO "Calgary: disabling translation on PHB %d!\n", busnum); + writel(cpu_to_be32(val32), target); + readl(target); /* flush */ + + del_timer_sync(&tbl->watchdog_timer); +} + +static inline unsigned int __init locate_register_space(struct pci_dev *dev) +{ + int rionodeid; + u32 address; + + rionodeid = (dev->bus->number % 15 > 4) ? 3 : 2; + /* + * register space address calculation as follows: + * FE0MB-8MB*OneBasedChassisNumber+1MB*(RioNodeId-ChassisBase) + * ChassisBase is always zero for x366/x260/x460 + * RioNodeId is 2 for first Calgary, 3 for second Calgary + */ + address = START_ADDRESS - + (0x800000 * (ONE_BASED_CHASSIS_NUM + dev->bus->number / 15)) + + (0x100000) * (rionodeid - CHASSIS_BASE); + return address; +} + +static int __init calgary_init_one_nontraslated(struct pci_dev *dev) +{ + dev->sysdata = NULL; + dev->bus->self = dev; + + return 0; +} + +static int __init calgary_init_one(struct pci_dev *dev) +{ + u32 address; + void __iomem *bbar; + int ret; + + address = locate_register_space(dev); + /* map entire 1MB of Calgary config space */ + bbar = ioremap_nocache(address, 1024 * 1024); + if (!bbar) { + ret = -ENODATA; + goto done; + } + + ret = calgary_setup_tar(dev, bbar); + if (ret) + goto iounmap; + + dev->bus->self = dev; + calgary_enable_translation(dev); + + return 0; + +iounmap: + iounmap(bbar); +done: + return ret; +} + +static int __init calgary_init(void) +{ + int i, ret = -ENODEV; + struct pci_dev *dev = NULL; + + for (i = 0; i <= num_online_nodes() * MAX_NUM_OF_PHBS; i++) { + dev = pci_get_device(PCI_VENDOR_ID_IBM, + PCI_DEVICE_ID_IBM_CALGARY, + dev); + if (!dev) + break; + if (!translate_phb(dev)) { + calgary_init_one_nontraslated(dev); + continue; + } + if (!tce_table_kva[i] && !translate_empty_slots) { + pci_dev_put(dev); + continue; + } + ret = calgary_init_one(dev); + if (ret) + goto error; + } + + return ret; + +error: + for (i--; i >= 0; i--) { + dev = pci_find_device_reverse(PCI_VENDOR_ID_IBM, + PCI_DEVICE_ID_IBM_CALGARY, + dev); + if (!translate_phb(dev)) { + pci_dev_put(dev); + continue; + } + if (!tce_table_kva[i] && !translate_empty_slots) + continue; + calgary_disable_translation(dev); + calgary_free_tar(dev); + pci_dev_put(dev); + } + + return ret; +} + +static inline int __init determine_tce_table_size(u64 ram) +{ + int ret; + + if (specified_table_size != TCE_TABLE_SIZE_UNSPECIFIED) + return specified_table_size; + + /* + * Table sizes are from 0 to 7 (TCE_TABLE_SIZE_64K to + * TCE_TABLE_SIZE_8M). Table size 0 has 8K entries and each + * larger table size has twice as many entries, so shift the + * max ram address by 13 to divide by 8K and then look at the + * order of the result to choose between 0-7. 
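+ * Worked example (illustrative, not from the original patch): with
+ * 1GB of RAM, ram >> 13 = 128KB and get_order(128KB) = 5, i.e.
+ * TCE_TABLE_SIZE_2M: (1 << 5) << 13 = 262144 entries, a 2MB table
+ * at 8 bytes per u64 TCE, covering 262144 * 4KB = 1GB of IO space
+ * per PHB. The endpoints match the boot-options text: size 0 is a
+ * 64KB table covering 32MB, size 7 an 8MB table covering 4GB.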
+ */ + ret = get_order(ram >> 13); + if (ret > TCE_TABLE_SIZE_8M) + ret = TCE_TABLE_SIZE_8M; + + return ret; +} + +void __init detect_calgary(void) +{ + u32 val; + int bus, table_idx; + void *tbl; + int detected = 0; + + /* + * if the user specified iommu=off or iommu=soft or we found + * another HW IOMMU already, bail out. + */ + if (swiotlb || no_iommu || iommu_detected) + return; + + specified_table_size = determine_tce_table_size(end_pfn * PAGE_SIZE); + + for (bus = 0, table_idx = 0; + bus <= num_online_nodes() * MAX_PHB_BUS_NUM; + bus++) { + BUG_ON(bus > MAX_NUMNODES * MAX_PHB_BUS_NUM); + if (read_pci_config(bus, 0, 0, 0) != PCI_VENDOR_DEVICE_ID_CALGARY) + continue; + if (test_bit(bus, translation_disabled)) { + printk(KERN_INFO "Calgary: translation is disabled for " + "PHB 0x%x\n", bus); + /* skip this phb, don't allocate a tbl for it */ + tce_table_kva[table_idx] = NULL; + table_idx++; + continue; + } + /* + * scan the first slot of the PCI bus to see if there + * are any devices present + */ + val = read_pci_config(bus, 1, 0, 0); + if (val != 0xffffffff || translate_empty_slots) { + tbl = alloc_tce_table(); + if (!tbl) + goto cleanup; + detected = 1; + } else + tbl = NULL; + + tce_table_kva[table_idx] = tbl; + table_idx++; + } + + if (detected) { + iommu_detected = 1; + calgary_detected = 1; + printk(KERN_INFO "PCI-DMA: Calgary IOMMU detected. " + "TCE table spec is %d.\n", specified_table_size); + } + return; + +cleanup: + for (--table_idx; table_idx >= 0; --table_idx) + if (tce_table_kva[table_idx]) + free_tce_table(tce_table_kva[table_idx]); +} + +int __init calgary_iommu_init(void) +{ + int ret; + + if (no_iommu || swiotlb) + return -ENODEV; + + if (!calgary_detected) + return -ENODEV; + + /* ok, we're trying to use Calgary - let's roll */ + printk(KERN_INFO "PCI-DMA: Using Calgary IOMMU\n"); + + ret = calgary_init(); + if (ret) { + printk(KERN_ERR "PCI-DMA: Calgary init failed %d, " + "falling back to no_iommu\n", ret); + if (end_pfn > MAX_DMA32_PFN) + printk(KERN_ERR "WARNING more than 4GB of memory, " + "32bit PCI may malfunction.\n"); + return ret; + } + + force_iommu = 1; + dma_ops = &calgary_dma_ops; + + return 0; +} + +static int __init calgary_parse_options(char *p) +{ + unsigned int bridge; + size_t len; + char* endp; + + while (*p) { + if (!strncmp(p, "64k", 3)) + specified_table_size = TCE_TABLE_SIZE_64K; + else if (!strncmp(p, "128k", 4)) + specified_table_size = TCE_TABLE_SIZE_128K; + else if (!strncmp(p, "256k", 4)) + specified_table_size = TCE_TABLE_SIZE_256K; + else if (!strncmp(p, "512k", 4)) + specified_table_size = TCE_TABLE_SIZE_512K; + else if (!strncmp(p, "1M", 2)) + specified_table_size = TCE_TABLE_SIZE_1M; + else if (!strncmp(p, "2M", 2)) + specified_table_size = TCE_TABLE_SIZE_2M; + else if (!strncmp(p, "4M", 2)) + specified_table_size = TCE_TABLE_SIZE_4M; + else if (!strncmp(p, "8M", 2)) + specified_table_size = TCE_TABLE_SIZE_8M; + + len = strlen("translate_empty_slots"); + if (!strncmp(p, "translate_empty_slots", len)) + translate_empty_slots = 1; + + len = strlen("disable"); + if (!strncmp(p, "disable", len)) { + p += len; + if (*p == '=') + ++p; + if (*p == '\0') + break; + bridge = simple_strtol(p, &endp, 0); + if (p == endp) + break; + + if (bridge <= (num_online_nodes() * MAX_PHB_BUS_NUM)) { + printk(KERN_INFO "Calgary: disabling " + "translation for PHB 0x%x\n", bridge); + set_bit(bridge, translation_disabled); + } + } + + p = strpbrk(p, ","); + if (!p) + break; + + p++; /* skip ',' */ + } + return 1; +} +__setup("calgary=", 
calgary_parse_options); diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c index a45844c..9c44f4f 100644 --- a/arch/x86_64/kernel/pci-dma.c +++ b/arch/x86_64/kernel/pci-dma.c @@ -9,6 +9,7 @@ #include #include #include +#include int iommu_merge __read_mostly = 0; EXPORT_SYMBOL(iommu_merge); @@ -291,6 +292,10 @@ void __init pci_iommu_alloc(void) iommu_hole_init(); #endif +#ifdef CONFIG_CALGARY_IOMMU + detect_calgary(); +#endif + #ifdef CONFIG_SWIOTLB pci_swiotlb_init(); #endif @@ -298,6 +303,10 @@ void __init pci_iommu_alloc(void) static int __init pci_iommu_init(void) { +#ifdef CONFIG_CALGARY_IOMMU + calgary_iommu_init(); +#endif + #ifdef CONFIG_IOMMU gart_iommu_init(); #endif diff --git a/arch/x86_64/kernel/tce.c b/arch/x86_64/kernel/tce.c new file mode 100644 index 0000000..8d4c67f --- /dev/null +++ b/arch/x86_64/kernel/tce.c @@ -0,0 +1,202 @@ +/* + * Derived from arch/powerpc/platforms/pseries/iommu.c + * + * Copyright (C) 2006 Jon Mason , IBM Corporation + * Copyright (C) 2006 Muli Ben-Yehuda , IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* flush a tce at 'tceaddr' to main memory */ +static inline void flush_tce(void* tceaddr) +{ + /* a single tce can't cross a cache line */ + if (cpu_has_clflush) + asm volatile("clflush (%0)" :: "r" (tceaddr)); + else + asm volatile("wbinvd":::"memory"); +} + +void tce_build(struct iommu_table *tbl, unsigned long index, + unsigned int npages, unsigned long uaddr, int direction) +{ + u64* tp; + u64 t; + u64 rpn; + + t = (1 << TCE_READ_SHIFT); + if (direction != DMA_TO_DEVICE) + t |= (1 << TCE_WRITE_SHIFT); + + tp = ((u64*)tbl->it_base) + index; + + while (npages--) { + rpn = (virt_to_bus((void*)uaddr)) >> PAGE_SHIFT; + t &= ~TCE_RPN_MASK; + t |= (rpn << TCE_RPN_SHIFT); + + *tp = cpu_to_be64(t); + flush_tce(tp); + + uaddr += PAGE_SIZE; + tp++; + } +} + +void tce_free(struct iommu_table *tbl, long index, unsigned int npages) +{ + u64* tp; + + tp = ((u64*)tbl->it_base) + index; + + while (npages--) { + *tp = cpu_to_be64(0); + flush_tce(tp); + tp++; + } +} + +static inline unsigned int table_size_to_number_of_entries(unsigned char size) +{ + /* + * size is the order of the table, 0-7 + * smallest table is 8K entries, so shift result by 13 to + * multiply by 8K + */ + return (1 << size) << 13; +} + +static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl) +{ + unsigned int bitmapsz; + unsigned int tce_table_index; + unsigned long bmppages; + int ret; + + tbl->it_busno = dev->bus->number; + + /* set the tce table size - measured in entries */ + tbl->it_size = table_size_to_number_of_entries(specified_table_size); + + tce_table_index = bus_to_phb(tbl->it_busno); + tbl->it_base = (unsigned 
long)tce_table_kva[tce_table_index]; + if (!tbl->it_base) { + printk(KERN_ERR "Calgary: iommu_table_setparms: " + "no table allocated?!\n"); + ret = -ENOMEM; + goto done; + } + + /* + * number of bytes needed for the bitmap size in number of + * entries; we need one bit per entry + */ + bitmapsz = tbl->it_size / BITS_PER_BYTE; + bmppages = __get_free_pages(GFP_KERNEL, get_order(bitmapsz)); + if (!bmppages) { + printk(KERN_ERR "Calgary: cannot allocate bitmap\n"); + ret = -ENOMEM; + goto done; + } + + tbl->it_map = (unsigned long*)bmppages; + + memset(tbl->it_map, 0, bitmapsz); + + tbl->it_hint = 0; + + spin_lock_init(&tbl->it_lock); + + return 0; + +done: + return ret; +} + +int build_tce_table(struct pci_dev *dev, void __iomem *bbar) +{ + struct iommu_table *tbl; + int ret; + + if (dev->sysdata) { + printk(KERN_ERR "Calgary: dev %p has sysdata %p\n", + dev, dev->sysdata); + BUG(); + } + + tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL); + if (!tbl) { + printk(KERN_ERR "Calgary: error allocating iommu_table\n"); + ret = -ENOMEM; + goto done; + } + + ret = tce_table_setparms(dev, tbl); + if (ret) + goto free_tbl; + + tce_free(tbl, 0, tbl->it_size); + + tbl->bbar = bbar; + + /* + * NUMA is already using the bus's sysdata pointer, so we use + * the bus's pci_dev's sysdata instead. + */ + dev->sysdata = tbl; + + return 0; + +free_tbl: + kfree(tbl); +done: + return ret; +} + +void* alloc_tce_table(void) +{ + unsigned int size; + + size = table_size_to_number_of_entries(specified_table_size); + size *= TCE_ENTRY_SIZE; + + return __alloc_bootmem_low(size, size, 0); +} + +void free_tce_table(void *tbl) +{ + unsigned int size; + + if (!tbl) + return; + + size = table_size_to_number_of_entries(specified_table_size); + size *= TCE_ENTRY_SIZE; + + free_bootmem(__pa(tbl), size); +} diff --git a/include/asm-x86_64/calgary.h b/include/asm-x86_64/calgary.h new file mode 100644 index 0000000..6e1654f --- /dev/null +++ b/include/asm-x86_64/calgary.h @@ -0,0 +1,66 @@ +/* + * Derived from include/asm-powerpc/iommu.h + * + * Copyright (C) 2006 Jon Mason , IBM Corporation + * Copyright (C) 2006 Muli Ben-Yehuda , IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ASM_X86_64_CALGARY_H +#define _ASM_X86_64_CALGARY_H + +#include +#include +#include +#include +#include + +struct iommu_table { + unsigned long it_base; /* mapped address of tce table */ + unsigned long it_hint; /* Hint for next alloc */ + unsigned long *it_map; /* A simple allocation bitmap for now */ + spinlock_t it_lock; /* Protects it_map */ + unsigned int it_size; /* Size of iommu table in entries */ + unsigned char it_busno; /* Bus number this table belongs to */ + void __iomem *bbar; + u64 tar_val; + struct timer_list watchdog_timer; +}; + +#define TCE_TABLE_SIZE_UNSPECIFIED ~0 +#define TCE_TABLE_SIZE_64K 0 +#define TCE_TABLE_SIZE_128K 1 +#define TCE_TABLE_SIZE_256K 2 +#define TCE_TABLE_SIZE_512K 3 +#define TCE_TABLE_SIZE_1M 4 +#define TCE_TABLE_SIZE_2M 5 +#define TCE_TABLE_SIZE_4M 6 +#define TCE_TABLE_SIZE_8M 7 + +#ifdef CONFIG_CALGARY_IOMMU +extern int calgary_iommu_init(void); +extern void detect_calgary(void); +#else +static inline int calgary_iommu_init(void) { return 1; } +static inline void detect_calgary(void) { return; } +#endif + +static inline unsigned int bus_to_phb(unsigned char busno) +{ + return ((busno % 15 == 0) ? 0 : busno / 2 + 1); +} + +#endif /* _ASM_X86_64_CALGARY_H */ diff --git a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h index 4dbc07c..49c5e92 100644 --- a/include/asm-x86_64/pci.h +++ b/include/asm-x86_64/pci.h @@ -52,7 +52,7 @@ extern int iommu_setup(char *opt); */ #define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) -#ifdef CONFIG_IOMMU +#if defined(CONFIG_IOMMU) || defined(CONFIG_CALGARY_IOMMU) /* * x86-64 always supports DAC, but sometimes it is useful to force diff --git a/include/asm-x86_64/tce.h b/include/asm-x86_64/tce.h new file mode 100644 index 0000000..ee51d31 --- /dev/null +++ b/include/asm-x86_64/tce.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2006 Muli Ben-Yehuda , IBM Corporation + * Copyright (C) 2006 Jon Mason , IBM Corporation + * + * This file is derived from asm-powerpc/tce.h. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ASM_X86_64_TCE_H +#define _ASM_X86_64_TCE_H + +extern void* tce_table_kva[]; +extern unsigned int specified_table_size; +struct iommu_table; + +#define TCE_ENTRY_SIZE 8 /* in bytes */ + +#define TCE_READ_SHIFT 0 +#define TCE_WRITE_SHIFT 1 +#define TCE_HUBID_SHIFT 2 /* unused */ +#define TCE_RSVD_SHIFT 8 /* unused */ +#define TCE_RPN_SHIFT 12 +#define TCE_UNUSED_SHIFT 48 /* unused */ + +#define TCE_RPN_MASK 0x0000fffffffff000ULL + +extern void tce_build(struct iommu_table *tbl, unsigned long index, + unsigned int npages, unsigned long uaddr, int direction); +extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages); +extern void* alloc_tce_table(void); +extern void free_tce_table(void *tbl); +extern int build_tce_table(struct pci_dev *dev, void __iomem *bbar); + +#endif /* _ASM_X86_64_TCE_H */ -- cgit v1.1 From f3fa8ebc25129bb69929e20b0c84049c39029d8d Mon Sep 17 00:00:00 2001 From: Rohit Seth Date: Mon, 26 Jun 2006 13:58:17 +0200 Subject: [PATCH] x86_64: moving phys_proc_id and cpu_core_id to cpuinfo_x86 Most of the fields of cpuinfo are defined in cpuinfo_x86 structure. This patch moves the phys_proc_id and cpu_core_id for each processor to cpuinfo_x86 structure as well. Signed-off-by: Rohit Seth Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/mce_amd.c | 10 +++++----- arch/x86_64/kernel/pci-gart.c | 3 --- arch/x86_64/kernel/setup.c | 27 +++++++++++++-------------- arch/x86_64/kernel/smpboot.c | 14 +++++--------- include/asm-x86_64/processor.h | 4 ++++ include/asm-x86_64/smp.h | 2 -- include/asm-x86_64/topology.h | 6 ++---- 7 files changed, 29 insertions(+), 37 deletions(-) diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c index d13b241..86e1e02 100644 --- a/arch/x86_64/kernel/mce_amd.c +++ b/arch/x86_64/kernel/mce_amd.c @@ -115,7 +115,7 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c) per_cpu(bank_map, cpu) |= (1 << bank); #ifdef CONFIG_SMP - if (shared_bank[bank] && cpu_core_id[cpu]) + if (shared_bank[bank] && c->cpu_core_id) continue; #endif @@ -323,10 +323,10 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, int bank) struct threshold_bank *b = NULL; #ifdef CONFIG_SMP - if (cpu_core_id[cpu] && shared_bank[bank]) { /* symlink */ + if (cpu_data[cpu].cpu_core_id && shared_bank[bank]) { /* symlink */ char name[16]; unsigned lcpu = first_cpu(cpu_core_map[cpu]); - if (cpu_core_id[lcpu]) + if (cpu_data[lcpu].cpu_core_id) goto out; /* first core not up yet */ b = per_cpu(threshold_banks, lcpu)[bank]; @@ -434,7 +434,7 @@ static __cpuinit int threshold_create_symlinks(unsigned int cpu) int bank, err = 0; unsigned int lcpu = 0; - if (cpu_core_id[cpu]) + if (cpu_data[cpu].cpu_core_id) return 0; for_each_cpu_mask(lcpu, cpu_core_map[cpu]) { if (lcpu == cpu) @@ -455,7 +455,7 @@ static __cpuinit void threshold_remove_symlinks(unsigned int cpu) { int bank; unsigned int lcpu = 0; - if (cpu_core_id[cpu]) + if (cpu_data[cpu].cpu_core_id) return; for_each_cpu_mask(lcpu, cpu_core_map[cpu]) { if (lcpu == cpu) diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c index 9a93954..4ca674d 100644 --- a/arch/x86_64/kernel/pci-gart.c +++ b/arch/x86_64/kernel/pci-gart.c @@ -680,9 +680,6 @@ void __init gart_iommu_init(void) dma_ops = &gart_dma_ops; } -/* Must execute 
after PCI subsystem */ -fs_initcall(gart_iommu_init); - void gart_parse_options(char *p) { int arg; diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index 04b2d7b..24aa25e 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -785,9 +785,9 @@ static int nearby_node(int apicid) static void __init amd_detect_cmp(struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP - int cpu = smp_processor_id(); unsigned bits; #ifdef CONFIG_NUMA + int cpu = smp_processor_id(); int node = 0; unsigned apicid = hard_smp_processor_id(); #endif @@ -805,12 +805,12 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c) } /* Low order bits define the core id (index of core in socket) */ - cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1); + c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1); /* Convert the APIC ID into the socket ID */ - phys_proc_id[cpu] = phys_pkg_id(bits); + c->phys_proc_id = phys_pkg_id(bits); #ifdef CONFIG_NUMA - node = phys_proc_id[cpu]; + node = c->phys_proc_id; if (apicid_to_node[apicid] != NUMA_NO_NODE) node = apicid_to_node[apicid]; if (!node_online(node)) { @@ -823,7 +823,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c) but in the same order as the HT nodeids. If that doesn't result in a usable node fall back to the path for the previous case. */ - int ht_nodeid = apicid - (phys_proc_id[0] << bits); + int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits); if (ht_nodeid >= 0 && apicid_to_node[ht_nodeid] != NUMA_NO_NODE) node = apicid_to_node[ht_nodeid]; @@ -834,7 +834,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c) numa_set_node(cpu, node); printk(KERN_INFO "CPU %d/%x(%d) -> Node %d -> Core %d\n", - cpu, apicid, c->x86_max_cores, node, cpu_core_id[cpu]); + cpu, apicid, c->x86_max_cores, node, c->cpu_core_id); #endif #endif } @@ -905,7 +905,6 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c) #ifdef CONFIG_SMP u32 eax, ebx, ecx, edx; int index_msb, core_bits; - int cpu = smp_processor_id(); cpuid(1, &eax, &ebx, &ecx, &edx); @@ -926,10 +925,10 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c) } index_msb = get_count_order(smp_num_siblings); - phys_proc_id[cpu] = phys_pkg_id(index_msb); + c->phys_proc_id = phys_pkg_id(index_msb); printk(KERN_INFO "CPU: Physical Processor ID: %d\n", - phys_proc_id[cpu]); + c->phys_proc_id); smp_num_siblings = smp_num_siblings / c->x86_max_cores; @@ -937,12 +936,12 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c) core_bits = get_count_order(c->x86_max_cores); - cpu_core_id[cpu] = phys_pkg_id(index_msb) & + c->cpu_core_id = phys_pkg_id(index_msb) & ((1 << core_bits) - 1); if (c->x86_max_cores > 1) printk(KERN_INFO "CPU: Processor Core ID: %d\n", - cpu_core_id[cpu]); + c->cpu_core_id); } #endif } @@ -1080,7 +1079,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c) } #ifdef CONFIG_SMP - phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff; + c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff; #endif } @@ -1288,9 +1287,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) #ifdef CONFIG_SMP if (smp_num_siblings * c->x86_max_cores > 1) { int cpu = c - cpu_data; - seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]); + seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu])); - seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]); + seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); } #endif diff --git a/arch/x86_64/kernel/smpboot.c 
b/arch/x86_64/kernel/smpboot.c index 06535e7..b1c10b1 100644 --- a/arch/x86_64/kernel/smpboot.c +++ b/arch/x86_64/kernel/smpboot.c @@ -63,10 +63,6 @@ /* Number of siblings per CPU package */ int smp_num_siblings = 1; -/* Package ID of each logical CPU */ -u8 phys_proc_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID }; -/* core ID of each logical CPU */ -u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID }; /* Last level cache ID of each logical CPU */ u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID}; @@ -472,8 +468,8 @@ static inline void set_cpu_sibling_map(int cpu) if (smp_num_siblings > 1) { for_each_cpu_mask(i, cpu_sibling_setup_map) { - if (phys_proc_id[cpu] == phys_proc_id[i] && - cpu_core_id[cpu] == cpu_core_id[i]) { + if (c[cpu].phys_proc_id == c[i].phys_proc_id && + c[cpu].cpu_core_id == c[i].cpu_core_id) { cpu_set(i, cpu_sibling_map[cpu]); cpu_set(cpu, cpu_sibling_map[i]); cpu_set(i, cpu_core_map[cpu]); @@ -500,7 +496,7 @@ static inline void set_cpu_sibling_map(int cpu) cpu_set(i, c[cpu].llc_shared_map); cpu_set(cpu, c[i].llc_shared_map); } - if (phys_proc_id[cpu] == phys_proc_id[i]) { + if (c[cpu].phys_proc_id == c[i].phys_proc_id) { cpu_set(i, cpu_core_map[cpu]); cpu_set(cpu, cpu_core_map[i]); /* @@ -1201,8 +1197,8 @@ static void remove_siblinginfo(int cpu) cpu_clear(cpu, cpu_sibling_map[sibling]); cpus_clear(cpu_sibling_map[cpu]); cpus_clear(cpu_core_map[cpu]); - phys_proc_id[cpu] = BAD_APICID; - cpu_core_id[cpu] = BAD_APICID; + c[cpu].phys_proc_id = 0; + c[cpu].cpu_core_id = 0; cpu_clear(cpu, cpu_sibling_setup_map); } diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h index e583f0d..3b3c121 100644 --- a/include/asm-x86_64/processor.h +++ b/include/asm-x86_64/processor.h @@ -69,7 +69,11 @@ struct cpuinfo_x86 { cpumask_t llc_shared_map; /* cpus sharing the last level cache */ #endif __u8 apicid; +#ifdef CONFIG_SMP __u8 booted_cores; /* number of cores as seen by OS */ + __u8 phys_proc_id; /* Physical Processor id. */ + __u8 cpu_core_id; /* Core id. */ +#endif } ____cacheline_aligned; #define X86_VENDOR_INTEL 0 diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h index 7686b9b..6805e1f 100644 --- a/include/asm-x86_64/smp.h +++ b/include/asm-x86_64/smp.h @@ -53,8 +53,6 @@ extern int smp_call_function_single(int cpuid, void (*func) (void *info), extern cpumask_t cpu_sibling_map[NR_CPUS]; extern cpumask_t cpu_core_map[NR_CPUS]; -extern u8 phys_proc_id[NR_CPUS]; -extern u8 cpu_core_id[NR_CPUS]; extern u8 cpu_llc_id[NR_CPUS]; #define SMP_TRAMPOLINE_BASE 0x6000 diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h index d4009f8..c4e46e7 100644 --- a/include/asm-x86_64/topology.h +++ b/include/asm-x86_64/topology.h @@ -55,10 +55,8 @@ extern int __node_distance(int, int); #endif #ifdef CONFIG_SMP -#define topology_physical_package_id(cpu) \ - (phys_proc_id[cpu] == BAD_APICID ? -1 : phys_proc_id[cpu]) -#define topology_core_id(cpu) \ - (cpu_core_id[cpu] == BAD_APICID ? 
0 : cpu_core_id[cpu]) +#define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id) +#define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id) #define topology_core_siblings(cpu) (cpu_core_map[cpu]) #define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) #endif -- cgit v1.1 From 05ebb76109f302b949e745724bbf0f0634dba43f Mon Sep 17 00:00:00 2001 From: Vojtech Pavlik Date: Mon, 26 Jun 2006 13:58:20 +0200 Subject: [PATCH] x86_64: Add useful constants to time.h In timekeeping code, one often does need to use conversion constants. Naming these leads to code that's easier to understand, showing the reader between which units the conversion is made. Signed-off-by: Vojtech Pavlik Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/linux/time.h | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/include/linux/time.h b/include/linux/time.h index 0cd696c..2fa2987 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -28,10 +28,13 @@ struct timezone { #ifdef __KERNEL__ /* Parameters used to convert the timespec values: */ -#define MSEC_PER_SEC 1000L -#define USEC_PER_SEC 1000000L -#define NSEC_PER_SEC 1000000000L -#define NSEC_PER_USEC 1000L +#define MSEC_PER_SEC 1000L +#define USEC_PER_MSEC 1000L +#define NSEC_PER_USEC 1000L +#define NSEC_PER_MSEC 1000000L +#define USEC_PER_SEC 1000000L +#define NSEC_PER_SEC 1000000000L +#define FSEC_PER_SEC 1000000000000000L static inline int timespec_equal(struct timespec *a, struct timespec *b) { -- cgit v1.1 From f8bf3c65a962530821fa9a9b4bad43d8c7e13574 Mon Sep 17 00:00:00 2001 From: Vojtech Pavlik Date: Mon, 26 Jun 2006 13:58:23 +0200 Subject: [PATCH] x86_64: Rename oem_force_hpet_timer to apic_is_clustered_box Rename oem_force_hpet_timer to apic_is_clustered_box, to give the function a better fitting name - it really isn't at all about HPET. Signed-off-by: Vojtech Pavlik Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/apic.c | 7 +++---- arch/x86_64/kernel/time.c | 4 ++-- include/asm-x86_64/hpet.h | 2 +- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c index 29ef990..53fc17d 100644 --- a/arch/x86_64/kernel/apic.c +++ b/arch/x86_64/kernel/apic.c @@ -983,7 +983,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs) } /* - * oem_force_hpet_timer -- force HPET mode for some boxes. + * apic_is_clustered_box() -- Check if we can expect good TSC * * Thus far, the major user of this is IBM's Summit2 series: * @@ -991,7 +991,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs) * multi-chassis. Use available data to take a good guess. * If in doubt, go HPET. */ -__cpuinit int oem_force_hpet_timer(void) +__cpuinit int apic_is_clustered_box(void) { int i, clusters, zeros; unsigned id; @@ -1022,8 +1022,7 @@ __cpuinit int oem_force_hpet_timer(void) } /* - * If clusters > 2, then should be multi-chassis. Return 1 for HPET. - * Else return 0 to use TSC. + * If clusters > 2, then should be multi-chassis. * May have to revisit this when multi-core + hyperthreaded CPUs come * out, but AFAIK this will work even for them. */ diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c index 7392570..54b14e3 100644 --- a/arch/x86_64/kernel/time.c +++ b/arch/x86_64/kernel/time.c @@ -956,10 +956,10 @@ void __init time_init(void) __cpuinit int unsynchronized_tsc(void) { #ifdef CONFIG_SMP - if (oem_force_hpet_timer()) + if (apic_is_clustered_box()) return 1; /* Intel systems are normally all synchronized. 
Exceptions - are handled in the OEM check above. */ + are handled in the check above. */ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) return 0; #endif diff --git a/include/asm-x86_64/hpet.h b/include/asm-x86_64/hpet.h index 18ff7ee..b390984 100644 --- a/include/asm-x86_64/hpet.h +++ b/include/asm-x86_64/hpet.h @@ -55,7 +55,7 @@ extern int is_hpet_enabled(void); extern int hpet_rtc_timer_init(void); -extern int oem_force_hpet_timer(void); +extern int apic_is_clustered_box(void); extern int hpet_use_timer; -- cgit v1.1 From 7b0e85012581a9a8b12a8191d365716bf3ca3d3c Mon Sep 17 00:00:00 2001 From: Vojtech Pavlik Date: Mon, 26 Jun 2006 13:58:26 +0200 Subject: [PATCH] x86_64: Add X86_FEATURE_RDTSCP, fix rdtscp in /proc/cpuinfo This patch adds the X86_FEATURE_RDTSCP #define, so that kernel code can check for the feature easily and also fixes the location of the "rdtscp" string in the cpuinfo tables. Signed-off-by: Vojtech Pavlik Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/setup.c | 2 +- include/asm-x86_64/cpufeature.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index 24aa25e..923b76f 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -1206,7 +1206,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL, - NULL, "fxsr_opt", "rdtscp", NULL, NULL, "lm", "3dnowext", "3dnow", + NULL, "fxsr_opt", NULL, "rdtscp", NULL, "lm", "3dnowext", "3dnow", /* Transmeta-defined */ "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL, diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h index afc44e5..fbf5a19 100644 --- a/include/asm-x86_64/cpufeature.h +++ b/include/asm-x86_64/cpufeature.h @@ -46,6 +46,7 @@ #define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */ #define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ #define X86_FEATURE_FXSR_OPT (1*32+25) /* FXSR optimizations */ +#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */ #define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ #define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ #define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */ -- cgit v1.1 From e30db3e69948dafb616d121e52c8d62d38dab68c Mon Sep 17 00:00:00 2001 From: Vojtech Pavlik Date: Mon, 26 Jun 2006 13:58:29 +0200 Subject: [PATCH] x86_64: Remove hack to manually enable HPET on AMD8111 southbridges Remove #ifdefed code to manually enable HPET on AMD8111, where the BIOS doesn't have ACPI HPET tables and doesn't enable it for us. 
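[Editor's note: for context, the hack removed below relied on raw PCI
"mechanism 1" configuration cycles through ports 0xcf8/0xcfc to program
the AMD8111's HPET base address register: it wrote 0xff000001 to set the
base and enable bit, then read the register back.  A minimal sketch of
that access pattern, assuming bus 0, device 7, function 0, register 0xa0
as encoded in the deleted code:

	#define PCI_CONF1_ADDR(bus, dev, fn, reg) \
		(0x80000000 | ((bus) << 16) | ((dev) << 11) | \
		 ((fn) << 8) | (reg))

	static u32 conf1_read(unsigned bus, unsigned dev,
			      unsigned fn, unsigned reg)
	{
		/* select the config register, then read the data port */
		outl(PCI_CONF1_ADDR(bus, dev, fn, reg), 0xcf8);
		return inl(0xcfc);
	}

	/* 0x800038a0 in the deleted hack == PCI_CONF1_ADDR(0, 7, 0, 0xa0) */

With the BIOS now expected to provide ACPI HPET tables, no such manual
poking is needed.]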
Signed-off-by: Vojtech Pavlik Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/time.c | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c index 54b14e3..ab9bea8 100644 --- a/arch/x86_64/kernel/time.c +++ b/arch/x86_64/kernel/time.c @@ -56,8 +56,6 @@ DEFINE_SPINLOCK(i8253_lock); int nohpet __initdata = 0; static int notsc __initdata = 0; -#undef HPET_HACK_ENABLE_DANGEROUS - unsigned int cpu_khz; /* TSC clocks / usec, not used here */ static unsigned long hpet_period; /* fsecs / HPET clock */ unsigned long hpet_tick; /* HPET clocks / interrupt */ @@ -890,18 +888,6 @@ void __init time_init(void) char *timename; char *gtod; -#ifdef HPET_HACK_ENABLE_DANGEROUS - if (!vxtime.hpet_address) { - printk(KERN_WARNING "time.c: WARNING: Enabling HPET base " - "manually!\n"); - outl(0x800038a0, 0xcf8); - outl(0xff000001, 0xcfc); - outl(0x800038a0, 0xcf8); - vxtime.hpet_address = inl(0xcfc) & 0xfffffffe; - printk(KERN_WARNING "time.c: WARNING: Enabled HPET " - "at %#lx.\n", vxtime.hpet_address); - } -#endif if (nohpet) vxtime.hpet_address = 0; -- cgit v1.1 From 4221133845f81ab4428c79a89e37be2c87624c1a Mon Sep 17 00:00:00 2001 From: Vojtech Pavlik Date: Mon, 26 Jun 2006 13:58:32 +0200 Subject: [PATCH] x86_64: Make use of the *PER* constants in time.c This patch makes use of the newly added conversion constants in time.h to x86-64 time.c. The code gets significantly easier to understand. Signed-off-by: Vojtech Pavlik Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/time.c | 63 ++++++++++++++++++++++++++--------------------- 1 file changed, 35 insertions(+), 28 deletions(-) diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c index ab9bea8..51afb07 100644 --- a/arch/x86_64/kernel/time.c +++ b/arch/x86_64/kernel/time.c @@ -56,6 +56,13 @@ DEFINE_SPINLOCK(i8253_lock); int nohpet __initdata = 0; static int notsc __initdata = 0; +#define USEC_PER_TICK (USEC_PER_SEC / HZ) +#define NSEC_PER_TICK (NSEC_PER_SEC / HZ) +#define FSEC_PER_TICK (FSEC_PER_SEC / HZ) + +#define NS_SCALE 10 /* 2^10, carefully chosen */ +#define US_SCALE 32 /* 2^32, arbitralrily chosen */ + unsigned int cpu_khz; /* TSC clocks / usec, not used here */ static unsigned long hpet_period; /* fsecs / HPET clock */ unsigned long hpet_tick; /* HPET clocks / interrupt */ @@ -88,7 +95,7 @@ static inline unsigned int do_gettimeoffset_tsc(void) t = get_cycles_sync(); if (t < vxtime.last_tsc) t = vxtime.last_tsc; /* hack */ - x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32; + x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> US_SCALE; return x; } @@ -96,7 +103,7 @@ static inline unsigned int do_gettimeoffset_hpet(void) { /* cap counter read to one tick to avoid inconsistencies */ unsigned long counter = hpet_readl(HPET_COUNTER) - vxtime.last; - return (min(counter,hpet_tick) * vxtime.quot) >> 32; + return (min(counter,hpet_tick) * vxtime.quot) >> US_SCALE; } unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc; @@ -116,7 +123,7 @@ void do_gettimeofday(struct timeval *tv) seq = read_seqbegin(&xtime_lock); sec = xtime.tv_sec; - usec = xtime.tv_nsec / 1000; + usec = xtime.tv_nsec / NSEC_PER_USEC; /* i386 does some correction here to keep the clock monotonous even when ntpd is fixing drift. @@ -127,14 +134,14 @@ void do_gettimeofday(struct timeval *tv) in arch/x86_64/kernel/vsyscall.c and export all needed variables in vmlinux.lds. 
-AK */ - t = (jiffies - wall_jiffies) * (1000000L / HZ) + + t = (jiffies - wall_jiffies) * USEC_PER_TICK + do_gettimeoffset(); usec += t; } while (read_seqretry(&xtime_lock, seq)); - tv->tv_sec = sec + usec / 1000000; - tv->tv_usec = usec % 1000000; + tv->tv_sec = sec + usec / USEC_PER_SEC; + tv->tv_usec = usec % USEC_PER_SEC; } EXPORT_SYMBOL(do_gettimeofday); @@ -155,8 +162,8 @@ int do_settimeofday(struct timespec *tv) write_seqlock_irq(&xtime_lock); - nsec -= do_gettimeoffset() * 1000 + - (jiffies - wall_jiffies) * (NSEC_PER_SEC/HZ); + nsec -= do_gettimeoffset() * NSEC_PER_USEC + + (jiffies - wall_jiffies) * NSEC_PER_TICK; wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); @@ -286,7 +293,7 @@ unsigned long long monotonic_clock(void) this_offset = hpet_readl(HPET_COUNTER); } while (read_seqretry(&xtime_lock, seq)); offset = (this_offset - last_offset); - offset *= (NSEC_PER_SEC/HZ) / hpet_tick; + offset *= NSEC_PER_TICK / hpet_tick; } else { do { seq = read_seqbegin(&xtime_lock); @@ -295,7 +302,8 @@ unsigned long long monotonic_clock(void) base = monotonic_base; } while (read_seqretry(&xtime_lock, seq)); this_offset = get_cycles_sync(); - offset = (this_offset - last_offset)*1000 / cpu_khz; + /* FIXME: 1000 or 1000000? */ + offset = (this_offset - last_offset)*1000 / cpu_khz; } return base + offset; } @@ -380,7 +388,7 @@ void main_timer_handler(struct pt_regs *regs) } monotonic_base += - (offset - vxtime.last)*(NSEC_PER_SEC/HZ) / hpet_tick; + (offset - vxtime.last) * NSEC_PER_TICK / hpet_tick; vxtime.last = offset; #ifdef CONFIG_X86_PM_TIMER @@ -389,24 +397,25 @@ void main_timer_handler(struct pt_regs *regs) #endif } else { offset = (((tsc - vxtime.last_tsc) * - vxtime.tsc_quot) >> 32) - (USEC_PER_SEC / HZ); + vxtime.tsc_quot) >> US_SCALE) - USEC_PER_TICK; if (offset < 0) offset = 0; - if (offset > (USEC_PER_SEC / HZ)) { - lost = offset / (USEC_PER_SEC / HZ); - offset %= (USEC_PER_SEC / HZ); + if (offset > USEC_PER_TICK) { + lost = offset / USEC_PER_TICK; + offset %= USEC_PER_TICK; } - monotonic_base += (tsc - vxtime.last_tsc)*1000000/cpu_khz ; + /* FIXME: 1000 or 1000000? */ + monotonic_base += (tsc - vxtime.last_tsc) * 1000000 / cpu_khz; vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot; if ((((tsc - vxtime.last_tsc) * - vxtime.tsc_quot) >> 32) < offset) + vxtime.tsc_quot) >> US_SCALE) < offset) vxtime.last_tsc = tsc - - (((long) offset << 32) / vxtime.tsc_quot) - 1; + (((long) offset << US_SCALE) / vxtime.tsc_quot) - 1; } if (lost > 0) { @@ -466,16 +475,15 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) } static unsigned int cyc2ns_scale __read_mostly; -#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ static inline void set_cyc2ns_scale(unsigned long cpu_khz) { - cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz; + cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / cpu_khz; } static inline unsigned long long cycles_2_ns(unsigned long long cyc) { - return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR; + return (cyc * cyc2ns_scale) >> NS_SCALE; } unsigned long long sched_clock(void) @@ -488,7 +496,7 @@ unsigned long long sched_clock(void) Disadvantage is a small drift between CPUs in some configurations, but that should be tolerable. */ if (__vxtime.mode == VXTIME_HPET) - return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> 32; + return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> US_SCALE; #endif /* Could do CPU core sync here. 
Opteron can execute rdtsc speculatively, @@ -631,7 +639,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new); if (!(freq->flags & CPUFREQ_CONST_LOOPS)) - vxtime.tsc_quot = (1000L << 32) / cpu_khz; + vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz; } set_cyc2ns_scale(cpu_khz_ref); @@ -823,8 +831,7 @@ static int hpet_init(void) if (hpet_period < 100000 || hpet_period > 100000000) return -1; - hpet_tick = (1000000000L * (USEC_PER_SEC / HZ) + hpet_period / 2) / - hpet_period; + hpet_tick = (FSEC_PER_TICK + hpet_period / 2) / hpet_period; hpet_use_timer = (id & HPET_ID_LEGSUP); @@ -898,7 +905,7 @@ void __init time_init(void) -xtime.tv_sec, -xtime.tv_nsec); if (!hpet_init()) - vxtime_hz = (1000000000000000L + hpet_period / 2) / hpet_period; + vxtime_hz = (FSEC_PER_SEC + hpet_period / 2) / hpet_period; else vxtime.hpet_address = 0; @@ -927,8 +934,8 @@ void __init time_init(void) vxtime_hz / 1000000, vxtime_hz % 1000000, timename, gtod); printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n", cpu_khz / 1000, cpu_khz % 1000); - vxtime.quot = (1000000L << 32) / vxtime_hz; - vxtime.tsc_quot = (1000L << 32) / cpu_khz; + vxtime.quot = (USEC_PER_SEC << US_SCALE) / vxtime_hz; + vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz; vxtime.last_tsc = get_cycles_sync(); setup_irq(0, &irq0); -- cgit v1.1 From b2df3ddb68fc02e3bae78b7adaeca8561d02ea6d Mon Sep 17 00:00:00 2001 From: Vojtech Pavlik Date: Mon, 26 Jun 2006 13:58:35 +0200 Subject: [PATCH] x86_64: Explain why HPET T0_CMP register is written twice After writing the CFG register, the first value written to the T0_CMP register is the value at which next interrupt should be triggered, every value after that sets the period of the interrupt. For that reason, the code needs to write the value twice - to set both the phase and period. [AK: I had already figured it out by myself, but it's still useful to have a comment for this.] Signed-off-by: Vojtech Pavlik Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/time.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c index 51afb07..ee622ac 100644 --- a/arch/x86_64/kernel/time.c +++ b/arch/x86_64/kernel/time.c @@ -795,8 +795,8 @@ static int hpet_timer_stop_set_go(unsigned long tick) if (hpet_use_timer) { hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL | HPET_TN_32BIT, HPET_T0_CFG); - hpet_writel(hpet_tick, HPET_T0_CMP); - hpet_writel(hpet_tick, HPET_T0_CMP); /* AK: why twice? 
*/ + hpet_writel(hpet_tick, HPET_T0_CMP); /* next interrupt */ + hpet_writel(hpet_tick, HPET_T0_CMP); /* period */ cfg |= HPET_CFG_LEGACY; } /* -- cgit v1.1 From 2f82bde472184f989283c173c1eb95bab455a774 Mon Sep 17 00:00:00 2001 From: Vojtech Pavlik Date: Mon, 26 Jun 2006 13:58:38 +0200 Subject: [PATCH] x86_64: Update copyright in time.c Update my copyright dates in arch/x86-64/kernel/time.c Signed-off-by: Vojtech Pavlik Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/time.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c index ee622ac..2125d6c 100644 --- a/arch/x86_64/kernel/time.c +++ b/arch/x86_64/kernel/time.c @@ -8,7 +8,7 @@ * Copyright (c) 1995 Markus Kuhn * Copyright (c) 1996 Ingo Molnar * Copyright (c) 1998 Andrea Arcangeli - * Copyright (c) 2002 Vojtech Pavlik + * Copyright (c) 2002,2006 Vojtech Pavlik * Copyright (c) 2003 Andi Kleen * RTC support code taken from arch/i386/kernel/timers/time_hpet.c */ -- cgit v1.1 From bf660254e0d430e5d991e432d5dad8d6714828f6 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 26 Jun 2006 13:58:41 +0200 Subject: [PATCH] i386/x86-64: Fix isoimage when syslinux is in /usr/share It's like this on SUSE systems. Cc: hpa@zytor.com Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/boot/Makefile | 9 +++++++-- arch/x86_64/boot/Makefile | 9 +++++++-- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/arch/i386/boot/Makefile b/arch/i386/boot/Makefile index 33e5547..e979466 100644 --- a/arch/i386/boot/Makefile +++ b/arch/i386/boot/Makefile @@ -109,8 +109,13 @@ fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf isoimage: $(BOOTIMAGE) -rm -rf $(obj)/isoimage mkdir $(obj)/isoimage - cp `echo /usr/lib*/syslinux/isolinux.bin | awk '{ print $1; }'` \ - $(obj)/isoimage + for i in lib lib64 share end ; do \ + if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \ + cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \ + break ; \ + fi ; \ + if [ $$i = end ] ; then exit 1 ; fi ; \ + done cp $(BOOTIMAGE) $(obj)/isoimage/linux echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg if [ -f '$(FDINITRD)' ] ; then \ diff --git a/arch/x86_64/boot/Makefile b/arch/x86_64/boot/Makefile index 43ee6c5..deb063e 100644 --- a/arch/x86_64/boot/Makefile +++ b/arch/x86_64/boot/Makefile @@ -107,8 +107,13 @@ fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf isoimage: $(BOOTIMAGE) -rm -rf $(obj)/isoimage mkdir $(obj)/isoimage - cp `echo /usr/lib*/syslinux/isolinux.bin | awk '{ print $1; }'` \ - $(obj)/isoimage + for i in lib lib64 share end ; do \ + if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \ + cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \ + break ; \ + fi ; \ + if [ $$i = end ] ; then exit 1 ; fi ; \ + done cp $(BOOTIMAGE) $(obj)/isoimage/linux echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg if [ -f '$(FDINITRD)' ] ; then \ -- cgit v1.1 From c38bfdc85aae0c6d1458269c0e063c2f4a116711 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 26 Jun 2006 13:58:44 +0200 Subject: [PATCH] x86_64: Move VM86 config into arch/i386/Kconfig Architecture specific configs like this have no business at all in init/Kconfig. 
This prevents it from being set on x86-64 Pointed out by H.Peter Anvin Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/Kconfig | 9 +++++++++ init/Kconfig | 10 ---------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig index 2206bf6..0f8da3b 100644 --- a/arch/i386/Kconfig +++ b/arch/i386/Kconfig @@ -324,6 +324,15 @@ config X86_MCE_P4THERMAL Enabling this feature will cause a message to be printed when the P4 enters thermal throttling. +config VM86 + default y + bool "Enable VM86 support" if EMBEDDED + help + This option is required by programs like DOSEMU to run 16-bit legacy + code on X86 processors. It also may be needed by software like + XFree86 to initialize some video cards via BIOS. Disabling this + option saves about 6k. + config TOSHIBA tristate "Toshiba Laptop support" ---help--- diff --git a/init/Kconfig b/init/Kconfig index e0358f3..36b02d5 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -236,16 +236,6 @@ config UID16 help This enables the legacy 16-bit UID syscall wrappers. -config VM86 - depends X86 - default y - bool "Enable VM86 support" if EMBEDDED - help - This option is required by programs like DOSEMU to run 16-bit legacy - code on X86 processors. It also may be needed by software like - XFree86 to initialize some video cards via BIOS. Disabling this - option saves about 6k. - config CC_OPTIMIZE_FOR_SIZE bool "Optimize for size (Look out for broken compilers!)" default y -- cgit v1.1 From 17fc14ff1bdbc393e1cf4f6fd1e1e53d72ab9fe5 Mon Sep 17 00:00:00 2001 From: Jacob Shin Date: Mon, 26 Jun 2006 13:58:47 +0200 Subject: [PATCH] x86_64: apic support for extended apic interrupt Add support for extended APIC LVT found in future AMD processors. Signed-off-by: Jacob Shin Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/apic.c | 10 ++++------ include/asm-x86_64/apic.h | 11 ++++++++++- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c index 53fc17d..396e125 100644 --- a/arch/x86_64/kernel/apic.c +++ b/arch/x86_64/kernel/apic.c @@ -909,15 +909,13 @@ int setup_profiling_timer(unsigned int multiplier) return -EINVAL; } -#ifdef CONFIG_X86_MCE_AMD -void setup_threshold_lvt(unsigned long lvt_off) +void setup_APIC_extened_lvt(unsigned char lvt_off, unsigned char vector, + unsigned char msg_type, unsigned char mask) { - unsigned int v = 0; - unsigned long reg = (lvt_off << 4) + 0x500; - v |= THRESHOLD_APIC_VECTOR; + unsigned long reg = (lvt_off << 4) + K8_APIC_EXT_LVT_BASE; + unsigned int v = (mask << 16) | (msg_type << 8) | vector; apic_write(reg, v); } -#endif /* CONFIG_X86_MCE_AMD */ #undef APIC_DIVISOR diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h index c9e6c25..9d43ac8 100644 --- a/include/asm-x86_64/apic.h +++ b/include/asm-x86_64/apic.h @@ -84,9 +84,18 @@ extern void disable_APIC_timer(void); extern void enable_APIC_timer(void); extern void clustered_apic_check(void); +extern void setup_APIC_extened_lvt(unsigned char lvt_off, unsigned char vector, + unsigned char msg_type, unsigned char mask); + +#define K8_APIC_EXT_LVT_BASE 0x500 +#define K8_APIC_EXT_INT_MSG_FIX 0x0 +#define K8_APIC_EXT_INT_MSG_SMI 0x2 +#define K8_APIC_EXT_INT_MSG_NMI 0x4 +#define K8_APIC_EXT_INT_MSG_EXT 0x7 +#define K8_APIC_EXT_LVT_ENTRY_THRESHOLD 0 + extern int disable_timer_pin_1; -extern void setup_threshold_lvt(unsigned long lvt_off); void smp_send_timer_broadcast_ipi(void); void switch_APIC_timer_to_ipi(void *cpumask); 
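[Editor's note: a worked example of the new interface, derived only from
the code in this patch.  For the threshold entry (LVT offset 0), the call
made by the MCE code,

	setup_APIC_extened_lvt(K8_APIC_EXT_LVT_ENTRY_THRESHOLD,
			       THRESHOLD_APIC_VECTOR,
			       K8_APIC_EXT_INT_MSG_FIX, 0);

resolves to reg = (0 << 4) + K8_APIC_EXT_LVT_BASE = 0x500 and
v = (0 << 16) | (K8_APIC_EXT_INT_MSG_FIX << 8) | THRESHOLD_APIC_VECTOR,
i.e. the threshold vector with a fixed message type, unmasked.  That is
the same register and value the deleted setup_threshold_lvt() computed
by hand for an LVT offset of 0.]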
-- cgit v1.1 From fff2e89f11dd9b9b45e9212bc543154ca3d028a1 Mon Sep 17 00:00:00 2001 From: Jacob Shin Date: Mon, 26 Jun 2006 13:58:50 +0200 Subject: [PATCH] x86_64: mce_amd relocate sysfs files Get rid of /sys/devices/system/threshold directory and move mce_amd thresholding files into the machine sysfs directory -- /sys/devices/system/machinecheck. AK: Fixed warning Signed-off-by: Jacob Shin Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/mce.c | 2 +- arch/x86_64/kernel/mce_amd.c | 40 ++++++++++------------------------------ include/asm-x86_64/mce.h | 2 ++ 3 files changed, 13 insertions(+), 31 deletions(-) diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c index c69fc43..acd5816 100644 --- a/arch/x86_64/kernel/mce.c +++ b/arch/x86_64/kernel/mce.c @@ -562,7 +562,7 @@ static struct sysdev_class mce_sysclass = { set_kset_name("machinecheck"), }; -static DEFINE_PER_CPU(struct sys_device, device_mce); +DEFINE_PER_CPU(struct sys_device, device_mce); /* Why are there no generic functions for this? */ #define ACCESSOR(name, var, start) \ diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c index 86e1e02..b96682e 100644 --- a/arch/x86_64/kernel/mce_amd.c +++ b/arch/x86_64/kernel/mce_amd.c @@ -30,7 +30,7 @@ #include #define PFX "mce_threshold: " -#define VERSION "version 1.00.9" +#define VERSION "version 1.0.10" #define NR_BANKS 5 #define THRESHOLD_MAX 0xFFF #define INT_TYPE_APIC 0x00020000 @@ -166,12 +166,6 @@ asmlinkage void mce_threshold_interrupt(void) * Sysfs Interface */ -static struct sysdev_class threshold_sysclass = { - set_kset_name("threshold"), -}; - -static DEFINE_PER_CPU(struct sys_device, device_threshold); - struct threshold_attr { struct attribute attr; ssize_t(*show) (struct threshold_bank *, char *); @@ -332,8 +326,8 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, int bank) b = per_cpu(threshold_banks, lcpu)[bank]; if (!b) goto out; - sprintf(name, "bank%i", bank); - err = sysfs_create_link(&per_cpu(device_threshold, cpu).kobj, + sprintf(name, "threshold_bank%i", bank); + err = sysfs_create_link(&per_cpu(device_mce, cpu).kobj, &b->kobj, name); if (err) goto out; @@ -353,8 +347,8 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, int bank) b->bank = bank; b->interrupt_enable = 0; b->threshold_limit = THRESHOLD_MAX; - kobject_set_name(&b->kobj, "bank%i", bank); - b->kobj.parent = &per_cpu(device_threshold, cpu).kobj; + kobject_set_name(&b->kobj, "threshold_bank%i", bank); + b->kobj.parent = &per_cpu(device_mce, cpu).kobj; b->kobj.ktype = &threshold_ktype; err = kobject_register(&b->kobj); @@ -373,12 +367,6 @@ static __cpuinit int threshold_create_device(unsigned int cpu) int bank; int err = 0; - per_cpu(device_threshold, cpu).id = cpu; - per_cpu(device_threshold, cpu).cls = &threshold_sysclass; - err = sysdev_register(&per_cpu(device_threshold, cpu)); - if (err) - goto out; - for (bank = 0; bank < NR_BANKS; ++bank) { if (!(per_cpu(bank_map, cpu) & 1 << bank)) continue; @@ -407,8 +395,8 @@ static __cpuinit void threshold_remove_bank(unsigned int cpu, int bank) if (!b) return; if (shared_bank[bank] && atomic_read(&b->kobj.kref.refcount) > 2) { - sprintf(name, "bank%i", bank); - sysfs_remove_link(&per_cpu(device_threshold, cpu).kobj, name); + sprintf(name, "threshold_bank%i", bank); + sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name); per_cpu(threshold_banks, cpu)[bank] = NULL; } else { kobject_unregister(&b->kobj); @@ -425,7 +413,6 @@ static __cpuinit void 
threshold_remove_device(unsigned int cpu) continue; threshold_remove_bank(cpu, bank); } - sysdev_unregister(&per_cpu(device_threshold, cpu)); } /* link all existing siblings when first core comes up */ @@ -518,23 +505,16 @@ static struct notifier_block threshold_cpu_notifier = { static __init int threshold_init_device(void) { - int err; int lcpu = 0; - err = sysdev_class_register(&threshold_sysclass); - if (err) - goto out; - /* to hit CPUs online before the notifier is up */ for_each_online_cpu(lcpu) { - err = threshold_create_device(lcpu); + int err = threshold_create_device(lcpu); if (err) - goto out; + return err; } register_cpu_notifier(&threshold_cpu_notifier); - - out: - return err; + return 0; } device_initcall(threshold_init_device); diff --git a/include/asm-x86_64/mce.h b/include/asm-x86_64/mce.h index 7229785..08f721e 100644 --- a/include/asm-x86_64/mce.h +++ b/include/asm-x86_64/mce.h @@ -74,6 +74,8 @@ struct mce_log { #include void mce_log(struct mce *m); +DECLARE_PER_CPU(struct sys_device, device_mce); + #ifdef CONFIG_X86_MCE_INTEL void mce_intel_feature_init(struct cpuinfo_x86 *c); #else -- cgit v1.1 From 95268664390b19962ed41a3506c5bc8149db71e8 Mon Sep 17 00:00:00 2001 From: Jacob Shin Date: Mon, 26 Jun 2006 13:58:53 +0200 Subject: [PATCH] x86_64: mce_amd support for family 0x10 processors Add support for mce threshold registers found in future AMD family 0x10 processors. Backwards compatible with family 0xF hardware. AK: fixed build on !SMP Signed-off-by: Jacob Shin Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/mce_amd.c | 362 ++++++++++++++++++++++++++++++++++--------- include/asm-x86_64/mce.h | 11 +- 2 files changed, 297 insertions(+), 76 deletions(-) diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c index b96682e..10ffbe5 100644 --- a/arch/x86_64/kernel/mce_amd.c +++ b/arch/x86_64/kernel/mce_amd.c @@ -1,5 +1,5 @@ /* - * (c) 2005 Advanced Micro Devices, Inc. + * (c) 2005, 2006 Advanced Micro Devices, Inc. * Your use of this code is subject to the terms and conditions of the * GNU general public license version 2. See "COPYING" or * http://www.gnu.org/licenses/gpl.html @@ -8,9 +8,10 @@ * * Support : jacob.shin@amd.com * - * MC4_MISC0 DRAM ECC Error Threshold available under AMD K8 Rev F. - * MC4_MISC0 exists per physical processor. 
+ * April 2006 + * - added support for AMD Family 0x10 processors * + * All MC4_MISCi registers are shared between multi-cores */ #include @@ -30,8 +31,9 @@ #include #define PFX "mce_threshold: " -#define VERSION "version 1.0.10" -#define NR_BANKS 5 +#define VERSION "version 1.1.0" +#define NR_BANKS 6 +#define NR_BLOCKS 9 #define THRESHOLD_MAX 0xFFF #define INT_TYPE_APIC 0x00020000 #define MASK_VALID_HI 0x80000000 @@ -40,21 +42,33 @@ #define MASK_INT_TYPE_HI 0x00060000 #define MASK_OVERFLOW_HI 0x00010000 #define MASK_ERR_COUNT_HI 0x00000FFF -#define MASK_OVERFLOW 0x0001000000000000L +#define MASK_BLKPTR_LO 0xFF000000 +#define MCG_XBLK_ADDR 0xC0000400 -struct threshold_bank { +struct threshold_block { + unsigned int block; + unsigned int bank; unsigned int cpu; - u8 bank; - u8 interrupt_enable; + u32 address; + u16 interrupt_enable; u16 threshold_limit; struct kobject kobj; + struct list_head miscj; }; -static struct threshold_bank threshold_defaults = { +/* defaults used early on boot */ +static struct threshold_block threshold_defaults = { .interrupt_enable = 0, .threshold_limit = THRESHOLD_MAX, }; +struct threshold_bank { + struct kobject kobj; + struct threshold_block *blocks; + cpumask_t cpus; +}; +static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]); + #ifdef CONFIG_SMP static unsigned char shared_bank[NR_BANKS] = { 0, 0, 0, 0, 1 @@ -68,12 +82,12 @@ static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */ */ /* must be called with correct cpu affinity */ -static void threshold_restart_bank(struct threshold_bank *b, +static void threshold_restart_bank(struct threshold_block *b, int reset, u16 old_limit) { u32 mci_misc_hi, mci_misc_lo; - rdmsr(MSR_IA32_MC0_MISC + b->bank * 4, mci_misc_lo, mci_misc_hi); + rdmsr(b->address, mci_misc_lo, mci_misc_hi); if (b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX)) reset = 1; /* limit cannot be lower than err count */ @@ -94,35 +108,57 @@ static void threshold_restart_bank(struct threshold_bank *b, (mci_misc_hi &= ~MASK_INT_TYPE_HI); mci_misc_hi |= MASK_COUNT_EN_HI; - wrmsr(MSR_IA32_MC0_MISC + b->bank * 4, mci_misc_lo, mci_misc_hi); + wrmsr(b->address, mci_misc_lo, mci_misc_hi); } +/* cpu init entry point, called from mce.c with preempt off */ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c) { - int bank; - u32 mci_misc_lo, mci_misc_hi; + unsigned int bank, block; unsigned int cpu = smp_processor_id(); + u32 low = 0, high = 0, address = 0; for (bank = 0; bank < NR_BANKS; ++bank) { - rdmsr(MSR_IA32_MC0_MISC + bank * 4, mci_misc_lo, mci_misc_hi); + for (block = 0; block < NR_BLOCKS; ++block) { + if (block == 0) + address = MSR_IA32_MC0_MISC + bank * 4; + else if (block == 1) + address = MCG_XBLK_ADDR + + ((low & MASK_BLKPTR_LO) >> 21); + else + ++address; + + if (rdmsr_safe(address, &low, &high)) + continue; - /* !valid, !counter present, bios locked */ - if (!(mci_misc_hi & MASK_VALID_HI) || - !(mci_misc_hi & MASK_VALID_HI >> 1) || - (mci_misc_hi & MASK_VALID_HI >> 2)) - continue; + if (!(high & MASK_VALID_HI)) { + if (block) + continue; + else + break; + } - per_cpu(bank_map, cpu) |= (1 << bank); + if (!(high & MASK_VALID_HI >> 1) || + (high & MASK_VALID_HI >> 2)) + continue; + if (!block) + per_cpu(bank_map, cpu) |= (1 << bank); #ifdef CONFIG_SMP - if (shared_bank[bank] && c->cpu_core_id) - continue; + if (shared_bank[bank] && c->cpu_core_id) + break; #endif + high &= ~MASK_LVTOFF_HI; + high |= K8_APIC_EXT_LVT_ENTRY_THRESHOLD << 20; + wrmsr(address, low, high); + + 
setup_APIC_extened_lvt(K8_APIC_EXT_LVT_ENTRY_THRESHOLD, + THRESHOLD_APIC_VECTOR, + K8_APIC_EXT_INT_MSG_FIX, 0); - setup_threshold_lvt((mci_misc_hi & MASK_LVTOFF_HI) >> 20); - threshold_defaults.cpu = cpu; - threshold_defaults.bank = bank; - threshold_restart_bank(&threshold_defaults, 0, 0); + threshold_defaults.address = address; + threshold_restart_bank(&threshold_defaults, 0, 0); + } } } @@ -137,8 +173,9 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c) */ asmlinkage void mce_threshold_interrupt(void) { - int bank; + unsigned int bank, block; struct mce m; + u32 low = 0, high = 0, address = 0; ack_APIC_irq(); exit_idle(); @@ -150,12 +187,39 @@ asmlinkage void mce_threshold_interrupt(void) /* assume first bank caused it */ for (bank = 0; bank < NR_BANKS; ++bank) { - m.bank = MCE_THRESHOLD_BASE + bank; - rdmsrl(MSR_IA32_MC0_MISC + bank * 4, m.misc); + for (block = 0; block < NR_BLOCKS; ++block) { + if (block == 0) + address = MSR_IA32_MC0_MISC + bank * 4; + else if (block == 1) + address = MCG_XBLK_ADDR + + ((low & MASK_BLKPTR_LO) >> 21); + else + ++address; + + if (rdmsr_safe(address, &low, &high)) + continue; - if (m.misc & MASK_OVERFLOW) { - mce_log(&m); - goto out; + if (!(high & MASK_VALID_HI)) { + if (block) + continue; + else + break; + } + + if (!(high & MASK_VALID_HI >> 1) || + (high & MASK_VALID_HI >> 2)) + continue; + + if (high & MASK_OVERFLOW_HI) { + rdmsrl(address, m.misc); + rdmsrl(MSR_IA32_MC0_STATUS + bank * 4, + m.status); + m.bank = K8_MCE_THRESHOLD_BASE + + bank * NR_BLOCKS + + block; + mce_log(&m); + goto out; + } } } out: @@ -168,12 +232,10 @@ asmlinkage void mce_threshold_interrupt(void) struct threshold_attr { struct attribute attr; - ssize_t(*show) (struct threshold_bank *, char *); - ssize_t(*store) (struct threshold_bank *, const char *, size_t count); + ssize_t(*show) (struct threshold_block *, char *); + ssize_t(*store) (struct threshold_block *, const char *, size_t count); }; -static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]); - static cpumask_t affinity_set(unsigned int cpu) { cpumask_t oldmask = current->cpus_allowed; @@ -189,14 +251,14 @@ static void affinity_restore(cpumask_t oldmask) } #define SHOW_FIELDS(name) \ - static ssize_t show_ ## name(struct threshold_bank * b, char *buf) \ + static ssize_t show_ ## name(struct threshold_block * b, char *buf) \ { \ return sprintf(buf, "%lx\n", (unsigned long) b->name); \ } SHOW_FIELDS(interrupt_enable) SHOW_FIELDS(threshold_limit) -static ssize_t store_interrupt_enable(struct threshold_bank *b, +static ssize_t store_interrupt_enable(struct threshold_block *b, const char *buf, size_t count) { char *end; @@ -213,7 +275,7 @@ static ssize_t store_interrupt_enable(struct threshold_bank *b, return end - buf; } -static ssize_t store_threshold_limit(struct threshold_bank *b, +static ssize_t store_threshold_limit(struct threshold_block *b, const char *buf, size_t count) { char *end; @@ -236,18 +298,18 @@ static ssize_t store_threshold_limit(struct threshold_bank *b, return end - buf; } -static ssize_t show_error_count(struct threshold_bank *b, char *buf) +static ssize_t show_error_count(struct threshold_block *b, char *buf) { u32 high, low; cpumask_t oldmask; oldmask = affinity_set(b->cpu); - rdmsr(MSR_IA32_MC0_MISC + b->bank * 4, low, high); /* ignore low 32 */ + rdmsr(b->address, low, high); affinity_restore(oldmask); return sprintf(buf, "%x\n", (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit)); } -static ssize_t store_error_count(struct threshold_bank *b, +static ssize_t 
store_error_count(struct threshold_block *b, const char *buf, size_t count) { cpumask_t oldmask; @@ -278,12 +340,12 @@ static struct attribute *default_attrs[] = { NULL }; -#define to_bank(k) container_of(k,struct threshold_bank,kobj) +#define to_block(k) container_of(k, struct threshold_block, kobj) #define to_attr(a) container_of(a,struct threshold_attr,attr) static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) { - struct threshold_bank *b = to_bank(kobj); + struct threshold_block *b = to_block(kobj); struct threshold_attr *a = to_attr(attr); ssize_t ret; ret = a->show ? a->show(b, buf) : -EIO; @@ -293,7 +355,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) static ssize_t store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { - struct threshold_bank *b = to_bank(kobj); + struct threshold_block *b = to_block(kobj); struct threshold_attr *a = to_attr(attr); ssize_t ret; ret = a->store ? a->store(b, buf, count) : -EIO; @@ -310,53 +372,164 @@ static struct kobj_type threshold_ktype = { .default_attrs = default_attrs, }; +static __cpuinit int allocate_threshold_blocks(unsigned int cpu, + unsigned int bank, + unsigned int block, + u32 address) +{ + int err; + u32 low, high; + struct threshold_block *b = NULL; + + if ((bank >= NR_BANKS) || (block >= NR_BLOCKS)) + return 0; + + if (rdmsr_safe(address, &low, &high)) + goto recurse; + + if (!(high & MASK_VALID_HI)) { + if (block) + goto recurse; + else + return 0; + } + + if (!(high & MASK_VALID_HI >> 1) || + (high & MASK_VALID_HI >> 2)) + goto recurse; + + b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL); + if (!b) + return -ENOMEM; + memset(b, 0, sizeof(struct threshold_block)); + + b->block = block; + b->bank = bank; + b->cpu = cpu; + b->address = address; + b->interrupt_enable = 0; + b->threshold_limit = THRESHOLD_MAX; + + INIT_LIST_HEAD(&b->miscj); + + if (per_cpu(threshold_banks, cpu)[bank]->blocks) + list_add(&b->miscj, + &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj); + else + per_cpu(threshold_banks, cpu)[bank]->blocks = b; + + kobject_set_name(&b->kobj, "misc%i", block); + b->kobj.parent = &per_cpu(threshold_banks, cpu)[bank]->kobj; + b->kobj.ktype = &threshold_ktype; + err = kobject_register(&b->kobj); + if (err) + goto out_free; +recurse: + if (!block) { + address = (low & MASK_BLKPTR_LO) >> 21; + if (!address) + return 0; + address += MCG_XBLK_ADDR; + } else + ++address; + + err = allocate_threshold_blocks(cpu, bank, ++block, address); + if (err) + goto out_free; + + return err; + +out_free: + if (b) { + kobject_unregister(&b->kobj); + kfree(b); + } + return err; +} + /* symlinks sibling shared banks to first core. first core owns dir/files. 
*/ -static __cpuinit int threshold_create_bank(unsigned int cpu, int bank) +static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) { - int err = 0; + int i, err = 0; struct threshold_bank *b = NULL; + cpumask_t oldmask = CPU_MASK_NONE; + char name[32]; + + sprintf(name, "threshold_bank%i", bank); #ifdef CONFIG_SMP if (cpu_data[cpu].cpu_core_id && shared_bank[bank]) { /* symlink */ - char name[16]; - unsigned lcpu = first_cpu(cpu_core_map[cpu]); - if (cpu_data[lcpu].cpu_core_id) - goto out; /* first core not up yet */ + i = first_cpu(cpu_core_map[cpu]); + + /* first core not up yet */ + if (cpu_data[i].cpu_core_id) + goto out; + + /* already linked */ + if (per_cpu(threshold_banks, cpu)[bank]) + goto out; + + b = per_cpu(threshold_banks, i)[bank]; - b = per_cpu(threshold_banks, lcpu)[bank]; if (!b) goto out; - sprintf(name, "threshold_bank%i", bank); + err = sysfs_create_link(&per_cpu(device_mce, cpu).kobj, &b->kobj, name); if (err) goto out; + + b->cpus = cpu_core_map[cpu]; per_cpu(threshold_banks, cpu)[bank] = b; goto out; } #endif - b = kmalloc(sizeof(struct threshold_bank), GFP_KERNEL); + b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL); if (!b) { err = -ENOMEM; goto out; } memset(b, 0, sizeof(struct threshold_bank)); - b->cpu = cpu; - b->bank = bank; - b->interrupt_enable = 0; - b->threshold_limit = THRESHOLD_MAX; kobject_set_name(&b->kobj, "threshold_bank%i", bank); b->kobj.parent = &per_cpu(device_mce, cpu).kobj; - b->kobj.ktype = &threshold_ktype; - +#ifndef CONFIG_SMP + b->cpus = CPU_MASK_ALL; +#else + b->cpus = cpu_core_map[cpu]; +#endif err = kobject_register(&b->kobj); - if (err) { - kfree(b); - goto out; - } + if (err) + goto out_free; + per_cpu(threshold_banks, cpu)[bank] = b; + + oldmask = affinity_set(cpu); + err = allocate_threshold_blocks(cpu, bank, 0, + MSR_IA32_MC0_MISC + bank * 4); + affinity_restore(oldmask); + + if (err) + goto out_free; + + for_each_cpu_mask(i, b->cpus) { + if (i == cpu) + continue; + + err = sysfs_create_link(&per_cpu(device_mce, i).kobj, + &b->kobj, name); + if (err) + goto out; + + per_cpu(threshold_banks, i)[bank] = b; + } + + goto out; + +out_free: + per_cpu(threshold_banks, cpu)[bank] = NULL; + kfree(b); out: return err; } @@ -385,23 +558,64 @@ static __cpuinit int threshold_create_device(unsigned int cpu) * of shared sysfs dir/files, and rest of the cores will be symlinked to it. 
*/ -/* cpu hotplug call removes all symlinks before first core dies */ +static __cpuinit void deallocate_threshold_block(unsigned int cpu, + unsigned int bank) +{ + struct threshold_block *pos = NULL; + struct threshold_block *tmp = NULL; + struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank]; + + if (!head) + return; + + list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) { + kobject_unregister(&pos->kobj); + list_del(&pos->miscj); + kfree(pos); + } + + kfree(per_cpu(threshold_banks, cpu)[bank]->blocks); + per_cpu(threshold_banks, cpu)[bank]->blocks = NULL; +} + static __cpuinit void threshold_remove_bank(unsigned int cpu, int bank) { + int i = 0; struct threshold_bank *b; - char name[16]; + char name[32]; b = per_cpu(threshold_banks, cpu)[bank]; + if (!b) return; - if (shared_bank[bank] && atomic_read(&b->kobj.kref.refcount) > 2) { - sprintf(name, "threshold_bank%i", bank); + + if (!b->blocks) + goto free_out; + + sprintf(name, "threshold_bank%i", bank); + + /* sibling symlink */ + if (shared_bank[bank] && b->blocks->cpu != cpu) { sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name); - per_cpu(threshold_banks, cpu)[bank] = NULL; - } else { - kobject_unregister(&b->kobj); - kfree(per_cpu(threshold_banks, cpu)[bank]); + per_cpu(threshold_banks, i)[bank] = NULL; + return; } + + /* remove all sibling symlinks before unregistering */ + for_each_cpu_mask(i, b->cpus) { + if (i == cpu) + continue; + + sysfs_remove_link(&per_cpu(device_mce, i).kobj, name); + per_cpu(threshold_banks, i)[bank] = NULL; + } + + deallocate_threshold_block(cpu, bank); + +free_out: + kobject_unregister(&b->kobj); + kfree(b); + per_cpu(threshold_banks, cpu)[bank] = NULL; } static __cpuinit void threshold_remove_device(unsigned int cpu) diff --git a/include/asm-x86_64/mce.h b/include/asm-x86_64/mce.h index 08f721e..d13687d 100644 --- a/include/asm-x86_64/mce.h +++ b/include/asm-x86_64/mce.h @@ -67,8 +67,15 @@ struct mce_log { /* Software defined banks */ #define MCE_EXTENDED_BANK 128 #define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0 -#define MCE_THRESHOLD_BASE MCE_EXTENDED_BANK + 1 /* MCE_AMD */ -#define MCE_THRESHOLD_DRAM_ECC MCE_THRESHOLD_BASE + 4 + +#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) /* MCE_AMD */ +#define K8_MCE_THRESHOLD_BANK_0 (MCE_THRESHOLD_BASE + 0 * 9) +#define K8_MCE_THRESHOLD_BANK_1 (MCE_THRESHOLD_BASE + 1 * 9) +#define K8_MCE_THRESHOLD_BANK_2 (MCE_THRESHOLD_BASE + 2 * 9) +#define K8_MCE_THRESHOLD_BANK_3 (MCE_THRESHOLD_BASE + 3 * 9) +#define K8_MCE_THRESHOLD_BANK_4 (MCE_THRESHOLD_BASE + 4 * 9) +#define K8_MCE_THRESHOLD_BANK_5 (MCE_THRESHOLD_BASE + 5 * 9) +#define K8_MCE_THRESHOLD_DRAM_ECC (MCE_THRESHOLD_BANK_4 + 0) #ifdef __KERNEL__ #include -- cgit v1.1 From 2903ee85ce462d66955b800a0c48e26e51de0aae Mon Sep 17 00:00:00 2001 From: Jacob Shin Date: Mon, 26 Jun 2006 13:58:56 +0200 Subject: [PATCH] x86_64: mce_amd cleanup Clean up mce_amd.c for readability and remove code no longer needed. 
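The mce.h numbering above means each software-defined threshold bank spans NR_BLOCKS (9) consecutive values, so the bank reported to mce_log() is base + bank * 9 + block. For illustration, a minimal user-space sketch of that computation (the helper name is invented for this example; it is not kernel code):

    #include <stdio.h>

    #define MCE_EXTENDED_BANK       128
    #define K8_MCE_THRESHOLD_BASE   (MCE_EXTENDED_BANK + 1)
    #define NR_BLOCKS               9

    /* mirrors m.bank = K8_MCE_THRESHOLD_BASE + bank * NR_BLOCKS + block */
    static unsigned int threshold_sw_bank(unsigned int bank, unsigned int block)
    {
            return K8_MCE_THRESHOLD_BASE + bank * NR_BLOCKS + block;
    }

    int main(void)
    {
            /* bank 4, block 0 carries the DRAM ECC threshold counter */
            printf("%u\n", threshold_sw_bank(4, 0));        /* prints 165 */
            return 0;
    }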
Signed-off-by: Jacob Shin Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/mce_amd.c | 112 +++++++++++-------------------------------- 1 file changed, 29 insertions(+), 83 deletions(-) diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c index 10ffbe5..3f967d4 100644 --- a/arch/x86_64/kernel/mce_amd.c +++ b/arch/x86_64/kernel/mce_amd.c @@ -30,17 +30,17 @@ #include #include -#define PFX "mce_threshold: " -#define VERSION "version 1.1.0" -#define NR_BANKS 6 -#define NR_BLOCKS 9 -#define THRESHOLD_MAX 0xFFF -#define INT_TYPE_APIC 0x00020000 -#define MASK_VALID_HI 0x80000000 -#define MASK_LVTOFF_HI 0x00F00000 -#define MASK_COUNT_EN_HI 0x00080000 -#define MASK_INT_TYPE_HI 0x00060000 -#define MASK_OVERFLOW_HI 0x00010000 +#define PFX "mce_threshold: " +#define VERSION "version 1.1.1" +#define NR_BANKS 6 +#define NR_BLOCKS 9 +#define THRESHOLD_MAX 0xFFF +#define INT_TYPE_APIC 0x00020000 +#define MASK_VALID_HI 0x80000000 +#define MASK_LVTOFF_HI 0x00F00000 +#define MASK_COUNT_EN_HI 0x00080000 +#define MASK_INT_TYPE_HI 0x00060000 +#define MASK_OVERFLOW_HI 0x00010000 #define MASK_ERR_COUNT_HI 0x00000FFF #define MASK_BLKPTR_LO 0xFF000000 #define MCG_XBLK_ADDR 0xC0000400 @@ -222,7 +222,7 @@ asmlinkage void mce_threshold_interrupt(void) } } } - out: +out: irq_exit(); } @@ -231,7 +231,7 @@ asmlinkage void mce_threshold_interrupt(void) */ struct threshold_attr { - struct attribute attr; + struct attribute attr; ssize_t(*show) (struct threshold_block *, char *); ssize_t(*store) (struct threshold_block *, const char *, size_t count); }; @@ -250,11 +250,11 @@ static void affinity_restore(cpumask_t oldmask) set_cpus_allowed(current, oldmask); } -#define SHOW_FIELDS(name) \ - static ssize_t show_ ## name(struct threshold_block * b, char *buf) \ - { \ - return sprintf(buf, "%lx\n", (unsigned long) b->name); \ - } +#define SHOW_FIELDS(name) \ +static ssize_t show_ ## name(struct threshold_block * b, char *buf) \ +{ \ + return sprintf(buf, "%lx\n", (unsigned long) b->name); \ +} SHOW_FIELDS(interrupt_enable) SHOW_FIELDS(threshold_limit) @@ -325,13 +325,13 @@ static ssize_t store_error_count(struct threshold_block *b, .store = _store, \ }; -#define ATTR_FIELDS(name) \ - static struct threshold_attr name = \ +#define RW_ATTR(name) \ +static struct threshold_attr name = \ THRESHOLD_ATTR(name, 0644, show_## name, store_## name) -ATTR_FIELDS(interrupt_enable); -ATTR_FIELDS(threshold_limit); -ATTR_FIELDS(error_count); +RW_ATTR(interrupt_enable); +RW_ATTR(threshold_limit); +RW_ATTR(error_count); static struct attribute *default_attrs[] = { &interrupt_enable.attr, @@ -341,7 +341,7 @@ static struct attribute *default_attrs[] = { }; #define to_block(k) container_of(k, struct threshold_block, kobj) -#define to_attr(a) container_of(a,struct threshold_attr,attr) +#define to_attr(a) container_of(a, struct threshold_attr, attr) static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) { @@ -530,14 +530,14 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) out_free: per_cpu(threshold_banks, cpu)[bank] = NULL; kfree(b); - out: +out: return err; } /* create dir/files for all valid threshold banks */ static __cpuinit int threshold_create_device(unsigned int cpu) { - int bank; + unsigned int bank; int err = 0; for (bank = 0; bank < NR_BANKS; ++bank) { @@ -547,7 +547,7 @@ static __cpuinit int threshold_create_device(unsigned int cpu) if (err) goto out; } - out: +out: return err; } @@ -620,7 +620,7 @@ free_out: static __cpuinit void 
threshold_remove_device(unsigned int cpu) { - int bank; + unsigned int bank; for (bank = 0; bank < NR_BANKS; ++bank) { if (!(per_cpu(bank_map, cpu) & 1 << bank)) @@ -629,54 +629,7 @@ static __cpuinit void threshold_remove_device(unsigned int cpu) } } -/* link all existing siblings when first core comes up */ -static __cpuinit int threshold_create_symlinks(unsigned int cpu) -{ - int bank, err = 0; - unsigned int lcpu = 0; - - if (cpu_data[cpu].cpu_core_id) - return 0; - for_each_cpu_mask(lcpu, cpu_core_map[cpu]) { - if (lcpu == cpu) - continue; - for (bank = 0; bank < NR_BANKS; ++bank) { - if (!(per_cpu(bank_map, cpu) & 1 << bank)) - continue; - if (!shared_bank[bank]) - continue; - err = threshold_create_bank(lcpu, bank); - } - } - return err; -} - -/* remove all symlinks before first core dies. */ -static __cpuinit void threshold_remove_symlinks(unsigned int cpu) -{ - int bank; - unsigned int lcpu = 0; - if (cpu_data[cpu].cpu_core_id) - return; - for_each_cpu_mask(lcpu, cpu_core_map[cpu]) { - if (lcpu == cpu) - continue; - for (bank = 0; bank < NR_BANKS; ++bank) { - if (!(per_cpu(bank_map, cpu) & 1 << bank)) - continue; - if (!shared_bank[bank]) - continue; - threshold_remove_bank(lcpu, bank); - } - } -} #else /* !CONFIG_HOTPLUG_CPU */ -static __cpuinit void threshold_create_symlinks(unsigned int cpu) -{ -} -static __cpuinit void threshold_remove_symlinks(unsigned int cpu) -{ -} static void threshold_remove_device(unsigned int cpu) { } @@ -695,13 +648,6 @@ static int threshold_cpu_callback(struct notifier_block *nfb, switch (action) { case CPU_ONLINE: threshold_create_device(cpu); - threshold_create_symlinks(cpu); - break; - case CPU_DOWN_PREPARE: - threshold_remove_symlinks(cpu); - break; - case CPU_DOWN_FAILED: - threshold_create_symlinks(cpu); break; case CPU_DEAD: threshold_remove_device(cpu); @@ -719,7 +665,7 @@ static struct notifier_block threshold_cpu_notifier = { static __init int threshold_init_device(void) { - int lcpu = 0; + unsigned lcpu = 0; /* to hit CPUs online before the notifier is up */ for_each_online_cpu(lcpu) { -- cgit v1.1 From b633237e9c1b91b86c431c5d41266b47408b4642 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 26 Jun 2006 13:58:59 +0200 Subject: [PATCH] x86_64: Mark mce_amd cpu notifier __cpuinit/__cpuinitdata Cc: Jacob Shin Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/mce_amd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c index 3f967d4..335200a 100644 --- a/arch/x86_64/kernel/mce_amd.c +++ b/arch/x86_64/kernel/mce_amd.c @@ -636,7 +636,7 @@ static void threshold_remove_device(unsigned int cpu) #endif /* get notified when a cpu comes on/off */ -static int threshold_cpu_callback(struct notifier_block *nfb, +static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { /* cpu was unsigned int to begin with */ @@ -659,7 +659,7 @@ static int threshold_cpu_callback(struct notifier_block *nfb, return NOTIFY_OK; } -static struct notifier_block threshold_cpu_notifier = { +static struct notifier_block threshold_cpu_notifier __cpuinitdata = { .notifier_call = threshold_cpu_callback, }; -- cgit v1.1 From 5f51e139d8b29116779044f26f84cceae34c31a4 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 26 Jun 2006 13:59:02 +0200 Subject: [PATCH] x86_64: miscellaneous mm/init.c fixes - fix an off-by-one error in phys_pmd_init() - prevent phys_pmd_init() from removing mappings established earlier - fix the direct 
mapping early printk to in fact show the end of the range - remove an apparently orphan comment Signed-off-by: Jan Beulich Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/mm/init.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c index 2f5f5b1..02add1d 100644 --- a/arch/x86_64/mm/init.c +++ b/arch/x86_64/mm/init.c @@ -88,8 +88,6 @@ void show_mem(void) printk(KERN_INFO "%lu pages swap cached\n",cached); } -/* References to section boundaries */ - int after_bootmem; static __init void *spp_getpage(void) @@ -259,9 +257,10 @@ phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end) for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) { unsigned long entry; - if (address > end) { - for (; i < PTRS_PER_PMD; i++, pmd++) - set_pmd(pmd, __pmd(0)); + if (address >= end) { + if (!after_bootmem) + for (; i < PTRS_PER_PMD; i++, pmd++) + set_pmd(pmd, __pmd(0)); break; } entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address; @@ -339,7 +338,8 @@ static void __init find_early_table_space(unsigned long end) table_end = table_start; early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n", - end, table_start << PAGE_SHIFT, table_end << PAGE_SHIFT); + end, table_start << PAGE_SHIFT, + (table_start << PAGE_SHIFT) + tables); } /* Setup the direct mapping of the physical memory at PAGE_OFFSET. -- cgit v1.1 From 8fa3d6fc5e952300c26490167a93bf502de03a99 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Mon, 26 Jun 2006 13:59:05 +0200 Subject: [PATCH] x86_64: check_addr() cleanups - Use DMA_32BIT_MASK - Use %z for size_t - 80-cols Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/pci-nommu.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/arch/x86_64/kernel/pci-nommu.c b/arch/x86_64/kernel/pci-nommu.c index 1f6ecc6..c4c3cc3 100644 --- a/arch/x86_64/kernel/pci-nommu.c +++ b/arch/x86_64/kernel/pci-nommu.c @@ -4,6 +4,8 @@ #include #include #include +#include + #include #include #include @@ -12,10 +14,11 @@ static int check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) { if (hwdev && bus + size > *hwdev->dma_mask) { - if (*hwdev->dma_mask >= 0xffffffffULL) + if (*hwdev->dma_mask >= DMA_32BIT_MASK) printk(KERN_ERR - "nommu_%s: overflow %Lx+%lu of device mask %Lx\n", - name, (long long)bus, size, (long long)*hwdev->dma_mask); + "nommu_%s: overflow %Lx+%zu of device mask %Lx\n", + name, (long long)bus, size, + (long long)*hwdev->dma_mask); return 0; } return 1; -- cgit v1.1 From d9005b52de7bc9b20afa93e06d3e15843bafc12f Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 26 Jun 2006 13:59:08 +0200 Subject: [PATCH] x86_64: Remove bogus RED-PEN comment in signal.c No red zone possible/needed on the alternative stack. It caused confusion. Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/signal.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/x86_64/kernel/signal.c b/arch/x86_64/kernel/signal.c index 94b4b11..2816117 100644 --- a/arch/x86_64/kernel/signal.c +++ b/arch/x86_64/kernel/signal.c @@ -237,7 +237,6 @@ get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size) rsp = regs->rsp - 128; /* This is the X/Open sanctioned signal stack switching. */ - /* RED-PEN: redzone on that stack? 
*/ if (ka->sa.sa_flags & SA_ONSTACK) { if (sas_ss_flags(rsp) == 0) rsp = current->sas_ss_sp + current->sas_ss_size; -- cgit v1.1 From 495ab9c045e1b0e5c82951b762257fe1c9d81564 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 26 Jun 2006 13:59:11 +0200 Subject: [PATCH] i386/x86-64/ia64: Move polling flag into thread_info_status During some profiling I noticed that default_idle causes a lot of memory traffic. I think that is caused by the atomic operations to clear/set the polling flag in thread_info. There is actually no reason to make this atomic - only the idle thread does it to itself, other CPUs only read it. So I moved it into ti->status. Converted i386/x86-64/ia64 for now because that was the easiest way to fix ACPI which also manipulates these flags in its idle function. Cc: Nick Piggin Cc: Tony Luck Cc: Len Brown Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/kernel/apm.c | 6 +++--- arch/i386/kernel/process.c | 6 +++--- arch/ia64/kernel/process.c | 4 ++-- arch/x86_64/kernel/process.c | 7 +++---- drivers/acpi/processor_idle.c | 12 ++++++------ include/asm-i386/thread_info.h | 7 ++++--- include/asm-ia64/thread_info.h | 5 +++++ include/asm-x86_64/thread_info.h | 6 ++++-- kernel/sched.c | 9 +++++++-- 9 files changed, 37 insertions(+), 25 deletions(-) diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c index 9e819eb..7c5729d 100644 --- a/arch/i386/kernel/apm.c +++ b/arch/i386/kernel/apm.c @@ -764,9 +764,9 @@ static int apm_do_idle(void) int idled = 0; int polling; - polling = test_thread_flag(TIF_POLLING_NRFLAG); + polling = !!(current_thread_info()->status & TS_POLLING); if (polling) { - clear_thread_flag(TIF_POLLING_NRFLAG); + current_thread_info()->status &= ~TS_POLLING; smp_mb__after_clear_bit(); } if (!need_resched()) { @@ -774,7 +774,7 @@ static int apm_do_idle(void) ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax); } if (polling) - set_thread_flag(TIF_POLLING_NRFLAG); + current_thread_info()->status |= TS_POLLING; if (!idled) return 0; diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c index 525432e..6946b06 100644 --- a/arch/i386/kernel/process.c +++ b/arch/i386/kernel/process.c @@ -102,7 +102,7 @@ void default_idle(void) local_irq_enable(); if (!hlt_counter && boot_cpu_data.hlt_works_ok) { - clear_thread_flag(TIF_POLLING_NRFLAG); + current_thread_info()->status &= ~TS_POLLING; smp_mb__after_clear_bit(); while (!need_resched()) { local_irq_disable(); @@ -111,7 +111,7 @@ void default_idle(void) else local_irq_enable(); } - set_thread_flag(TIF_POLLING_NRFLAG); + current_thread_info()->status |= TS_POLLING; } else { while (!need_resched()) cpu_relax(); @@ -174,7 +174,7 @@ void cpu_idle(void) { int cpu = smp_processor_id(); - set_thread_flag(TIF_POLLING_NRFLAG); + current_thread_info()->status |= TS_POLLING; /* endless idle loop with no priority at all */ while (1) { diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 355d579..b045c27 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -272,9 +272,9 @@ cpu_idle (void) /* endless idle loop with no priority at all */ while (1) { if (can_do_pal_halt) - clear_thread_flag(TIF_POLLING_NRFLAG); + current_thread_info()->status &= ~TS_POLLING; else - set_thread_flag(TIF_POLLING_NRFLAG); + current_thread_info()->status |= TS_POLLING; if (!need_resched()) { void (*idle)(void); diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c index d6fa414..b596837 100644 --- a/arch/x86_64/kernel/process.c +++ 
b/arch/x86_64/kernel/process.c @@ -110,7 +110,7 @@ static void default_idle(void) { local_irq_enable(); - clear_thread_flag(TIF_POLLING_NRFLAG); + current_thread_info()->status &= ~TS_POLLING; smp_mb__after_clear_bit(); while (!need_resched()) { local_irq_disable(); @@ -119,7 +119,7 @@ static void default_idle(void) else local_irq_enable(); } - set_thread_flag(TIF_POLLING_NRFLAG); + current_thread_info()->status |= TS_POLLING; } /* @@ -202,8 +202,7 @@ static inline void play_dead(void) */ void cpu_idle (void) { - set_thread_flag(TIF_POLLING_NRFLAG); - + current_thread_info()->status |= TS_POLLING; /* endless idle loop with no priority at all */ while (1) { while (!need_resched()) { diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 3b97a5e..74173ce 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -206,11 +206,11 @@ acpi_processor_power_activate(struct acpi_processor *pr, static void acpi_safe_halt(void) { - clear_thread_flag(TIF_POLLING_NRFLAG); + current_thread_info()->status &= ~TS_POLLING; smp_mb__after_clear_bit(); if (!need_resched()) safe_halt(); - set_thread_flag(TIF_POLLING_NRFLAG); + current_thread_info()->status |= TS_POLLING; } static atomic_t c3_cpu_count; @@ -330,10 +330,10 @@ static void acpi_processor_idle(void) * Invoke the current Cx state to put the processor to sleep. */ if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) { - clear_thread_flag(TIF_POLLING_NRFLAG); + current_thread_info()->status &= ~TS_POLLING; smp_mb__after_clear_bit(); if (need_resched()) { - set_thread_flag(TIF_POLLING_NRFLAG); + current_thread_info()->status |= TS_POLLING; local_irq_enable(); return; } @@ -371,7 +371,7 @@ static void acpi_processor_idle(void) t2 = inl(acpi_fadt.xpm_tmr_blk.address); /* Re-enable interrupts */ local_irq_enable(); - set_thread_flag(TIF_POLLING_NRFLAG); + current_thread_info()->status |= TS_POLLING; /* Compute time (ticks) that we were actually asleep */ sleep_ticks = ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD; @@ -411,7 +411,7 @@ static void acpi_processor_idle(void) /* Re-enable interrupts */ local_irq_enable(); - set_thread_flag(TIF_POLLING_NRFLAG); + current_thread_info()->status |= TS_POLLING; /* Compute time (ticks) that we were actually asleep */ sleep_ticks = ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD; diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h index 8420ed1..fdbc7f4 100644 --- a/include/asm-i386/thread_info.h +++ b/include/asm-i386/thread_info.h @@ -140,8 +140,7 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__; #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SECCOMP 8 /* secure computing */ #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ -#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ -#define TIF_MEMDIE 17 +#define TIF_MEMDIE 16 #define _TIF_SYSCALL_TRACE (1<thread_info->status & TS_POLLING) #endif /* __KERNEL__ */ diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h index e5392c4..8bc9869 100644 --- a/include/asm-ia64/thread_info.h +++ b/include/asm-ia64/thread_info.h @@ -27,6 +27,7 @@ struct thread_info { __u32 flags; /* thread_info flags (see TIF_*) */ __u32 cpu; /* current CPU */ __u32 last_cpu; /* Last CPU thread ran on */ + __u32 status; /* Thread synchronous flags */ mm_segment_t addr_limit; /* user-level address space limit */ int preempt_count; /* 0=premptable, <0=BUG; will also serve as 
bh-counter */ struct restart_block restart_block; @@ -103,4 +104,8 @@ struct thread_info { /* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */ #define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)) +#define TS_POLLING 1 /* true if in idle loop and not sleeping */ + +#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING) + #endif /* _ASM_IA64_THREAD_INFO_H */ diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h index 4ac0e0a..b5e8821 100644 --- a/include/asm-x86_64/thread_info.h +++ b/include/asm-x86_64/thread_info.h @@ -101,7 +101,7 @@ static inline struct thread_info *stack_thread_info(void) #define TIF_IRET 5 /* force IRET */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SECCOMP 8 /* secure computing */ -#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ +/* 16 free */ #define TIF_IA32 17 /* 32bit process */ #define TIF_FORK 18 /* ret_from_fork */ #define TIF_ABI_PENDING 19 @@ -115,7 +115,6 @@ static inline struct thread_info *stack_thread_info(void) #define _TIF_IRET (1<thread_info->status & TS_POLLING) #endif /* __KERNEL__ */ diff --git a/kernel/sched.c b/kernel/sched.c index f06d059..7d1027a 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -818,6 +818,11 @@ static void deactivate_task(struct task_struct *p, runqueue_t *rq) * the target CPU. */ #ifdef CONFIG_SMP + +#ifndef tsk_is_polling +#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) +#endif + static void resched_task(task_t *p) { int cpu; @@ -833,9 +838,9 @@ static void resched_task(task_t *p) if (cpu == smp_processor_id()) return; - /* NEED_RESCHED must be visible before we test POLLING_NRFLAG */ + /* NEED_RESCHED must be visible before we test polling */ smp_mb(); - if (!test_tsk_thread_flag(p, TIF_POLLING_NRFLAG)) + if (!tsk_is_polling(p)) smp_send_reschedule(cpu); } #else -- cgit v1.1 From e42f94373764d825e2c0464092738f66f5d3effb Mon Sep 17 00:00:00 2001 From: Rohit Seth Date: Mon, 26 Jun 2006 13:59:14 +0200 Subject: [PATCH] x86_64: x86_64 setup.c - printing CMP-related boot-time information Print phys_proc_id and cpu_core_id information at boot time for AMD processors. Also match the node-related boot-time information printed for Intel and AMD processors in NUMA configurations.
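As an aside on the TS_POLLING conversion above: plain, non-atomic updates are safe because only the idle thread ever writes its own status word; a remote CPU just sets NEED_RESCHED, issues a full barrier, and reads the flag. A simplified sketch of that protocol (illustration only, not the kernel's real types):

    #define TS_POLLING 1

    struct ti { volatile unsigned int status; };

    /* idle thread: the sole writer of its own status word, so a
     * plain read-modify-write is enough -- no atomic op, no bus lock */
    static void idle_set_polling(struct ti *t)
    {
            t->status |= TS_POLLING;
    }

    /* remote CPU: set NEED_RESCHED first, then a full memory barrier,
     * then test; a polling idle thread will notice NEED_RESCHED on its
     * own, everyone else needs the reschedule IPI */
    static int needs_resched_ipi(const struct ti *t)
    {
            return !(t->status & TS_POLLING);
    }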
Signed-off-by: Rohit Seth Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/setup.c | 34 ++++++++++++++++------------------ 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index 923b76f..8ade23d7 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -833,15 +833,13 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c) } numa_set_node(cpu, node); - printk(KERN_INFO "CPU %d/%x(%d) -> Node %d -> Core %d\n", - cpu, apicid, c->x86_max_cores, node, c->cpu_core_id); + printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node); #endif #endif } -static int __init init_amd(struct cpuinfo_x86 *c) +static void __init init_amd(struct cpuinfo_x86 *c) { - int r; unsigned level; #ifdef CONFIG_SMP @@ -874,8 +872,8 @@ static int __init init_amd(struct cpuinfo_x86 *c) if (c->x86 >= 6) set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability); - r = get_model_name(c); - if (!r) { + level = get_model_name(c); + if (!level) { switch (c->x86) { case 15: /* Should distinguish Models here, but this is only @@ -896,8 +894,6 @@ static int __init init_amd(struct cpuinfo_x86 *c) /* Fix cpuid4 emulation for more */ num_cache_leaves = 3; - - return r; } static void __cpuinit detect_ht(struct cpuinfo_x86 *c) @@ -909,8 +905,10 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c) cpuid(1, &eax, &ebx, &ecx, &edx); - if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY)) + if (!cpu_has(c, X86_FEATURE_HT)) return; + if (cpu_has(c, X86_FEATURE_CMP_LEGACY)) + goto out; smp_num_siblings = (ebx & 0xff0000) >> 16; @@ -927,9 +925,6 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c) index_msb = get_count_order(smp_num_siblings); c->phys_proc_id = phys_pkg_id(index_msb); - printk(KERN_INFO "CPU: Physical Processor ID: %d\n", - c->phys_proc_id); - smp_num_siblings = smp_num_siblings / c->x86_max_cores; index_msb = get_count_order(smp_num_siblings) ; @@ -938,11 +933,13 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c) c->cpu_core_id = phys_pkg_id(index_msb) & ((1 << core_bits) - 1); - - if (c->x86_max_cores > 1) - printk(KERN_INFO "CPU: Processor Core ID: %d\n", - c->cpu_core_id); } +out: + if ((c->x86_max_cores * smp_num_siblings) > 1) { + printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id); + printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id); + } + #endif } @@ -969,16 +966,17 @@ static void srat_detect_node(void) #ifdef CONFIG_NUMA unsigned node; int cpu = smp_processor_id(); + int apicid = hard_smp_processor_id(); /* Don't do the funky fallback heuristics the AMD version employs for now. */ - node = apicid_to_node[hard_smp_processor_id()]; + node = apicid_to_node[apicid]; if (node == NUMA_NO_NODE) node = first_node(node_online_map); numa_set_node(cpu, node); if (acpi_numa > 0) - printk(KERN_INFO "CPU %d -> Node %d\n", cpu, node); + printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node); #endif } -- cgit v1.1 From 75bd665cc9f4d0f67164d9145e446ae554d4ed3f Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 26 Jun 2006 13:59:17 +0200 Subject: [PATCH] x86_64: Fix fast check in safe_smp_processor_id The APIC ID returned by hard_smp_processor_id can be beyond NR_CPUS and then overflow the x86_cpu_to_apic[] array. Add a check for overflow. If it happens then the slow loop below will catch. 
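In isolation, the pattern is a bounds check ahead of the table lookup, with the linear scan as the safety net (a sketch with simplified names, not the actual kernel code):

    static int lookup_cpu(unsigned int apicid,
                          const unsigned int *cpu_to_apicid,
                          unsigned int nr_cpus)
    {
            unsigned int i;

            /* fast path: index the table only when apicid is in range */
            if (apicid < nr_cpus && cpu_to_apicid[apicid] == apicid)
                    return apicid;

            /* slow path: the scan also catches out-of-range APIC IDs */
            for (i = 0; i < nr_cpus; i++)
                    if (cpu_to_apicid[i] == apicid)
                            return i;

            return 0;
    }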
Bug pointed out by Doug Thompson Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/smp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c index fe906ba..5885b8f 100644 --- a/arch/x86_64/kernel/smp.c +++ b/arch/x86_64/kernel/smp.c @@ -520,13 +520,13 @@ asmlinkage void smp_call_function_interrupt(void) int safe_smp_processor_id(void) { - int apicid, i; + unsigned apicid, i; if (disable_apic) return 0; apicid = hard_smp_processor_id(); - if (x86_cpu_to_apicid[apicid] == apicid) + if (apicid < NR_CPUS && x86_cpu_to_apicid[apicid] == apicid) return apicid; for (i = 0; i < NR_CPUS; ++i) { -- cgit v1.1 From da5311258d2afb96fc592c8b11e818facfa46dc3 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 26 Jun 2006 13:59:20 +0200 Subject: [PATCH] x86_64: Fix race in cpu_local_* on preemptible kernels When a process changes CPUs while doing the non-atomic cpu_local_* operations it might operate on the local_t of a different CPU. Fix that by disabling preemption. Pointed out by Christopher Lameter Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-i386/local.h | 26 ++++++++++++++++++++------ include/asm-x86_64/local.h | 26 ++++++++++++++++++++------ 2 files changed, 40 insertions(+), 12 deletions(-) diff --git a/include/asm-i386/local.h b/include/asm-i386/local.h index e67fa08..3b4998c 100644 --- a/include/asm-i386/local.h +++ b/include/asm-i386/local.h @@ -55,12 +55,26 @@ static __inline__ void local_sub(long i, local_t *v) * much more efficient than these naive implementations. Note they take * a variable, not an address. */ -#define cpu_local_read(v) local_read(&__get_cpu_var(v)) -#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i)) -#define cpu_local_inc(v) local_inc(&__get_cpu_var(v)) -#define cpu_local_dec(v) local_dec(&__get_cpu_var(v)) -#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v)) -#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v)) + +/* Need to disable preemption for the cpu local counters otherwise we could + still access a variable of a previous CPU in a non atomic way. */ +#define cpu_local_wrap_v(v) \ + ({ local_t res__; \ + preempt_disable(); \ + res__ = (v); \ + preempt_enable(); \ + res__; }) +#define cpu_local_wrap(v) \ + ({ preempt_disable(); \ + v; \ + preempt_enable(); }) \ + +#define cpu_local_read(v) cpu_local_wrap_v(local_read(&__get_cpu_var(v))) +#define cpu_local_set(v, i) cpu_local_wrap(local_set(&__get_cpu_var(v), (i))) +#define cpu_local_inc(v) cpu_local_wrap(local_inc(&__get_cpu_var(v))) +#define cpu_local_dec(v) cpu_local_wrap(local_dec(&__get_cpu_var(v))) +#define cpu_local_add(i, v) cpu_local_wrap(local_add((i), &__get_cpu_var(v))) +#define cpu_local_sub(i, v) cpu_local_wrap(local_sub((i), &__get_cpu_var(v))) #define __cpu_local_inc(v) cpu_local_inc(v) #define __cpu_local_dec(v) cpu_local_dec(v) diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h index cd17945..e769e62 100644 --- a/include/asm-x86_64/local.h +++ b/include/asm-x86_64/local.h @@ -59,12 +59,26 @@ static inline void local_sub(long i, local_t *v) * This could be done better if we moved the per cpu data directly * after GS.
*/ -#define cpu_local_read(v) local_read(&__get_cpu_var(v)) -#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i)) -#define cpu_local_inc(v) local_inc(&__get_cpu_var(v)) -#define cpu_local_dec(v) local_dec(&__get_cpu_var(v)) -#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v)) -#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v)) + +/* Need to disable preemption for the cpu local counters otherwise we could + still access a variable of a previous CPU in a non atomic way. */ +#define cpu_local_wrap_v(v) \ + ({ local_t res__; \ + preempt_disable(); \ + res__ = (v); \ + preempt_enable(); \ + res__; }) +#define cpu_local_wrap(v) \ + ({ preempt_disable(); \ + v; \ + preempt_enable(); }) \ + +#define cpu_local_read(v) cpu_local_wrap_v(local_read(&__get_cpu_var(v))) +#define cpu_local_set(v, i) cpu_local_wrap(local_set(&__get_cpu_var(v), (i))) +#define cpu_local_inc(v) cpu_local_wrap(local_inc(&__get_cpu_var(v))) +#define cpu_local_dec(v) cpu_local_wrap(local_dec(&__get_cpu_var(v))) +#define cpu_local_add(i, v) cpu_local_wrap(local_add((i), &__get_cpu_var(v))) +#define cpu_local_sub(i, v) cpu_local_wrap(local_sub((i), &__get_cpu_var(v))) #define __cpu_local_inc(v) cpu_local_inc(v) #define __cpu_local_dec(v) cpu_local_dec(v) -- cgit v1.1 From bdbdaa791fb5b97ba5b3124c7593ffe308e2afef Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 26 Jun 2006 13:59:23 +0200 Subject: [PATCH] i386/x86-64: adjust /proc/interrupts column headings With (significantly) more than 10 CPUs online, the column headings drifted off the positions of the column contents with growing CPU numbers. Signed-off-by: Jan Beulich Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/kernel/irq.c | 2 +- arch/x86_64/kernel/irq.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c index 49ce4c3..061533e 100644 --- a/arch/i386/kernel/irq.c +++ b/arch/i386/kernel/irq.c @@ -227,7 +227,7 @@ int show_interrupts(struct seq_file *p, void *v) if (i == 0) { seq_printf(p, " "); for_each_online_cpu(j) - seq_printf(p, "CPU%d ",j); + seq_printf(p, "CPU%-8d",j); seq_putc(p, '\n'); } diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c index d8bd0b3..207ecdc 100644 --- a/arch/x86_64/kernel/irq.c +++ b/arch/x86_64/kernel/irq.c @@ -39,7 +39,7 @@ int show_interrupts(struct seq_file *p, void *v) if (i == 0) { seq_printf(p, " "); for_each_online_cpu(j) - seq_printf(p, "CPU%d ",j); + seq_printf(p, "CPU%-8d",j); seq_putc(p, '\n'); } -- cgit v1.1 From cab093b9d4b40c71c6261a11ba8f1ca027e09008 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 26 Jun 2006 13:59:26 +0200 Subject: [PATCH] x86_64: adjust kstack_depth_to_print default Defaulting to a value not evenly divisible by four makes little sense, as four values are displayed per line (and hence the rest of the line would otherwise be wasted). 
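Concretely: with four values per line, a depth of 12 fills exactly three lines, while the old default of 10 left half of the third line empty. A stand-alone demonstration of the layout (illustration only):

    #include <stdio.h>

    int main(void)
    {
            int depth = 12, per_line = 4, i;

            /* depth % per_line == 0, so the last line is always full */
            for (i = 1; i <= depth; i++)
                    printf("%016x%c", 0, (i % per_line) ? ' ' : '\n');
            return 0;
    }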
Signed-off-by: Jan Beulich Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/traps.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c index bd0891f..08f2435 100644 --- a/arch/x86_64/kernel/traps.c +++ b/arch/x86_64/kernel/traps.c @@ -106,7 +106,7 @@ static inline void preempt_conditional_cli(struct pt_regs *regs) preempt_enable_no_resched(); } -static int kstack_depth_to_print = 10; +static int kstack_depth_to_print = 12; static int call_trace = 1; #ifdef CONFIG_KALLSYMS -- cgit v1.1 From 899ced0dd9457b9c349663ca4cb4ec09167728a6 Mon Sep 17 00:00:00 2001 From: Andreas Mohr Date: Mon, 26 Jun 2006 13:59:29 +0200 Subject: [PATCH] x86_64: Add cpu_relax to apic_wait_icr_idle This adds a cpu_relax() call that already existed in the i386 version. Signed-off-by: Andreas Mohr Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-x86_64/apic.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h index 9d43ac8..9c96a0a 100644 --- a/include/asm-x86_64/apic.h +++ b/include/asm-x86_64/apic.h @@ -49,7 +49,8 @@ static __inline unsigned int apic_read(unsigned long reg) static __inline__ void apic_wait_icr_idle(void) { - while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY ); + while (apic_read( APIC_ICR ) & APIC_ICR_BUSY) + cpu_relax(); } static inline void ack_APIC_irq(void) -- cgit v1.1 From 409f89e0efcfea1a8b25f9c4a050c0cf1f1cb87c Mon Sep 17 00:00:00 2001 From: Sergey Vlasov Date: Mon, 26 Jun 2006 13:59:32 +0200 Subject: [PATCH] x86_64: Do not use -ffunction-sections for modules Currently CONFIG_REORDER uses -ffunction-sections for all code; however, creating a separate section for each function is not useful for modules and just adds bloat. Moving this option from CFLAGS to CFLAGS_KERNEL shrinks module object files (e.g., the module tree for a kernel built with most drivers as modules shrank from 54M to 46M), and decreases the number of sysfs files in /sys/module/*/sections/ directories. Signed-off-by: Sergey Vlasov Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/Makefile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile index e573e2a..431bb4b 100644 --- a/arch/x86_64/Makefile +++ b/arch/x86_64/Makefile @@ -27,6 +27,7 @@ LDFLAGS_vmlinux := CHECKFLAGS += -D__x86_64__ -m64 cflags-y := +cflags-kernel-y := cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8) cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona) cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic) @@ -35,7 +36,7 @@ cflags-y += -m64 cflags-y += -mno-red-zone cflags-y += -mcmodel=kernel cflags-y += -pipe -cflags-$(CONFIG_REORDER) += -ffunction-sections +cflags-kernel-$(CONFIG_REORDER) += -ffunction-sections # this makes reading assembly source easier, but produces worse code # actually it makes the kernel smaller too.
cflags-y += -fno-reorder-blocks @@ -55,6 +56,7 @@ cflags-y += $(call cc-option,-funit-at-a-time) cflags-y += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,) CFLAGS += $(cflags-y) +CFLAGS_KERNEL += $(cflags-kernel-y) AFLAGS += -m64 head-y := arch/x86_64/kernel/head.o arch/x86_64/kernel/head64.o arch/x86_64/kernel/init_task.o -- cgit v1.1 From d39159c27fe1375bb7773d364213b1047265022c Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Mon, 26 Jun 2006 13:59:35 +0200 Subject: [PATCH] x86_64: remove sys32_ni_syscall() This patch removes the no longer used sys32_ni_syscall(). Signed-off-by: Adrian Bunk Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/ia32/ia32entry.S | 4 ---- arch/x86_64/ia32/sys_ia32.c | 13 ------------- 2 files changed, 17 deletions(-) diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S index 222777b..c536fa9 100644 --- a/arch/x86_64/ia32/ia32entry.S +++ b/arch/x86_64/ia32/ia32entry.S @@ -323,10 +323,6 @@ ia32_badsys: movq $-ENOSYS,RAX-ARGOFFSET(%rsp) jmp int_ret_from_sys_call -ni_syscall: - movq %rax,%rdi - jmp sys32_ni_syscall - quiet_ni_syscall: movq $-ENOSYS,%rax ret diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c index a1adaa4..dc88154 100644 --- a/arch/x86_64/ia32/sys_ia32.c +++ b/arch/x86_64/ia32/sys_ia32.c @@ -508,19 +508,6 @@ sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr, int options) return compat_sys_wait4(pid, stat_addr, options, NULL); } -int sys32_ni_syscall(int call) -{ - struct task_struct *me = current; - static char lastcomm[sizeof(me->comm)]; - - if (strncmp(lastcomm, me->comm, sizeof(lastcomm))) { - compat_printk(KERN_INFO "IA32 syscall %d from %s not implemented\n", - call, me->comm); - strncpy(lastcomm, me->comm, sizeof(lastcomm)); - } - return -ENOSYS; -} - /* 32-bit timeval and related flotsam. */ asmlinkage long -- cgit v1.1 From 9c63f8738734eb7e6d3f76ca03186f16ef88edf5 Mon Sep 17 00:00:00 2001 From: Piotr Kaczuba Date: Mon, 26 Jun 2006 13:59:38 +0200 Subject: [PATCH] x86_64: Fix modular pc speaker It turned out that the following change is needed when the speaker is compiled as a module. Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/setup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index 8ade23d7..0d2d4f6 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -1361,7 +1361,7 @@ struct seq_operations cpuinfo_op = { .show = show_cpuinfo, }; -#ifdef CONFIG_INPUT_PCSPKR +#if defined(CONFIG_INPUT_PCSPKR) || defined(CONFIG_INPUT_PCSPKR_MODULE) #include static __init int add_pcspkr(void) { -- cgit v1.1 From 45486f81c9aa07218b73a38cbcf62ffa66e99088 Mon Sep 17 00:00:00 2001 From: Keith Owens Date: Mon, 26 Jun 2006 13:59:41 +0200 Subject: [PATCH] x86_64: Standardize i386/x86_64 handling of NMI_VECTOR x86_64 and i386 behave inconsistently when sending an IPI on vector 2 (NMI_VECTOR). Make both behave the same, so IPI 2 is sent as NMI. The crash code was abusing send_IPI_allbutself() by passing a code instead of a vector; it only worked because crash knew about the internal code of send_IPI_allbutself(). Change crash to use NMI_VECTOR instead, and remove the comment about how crash was abusing the function. This patch is a pre-requisite for fixing the problem where sending an IPI as NMI would reboot some Dell Xeon systems. I cannot fix that problem while crash continues to abuse send_IPI_allbutself().
It also removes the inconsistency between i386 and x86_64 for NMI_VECTOR. That will simplify all the RAS code that needs to bring all the cpus to a clean stop, even when one or more cpus are spinning disabled. Signed-off-by: Keith Owens Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/kernel/crash.c | 7 +------ arch/i386/kernel/smp.c | 12 +++++++++++- arch/x86_64/kernel/crash.c | 2 +- include/asm-i386/hw_irq.h | 2 ++ 4 files changed, 15 insertions(+), 8 deletions(-) diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c index 21dc1bb..0c88d3e 100644 --- a/arch/i386/kernel/crash.c +++ b/arch/i386/kernel/crash.c @@ -120,14 +120,9 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu) return 1; } -/* - * By using the NMI code instead of a vector we just sneak thru the - * word generator coming out with just what we want. AND it does - * not matter if clustered_apic_mode is set or not. - */ static void smp_send_nmi_allbutself(void) { - send_IPI_allbutself(APIC_DM_NMI); + send_IPI_allbutself(NMI_VECTOR); } static void nmi_shootdown_cpus(void) diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c index d134e96..c10789d 100644 --- a/arch/i386/kernel/smp.c +++ b/arch/i386/kernel/smp.c @@ -114,7 +114,17 @@ DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_m static inline int __prepare_ICR (unsigned int shortcut, int vector) { - return APIC_DM_FIXED | shortcut | vector | APIC_DEST_LOGICAL; + unsigned int icr = shortcut | APIC_DEST_LOGICAL; + + switch (vector) { + default: + icr |= APIC_DM_FIXED | vector; + break; + case NMI_VECTOR: + icr |= APIC_DM_NMI; + break; + } + return icr; } static inline int __prepare_ICR2 (unsigned int mask) diff --git a/arch/x86_64/kernel/crash.c b/arch/x86_64/kernel/crash.c index ec1c743..8ca0491 100644 --- a/arch/x86_64/kernel/crash.c +++ b/arch/x86_64/kernel/crash.c @@ -118,7 +118,7 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu) static void smp_send_nmi_allbutself(void) { - send_IPI_allbutself(APIC_DM_NMI); + send_IPI_allbutself(NMI_VECTOR); } /* diff --git a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h index 95d3fd0..a4c0a5a 100644 --- a/include/asm-i386/hw_irq.h +++ b/include/asm-i386/hw_irq.h @@ -19,6 +19,8 @@ struct hw_interrupt_type; +#define NMI_VECTOR 0x02 + /* * Various low-level irq details needed by irq.c, process.c, * time.c, io_apic.c and smp.c -- cgit v1.1 From 2ee60e17896c65da1df5780d3196c050bccb7d10 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 26 Jun 2006 13:59:44 +0200 Subject: [PATCH] x86_64: Move export symbols to their C functions Only exports for assembler files are left in x8664_ksyms.c Originally inspired by a patch from Al Viro Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/e820.c | 1 + arch/x86_64/kernel/process.c | 1 + arch/x86_64/kernel/reboot.c | 1 + arch/x86_64/kernel/setup.c | 2 + arch/x86_64/kernel/setup64.c | 2 + arch/x86_64/kernel/smp.c | 4 ++ arch/x86_64/kernel/smpboot.c | 5 ++ arch/x86_64/kernel/time.c | 2 + arch/x86_64/kernel/traps.c | 2 + arch/x86_64/kernel/x8664_ksyms.c | 114 ++------------------------------------- arch/x86_64/lib/csum-partial.c | 1 + arch/x86_64/lib/csum-wrappers.c | 1 + arch/x86_64/lib/delay.c | 5 ++ arch/x86_64/lib/memmove.c | 2 + arch/x86_64/lib/usercopy.c | 11 +++- arch/x86_64/mm/ioremap.c | 5 ++ 16 files changed, 49 insertions(+), 110 deletions(-) diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c index d8ea250..9e94d83 100644 --- 
a/arch/x86_64/kernel/e820.c +++ b/arch/x86_64/kernel/e820.c @@ -620,6 +620,7 @@ void __init parse_memmapopt(char *p, char **from) } unsigned long pci_mem_start = 0xaeedbabe; +EXPORT_SYMBOL(pci_mem_start); /* * Search for the biggest gap in the low 32 bits of the e820 diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c index b596837..ca56e19 100644 --- a/arch/x86_64/kernel/process.c +++ b/arch/x86_64/kernel/process.c @@ -63,6 +63,7 @@ EXPORT_SYMBOL(boot_option_idle_override); * Powermanagement idle function, if any.. */ void (*pm_idle)(void); +EXPORT_SYMBOL(pm_idle); static DEFINE_PER_CPU(unsigned int, cpu_idle_state); static ATOMIC_NOTIFIER_HEAD(idle_notifier); diff --git a/arch/x86_64/kernel/reboot.c b/arch/x86_64/kernel/reboot.c index 57117b8..2d67698 100644 --- a/arch/x86_64/kernel/reboot.c +++ b/arch/x86_64/kernel/reboot.c @@ -20,6 +20,7 @@ * Power off function, if any */ void (*pm_power_off)(void); +EXPORT_SYMBOL(pm_power_off); static long no_idt[3]; static enum { diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index 0d2d4f6..2a5fce0 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -71,6 +71,7 @@ */ struct cpuinfo_x86 boot_cpu_data __read_mostly; +EXPORT_SYMBOL(boot_cpu_data); unsigned long mmu_cr4_features; @@ -99,6 +100,7 @@ char dmi_alloc_data[DMI_MAX_DATA]; * Setup options */ struct screen_info screen_info; +EXPORT_SYMBOL(screen_info); struct sys_desc_table_struct { unsigned short length; unsigned char table[0]; diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c index e5bf22a..f5934cb 100644 --- a/arch/x86_64/kernel/setup64.c +++ b/arch/x86_64/kernel/setup64.c @@ -30,6 +30,7 @@ char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,}; cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly; +EXPORT_SYMBOL(_cpu_pda); struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned; struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; @@ -37,6 +38,7 @@ struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned"))); unsigned long __supported_pte_mask __read_mostly = ~0UL; +EXPORT_SYMBOL(__supported_pte_mask); static int do_not_nx __cpuinitdata = 0; /* noexec=on|off diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c index 5885b8f..8188bae 100644 --- a/arch/x86_64/kernel/smp.c +++ b/arch/x86_64/kernel/smp.c @@ -224,6 +224,7 @@ void flush_tlb_current_task(void) flush_tlb_others(cpu_mask, mm, FLUSH_ALL); preempt_enable(); } +EXPORT_SYMBOL(flush_tlb_current_task); void flush_tlb_mm (struct mm_struct * mm) { @@ -244,6 +245,7 @@ void flush_tlb_mm (struct mm_struct * mm) preempt_enable(); } +EXPORT_SYMBOL(flush_tlb_mm); void flush_tlb_page(struct vm_area_struct * vma, unsigned long va) { @@ -266,6 +268,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va) preempt_enable(); } +EXPORT_SYMBOL(flush_tlb_page); static void do_flush_tlb_all(void* info) { @@ -443,6 +446,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic, spin_unlock(&call_lock); return 0; } +EXPORT_SYMBOL(smp_call_function); void smp_stop_cpu(void) { diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c index b1c10b1..4e97551 100644 --- a/arch/x86_64/kernel/smpboot.c +++ b/arch/x86_64/kernel/smpboot.c @@ -63,9 +63,11 @@ /* Number of siblings per CPU package */ int smp_num_siblings = 1; 
+EXPORT_SYMBOL(smp_num_siblings); /* Last level cache ID of each logical CPU */ u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID}; +EXPORT_SYMBOL(cpu_llc_id); /* Bitmask of currently online CPUs */ cpumask_t cpu_online_map __read_mostly; @@ -78,18 +80,21 @@ EXPORT_SYMBOL(cpu_online_map); */ cpumask_t cpu_callin_map; cpumask_t cpu_callout_map; +EXPORT_SYMBOL(cpu_callout_map); cpumask_t cpu_possible_map; EXPORT_SYMBOL(cpu_possible_map); /* Per CPU bogomips and other parameters */ struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned; +EXPORT_SYMBOL(cpu_data); /* Set when the idlers are all forked */ int smp_threads_ready; /* representing HT siblings of each logical CPU */ cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; +EXPORT_SYMBOL(cpu_sibling_map); /* representing HT and core siblings of each logical CPU */ cpumask_t cpu_core_map[NR_CPUS] __read_mostly; diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c index 2125d6c..ebbee6f 100644 --- a/arch/x86_64/kernel/time.c +++ b/arch/x86_64/kernel/time.c @@ -51,6 +51,7 @@ extern int using_apic_timer; static char *time_init_gtod(void); DEFINE_SPINLOCK(rtc_lock); +EXPORT_SYMBOL(rtc_lock); DEFINE_SPINLOCK(i8253_lock); int nohpet __initdata = 0; @@ -64,6 +65,7 @@ static int notsc __initdata = 0; #define US_SCALE 32 /* 2^32, arbitralrily chosen */ unsigned int cpu_khz; /* TSC clocks / usec, not used here */ +EXPORT_SYMBOL(cpu_khz); static unsigned long hpet_period; /* fsecs / HPET clock */ unsigned long hpet_tick; /* HPET clocks / interrupt */ int hpet_use_timer; /* Use counter of hpet for time keeping, otherwise PIT */ diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c index 08f2435..3d11a2f 100644 --- a/arch/x86_64/kernel/traps.c +++ b/arch/x86_64/kernel/traps.c @@ -70,6 +70,7 @@ asmlinkage void machine_check(void); asmlinkage void spurious_interrupt_bug(void); ATOMIC_NOTIFIER_HEAD(die_chain); +EXPORT_SYMBOL(die_chain); int register_die_notifier(struct notifier_block *nb) { @@ -431,6 +432,7 @@ void out_of_line_bug(void) { BUG(); } +EXPORT_SYMBOL(out_of_line_bug); #endif static DEFINE_SPINLOCK(die_lock); diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c index 1def21c..370952c 100644 --- a/arch/x86_64/kernel/x8664_ksyms.c +++ b/arch/x86_64/kernel/x8664_ksyms.c @@ -1,66 +1,21 @@ +/* Exports for assembly files. + All C exports should go in the respective C files. */ + #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include #include -#include #include -#include -#include -#include -#include -#include -#include #include -#include -#include -#include -#include -#include -#include - -extern spinlock_t rtc_lock; -#ifdef CONFIG_SMP -extern void __write_lock_failed(rwlock_t *rw); -extern void __read_lock_failed(rwlock_t *rw); -#endif - -/* platform dependent support */ -EXPORT_SYMBOL(boot_cpu_data); -//EXPORT_SYMBOL(dump_fpu); -EXPORT_SYMBOL(__ioremap); -EXPORT_SYMBOL(ioremap_nocache); -EXPORT_SYMBOL(iounmap); EXPORT_SYMBOL(kernel_thread); -EXPORT_SYMBOL(pm_idle); -EXPORT_SYMBOL(pm_power_off); EXPORT_SYMBOL(__down_failed); EXPORT_SYMBOL(__down_failed_interruptible); EXPORT_SYMBOL(__down_failed_trylock); EXPORT_SYMBOL(__up_wakeup); -/* Networking helper routines. 
*/ -EXPORT_SYMBOL(csum_partial_copy_nocheck); -EXPORT_SYMBOL(ip_compute_csum); -/* Delay loops */ -EXPORT_SYMBOL(__udelay); -EXPORT_SYMBOL(__ndelay); -EXPORT_SYMBOL(__delay); -EXPORT_SYMBOL(__const_udelay); EXPORT_SYMBOL(__get_user_1); EXPORT_SYMBOL(__get_user_2); @@ -71,42 +26,20 @@ EXPORT_SYMBOL(__put_user_2); EXPORT_SYMBOL(__put_user_4); EXPORT_SYMBOL(__put_user_8); -EXPORT_SYMBOL(strncpy_from_user); -EXPORT_SYMBOL(__strncpy_from_user); -EXPORT_SYMBOL(clear_user); -EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(copy_user_generic); EXPORT_SYMBOL(copy_from_user); EXPORT_SYMBOL(copy_to_user); -EXPORT_SYMBOL(copy_in_user); -EXPORT_SYMBOL(strnlen_user); - -#ifdef CONFIG_PCI -EXPORT_SYMBOL(pci_mem_start); -#endif EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(clear_page); -EXPORT_SYMBOL(_cpu_pda); #ifdef CONFIG_SMP -EXPORT_SYMBOL(cpu_data); +extern void FASTCALL( __write_lock_failed(rwlock_t *rw)); +extern void FASTCALL( __read_lock_failed(rwlock_t *rw)); EXPORT_SYMBOL(__write_lock_failed); EXPORT_SYMBOL(__read_lock_failed); - -EXPORT_SYMBOL(smp_call_function); -EXPORT_SYMBOL(cpu_callout_map); -#endif - -#ifdef CONFIG_VT -EXPORT_SYMBOL(screen_info); #endif -EXPORT_SYMBOL(rtc_lock); - -EXPORT_SYMBOL_GPL(set_nmi_callback); -EXPORT_SYMBOL_GPL(unset_nmi_callback); - /* Export string functions. We normally rely on gcc builtin for most of these, but gcc sometimes decides not to inline them. */ #undef memcpy @@ -114,51 +47,14 @@ EXPORT_SYMBOL_GPL(unset_nmi_callback); #undef memmove extern void * memset(void *,int,__kernel_size_t); -extern size_t strlen(const char *); -extern void * memmove(void * dest,const void *src,size_t count); extern void * memcpy(void *,const void *,__kernel_size_t); extern void * __memcpy(void *,const void *,__kernel_size_t); EXPORT_SYMBOL(memset); -EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(__memcpy); -#ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM -/* prototypes are wrong, these are assembly with custom calling functions */ -extern void rwsem_down_read_failed_thunk(void); -extern void rwsem_wake_thunk(void); -extern void rwsem_downgrade_thunk(void); -extern void rwsem_down_write_failed_thunk(void); -EXPORT_SYMBOL(rwsem_down_read_failed_thunk); -EXPORT_SYMBOL(rwsem_wake_thunk); -EXPORT_SYMBOL(rwsem_downgrade_thunk); -EXPORT_SYMBOL(rwsem_down_write_failed_thunk); -#endif - EXPORT_SYMBOL(empty_zero_page); - -EXPORT_SYMBOL(die_chain); - -#ifdef CONFIG_SMP -EXPORT_SYMBOL(cpu_sibling_map); -EXPORT_SYMBOL(smp_num_siblings); -#endif - -#ifdef CONFIG_BUG -EXPORT_SYMBOL(out_of_line_bug); -#endif - EXPORT_SYMBOL(init_level4_pgt); - -extern unsigned long __supported_pte_mask; -EXPORT_SYMBOL(__supported_pte_mask); - -#ifdef CONFIG_SMP -EXPORT_SYMBOL(flush_tlb_page); -#endif - -EXPORT_SYMBOL(cpu_khz); - EXPORT_SYMBOL(load_gs_index); diff --git a/arch/x86_64/lib/csum-partial.c b/arch/x86_64/lib/csum-partial.c index 5384e22..c493735 100644 --- a/arch/x86_64/lib/csum-partial.c +++ b/arch/x86_64/lib/csum-partial.c @@ -147,4 +147,5 @@ unsigned short ip_compute_csum(unsigned char * buff, int len) { return csum_fold(csum_partial(buff,len,0)); } +EXPORT_SYMBOL(ip_compute_csum); diff --git a/arch/x86_64/lib/csum-wrappers.c b/arch/x86_64/lib/csum-wrappers.c index 94323f2..b1320ec 100644 --- a/arch/x86_64/lib/csum-wrappers.c +++ b/arch/x86_64/lib/csum-wrappers.c @@ -109,6 +109,7 @@ csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, int len, { return csum_partial_copy_generic(src,dst,len,sum,NULL,NULL); } +EXPORT_SYMBOL(csum_partial_copy_nocheck); unsigned short 
csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr, __u32 len, unsigned short proto, unsigned int sum) diff --git a/arch/x86_64/lib/delay.c b/arch/x86_64/lib/delay.c index 03c460c..b6cd3cc 100644 --- a/arch/x86_64/lib/delay.c +++ b/arch/x86_64/lib/delay.c @@ -9,6 +9,7 @@ */ #include +#include #include #include #include @@ -36,18 +37,22 @@ void __delay(unsigned long loops) } while((now-bclock) < loops); } +EXPORT_SYMBOL(__delay); inline void __const_udelay(unsigned long xloops) { __delay((xloops * HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32); } +EXPORT_SYMBOL(__const_udelay); void __udelay(unsigned long usecs) { __const_udelay(usecs * 0x000010c6); /* 2**32 / 1000000 */ } +EXPORT_SYMBOL(__udelay); void __ndelay(unsigned long nsecs) { __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */ } +EXPORT_SYMBOL(__ndelay); diff --git a/arch/x86_64/lib/memmove.c b/arch/x86_64/lib/memmove.c index e93d525..517eb1bd 100644 --- a/arch/x86_64/lib/memmove.c +++ b/arch/x86_64/lib/memmove.c @@ -3,6 +3,7 @@ */ #define _STRING_C #include +#include #undef memmove void *memmove(void * dest,const void *src,size_t count) @@ -17,3 +18,4 @@ void *memmove(void * dest,const void *src,size_t count) } return dest; } +EXPORT_SYMBOL(memmove); diff --git a/arch/x86_64/lib/usercopy.c b/arch/x86_64/lib/usercopy.c index 9bc2c29..a82bfac 100644 --- a/arch/x86_64/lib/usercopy.c +++ b/arch/x86_64/lib/usercopy.c @@ -5,6 +5,7 @@ * Copyright 1997 Linus Torvalds * Copyright 2002 Andi Kleen */ +#include #include /* @@ -47,6 +48,7 @@ __strncpy_from_user(char *dst, const char __user *src, long count) __do_strncpy_from_user(dst, src, count, res); return res; } +EXPORT_SYMBOL(__strncpy_from_user); long strncpy_from_user(char *dst, const char __user *src, long count) @@ -56,6 +58,7 @@ strncpy_from_user(char *dst, const char __user *src, long count) __do_strncpy_from_user(dst, src, count, res); return res; } +EXPORT_SYMBOL(strncpy_from_user); /* * Zero Userspace @@ -94,7 +97,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size) [zero] "r" (0UL), [eight] "r" (8UL)); return size; } - +EXPORT_SYMBOL(__clear_user); unsigned long clear_user(void __user *to, unsigned long n) { @@ -102,6 +105,7 @@ unsigned long clear_user(void __user *to, unsigned long n) return __clear_user(to, n); return n; } +EXPORT_SYMBOL(clear_user); /* * Return the size of a string (including the ending 0) @@ -125,6 +129,7 @@ long __strnlen_user(const char __user *s, long n) s++; } } +EXPORT_SYMBOL(__strnlen_user); long strnlen_user(const char __user *s, long n) { @@ -132,6 +137,7 @@ long strnlen_user(const char __user *s, long n) return 0; return __strnlen_user(s, n); } +EXPORT_SYMBOL(strnlen_user); long strlen_user(const char __user *s) { @@ -147,6 +153,7 @@ long strlen_user(const char __user *s) s++; } } +EXPORT_SYMBOL(strlen_user); unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len) { @@ -155,3 +162,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from, unsigned le } return len; } +EXPORT_SYMBOL(copy_in_user); + diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c index ae20706..45d7d82 100644 --- a/arch/x86_64/mm/ioremap.c +++ b/arch/x86_64/mm/ioremap.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -219,6 +220,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l } return (__force void __iomem *) (offset + (char *)addr); } +EXPORT_SYMBOL(__ioremap); /** * ioremap_nocache - map 
bus memory into CPU space @@ -246,6 +248,7 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size) { return __ioremap(phys_addr, size, _PAGE_PCD); } +EXPORT_SYMBOL(ioremap_nocache); /** * iounmap - Free a IO remapping @@ -291,3 +294,5 @@ void iounmap(volatile void __iomem *addr) BUG_ON(p != o || o == NULL); kfree(p); } +EXPORT_SYMBOL(iounmap); + -- cgit v1.1 From 6bfa9bb5195c68f75bd3937e4d77cb0bfe852d0d Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 26 Jun 2006 13:59:47 +0200 Subject: [PATCH] x86_64: Minor string functions optimizations - Use a tail call from strncpy_from_user to __strncpy_from_user to save some code size - Use standard memcpy for forward memmove Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/lib/memmove.c | 2 +- arch/x86_64/lib/usercopy.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86_64/lib/memmove.c b/arch/x86_64/lib/memmove.c index 517eb1bd..751ebae 100644 --- a/arch/x86_64/lib/memmove.c +++ b/arch/x86_64/lib/memmove.c @@ -9,7 +9,7 @@ void *memmove(void * dest,const void *src,size_t count) { if (dest < src) { - __inline_memcpy(dest,src,count); + return memcpy(dest,src,count); } else { char *p = (char *) dest + count; char *s = (char *) src + count; diff --git a/arch/x86_64/lib/usercopy.c b/arch/x86_64/lib/usercopy.c index a82bfac..893d43f 100644 --- a/arch/x86_64/lib/usercopy.c +++ b/arch/x86_64/lib/usercopy.c @@ -55,7 +55,7 @@ strncpy_from_user(char *dst, const char __user *src, long count) { long res = -EFAULT; if (access_ok(VERIFY_READ, src, 1)) - __do_strncpy_from_user(dst, src, count, res); + return __strncpy_from_user(dst, src, count); return res; } EXPORT_SYMBOL(strncpy_from_user); -- cgit v1.1 From 03fdc2c277afdbd217eccd1ce0cfeff77ebd8b77 Mon Sep 17 00:00:00 2001 From: Chuck Ebbert <76306.1226@compuserve.com> Date: Mon, 26 Jun 2006 13:59:50 +0200 Subject: [PATCH] x86_64: enlarge window for stack growth Allow stack growth so the 'enter' instruction works. Also fixes a problem in compat_sys_kexec_load() which could allocate more than 128 bytes using compat_alloc_user_space(). Signed-off-by: Chuck Ebbert <76306.1226@compuserve.com> Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/mm/fault.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c index 6472889..4968a71 100644 --- a/arch/x86_64/mm/fault.c +++ b/arch/x86_64/mm/fault.c @@ -410,8 +410,10 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, if (!(vma->vm_flags & VM_GROWSDOWN)) goto bad_area; if (error_code & 4) { - // XXX: align red zone size with ABI - if (address + 128 < regs->rsp) + /* Allow userspace just enough access below the stack pointer + * to let the 'enter' instruction work. + */ + if (address + 65536 + 32 * sizeof(unsigned long) < regs->rsp) goto bad_area; } if (expand_stack(vma, address)) -- cgit v1.1 From 704fc59e1d056de80beaf30174bc8e0b1682efbb Mon Sep 17 00:00:00 2001 From: "Siddha, Suresh B" Date: Mon, 26 Jun 2006 13:59:53 +0200 Subject: [PATCH] x86_64: fix apic error on bootup The appended patch fixes the "APIC error on CPUX: 00(40)" observed during bootup. From the SDM Vol-3A "Valid Interrupt Vectors" section: "When an illegal vector value (0-15) is written to an LVT entry and the delivery mode is Fixed, the APIC may signal an illegal vector error, without regard to whether the mask bit is set or whether an interrupt is actually seen on input."
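For illustration, the rule the SDM quote implies can be captured in a small helper (a sketch only, not part of the patch; the name mask_lvt_safely is hypothetical, while apic_read, apic_write, and APIC_LVT_MASKED are used as in the diff below): when masking an LVT entry, write the mask bit together with a legal vector, so Fixed delivery mode can never see a vector in the 0-15 range.

    /* Hypothetical helper, for illustration only: mask an LVT entry
     * while keeping a legal vector (>= 16) in bits 7:0, so the APIC
     * cannot flag an illegal vector error as described above. */
    static void mask_lvt_safely(unsigned long lvt_reg, unsigned int valid_vector)
    {
            unsigned long v = apic_read(lvt_reg);

            v &= ~0xFFUL;                        /* drop a possibly-zero vector */
            v |= APIC_LVT_MASKED | valid_vector; /* mask and legal vector together */
            apic_write(lvt_reg, v);
    }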
Signed-off-by: Suresh Siddha Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/kernel/apic.c | 15 +++++++++++++-- arch/x86_64/kernel/apic.c | 15 +++++++++++++-- 2 files changed, 26 insertions(+), 4 deletions(-) diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c index 5f197e1..7ce0949 100644 --- a/arch/i386/kernel/apic.c +++ b/arch/i386/kernel/apic.c @@ -157,7 +157,7 @@ void clear_local_APIC(void) maxlvt = get_maxlvt(); /* - * Masking an LVT entry on a P6 can trigger a local APIC error + * Masking an LVT entry can trigger a local APIC error * if the vector is zero. Mask LVTERR first to prevent this. */ if (maxlvt >= 3) { @@ -1118,7 +1118,18 @@ void disable_APIC_timer(void) unsigned long v; v = apic_read(APIC_LVTT); - apic_write_around(APIC_LVTT, v | APIC_LVT_MASKED); + /* + * When an illegal vector value (0-15) is written to an LVT + * entry and delivery mode is Fixed, the APIC may signal an + * illegal vector error, without regard to whether the mask + * bit is set or whether an interrupt is actually seen on input. + * + * Boot sequence might call this function when the LVTT has + * '0' vector value. So make sure vector field is set to + * valid value. + */ + v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); + apic_write_around(APIC_LVTT, v); } } diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c index 396e125..b2ead91 100644 --- a/arch/x86_64/kernel/apic.c +++ b/arch/x86_64/kernel/apic.c @@ -100,7 +100,7 @@ void clear_local_APIC(void) maxlvt = get_maxlvt(); /* - * Masking an LVT entry on a P6 can trigger a local APIC error + * Masking an LVT entry can trigger a local APIC error * if the vector is zero. Mask LVTERR first to prevent this. */ if (maxlvt >= 3) { @@ -851,7 +851,18 @@ void disable_APIC_timer(void) unsigned long v; v = apic_read(APIC_LVTT); - apic_write(APIC_LVTT, v | APIC_LVT_MASKED); + /* + * When an illegal vector value (0-15) is written to an LVT + * entry and delivery mode is Fixed, the APIC may signal an + * illegal vector error, without regard to whether the mask + * bit is set or whether an interrupt is actually seen on input. + * + * Boot sequence might call this function when the LVTT has + * '0' vector value. So make sure vector field is set to + * valid value. + */ + v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); + apic_write(APIC_LVTT, v); } } -- cgit v1.1 From e77deacb7b078156fcadf27b838a4ce1a65eda04 Mon Sep 17 00:00:00 2001 From: Keith Owens Date: Mon, 26 Jun 2006 13:59:56 +0200 Subject: [PATCH] x86_64: Avoid broadcasting NMI IPIs On some i386/x86_64 systems, sending an NMI IPI as a broadcast will reset the system. This seems to be a BIOS bug which affects machines where one or more cpus are not under OS control. It occurs on HT systems running a version of the OS that was compiled without HT support. It also occurs when a system is booted with max_cpus=n where 2 <= n < the number of cpus known to the BIOS. The fix is to always send the NMI IPI as a mask instead of as a broadcast.
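The mask-based path that replaces the broadcast looks roughly like this (an illustrative sketch assembled from the genapic_flat.c diff below, not a separate change; send_nmi_allbutself is a hypothetical name, the other identifiers are the ones the patch itself uses):

    /* Sketch: never broadcast an NMI; build an explicit mask of the
     * other online cpus and send the IPI only to them. */
    static void send_nmi_allbutself(void)
    {
            cpumask_t allbutme = cpu_online_map;

            cpu_clear(smp_processor_id(), allbutme);
            if (!cpus_empty(allbutme))
                    flat_send_IPI_mask(allbutme, NMI_VECTOR);
    }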
Signed-off-by: Keith Owens Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/genapic_flat.c | 25 ++++++++++++++++--------- include/asm-i386/mach-default/mach_ipi.h | 7 +++++-- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/arch/x86_64/kernel/genapic_flat.c b/arch/x86_64/kernel/genapic_flat.c index c66ca7b..21c7066 100644 --- a/arch/x86_64/kernel/genapic_flat.c +++ b/arch/x86_64/kernel/genapic_flat.c @@ -78,22 +78,29 @@ static void flat_send_IPI_mask(cpumask_t cpumask, int vector) static void flat_send_IPI_allbutself(int vector) { -#ifndef CONFIG_HOTPLUG_CPU - if (((num_online_cpus()) - 1) >= 1) - __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL); +#ifdef CONFIG_HOTPLUG_CPU + int hotplug = 1; #else - cpumask_t allbutme = cpu_online_map; + int hotplug = 0; +#endif + if (hotplug || vector == NMI_VECTOR) { + cpumask_t allbutme = cpu_online_map; - cpu_clear(smp_processor_id(), allbutme); + cpu_clear(smp_processor_id(), allbutme); - if (!cpus_empty(allbutme)) - flat_send_IPI_mask(allbutme, vector); -#endif + if (!cpus_empty(allbutme)) + flat_send_IPI_mask(allbutme, vector); + } else if (num_online_cpus() > 1) { + __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL); + } } static void flat_send_IPI_all(int vector) { - __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL); + if (vector == NMI_VECTOR) + flat_send_IPI_mask(cpu_online_map, vector); + else + __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL); } static int flat_apic_id_registered(void) diff --git a/include/asm-i386/mach-default/mach_ipi.h b/include/asm-i386/mach-default/mach_ipi.h index a1d0072..0dba244 100644 --- a/include/asm-i386/mach-default/mach_ipi.h +++ b/include/asm-i386/mach-default/mach_ipi.h @@ -1,6 +1,9 @@ #ifndef __ASM_MACH_IPI_H #define __ASM_MACH_IPI_H +/* Avoid include hell */ +#define NMI_VECTOR 0x02 + void send_IPI_mask_bitmask(cpumask_t mask, int vector); void __send_IPI_shortcut(unsigned int shortcut, int vector); @@ -13,7 +16,7 @@ static inline void send_IPI_mask(cpumask_t mask, int vector) static inline void __local_send_IPI_allbutself(int vector) { - if (no_broadcast) { + if (no_broadcast || vector == NMI_VECTOR) { cpumask_t mask = cpu_online_map; cpu_clear(smp_processor_id(), mask); @@ -24,7 +27,7 @@ static inline void __local_send_IPI_allbutself(int vector) static inline void __local_send_IPI_all(int vector) { - if (no_broadcast) + if (no_broadcast || vector == NMI_VECTOR) send_IPI_mask(cpu_online_map, vector); else __send_IPI_shortcut(APIC_DEST_ALLINC, vector); -- cgit v1.1 From 0080e667550db5ae8c9318181500c413b99ff164 Mon Sep 17 00:00:00 2001 From: Venkatesh Pallipadi Date: Mon, 26 Jun 2006 13:59:59 +0200 Subject: [PATCH] x86_64: i386/x86-64 Add nmi watchdog support for new Intel CPUs Intel now has support for Architectural Performance Monitoring Counters (refer to the IA-32 Intel Architecture Software Developer's Manual, http://www.intel.com/design/pentium4/manuals/253669.htm). This feature is present starting from Intel Core Duo and Intel Core Solo processors. This means the performance monitoring counters and some performance monitoring events are now defined in an architectural way (using cpuid), and there is no need to check for family/model etc. for these architectural events. Below is the patch to use these performance counters in the NMI watchdog driver. The patch handles both i386 and x86-64 kernels.
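The detection the patch performs reduces to two cpuid reads against leaf 10 (0xA), sketched here (illustration only; has_arch_perfmon is a hypothetical name, and the PRESENT constant is the one the patch introduces): EAX[7:0] carries the perfmon version, EAX[15:8] the number of general-purpose counters, and a clear bit 0 in EBX means the Unhalted Core Cycles event is available.

    /* Sketch of the checks added below: cpuid leaf 10 (0xA) describes
     * architectural perfmon. */
    static int has_arch_perfmon(void)
    {
            unsigned eax = cpuid_eax(10);
            unsigned ebx = cpuid_ebx(10);

            /* need version > 0 and more than one counter */
            if (!(eax & 0xff) || (((eax >> 8) & 0xff) <= 1))
                    return 0;
            /* bit 0 clear in ebx => Unhalted Core Cycles event present */
            return !(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT);
    }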
Signed-off-by: Venkatesh Pallipadi Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/i386/kernel/cpu/intel.c | 6 +++ arch/i386/kernel/nmi.c | 65 +++++++++++++++++++++++++- arch/x86_64/kernel/nmi.c | 81 +++++++++++++++++++++++++++++++-- arch/x86_64/kernel/setup.c | 7 +++ include/asm-i386/cpufeature.h | 1 + include/asm-i386/intel_arch_perfmon.h | 19 ++++++++ include/asm-x86_64/cpufeature.h | 2 +- include/asm-x86_64/intel_arch_perfmon.h | 19 ++++++++ 8 files changed, 193 insertions(+), 7 deletions(-) create mode 100644 include/asm-i386/intel_arch_perfmon.h create mode 100644 include/asm-x86_64/intel_arch_perfmon.h diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c index 5386b29..10afc64 100644 --- a/arch/i386/kernel/cpu/intel.c +++ b/arch/i386/kernel/cpu/intel.c @@ -122,6 +122,12 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) select_idle_routine(c); l2 = init_intel_cacheinfo(c); + if (c->cpuid_level > 9 ) { + unsigned eax = cpuid_eax(10); + /* Check for version and the number of counters */ + if ((eax & 0xff) && (((eax>>8) & 0xff) > 1)) + set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability); + } /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */ if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c index bd38754..a76e931 100644 --- a/arch/i386/kernel/nmi.c +++ b/arch/i386/kernel/nmi.c @@ -24,6 +24,7 @@ #include <asm/smp.h> #include <asm/nmi.h> +#include <asm/intel_arch_perfmon.h> #include "mach_traps.h" @@ -95,6 +96,9 @@ int nmi_active; (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \ P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE) +#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL +#define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK + #ifdef CONFIG_SMP /* The performance counters used by NMI_LOCAL_APIC don't trigger when * the CPU is idle. To make sure the NMI watchdog really ticks on all @@ -207,6 +211,8 @@ static int __init setup_nmi_watchdog(char *str) __setup("nmi_watchdog=", setup_nmi_watchdog); +static void disable_intel_arch_watchdog(void); + static void disable_lapic_nmi_watchdog(void) { if (nmi_active <= 0) @@ -216,6 +222,10 @@ static void disable_lapic_nmi_watchdog(void) wrmsr(MSR_K7_EVNTSEL0, 0, 0); break; case X86_VENDOR_INTEL: + if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { + disable_intel_arch_watchdog(); + break; + } switch (boot_cpu_data.x86) { case 6: if (boot_cpu_data.x86_model > 0xd) @@ -444,6 +454,53 @@ static int setup_p4_watchdog(void) return 1; } +static void disable_intel_arch_watchdog(void) +{ + unsigned ebx; + + /* + * Check whether the Architectural PerfMon supports + * Unhalted Core Cycles Event or not. + * NOTE: Corresponding bit = 0 in ebx indicates event present. + */ + ebx = cpuid_ebx(10); + if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)) + wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0); +} + +static int setup_intel_arch_watchdog(void) +{ + unsigned int evntsel; + unsigned ebx; + + /* + * Check whether the Architectural PerfMon supports + * Unhalted Core Cycles Event or not. + * NOTE: Corresponding bit = 0 in ebx indicates event present.
+ */ + ebx = cpuid_ebx(10); + if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)) + return 0; + + nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0; + + clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2); + clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2); + + evntsel = ARCH_PERFMON_EVENTSEL_INT + | ARCH_PERFMON_EVENTSEL_OS + | ARCH_PERFMON_EVENTSEL_USR + | ARCH_PERFMON_NMI_EVENT_SEL + | ARCH_PERFMON_NMI_EVENT_UMASK; + + wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0); + write_watchdog_counter("INTEL_ARCH_PERFCTR0"); + apic_write(APIC_LVTPC, APIC_DM_NMI); + evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE; + wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0); + return 1; +} + void setup_apic_nmi_watchdog (void) { switch (boot_cpu_data.x86_vendor) { @@ -453,6 +510,11 @@ void setup_apic_nmi_watchdog (void) setup_k7_watchdog(); break; case X86_VENDOR_INTEL: + if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { + if (!setup_intel_arch_watchdog()) + return; + break; + } switch (boot_cpu_data.x86) { case 6: if (boot_cpu_data.x86_model > 0xd) @@ -556,7 +618,8 @@ void nmi_watchdog_tick (struct pt_regs * regs) wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0); apic_write(APIC_LVTPC, APIC_DM_NMI); } - else if (nmi_perfctr_msr == MSR_P6_PERFCTR0) { + else if (nmi_perfctr_msr == MSR_P6_PERFCTR0 || + nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) { /* Only P6 based Pentium M need to re-unmask * the apic vector but it doesn't hurt * other P6 variant */ diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c index ab421e2..399489c 100644 --- a/arch/x86_64/kernel/nmi.c +++ b/arch/x86_64/kernel/nmi.c @@ -27,6 +27,7 @@ #include <asm/kdebug.h> #include <asm/local.h> #include <asm/mce.h> +#include <asm/intel_arch_perfmon.h> /* * lapic_nmi_owner tracks the ownership of the lapic NMI hardware: @@ -66,6 +67,9 @@ static unsigned int nmi_p4_cccr_val; #define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76 #define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING +#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL +#define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK + #define MSR_P4_MISC_ENABLE 0x1A0 #define MSR_P4_MISC_ENABLE_PERF_AVAIL (1<<7) #define MSR_P4_MISC_ENABLE_PEBS_UNAVAIL (1<<12) @@ -97,7 +101,10 @@ static __cpuinit inline int nmi_known_cpu(void) case X86_VENDOR_AMD: return boot_cpu_data.x86 == 15; case X86_VENDOR_INTEL: - return boot_cpu_data.x86 == 15; + if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) + return 1; + else + return (boot_cpu_data.x86 == 15); } return 0; } @@ -203,6 +210,8 @@ int __init setup_nmi_watchdog(char *str) __setup("nmi_watchdog=", setup_nmi_watchdog); +static void disable_intel_arch_watchdog(void); + static void disable_lapic_nmi_watchdog(void) { if (nmi_active <= 0) @@ -215,6 +224,8 @@ static void disable_lapic_nmi_watchdog(void) if (boot_cpu_data.x86 == 15) { wrmsr(MSR_P4_IQ_CCCR0, 0, 0); wrmsr(MSR_P4_CRU_ESCR0, 0, 0); + } else if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { + disable_intel_arch_watchdog(); } break; } @@ -367,6 +378,53 @@ static void setup_k7_watchdog(void) wrmsr(MSR_K7_EVNTSEL0, evntsel, 0); } +static void disable_intel_arch_watchdog(void) +{ + unsigned ebx; + + /* + * Check whether the Architectural PerfMon supports + * Unhalted Core Cycles Event or not. + * NOTE: Corresponding bit = 0 in ebx indicates event present.
+ */ + ebx = cpuid_ebx(10); + if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)) + wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0); +} + +static int setup_intel_arch_watchdog(void) +{ + unsigned int evntsel; + unsigned ebx; + + /* + * Check whether the Architectural PerfMon supports + * Unhalted Core Cycles Event or not. + * NOTE: Corresponding bit = 0 in ebx indicates event present. + */ + ebx = cpuid_ebx(10); + if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)) + return 0; + + nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0; + + clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2); + clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2); + + evntsel = ARCH_PERFMON_EVENTSEL_INT + | ARCH_PERFMON_EVENTSEL_OS + | ARCH_PERFMON_EVENTSEL_USR + | ARCH_PERFMON_NMI_EVENT_SEL + | ARCH_PERFMON_NMI_EVENT_UMASK; + + wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0); + wrmsrl(MSR_ARCH_PERFMON_PERFCTR0, -((u64)cpu_khz * 1000 / nmi_hz)); + apic_write(APIC_LVTPC, APIC_DM_NMI); + evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE; + wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0); + return 1; +} + static int setup_p4_watchdog(void) { @@ -420,10 +478,16 @@ void setup_apic_nmi_watchdog(void) setup_k7_watchdog(); break; case X86_VENDOR_INTEL: - if (boot_cpu_data.x86 != 15) - return; - if (!setup_p4_watchdog()) + if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { + if (!setup_intel_arch_watchdog()) + return; + } else if (boot_cpu_data.x86 == 15) { + if (!setup_p4_watchdog()) + return; + } else { return; + } + break; default: @@ -508,7 +572,14 @@ void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason) */ wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0); apic_write(APIC_LVTPC, APIC_DM_NMI); - } + } else if (nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) { + /* + * For Intel-based architectural perfmon + * - LVTPC is masked on interrupt and must be + * unmasked by the LVTPC handler.
+ */ + apic_write(APIC_LVTPC, APIC_DM_NMI); + } wrmsrl(nmi_perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz)); } } diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index 2a5fce0..594642a 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -988,6 +988,13 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) unsigned n; init_intel_cacheinfo(c); + if (c->cpuid_level > 9 ) { + unsigned eax = cpuid_eax(10); + /* Check for version and the number of counters */ + if ((eax & 0xff) && (((eax>>8) & 0xff) > 1)) + set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability); + } + n = c->extended_cpuid_level; if (n >= 0x80000008) { unsigned eax = cpuid_eax(0x80000008); diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h index 3ecedba..d314ebb 100644 --- a/include/asm-i386/cpufeature.h +++ b/include/asm-i386/cpufeature.h @@ -72,6 +72,7 @@ #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */ +#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ diff --git a/include/asm-i386/intel_arch_perfmon.h b/include/asm-i386/intel_arch_perfmon.h new file mode 100644 index 0000000..134ea9c --- /dev/null +++ b/include/asm-i386/intel_arch_perfmon.h @@ -0,0 +1,19 @@ +#ifndef X86_INTEL_ARCH_PERFMON_H +#define X86_INTEL_ARCH_PERFMON_H 1 + +#define MSR_ARCH_PERFMON_PERFCTR0 0xc1 +#define MSR_ARCH_PERFMON_PERFCTR1 0xc2 + +#define MSR_ARCH_PERFMON_EVENTSEL0 0x186 +#define MSR_ARCH_PERFMON_EVENTSEL1 0x187 + +#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22) +#define ARCH_PERFMON_EVENTSEL_INT (1 << 20) +#define ARCH_PERFMON_EVENTSEL_OS (1 << 17) +#define ARCH_PERFMON_EVENTSEL_USR (1 << 16) + +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c) +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT (1 << 0) + +#endif /* X86_INTEL_ARCH_PERFMON_H */ diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h index fbf5a19..ee792fa 100644 --- a/include/asm-x86_64/cpufeature.h +++ b/include/asm-x86_64/cpufeature.h @@ -67,7 +67,7 @@ #define X86_FEATURE_SYNC_RDTSC (3*32+6) /* RDTSC syncs CPU core */ #define X86_FEATURE_FXSAVE_LEAK (3*32+7) /* FIP/FOP/FDP leaks through FXSAVE */ #define X86_FEATURE_UP (3*32+8) /* SMP kernel running on UP */ - +#define X86_FEATURE_ARCH_PERFMON (3*32+9) /* Intel Architectural PerfMon */ /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ diff --git a/include/asm-x86_64/intel_arch_perfmon.h b/include/asm-x86_64/intel_arch_perfmon.h new file mode 100644 index 0000000..59c3964 --- /dev/null +++ b/include/asm-x86_64/intel_arch_perfmon.h @@ -0,0 +1,19 @@ +#ifndef X86_64_INTEL_ARCH_PERFMON_H +#define X86_64_INTEL_ARCH_PERFMON_H 1 + +#define MSR_ARCH_PERFMON_PERFCTR0 0xc1 +#define MSR_ARCH_PERFMON_PERFCTR1 0xc2 + +#define MSR_ARCH_PERFMON_EVENTSEL0 0x186 +#define MSR_ARCH_PERFMON_EVENTSEL1 0x187 + +#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22) +#define ARCH_PERFMON_EVENTSEL_INT (1 << 20) +#define ARCH_PERFMON_EVENTSEL_OS (1 << 17) +#define ARCH_PERFMON_EVENTSEL_USR (1 << 16) + +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c) +#define 
ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT (1 << 0) + +#endif /* X86_64_INTEL_ARCH_PERFMON_H */ -- cgit v1.1 From a4cffb6444c327677f901323ecf1a13d6bd2df3f Mon Sep 17 00:00:00 2001 From: Christian Kujau Date: Mon, 26 Jun 2006 14:00:02 +0200 Subject: [PATCH] x86_64: msi_apic.c build fix CC drivers/pci/msi-apic.o In file included from include/asm/msi.h:11, from drivers/pci/msi.h:71, from drivers/pci/msi-apic.c:8: include/asm/smp.h:103: error: syntax error before '->' token akpm: nasty. It appears to be static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) conflicting with include/asm-x86_64/mach_apic.h's #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) And I don't know which patch in rc4-mm1 triggered this. Fixing this in the .c file seems wrong. Including asm/smp.h instead of linux/smp.h seems wrong too. Need that .config. Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- drivers/pci/msi-apic.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/pci/msi-apic.c b/drivers/pci/msi-apic.c index 0eb5fe9..5ed798b3 100644 --- a/drivers/pci/msi-apic.c +++ b/drivers/pci/msi-apic.c @@ -4,6 +4,7 @@ #include <linux/pci.h> #include <linux/irq.h> +#include <asm/smp.h> #include "msi.h" -- cgit v1.1 From 4961f10e2205d0ededa291e12ec634efc58aa93c Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Mon, 26 Jun 2006 14:00:05 +0200 Subject: [PATCH] x86_64: (resend) x86_64 stack overflow debugging Take two, now without spurious whitespace :( Applies to git & 2.6.17-rc6 CONFIG_DEBUG_STACKOVERFLOW existed for x86_64 in 2.4, but seems to have gone AWOL in 2.6. I've pretty much just copied this over from the 2.4 code, with appropriate tweaks for the 2.6 kernel, plus a bugfix. I'd personally rather see it printed out the way other arches do it, i.e. bytes-remaining-until-overflow, rather than having to do the subtraction yourself. Also, only 128 bytes remaining seems awfully late to issue a warning. But I'll start here :) Signed-off-by: Eric Sandeen Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/Kconfig.debug | 7 +++++++ arch/x86_64/kernel/irq.c | 28 +++++++++++++++++++++++++++- 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/arch/x86_64/Kconfig.debug b/arch/x86_64/Kconfig.debug index 0226f08..087a048 100644 --- a/arch/x86_64/Kconfig.debug +++ b/arch/x86_64/Kconfig.debug @@ -35,6 +35,13 @@ config IOMMU_LEAK Add a simple leak tracer to the IOMMU code. This is useful when you are debugging a buggy device driver that leaks IOMMU mappings. +config DEBUG_STACKOVERFLOW + bool "Check for stack overflows" + depends on DEBUG_KERNEL + help + This option will cause messages to be printed if free stack space + drops below a certain limit. + #config X86_REMOTE_DEBUG # bool "kgdb debugging stub" diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c index 207ecdc..59518d4 100644 --- a/arch/x86_64/kernel/irq.c +++ b/arch/x86_64/kernel/irq.c @@ -26,6 +26,30 @@ atomic_t irq_mis_count; #endif #endif +#ifdef CONFIG_DEBUG_STACKOVERFLOW +/* + * Probabilistic stack overflow check: + * + * Only check the stack in process context, because everything else + * runs on the big interrupt stacks. Checking reliably is too expensive, + * so we just check from interrupts.
+ */ +static inline void stack_overflow_check(struct pt_regs *regs) +{ + u64 curbase = (u64) current->thread_info; + static unsigned long warned = -60*HZ; + + if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE && + regs->rsp < curbase + sizeof(struct thread_info) + 128 && + time_after(jiffies, warned + 60*HZ)) { + printk("do_IRQ: %s near stack overflow (cur:%Lx,rsp:%lx)\n", + current->comm, curbase, regs->rsp); + show_stack(NULL,NULL); + warned = jiffies; + } +} +#endif + /* * Generic, controller-independent functions: */ @@ -96,7 +120,9 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs) exit_idle(); irq_enter(); - +#ifdef CONFIG_DEBUG_STACKOVERFLOW + stack_overflow_check(regs); +#endif __do_IRQ(irq, regs); irq_exit(); -- cgit v1.1 From 8501a2fbe762b21d2504ed3aca3b52be61b5e6e4 Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Mon, 26 Jun 2006 14:00:08 +0200 Subject: [PATCH] x86_64: x86_64 stack usage debugging Applies to git & 2.6.17-rc6 after the CONFIG_DEBUG_STACKOVERFLOW patch. Uses the same stack-zeroing mechanism as on i386 to discover maximum stack excursions. Signed-off-by: Eric Sandeen Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- arch/x86_64/Kconfig.debug | 9 +++++++++ include/asm-x86_64/thread_info.h | 13 +++++++++++++ 2 files changed, 22 insertions(+) diff --git a/arch/x86_64/Kconfig.debug b/arch/x86_64/Kconfig.debug index 087a048..1d92ab5 100644 --- a/arch/x86_64/Kconfig.debug +++ b/arch/x86_64/Kconfig.debug @@ -42,6 +42,15 @@ config DEBUG_STACKOVERFLOW This option will cause messages to be printed if free stack space drops below a certain limit. +config DEBUG_STACK_USAGE + bool "Stack utilization instrumentation" + depends on DEBUG_KERNEL + help + Enables the display of the minimum amount of free stack which each + task has ever had available in the sysrq-T and sysrq-P debug output. + + This option will slow down process creation somewhat. + #config X86_REMOTE_DEBUG # bool "kgdb debugging stub" diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h index b5e8821..2029b00 100644 --- a/include/asm-x86_64/thread_info.h +++ b/include/asm-x86_64/thread_info.h @@ -73,8 +73,21 @@ static inline struct thread_info *stack_thread_info(void) } /* thread information allocation */ +#ifdef CONFIG_DEBUG_STACK_USAGE +#define alloc_thread_info(tsk) \ + ({ \ + struct thread_info *ret; \ + \ + ret = ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER)); \ + if (ret) \ + memset(ret, 0, THREAD_SIZE); \ + ret; \ + }) +#else #define alloc_thread_info(tsk) \ ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER)) +#endif + #define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER) #else /* !__ASSEMBLY__ */ -- cgit v1.1