Diffstat (limited to 'arch')
767 files changed, 14112 insertions, 10818 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index f1cf895..80bbb8c 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -336,6 +336,73 @@ config SECCOMP_FILTER
 	  See Documentation/prctl/seccomp_filter.txt for details.
 
+config HAVE_CC_STACKPROTECTOR
+	bool
+	help
+	  An arch should select this symbol if:
+	  - its compiler supports the -fstack-protector option
+	  - it has implemented a stack canary (e.g. __stack_chk_guard)
+
+config CC_STACKPROTECTOR
+	def_bool n
+	help
+	  Set when a stack-protector mode is enabled, so that the build
+	  can enable kernel-side support for the GCC feature.
+
+choice
+	prompt "Stack Protector buffer overflow detection"
+	depends on HAVE_CC_STACKPROTECTOR
+	default CC_STACKPROTECTOR_NONE
+	help
+	  This option turns on the "stack-protector" GCC feature. This
+	  feature puts, at the beginning of functions, a canary value on
+	  the stack just before the return address, and validates
+	  the value just before actually returning. Stack based buffer
+	  overflows (that need to overwrite this return address) now also
+	  overwrite the canary, which gets detected and the attack is then
+	  neutralized via a kernel panic.
+
+config CC_STACKPROTECTOR_NONE
+	bool "None"
+	help
+	  Disable "stack-protector" GCC feature.
+
+config CC_STACKPROTECTOR_REGULAR
+	bool "Regular"
+	select CC_STACKPROTECTOR
+	help
+	  Functions will have the stack-protector canary logic added if they
+	  have an 8-byte or larger character array on the stack.
+
+	  This feature requires gcc version 4.2 or above, or a distribution
+	  gcc with the feature backported ("-fstack-protector").
+
+	  On an x86 "defconfig" build, this feature adds canary checks to
+	  about 3% of all kernel functions, which increases kernel code size
+	  by about 0.3%.
+
+config CC_STACKPROTECTOR_STRONG
+	bool "Strong"
+	select CC_STACKPROTECTOR
+	help
+	  Functions will have the stack-protector canary logic added in any
+	  of the following conditions:
+
+	  - local variable's address used as part of the right hand side of an
+	    assignment or function argument
+	  - local variable is an array (or union containing an array),
+	    regardless of array type or length
+	  - uses register local variables
+
+	  This feature requires gcc version 4.9 or above, or a distribution
+	  gcc with the feature backported ("-fstack-protector-strong").
+
+	  On an x86 "defconfig" build, this feature adds canary checks to
+	  about 20% of all kernel functions, which increases the kernel code
+	  size by about 2%.
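For readers who have not looked at what -fstack-protector actually emits, here is a rough, hand-written C approximation of the canary check described in the help text above. It is only an illustration of the mechanism (the real prologue and epilogue are generated by the compiler), and the copy_name() function and its buffer are made up; only __stack_chk_guard and __stack_chk_fail are real symbols, defined per architecture as in the arm/boot/compressed/misc.c hunk further down.

#include <string.h>

/* Canary value and failure hook; in the kernel these are provided per
 * architecture (see the arm decompressor hunk below for one example). */
extern unsigned long __stack_chk_guard;
extern void __stack_chk_fail(void);

/* Illustrative function with an on-stack buffer, written out with the
 * kind of check the compiler inserts when stack protection is enabled. */
void copy_name(const char *src)
{
	unsigned long canary = __stack_chk_guard;	/* prologue: stash the canary */
	char buf[16];

	strcpy(buf, src);	/* an overflow here runs toward the return address */

	/* epilogue: an overflow that reached the return address has also
	 * clobbered the canary, so the corruption is detected here. */
	if (canary != __stack_chk_guard)
		__stack_chk_fail();
}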
+ +endchoice + config HAVE_CONTEXT_TRACKING bool help diff --git a/arch/alpha/include/asm/barrier.h b/arch/alpha/include/asm/barrier.h index ce8860a..3832bdb 100644 --- a/arch/alpha/include/asm/barrier.h +++ b/arch/alpha/include/asm/barrier.h @@ -3,33 +3,18 @@ #include <asm/compiler.h> -#define mb() \ -__asm__ __volatile__("mb": : :"memory") +#define mb() __asm__ __volatile__("mb": : :"memory") +#define rmb() __asm__ __volatile__("mb": : :"memory") +#define wmb() __asm__ __volatile__("wmb": : :"memory") -#define rmb() \ -__asm__ __volatile__("mb": : :"memory") - -#define wmb() \ -__asm__ __volatile__("wmb": : :"memory") - -#define read_barrier_depends() \ -__asm__ __volatile__("mb": : :"memory") +#define read_barrier_depends() __asm__ __volatile__("mb": : :"memory") #ifdef CONFIG_SMP #define __ASM_SMP_MB "\tmb\n" -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() #else #define __ASM_SMP_MB -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while (0) #endif -#define set_mb(var, value) \ -do { var = value; mb(); } while (0) +#include <asm-generic/barrier.h> #endif /* __BARRIER_H */ diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 2ee0c9b..9063ae6 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -8,6 +8,7 @@ config ARC def_bool y + select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS # ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev select DEVTMPFS if !INITRAMFS_SOURCE="" diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild index 5943f7f..9ae21c1 100644 --- a/arch/arc/include/asm/Kbuild +++ b/arch/arc/include/asm/Kbuild @@ -1,4 +1,5 @@ generic-y += auxvec.h +generic-y += barrier.h generic-y += bugs.h generic-y += bitsperlong.h generic-y += clkdev.h diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index 83f03ca..03e494f 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h @@ -190,6 +190,11 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) #endif /* !CONFIG_ARC_HAS_LLSC */ +#define smp_mb__before_atomic_dec() barrier() +#define smp_mb__after_atomic_dec() barrier() +#define smp_mb__before_atomic_inc() barrier() +#define smp_mb__after_atomic_inc() barrier() + /** * __atomic_add_unless - add unless the number is a given value * @v: pointer of type atomic_t diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h index f6cb7c4..c32245c 100644 --- a/arch/arc/include/asm/barrier.h +++ b/arch/arc/include/asm/barrier.h @@ -30,11 +30,6 @@ #define smp_wmb() barrier() #endif -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() - #define smp_read_barrier_depends() do { } while (0) #endif diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h index 6f30484..39e58d1 100644 --- a/arch/arc/include/uapi/asm/unistd.h +++ b/arch/arc/include/uapi/asm/unistd.h @@ -8,6 +8,13 @@ /******** no-legacy-syscalls-ABI *******/ +/* + * Non-typical guard macro to enable inclusion twice in ARCH sys.c + * That is how the Generic syscall wrapper generator works + */ +#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL) +#define _UAPI_ASM_ARC_UNISTD_H + #define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_SYS_VFORK @@ -32,3 +39,7 
@@ __SYSCALL(__NR_arc_gettls, sys_arc_gettls) /* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */ #define __NR_sysfs (__NR_arch_specific_syscall + 3) __SYSCALL(__NR_sysfs, sys_sysfs) + +#undef __SYSCALL + +#endif diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c index e46d81f..63177e4 100644 --- a/arch/arc/kernel/perf_event.c +++ b/arch/arc/kernel/perf_event.c @@ -79,9 +79,9 @@ static int arc_pmu_cache_event(u64 config) cache_result = (config >> 16) & 0xff; if (cache_type >= PERF_COUNT_HW_CACHE_MAX) return -EINVAL; - if (cache_type >= PERF_COUNT_HW_CACHE_OP_MAX) + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) return -EINVAL; - if (cache_type >= PERF_COUNT_HW_CACHE_RESULT_MAX) + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) return -EINVAL; ret = arc_pmu_cache_map[cache_type][cache_op][cache_result]; diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index c1f1a7e..ab1689c 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -30,6 +30,7 @@ config ARM select HAVE_BPF_JIT select HAVE_CONTEXT_TRACKING select HAVE_C_RECORDMCOUNT + select HAVE_CC_STACKPROTECTOR select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_API_DEBUG select HAVE_DMA_ATTRS @@ -710,7 +711,6 @@ config ARCH_S3C24XX select HAVE_S3C2410_WATCHDOG if WATCHDOG select HAVE_S3C_RTC if RTC_CLASS select MULTI_IRQ_HANDLER - select NEED_MACH_GPIO_H select NEED_MACH_IO_H select SAMSUNG_ATAGS help @@ -723,6 +723,7 @@ config ARCH_S3C64XX bool "Samsung S3C64XX" select ARCH_HAS_CPUFREQ select ARCH_REQUIRE_GPIOLIB + select ARM_AMBA select ARM_VIC select CLKDEV_LOOKUP select CLKSRC_SAMSUNG_PWM @@ -733,7 +734,6 @@ config ARCH_S3C64XX select HAVE_S3C2410_I2C if I2C select HAVE_S3C2410_WATCHDOG if WATCHDOG select HAVE_TCM - select NEED_MACH_GPIO_H select NO_IOPORT select PLAT_SAMSUNG select PM_GENERIC_DOMAINS @@ -1593,7 +1593,7 @@ config ARM_PSCI config ARCH_NR_GPIO int default 1024 if ARCH_SHMOBILE || ARCH_TEGRA - default 512 if ARCH_EXYNOS || ARCH_KEYSTONE || SOC_OMAP5 || SOC_DRA7XX + default 512 if ARCH_EXYNOS || ARCH_KEYSTONE || SOC_OMAP5 || SOC_DRA7XX || ARCH_S3C24XX || ARCH_S3C64XX default 392 if ARCH_U8500 default 352 if ARCH_VT8500 default 288 if ARCH_SUNXI @@ -1856,18 +1856,6 @@ config SECCOMP and the task is only allowed to execute a few safe syscalls defined by each seccomp mode. -config CC_STACKPROTECTOR - bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" - help - This option turns on the -fstack-protector GCC feature. This - feature puts, at the beginning of functions, a canary value on - the stack just before the return address, and validates - the value just before actually returning. Stack based buffer - overflows (that need to overwrite this return address) now also - overwrite the canary, which gets detected and the attack is then - neutralized via a kernel panic. - This feature requires gcc version 4.2 or above. 
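The arch/arc/kernel/perf_event.c hunk above fixes a copy-and-paste bug: cache_type was bounds-checked three times while cache_op and cache_result were never checked, so a malformed config could index past the end of the cache map. Below is a minimal sketch of the corrected validation, assuming the generic perf cache-event encoding (type in bits 0-7, op in bits 8-15, result in bits 16-23); the extern declaration of arc_pmu_cache_map is only there to make the sketch self-contained.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/perf_event.h>

/* The driver's own lookup table, sketched here as an extern. */
extern const int arc_pmu_cache_map[PERF_COUNT_HW_CACHE_MAX]
				  [PERF_COUNT_HW_CACHE_OP_MAX]
				  [PERF_COUNT_HW_CACHE_RESULT_MAX];

static int arc_pmu_cache_event(u64 config)
{
	unsigned int cache_type   = (config >>  0) & 0xff;
	unsigned int cache_op     = (config >>  8) & 0xff;
	unsigned int cache_result = (config >> 16) & 0xff;

	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;
	/* before the fix, cache_type was re-tested here, so a bogus op or
	 * result index could read past the end of arc_pmu_cache_map */
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	return arc_pmu_cache_map[cache_type][cache_op][cache_result];
}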
- config SWIOTLB def_bool y diff --git a/arch/arm/Makefile b/arch/arm/Makefile index c99b108..55b4255 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -40,10 +40,6 @@ ifeq ($(CONFIG_FRAME_POINTER),y) KBUILD_CFLAGS +=-fno-omit-frame-pointer -mapcs -mno-sched-prolog endif -ifeq ($(CONFIG_CC_STACKPROTECTOR),y) -KBUILD_CFLAGS +=-fstack-protector -endif - ifeq ($(CONFIG_CPU_BIG_ENDIAN),y) KBUILD_CPPFLAGS += -mbig-endian AS += -EB diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c index 31bd43b..d4f891f 100644 --- a/arch/arm/boot/compressed/misc.c +++ b/arch/arm/boot/compressed/misc.c @@ -127,6 +127,18 @@ asmlinkage void __div0(void) error("Attempting division by 0!"); } +unsigned long __stack_chk_guard; + +void __stack_chk_guard_setup(void) +{ + __stack_chk_guard = 0x000a0dff; +} + +void __stack_chk_fail(void) +{ + error("stack-protector: Kernel stack is corrupted\n"); +} + extern int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x)); @@ -137,6 +149,8 @@ decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p, { int ret; + __stack_chk_guard_setup(); + output_data = (unsigned char *)output_start; free_mem_ptr = free_mem_ptr_p; free_mem_end_ptr = free_mem_ptr_end_p; diff --git a/arch/arm/boot/dts/am335x-base0033.dts b/arch/arm/boot/dts/am335x-base0033.dts index b4f95c2..72a9b3f 100644 --- a/arch/arm/boot/dts/am335x-base0033.dts +++ b/arch/arm/boot/dts/am335x-base0033.dts @@ -13,4 +13,83 @@ / { model = "IGEP COM AM335x on AQUILA Expansion"; compatible = "isee,am335x-base0033", "isee,am335x-igep0033", "ti,am33xx"; + + hdmi { + compatible = "ti,tilcdc,slave"; + i2c = <&i2c0>; + pinctrl-names = "default", "off"; + pinctrl-0 = <&nxp_hdmi_pins>; + pinctrl-1 = <&nxp_hdmi_off_pins>; + status = "okay"; + }; + + leds_base { + pinctrl-names = "default"; + pinctrl-0 = <&leds_base_pins>; + + compatible = "gpio-leds"; + + led@0 { + label = "base:red:user"; + gpios = <&gpio1 21 GPIO_ACTIVE_HIGH>; /* gpio1_21 */ + default-state = "off"; + }; + + led@1 { + label = "base:green:user"; + gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>; /* gpio2_0 */ + default-state = "off"; + }; + }; +}; + +&am33xx_pinmux { + nxp_hdmi_pins: pinmux_nxp_hdmi_pins { + pinctrl-single,pins = < + 0x1b0 (PIN_OUTPUT | MUX_MODE3) /* xdma_event_intr0.clkout1 */ + 0xa0 (PIN_OUTPUT | MUX_MODE0) /* lcd_data0 */ + 0xa4 (PIN_OUTPUT | MUX_MODE0) /* lcd_data1 */ + 0xa8 (PIN_OUTPUT | MUX_MODE0) /* lcd_data2 */ + 0xac (PIN_OUTPUT | MUX_MODE0) /* lcd_data3 */ + 0xb0 (PIN_OUTPUT | MUX_MODE0) /* lcd_data4 */ + 0xb4 (PIN_OUTPUT | MUX_MODE0) /* lcd_data5 */ + 0xb8 (PIN_OUTPUT | MUX_MODE0) /* lcd_data6 */ + 0xbc (PIN_OUTPUT | MUX_MODE0) /* lcd_data7 */ + 0xc0 (PIN_OUTPUT | MUX_MODE0) /* lcd_data8 */ + 0xc4 (PIN_OUTPUT | MUX_MODE0) /* lcd_data9 */ + 0xc8 (PIN_OUTPUT | MUX_MODE0) /* lcd_data10 */ + 0xcc (PIN_OUTPUT | MUX_MODE0) /* lcd_data11 */ + 0xd0 (PIN_OUTPUT | MUX_MODE0) /* lcd_data12 */ + 0xd4 (PIN_OUTPUT | MUX_MODE0) /* lcd_data13 */ + 0xd8 (PIN_OUTPUT | MUX_MODE0) /* lcd_data14 */ + 0xdc (PIN_OUTPUT | MUX_MODE0) /* lcd_data15 */ + 0xe0 (PIN_OUTPUT | MUX_MODE0) /* lcd_vsync */ + 0xe4 (PIN_OUTPUT | MUX_MODE0) /* lcd_hsync */ + 0xe8 (PIN_OUTPUT | MUX_MODE0) /* lcd_pclk */ + 0xec (PIN_OUTPUT | MUX_MODE0) /* lcd_ac_bias_en */ + >; + }; + nxp_hdmi_off_pins: pinmux_nxp_hdmi_off_pins { + pinctrl-single,pins = < + 0x1b0 (PIN_OUTPUT | MUX_MODE3) /* xdma_event_intr0.clkout1 */ + >; + }; + + leds_base_pins: pinmux_leds_base_pins { + pinctrl-single,pins = < + 0x54 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* 
gpmc_a5.gpio1_21 */ + 0x88 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_csn3.gpio2_0 */ + >; + }; +}; + +&lcdc { + status = "okay"; +}; + +&i2c0 { + eeprom: eeprom@50 { + compatible = "at,24c256"; + reg = <0x50>; + }; }; diff --git a/arch/arm/boot/dts/am335x-igep0033.dtsi b/arch/arm/boot/dts/am335x-igep0033.dtsi index 6196244..7063311 100644 --- a/arch/arm/boot/dts/am335x-igep0033.dtsi +++ b/arch/arm/boot/dts/am335x-igep0033.dtsi @@ -199,6 +199,35 @@ pinctrl-0 = <&uart0_pins>; }; +&usb { + status = "okay"; + + control@44e10000 { + status = "okay"; + }; + + usb-phy@47401300 { + status = "okay"; + }; + + usb-phy@47401b00 { + status = "okay"; + }; + + usb@47401000 { + status = "okay"; + }; + + usb@47401800 { + status = "okay"; + dr_mode = "host"; + }; + + dma-controller@07402000 { + status = "okay"; + }; +}; + #include "tps65910.dtsi" &tps { diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts index e99dfaf..03fcbf0 100644 --- a/arch/arm/boot/dts/am3517-evm.dts +++ b/arch/arm/boot/dts/am3517-evm.dts @@ -7,11 +7,11 @@ */ /dts-v1/; -#include "omap34xx.dtsi" +#include "am3517.dtsi" / { - model = "TI AM3517 EVM (AM3517/05)"; - compatible = "ti,am3517-evm", "ti,omap3"; + model = "TI AM3517 EVM (AM3517/05 TMDSEVM3517)"; + compatible = "ti,am3517-evm", "ti,am3517", "ti,omap3"; memory { device_type = "memory"; diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi new file mode 100644 index 0000000..2fbe02f --- /dev/null +++ b/arch/arm/boot/dts/am3517.dtsi @@ -0,0 +1,63 @@ +/* + * Device Tree Source for am3517 SoC + * + * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include "omap3.dtsi" + +/ { + aliases { + serial3 = &uart4; + }; + + ocp { + am35x_otg_hs: am35x_otg_hs@5c040000 { + compatible = "ti,omap3-musb"; + ti,hwmods = "am35x_otg_hs"; + status = "disabled"; + reg = <0x5c040000 0x1000>; + interrupts = <71>; + interrupt-names = "mc"; + }; + + davinci_emac: ethernet@0x5c000000 { + compatible = "ti,am3517-emac"; + ti,hwmods = "davinci_emac"; + status = "disabled"; + reg = <0x5c000000 0x30000>; + interrupts = <67 68 69 70>; + ti,davinci-ctrl-reg-offset = <0x10000>; + ti,davinci-ctrl-mod-reg-offset = <0>; + ti,davinci-ctrl-ram-offset = <0x20000>; + ti,davinci-ctrl-ram-size = <0x2000>; + ti,davinci-rmii-en = /bits/ 8 <1>; + local-mac-address = [ 00 00 00 00 00 00 ]; + }; + + davinci_mdio: ethernet@0x5c030000 { + compatible = "ti,davinci_mdio"; + ti,hwmods = "davinci_mdio"; + status = "disabled"; + reg = <0x5c030000 0x1000>; + bus_freq = <1000000>; + #address-cells = <1>; + #size-cells = <0>; + }; + + uart4: serial@4809e000 { + compatible = "ti,omap3-uart"; + ti,hwmods = "uart4"; + status = "disabled"; + reg = <0x4809e000 0x400>; + interrupts = <84>; + dmas = <&sdma 55 &sdma 54>; + dma-names = "tx", "rx"; + clock-frequency = <48000000>; + }; + }; +}; diff --git a/arch/arm/boot/dts/armada-370-db.dts b/arch/arm/boot/dts/armada-370-db.dts index 90ce29d..08a56bc 100644 --- a/arch/arm/boot/dts/armada-370-db.dts +++ b/arch/arm/boot/dts/armada-370-db.dts @@ -99,22 +99,22 @@ spi-max-frequency = <50000000>; }; }; + }; - pcie-controller { + pcie-controller { + status = "okay"; + /* + * The two PCIe units are accessible through + * both standard PCIe slots and mini-PCIe + * slots on the board. 
+ */ + pcie@1,0 { + /* Port 0, Lane 0 */ + status = "okay"; + }; + pcie@2,0 { + /* Port 1, Lane 0 */ status = "okay"; - /* - * The two PCIe units are accessible through - * both standard PCIe slots and mini-PCIe - * slots on the board. - */ - pcie@1,0 { - /* Port 0, Lane 0 */ - status = "okay"; - }; - pcie@2,0 { - /* Port 1, Lane 0 */ - status = "okay"; - }; }; }; }; diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi index 00d6a79..7f10f62 100644 --- a/arch/arm/boot/dts/armada-370-xp.dtsi +++ b/arch/arm/boot/dts/armada-370-xp.dtsi @@ -118,7 +118,7 @@ coherency-fabric@20200 { compatible = "marvell,coherency-fabric"; - reg = <0x20200 0xb0>, <0x21810 0x1c>; + reg = <0x20200 0xb0>, <0x21010 0x1c>; }; serial@12000 { diff --git a/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/arch/arm/boot/dts/armada-xp-mv78230.dtsi index 3f5e612..98335fb 100644 --- a/arch/arm/boot/dts/armada-xp-mv78230.dtsi +++ b/arch/arm/boot/dts/armada-xp-mv78230.dtsi @@ -47,7 +47,7 @@ /* * MV78230 has 2 PCIe units Gen2.0: One unit can be * configured as x4 or quad x1 lanes. One unit is - * x4/x1. + * x1 only. */ pcie-controller { compatible = "marvell,armada-xp-pcie"; @@ -62,10 +62,10 @@ ranges = <0x82000000 0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x00002000 /* Port 0.0 registers */ - 0x82000000 0 0x42000 MBUS_ID(0xf0, 0x01) 0x42000 0 0x00002000 /* Port 2.0 registers */ 0x82000000 0 0x44000 MBUS_ID(0xf0, 0x01) 0x44000 0 0x00002000 /* Port 0.1 registers */ 0x82000000 0 0x48000 MBUS_ID(0xf0, 0x01) 0x48000 0 0x00002000 /* Port 0.2 registers */ 0x82000000 0 0x4c000 MBUS_ID(0xf0, 0x01) 0x4c000 0 0x00002000 /* Port 0.3 registers */ + 0x82000000 0 0x80000 MBUS_ID(0xf0, 0x01) 0x80000 0 0x00002000 /* Port 1.0 registers */ 0x82000000 0x1 0 MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 0.0 MEM */ 0x81000000 0x1 0 MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 0.0 IO */ 0x82000000 0x2 0 MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 0.1 MEM */ @@ -74,8 +74,8 @@ 0x81000000 0x3 0 MBUS_ID(0x04, 0xb0) 0 1 0 /* Port 0.2 IO */ 0x82000000 0x4 0 MBUS_ID(0x04, 0x78) 0 1 0 /* Port 0.3 MEM */ 0x81000000 0x4 0 MBUS_ID(0x04, 0x70) 0 1 0 /* Port 0.3 IO */ - 0x82000000 0x9 0 MBUS_ID(0x04, 0xf8) 0 1 0 /* Port 2.0 MEM */ - 0x81000000 0x9 0 MBUS_ID(0x04, 0xf0) 0 1 0 /* Port 2.0 IO */>; + 0x82000000 0x5 0 MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 1.0 MEM */ + 0x81000000 0x5 0 MBUS_ID(0x08, 0xe0) 0 1 0 /* Port 1.0 IO */>; pcie@1,0 { device_type = "pci"; @@ -145,20 +145,20 @@ status = "disabled"; }; - pcie@9,0 { + pcie@5,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x42000 0 0x2000>; - reg = <0x4800 0 0 0 0>; + assigned-addresses = <0x82000800 0 0x80000 0 0x2000>; + reg = <0x2800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; #interrupt-cells = <1>; - ranges = <0x82000000 0 0 0x82000000 0x9 0 1 0 - 0x81000000 0 0 0x81000000 0x9 0 1 0>; + ranges = <0x82000000 0 0 0x82000000 0x5 0 1 0 + 0x81000000 0 0 0x81000000 0x5 0 1 0>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &mpic 99>; - marvell,pcie-port = <2>; + interrupt-map = <0 0 0 0 &mpic 62>; + marvell,pcie-port = <1>; marvell,pcie-lane = <0>; - clocks = <&gateclk 26>; + clocks = <&gateclk 9>; status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi index 3e9fd13..6660968 100644 --- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi +++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi @@ -48,7 +48,7 @@ /* * MV78260 has 3 PCIe units Gen2.0: Two units can be * configured as x4 or quad x1 lanes. One unit is - * x4/x1. + * x4 only. 
*/ pcie-controller { compatible = "marvell,armada-xp-pcie"; @@ -68,7 +68,9 @@ 0x82000000 0 0x48000 MBUS_ID(0xf0, 0x01) 0x48000 0 0x00002000 /* Port 0.2 registers */ 0x82000000 0 0x4c000 MBUS_ID(0xf0, 0x01) 0x4c000 0 0x00002000 /* Port 0.3 registers */ 0x82000000 0 0x80000 MBUS_ID(0xf0, 0x01) 0x80000 0 0x00002000 /* Port 1.0 registers */ - 0x82000000 0 0x82000 MBUS_ID(0xf0, 0x01) 0x82000 0 0x00002000 /* Port 3.0 registers */ + 0x82000000 0 0x84000 MBUS_ID(0xf0, 0x01) 0x84000 0 0x00002000 /* Port 1.1 registers */ + 0x82000000 0 0x88000 MBUS_ID(0xf0, 0x01) 0x88000 0 0x00002000 /* Port 1.2 registers */ + 0x82000000 0 0x8c000 MBUS_ID(0xf0, 0x01) 0x8c000 0 0x00002000 /* Port 1.3 registers */ 0x82000000 0x1 0 MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 0.0 MEM */ 0x81000000 0x1 0 MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 0.0 IO */ 0x82000000 0x2 0 MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 0.1 MEM */ @@ -77,10 +79,18 @@ 0x81000000 0x3 0 MBUS_ID(0x04, 0xb0) 0 1 0 /* Port 0.2 IO */ 0x82000000 0x4 0 MBUS_ID(0x04, 0x78) 0 1 0 /* Port 0.3 MEM */ 0x81000000 0x4 0 MBUS_ID(0x04, 0x70) 0 1 0 /* Port 0.3 IO */ - 0x82000000 0x9 0 MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 1.0 MEM */ - 0x81000000 0x9 0 MBUS_ID(0x08, 0xe0) 0 1 0 /* Port 1.0 IO */ - 0x82000000 0xa 0 MBUS_ID(0x08, 0xf8) 0 1 0 /* Port 3.0 MEM */ - 0x81000000 0xa 0 MBUS_ID(0x08, 0xf0) 0 1 0 /* Port 3.0 IO */>; + + 0x82000000 0x5 0 MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 1.0 MEM */ + 0x81000000 0x5 0 MBUS_ID(0x08, 0xe0) 0 1 0 /* Port 1.0 IO */ + 0x82000000 0x6 0 MBUS_ID(0x08, 0xd8) 0 1 0 /* Port 1.1 MEM */ + 0x81000000 0x6 0 MBUS_ID(0x08, 0xd0) 0 1 0 /* Port 1.1 IO */ + 0x82000000 0x7 0 MBUS_ID(0x08, 0xb8) 0 1 0 /* Port 1.2 MEM */ + 0x81000000 0x7 0 MBUS_ID(0x08, 0xb0) 0 1 0 /* Port 1.2 IO */ + 0x82000000 0x8 0 MBUS_ID(0x08, 0x78) 0 1 0 /* Port 1.3 MEM */ + 0x81000000 0x8 0 MBUS_ID(0x08, 0x70) 0 1 0 /* Port 1.3 IO */ + + 0x82000000 0x9 0 MBUS_ID(0x04, 0xf8) 0 1 0 /* Port 2.0 MEM */ + 0x81000000 0x9 0 MBUS_ID(0x04, 0xf0) 0 1 0 /* Port 2.0 IO */>; pcie@1,0 { device_type = "pci"; @@ -106,8 +116,8 @@ #address-cells = <3>; #size-cells = <2>; #interrupt-cells = <1>; - ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0 - 0x81000000 0 0 0x81000000 0x2 0 1 0>; + ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0 + 0x81000000 0 0 0x81000000 0x2 0 1 0>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &mpic 59>; marvell,pcie-port = <0>; @@ -150,37 +160,88 @@ status = "disabled"; }; - pcie@9,0 { + pcie@5,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x42000 0 0x2000>; - reg = <0x4800 0 0 0 0>; + assigned-addresses = <0x82000800 0 0x80000 0 0x2000>; + reg = <0x2800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; #interrupt-cells = <1>; - ranges = <0x82000000 0 0 0x82000000 0x9 0 1 0 - 0x81000000 0 0 0x81000000 0x9 0 1 0>; + ranges = <0x82000000 0 0 0x82000000 0x5 0 1 0 + 0x81000000 0 0 0x81000000 0x5 0 1 0>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &mpic 99>; - marvell,pcie-port = <2>; + interrupt-map = <0 0 0 0 &mpic 62>; + marvell,pcie-port = <1>; marvell,pcie-lane = <0>; - clocks = <&gateclk 26>; + clocks = <&gateclk 9>; status = "disabled"; }; - pcie@10,0 { + pcie@6,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x82000 0 0x2000>; - reg = <0x5000 0 0 0 0>; + assigned-addresses = <0x82000800 0 0x84000 0 0x2000>; + reg = <0x3000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; #interrupt-cells = <1>; - ranges = <0x82000000 0 0 0x82000000 0xa 0 1 0 - 0x81000000 0 0 0x81000000 0xa 0 1 0>; + ranges = <0x82000000 0 0 0x82000000 0x6 0 1 0 + 0x81000000 0 
0 0x81000000 0x6 0 1 0>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &mpic 103>; - marvell,pcie-port = <3>; + interrupt-map = <0 0 0 0 &mpic 63>; + marvell,pcie-port = <1>; + marvell,pcie-lane = <1>; + clocks = <&gateclk 10>; + status = "disabled"; + }; + + pcie@7,0 { + device_type = "pci"; + assigned-addresses = <0x82000800 0 0x88000 0 0x2000>; + reg = <0x3800 0 0 0 0>; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + ranges = <0x82000000 0 0 0x82000000 0x7 0 1 0 + 0x81000000 0 0 0x81000000 0x7 0 1 0>; + interrupt-map-mask = <0 0 0 0>; + interrupt-map = <0 0 0 0 &mpic 64>; + marvell,pcie-port = <1>; + marvell,pcie-lane = <2>; + clocks = <&gateclk 11>; + status = "disabled"; + }; + + pcie@8,0 { + device_type = "pci"; + assigned-addresses = <0x82000800 0 0x8c000 0 0x2000>; + reg = <0x4000 0 0 0 0>; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + ranges = <0x82000000 0 0 0x82000000 0x8 0 1 0 + 0x81000000 0 0 0x81000000 0x8 0 1 0>; + interrupt-map-mask = <0 0 0 0>; + interrupt-map = <0 0 0 0 &mpic 65>; + marvell,pcie-port = <1>; + marvell,pcie-lane = <3>; + clocks = <&gateclk 12>; + status = "disabled"; + }; + + pcie@9,0 { + device_type = "pci"; + assigned-addresses = <0x82000800 0 0x42000 0 0x2000>; + reg = <0x4800 0 0 0 0>; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + ranges = <0x82000000 0 0 0x82000000 0x9 0 1 0 + 0x81000000 0 0 0x81000000 0x9 0 1 0>; + interrupt-map-mask = <0 0 0 0>; + interrupt-map = <0 0 0 0 &mpic 99>; + marvell,pcie-port = <2>; marvell,pcie-lane = <0>; - clocks = <&gateclk 27>; + clocks = <&gateclk 26>; status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/at91sam9x5_usart3.dtsi b/arch/arm/boot/dts/at91sam9x5_usart3.dtsi index 2347e95..6801106 100644 --- a/arch/arm/boot/dts/at91sam9x5_usart3.dtsi +++ b/arch/arm/boot/dts/at91sam9x5_usart3.dtsi @@ -11,6 +11,10 @@ #include <dt-bindings/interrupt-controller/irq.h> / { + aliases { + serial4 = &usart3; + }; + ahb { apb { pinctrl@fffff400 { diff --git a/arch/arm/boot/dts/bcm11351.dtsi b/arch/arm/boot/dts/bcm11351.dtsi index b0c0610..dd8e878 100644 --- a/arch/arm/boot/dts/bcm11351.dtsi +++ b/arch/arm/boot/dts/bcm11351.dtsi @@ -142,4 +142,8 @@ status = "disabled"; }; + pinctrl@35004800 { + compatible = "brcm,capri-pinctrl"; + reg = <0x35004800 0x430>; + }; }; diff --git a/arch/arm/boot/dts/bcm2835.dtsi b/arch/arm/boot/dts/bcm2835.dtsi index 1e12aef..aa537ed 100644 --- a/arch/arm/boot/dts/bcm2835.dtsi +++ b/arch/arm/boot/dts/bcm2835.dtsi @@ -85,6 +85,8 @@ reg = <0x7e205000 0x1000>; interrupts = <2 21>; clocks = <&clk_i2c>; + #address-cells = <1>; + #size-cells = <0>; status = "disabled"; }; @@ -93,6 +95,8 @@ reg = <0x7e804000 0x1000>; interrupts = <2 21>; clocks = <&clk_i2c>; + #address-cells = <1>; + #size-cells = <0>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/cros5250-common.dtsi b/arch/arm/boot/dts/cros5250-common.dtsi index dc259e8b..9b186ac 100644 --- a/arch/arm/boot/dts/cros5250-common.dtsi +++ b/arch/arm/boot/dts/cros5250-common.dtsi @@ -27,6 +27,13 @@ i2c2_bus: i2c2-bus { samsung,pin-pud = <0>; }; + + max77686_irq: max77686-irq { + samsung,pins = "gpx3-2"; + samsung,pin-function = <0>; + samsung,pin-pud = <0>; + samsung,pin-drv = <0>; + }; }; i2c@12C60000 { @@ -35,6 +42,11 @@ max77686@09 { compatible = "maxim,max77686"; + interrupt-parent = <&gpx3>; + interrupts = <2 0>; + pinctrl-names = "default"; + pinctrl-0 = <&max77686_irq>; + wakeup-source; reg = <0x09>; voltage-regulators { diff --git 
a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi index 9db5047..177becd 100644 --- a/arch/arm/boot/dts/exynos5250.dtsi +++ b/arch/arm/boot/dts/exynos5250.dtsi @@ -559,7 +559,7 @@ compatible = "arm,pl330", "arm,primecell"; reg = <0x10800000 0x1000>; interrupts = <0 33 0>; - clocks = <&clock 271>; + clocks = <&clock 346>; clock-names = "apb_pclk"; #dma-cells = <1>; #dma-channels = <8>; diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index 59154dc..fb28b2e 100644 --- a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi @@ -161,7 +161,7 @@ clocks = <&clks 197>, <&clks 3>, <&clks 197>, <&clks 107>, <&clks 0>, <&clks 118>, - <&clks 62>, <&clks 139>, + <&clks 0>, <&clks 139>, <&clks 0>; clock-names = "core", "rxtx0", "rxtx1", "rxtx2", diff --git a/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi b/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi index 9c18adf..f577b7d 100644 --- a/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi +++ b/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi @@ -44,8 +44,8 @@ gpmc,wr-access-ns = <186>; gpmc,cycle2cycle-samecsen; gpmc,cycle2cycle-diffcsen; - vmmc-supply = <&vddvario>; - vmmc_aux-supply = <&vdd33a>; + vddvario-supply = <&vddvario>; + vdd33a-supply = <&vdd33a>; reg-io-width = <4>; smsc,save-mac-address; }; diff --git a/arch/arm/boot/dts/omap-zoom-common.dtsi b/arch/arm/boot/dts/omap-zoom-common.dtsi index b0ee342..68221fa 100644 --- a/arch/arm/boot/dts/omap-zoom-common.dtsi +++ b/arch/arm/boot/dts/omap-zoom-common.dtsi @@ -13,7 +13,7 @@ * they probably share the same GPIO IRQ * REVISIT: Add timing support from slls644g.pdf */ - 8250@3,0 { + uart@3,0 { compatible = "ns16550a"; reg = <3 0 0x100>; bank-width = <2>; diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi index a2bfcde..d0c5b37 100644 --- a/arch/arm/boot/dts/omap2.dtsi +++ b/arch/arm/boot/dts/omap2.dtsi @@ -9,6 +9,7 @@ */ #include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/interrupt-controller/irq.h> #include <dt-bindings/pinctrl/omap.h> #include "skeleton.dtsi" @@ -21,6 +22,8 @@ serial0 = &uart1; serial1 = &uart2; serial2 = &uart3; + i2c0 = &i2c1; + i2c1 = &i2c2; }; cpus { @@ -53,6 +56,28 @@ ranges; ti,hwmods = "l3_main"; + aes: aes@480a6000 { + compatible = "ti,omap2-aes"; + ti,hwmods = "aes"; + reg = <0x480a6000 0x50>; + dmas = <&sdma 9 &sdma 10>; + dma-names = "tx", "rx"; + }; + + hdq1w: 1w@480b2000 { + compatible = "ti,omap2420-1w"; + ti,hwmods = "hdq1w"; + reg = <0x480b2000 0x1000>; + interrupts = <58>; + }; + + mailbox: mailbox@48094000 { + compatible = "ti,omap2-mailbox"; + ti,hwmods = "mailbox"; + reg = <0x48094000 0x200>; + interrupts = <26>; + }; + intc: interrupt-controller@1 { compatible = "ti,omap2-intc"; interrupt-controller; @@ -63,6 +88,7 @@ sdma: dma-controller@48056000 { compatible = "ti,omap2430-sdma", "ti,omap2420-sdma"; + ti,hwmods = "dma"; reg = <0x48056000 0x1000>; interrupts = <12>, <13>, @@ -73,21 +99,91 @@ #dma-requests = <64>; }; + i2c1: i2c@48070000 { + compatible = "ti,omap2-i2c"; + ti,hwmods = "i2c1"; + reg = <0x48070000 0x80>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <56>; + dmas = <&sdma 27 &sdma 28>; + dma-names = "tx", "rx"; + }; + + i2c2: i2c@48072000 { + compatible = "ti,omap2-i2c"; + ti,hwmods = "i2c2"; + reg = <0x48072000 0x80>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <57>; + dmas = <&sdma 29 &sdma 30>; + dma-names = "tx", "rx"; + }; + + mcspi1: mcspi@48098000 { + compatible = "ti,omap2-mcspi"; + ti,hwmods = "mcspi1"; + reg = <0x48098000 0x100>; + 
interrupts = <65>; + dmas = <&sdma 35 &sdma 36 &sdma 37 &sdma 38 + &sdma 39 &sdma 40 &sdma 41 &sdma 42>; + dma-names = "tx0", "rx0", "tx1", "rx1", + "tx2", "rx2", "tx3", "rx3"; + }; + + mcspi2: mcspi@4809a000 { + compatible = "ti,omap2-mcspi"; + ti,hwmods = "mcspi2"; + reg = <0x4809a000 0x100>; + interrupts = <66>; + dmas = <&sdma 43 &sdma 44 &sdma 45 &sdma 46>; + dma-names = "tx0", "rx0", "tx1", "rx1"; + }; + + rng: rng@480a0000 { + compatible = "ti,omap2-rng"; + ti,hwmods = "rng"; + reg = <0x480a0000 0x50>; + interrupts = <36>; + }; + + sham: sham@480a4000 { + compatible = "ti,omap2-sham"; + ti,hwmods = "sham"; + reg = <0x480a4000 0x64>; + interrupts = <51>; + dmas = <&sdma 13>; + dma-names = "rx"; + }; + uart1: serial@4806a000 { compatible = "ti,omap2-uart"; ti,hwmods = "uart1"; + reg = <0x4806a000 0x2000>; + interrupts = <72>; + dmas = <&sdma 49 &sdma 50>; + dma-names = "tx", "rx"; clock-frequency = <48000000>; }; uart2: serial@4806c000 { compatible = "ti,omap2-uart"; ti,hwmods = "uart2"; + reg = <0x4806c000 0x400>; + interrupts = <73>; + dmas = <&sdma 51 &sdma 52>; + dma-names = "tx", "rx"; clock-frequency = <48000000>; }; uart3: serial@4806e000 { compatible = "ti,omap2-uart"; ti,hwmods = "uart3"; + reg = <0x4806e000 0x400>; + interrupts = <74>; + dmas = <&sdma 53 &sdma 54>; + dma-names = "tx", "rx"; clock-frequency = <48000000>; }; diff --git a/arch/arm/boot/dts/omap2420.dtsi b/arch/arm/boot/dts/omap2420.dtsi index c8f9c55..60c605d 100644 --- a/arch/arm/boot/dts/omap2420.dtsi +++ b/arch/arm/boot/dts/omap2420.dtsi @@ -114,6 +114,15 @@ dma-names = "tx", "rx"; }; + msdi1: mmc@4809c000 { + compatible = "ti,omap2420-mmc"; + ti,hwmods = "msdi1"; + reg = <0x4809c000 0x80>; + interrupts = <83>; + dmas = <&sdma 61 &sdma 62>; + dma-names = "tx", "rx"; + }; + timer1: timer@48028000 { compatible = "ti,omap2420-timer"; reg = <0x48028000 0x400>; @@ -121,5 +130,19 @@ ti,hwmods = "timer1"; ti,timer-alwon; }; + + wd_timer2: wdt@48022000 { + compatible = "ti,omap2-wdt"; + ti,hwmods = "wd_timer2"; + reg = <0x48022000 0x80>; + }; }; }; + +&i2c1 { + compatible = "ti,omap2420-i2c"; +}; + +&i2c2 { + compatible = "ti,omap2420-i2c"; +}; diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi index c535a5a..d624345 100644 --- a/arch/arm/boot/dts/omap2430.dtsi +++ b/arch/arm/boot/dts/omap2430.dtsi @@ -175,6 +175,25 @@ dma-names = "tx", "rx"; }; + mmc1: mmc@4809c000 { + compatible = "ti,omap2-hsmmc"; + reg = <0x4809c000 0x200>; + interrupts = <83>; + ti,hwmods = "mmc1"; + ti,dual-volt; + dmas = <&sdma 61>, <&sdma 62>; + dma-names = "tx", "rx"; + }; + + mmc2: mmc@480b4000 { + compatible = "ti,omap2-hsmmc"; + reg = <0x480b4000 0x200>; + interrupts = <86>; + ti,hwmods = "mmc2"; + dmas = <&sdma 47>, <&sdma 48>; + dma-names = "tx", "rx"; + }; + timer1: timer@49018000 { compatible = "ti,omap2420-timer"; reg = <0x49018000 0x400>; @@ -182,5 +201,35 @@ ti,hwmods = "timer1"; ti,timer-alwon; }; + + mcspi3: mcspi@480b8000 { + compatible = "ti,omap2-mcspi"; + ti,hwmods = "mcspi3"; + reg = <0x480b8000 0x100>; + interrupts = <91>; + dmas = <&sdma 15 &sdma 16 &sdma 23 &sdma 24>; + dma-names = "tx0", "rx0", "tx1", "rx1"; + }; + + usb_otg_hs: usb_otg_hs@480ac000 { + compatible = "ti,omap2-musb"; + ti,hwmods = "usb_otg_hs"; + reg = <0x480ac000 0x1000>; + interrupts = <93>; + }; + + wd_timer2: wdt@49016000 { + compatible = "ti,omap2-wdt"; + ti,hwmods = "wd_timer2"; + reg = <0x49016000 0x80>; + }; }; }; + +&i2c1 { + compatible = "ti,omap2430-i2c"; +}; + +&i2c2 { + compatible = "ti,omap2430-i2c"; +}; diff 
--git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts index 31a632f..df33a50 100644 --- a/arch/arm/boot/dts/omap3-beagle-xm.dts +++ b/arch/arm/boot/dts/omap3-beagle-xm.dts @@ -215,3 +215,10 @@ &usbhsehci { phys = <0 &hsusb2_phy>; }; + +&vaux2 { + regulator-name = "usb_1v8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; +}; diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts index fa532aa..3ba4a62 100644 --- a/arch/arm/boot/dts/omap3-beagle.dts +++ b/arch/arm/boot/dts/omap3-beagle.dts @@ -61,6 +61,14 @@ vcc-supply = <&hsusb2_power>; }; + sound { + compatible = "ti,omap-twl4030"; + ti,model = "omap3beagle"; + + ti,mcbsp = <&mcbsp2>; + ti,codec = <&twl_audio>; + }; + gpio_keys { compatible = "gpio-keys"; @@ -120,6 +128,12 @@ reg = <0x48>; interrupts = <7>; /* SYS_NIRQ cascaded to intc */ interrupt-parent = <&intc>; + + twl_audio: audio { + compatible = "ti,twl4030-audio"; + codec { + }; + }; }; }; @@ -178,3 +192,10 @@ mode = <3>; power = <50>; }; + +&vaux2 { + regulator-name = "vdd_ehci"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; +}; diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi index ba1e58b..165aaf7 100644 --- a/arch/arm/boot/dts/omap3-igep.dtsi +++ b/arch/arm/boot/dts/omap3-igep.dtsi @@ -1,5 +1,5 @@ /* - * Device Tree Source for IGEP Technology devices + * Common device tree for IGEP boards based on AM/DM37x * * Copyright (C) 2012 Javier Martinez Canillas <javier@collabora.co.uk> * Copyright (C) 2012 Enric Balletbo i Serra <eballetbo@gmail.com> @@ -10,7 +10,7 @@ */ /dts-v1/; -#include "omap34xx.dtsi" +#include "omap36xx.dtsi" / { memory { @@ -24,6 +24,25 @@ ti,mcbsp = <&mcbsp2>; ti,codec = <&twl_audio>; }; + + vdd33: regulator-vdd33 { + compatible = "regulator-fixed"; + regulator-name = "vdd33"; + regulator-always-on; + }; + + lbee1usjyc_vmmc: lbee1usjyc_vmmc { + pinctrl-names = "default"; + pinctrl-0 = <&lbee1usjyc_pins>; + compatible = "regulator-fixed"; + regulator-name = "regulator-lbee1usjyc"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio5 10 GPIO_ACTIVE_HIGH>; /* gpio_138 WIFI_PDN */ + startup-delay-us = <10000>; + enable-active-high; + vin-supply = <&vdd33>; + }; }; &omap3_pmx_core { @@ -48,6 +67,15 @@ >; }; + /* WiFi/BT combo */ + lbee1usjyc_pins: pinmux_lbee1usjyc_pins { + pinctrl-single,pins = < + 0x136 (PIN_OUTPUT | MUX_MODE4) /* sdmmc2_dat5.gpio_137 */ + 0x138 (PIN_OUTPUT | MUX_MODE4) /* sdmmc2_dat6.gpio_138 */ + 0x13a (PIN_OUTPUT | MUX_MODE4) /* sdmmc2_dat7.gpio_139 */ + >; + }; + mcbsp2_pins: pinmux_mcbsp2_pins { pinctrl-single,pins = < 0x10c (PIN_INPUT | MUX_MODE0) /* mcbsp2_fsx.mcbsp2_fsx */ @@ -65,10 +93,17 @@ 0x11a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat1.sdmmc1_dat1 */ 0x11c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat2.sdmmc1_dat2 */ 0x11e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat3.sdmmc1_dat3 */ - 0x120 (PIN_INPUT | MUX_MODE0) /* sdmmc1_dat4.sdmmc1_dat4 */ - 0x122 (PIN_INPUT | MUX_MODE0) /* sdmmc1_dat5.sdmmc1_dat5 */ - 0x124 (PIN_INPUT | MUX_MODE0) /* sdmmc1_dat6.sdmmc1_dat6 */ - 0x126 (PIN_INPUT | MUX_MODE0) /* sdmmc1_dat7.sdmmc1_dat7 */ + >; + }; + + mmc2_pins: pinmux_mmc2_pins { + pinctrl-single,pins = < + 0x128 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_clk.sdmmc2_clk */ + 0x12a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_cmd.sdmmc2_cmd */ + 0x12c (PIN_INPUT_PULLUP | MUX_MODE0) /* 
sdmmc2_dat0.sdmmc2_dat0 */ + 0x12e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */ + 0x130 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat2.sdmmc2_dat2 */ + 0x132 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3.sdmmc2_dat3 */ >; }; @@ -78,10 +113,33 @@ >; }; + i2c1_pins: pinmux_i2c1_pins { + pinctrl-single,pins = < + 0x18a (PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */ + 0x18c (PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */ + >; + }; + + i2c2_pins: pinmux_i2c2_pins { + pinctrl-single,pins = < + 0x18e (PIN_INPUT | MUX_MODE0) /* i2c2_scl.i2c2_scl */ + 0x190 (PIN_INPUT | MUX_MODE0) /* i2c2_sda.i2c2_sda */ + >; + }; + + i2c3_pins: pinmux_i2c3_pins { + pinctrl-single,pins = < + 0x192 (PIN_INPUT | MUX_MODE0) /* i2c3_scl.i2c3_scl */ + 0x194 (PIN_INPUT | MUX_MODE0) /* i2c3_sda.i2c3_sda */ + >; + }; + leds_pins: pinmux_leds_pins { }; }; &i2c1 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c1_pins>; clock-frequency = <2600000>; twl: twl@48 { @@ -101,9 +159,16 @@ #include "twl4030_omap3.dtsi" &i2c2 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c2_pins>; clock-frequency = <400000>; }; +&i2c3 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c3_pins>; +}; + &mcbsp2 { pinctrl-names = "default"; pinctrl-0 = <&mcbsp2_pins>; @@ -114,11 +179,15 @@ pinctrl-0 = <&mmc1_pins>; vmmc-supply = <&vmmc1>; vmmc_aux-supply = <&vsim>; - bus-width = <8>; + bus-width = <4>; }; &mmc2 { - status = "disabled"; + pinctrl-names = "default"; + pinctrl-0 = <&mmc2_pins>; + vmmc-supply = <&lbee1usjyc_vmmc>; + bus-width = <4>; + non-removable; }; &mmc3 { diff --git a/arch/arm/boot/dts/omap3-igep0020.dts b/arch/arm/boot/dts/omap3-igep0020.dts index d5cc792..1c7e74d 100644 --- a/arch/arm/boot/dts/omap3-igep0020.dts +++ b/arch/arm/boot/dts/omap3-igep0020.dts @@ -1,5 +1,5 @@ /* - * Device Tree Source for IGEPv2 board + * Device Tree Source for IGEPv2 Rev. 
(TI OMAP AM/DM37x) * * Copyright (C) 2012 Javier Martinez Canillas <javier@collabora.co.uk> * Copyright (C) 2012 Enric Balletbo i Serra <eballetbo@gmail.com> @@ -13,7 +13,7 @@ #include "omap-gpmc-smsc911x.dtsi" / { - model = "IGEPv2"; + model = "IGEPv2 (TI OMAP AM/DM37x)"; compatible = "isee,omap3-igep0020", "ti,omap3"; leds { @@ -67,6 +67,8 @@ pinctrl-names = "default"; pinctrl-0 = < &hsusbb1_pins + &tfp410_pins + &dss_pins >; hsusbb1_pins: pinmux_hsusbb1_pins { @@ -85,6 +87,45 @@ 0x5ba (PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d7.hsusb1_data3 */ >; }; + + tfp410_pins: tfp410_dvi_pins { + pinctrl-single,pins = < + 0x196 (PIN_OUTPUT | MUX_MODE4) /* hdq_sio.gpio_170 */ + >; + }; + + dss_pins: pinmux_dss_dvi_pins { + pinctrl-single,pins = < + 0x0a4 (PIN_OUTPUT | MUX_MODE0) /* dss_pclk.dss_pclk */ + 0x0a6 (PIN_OUTPUT | MUX_MODE0) /* dss_hsync.dss_hsync */ + 0x0a8 (PIN_OUTPUT | MUX_MODE0) /* dss_vsync.dss_vsync */ + 0x0aa (PIN_OUTPUT | MUX_MODE0) /* dss_acbias.dss_acbias */ + 0x0ac (PIN_OUTPUT | MUX_MODE0) /* dss_data0.dss_data0 */ + 0x0ae (PIN_OUTPUT | MUX_MODE0) /* dss_data1.dss_data1 */ + 0x0b0 (PIN_OUTPUT | MUX_MODE0) /* dss_data2.dss_data2 */ + 0x0b2 (PIN_OUTPUT | MUX_MODE0) /* dss_data3.dss_data3 */ + 0x0b4 (PIN_OUTPUT | MUX_MODE0) /* dss_data4.dss_data4 */ + 0x0b6 (PIN_OUTPUT | MUX_MODE0) /* dss_data5.dss_data5 */ + 0x0b8 (PIN_OUTPUT | MUX_MODE0) /* dss_data6.dss_data6 */ + 0x0ba (PIN_OUTPUT | MUX_MODE0) /* dss_data7.dss_data7 */ + 0x0bc (PIN_OUTPUT | MUX_MODE0) /* dss_data8.dss_data8 */ + 0x0be (PIN_OUTPUT | MUX_MODE0) /* dss_data9.dss_data9 */ + 0x0c0 (PIN_OUTPUT | MUX_MODE0) /* dss_data10.dss_data10 */ + 0x0c2 (PIN_OUTPUT | MUX_MODE0) /* dss_data11.dss_data11 */ + 0x0c4 (PIN_OUTPUT | MUX_MODE0) /* dss_data12.dss_data12 */ + 0x0c6 (PIN_OUTPUT | MUX_MODE0) /* dss_data13.dss_data13 */ + 0x0c8 (PIN_OUTPUT | MUX_MODE0) /* dss_data14.dss_data14 */ + 0x0ca (PIN_OUTPUT | MUX_MODE0) /* dss_data15.dss_data15 */ + 0x0cc (PIN_OUTPUT | MUX_MODE0) /* dss_data16.dss_data16 */ + 0x0ce (PIN_OUTPUT | MUX_MODE0) /* dss_data17.dss_data17 */ + 0x0d0 (PIN_OUTPUT | MUX_MODE0) /* dss_data18.dss_data18 */ + 0x0d2 (PIN_OUTPUT | MUX_MODE0) /* dss_data19.dss_data19 */ + 0x0d4 (PIN_OUTPUT | MUX_MODE0) /* dss_data20.dss_data20 */ + 0x0d6 (PIN_OUTPUT | MUX_MODE0) /* dss_data21.dss_data21 */ + 0x0d8 (PIN_OUTPUT | MUX_MODE0) /* dss_data22.dss_data22 */ + 0x0da (PIN_OUTPUT | MUX_MODE0) /* dss_data23.dss_data23 */ + >; + }; }; &leds_pins { @@ -174,3 +215,8 @@ &usbhsehci { phys = <&hsusb1_phy>; }; + +&vpll2 { + /* Needed for DSS */ + regulator-name = "vdds_dsi"; +}; diff --git a/arch/arm/boot/dts/omap3-igep0030.dts b/arch/arm/boot/dts/omap3-igep0030.dts index 525e6d9..02a23f8 100644 --- a/arch/arm/boot/dts/omap3-igep0030.dts +++ b/arch/arm/boot/dts/omap3-igep0030.dts @@ -1,5 +1,5 @@ /* - * Device Tree Source for IGEP COM Module + * Device Tree Source for IGEP COM MODULE (TI OMAP AM/DM37x) * * Copyright (C) 2012 Javier Martinez Canillas <javier@collabora.co.uk> * Copyright (C) 2012 Enric Balletbo i Serra <eballetbo@gmail.com> @@ -12,7 +12,7 @@ #include "omap3-igep.dtsi" / { - model = "IGEP COM Module"; + model = "IGEP COM MODULE (TI OMAP AM/DM37x)"; compatible = "isee,omap3-igep0030", "ti,omap3"; leds { diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index c4f20bf..6fc85f9 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -9,7 +9,7 @@ /dts-v1/; -#include "omap34xx.dtsi" +#include "omap34xx-hs.dtsi" / { model = "Nokia N900"; @@ -125,6 +125,21 @@ >; }; + 
mmc2_pins: pinmux_mmc2_pins { + pinctrl-single,pins = < + 0x128 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_clk */ + 0x12a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_cmd */ + 0x12c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat0 */ + 0x12e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1 */ + 0x130 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat2 */ + 0x132 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3 */ + 0x134 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat4 */ + 0x136 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat5 */ + 0x138 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat6 */ + 0x13a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat7 */ + >; + }; + display_pins: pinmux_display_pins { pinctrl-single,pins = < 0x0d4 (PIN_OUTPUT | MUX_MODE4) /* RX51_LCD_RESET_GPIO */ @@ -358,8 +373,14 @@ cd-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>; /* 160 */ }; +/* most boards use vaux3, only some old versions use vmmc2 instead */ &mmc2 { - status = "disabled"; + pinctrl-names = "default"; + pinctrl-0 = <&mmc2_pins>; + vmmc-supply = <&vaux3>; + vmmc_aux-supply = <&vsim>; + bus-width = <8>; + non-removable; }; &mmc3 { diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi index 94eb77d..5c26c18 100644 --- a/arch/arm/boot/dts/omap3-n950-n9.dtsi +++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi @@ -8,7 +8,7 @@ * published by the Free Software Foundation. */ -#include "omap36xx.dtsi" +#include "omap36xx-hs.dtsi" / { cpus { diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi index f3a0c26..daabf99 100644 --- a/arch/arm/boot/dts/omap3.dtsi +++ b/arch/arm/boot/dts/omap3.dtsi @@ -82,6 +82,13 @@ ranges; ti,hwmods = "l3_main"; + aes: aes@480c5000 { + compatible = "ti,omap3-aes"; + ti,hwmods = "aes"; + reg = <0x480c5000 0x50>; + interrupts = <0>; + }; + counter32k: counter@48320000 { compatible = "ti,omap-counter32k"; reg = <0x48320000 0x20>; @@ -260,6 +267,13 @@ ti,hwmods = "i2c3"; }; + mailbox: mailbox@48094000 { + compatible = "ti,omap3-mailbox"; + ti,hwmods = "mailbox"; + reg = <0x48094000 0x200>; + interrupts = <26>; + }; + mcspi1: spi@48098000 { compatible = "ti,omap2-mcspi"; reg = <0x48098000 0x100>; @@ -357,6 +371,13 @@ dma-names = "tx", "rx"; }; + mmu_isp: mmu@480bd400 { + compatible = "ti,omap3-mmu-isp"; + ti,hwmods = "mmu_isp"; + reg = <0x480bd400 0x80>; + interrupts = <8>; + }; + wdt2: wdt@48314000 { compatible = "ti,omap3-wdt"; reg = <0x48314000 0x80>; @@ -442,6 +463,27 @@ dma-names = "tx", "rx"; }; + sham: sham@480c3000 { + compatible = "ti,omap3-sham"; + ti,hwmods = "sham"; + reg = <0x480c3000 0x64>; + interrupts = <49>; + }; + + smartreflex_core: smartreflex@480cb000 { + compatible = "ti,omap3-smartreflex-core"; + ti,hwmods = "smartreflex_core"; + reg = <0x480cb000 0x400>; + interrupts = <19>; + }; + + smartreflex_mpu_iva: smartreflex@480c9000 { + compatible = "ti,omap3-smartreflex-iva"; + ti,hwmods = "smartreflex_mpu_iva"; + reg = <0x480c9000 0x400>; + interrupts = <18>; + }; + timer1: timer@48318000 { compatible = "ti,omap3430-timer"; reg = <0x48318000 0x400>; diff --git a/arch/arm/boot/dts/omap34xx-hs.dtsi b/arch/arm/boot/dts/omap34xx-hs.dtsi new file mode 100644 index 0000000..1ff6264 --- /dev/null +++ b/arch/arm/boot/dts/omap34xx-hs.dtsi @@ -0,0 +1,16 @@ +/* Disabled modules for secure omaps */ + +#include "omap34xx.dtsi" + +/* Secure omaps have some devices inaccessible depending on the firmware */ +&aes { + status = "disabled"; +}; + +&sham { + status = "disabled"; +}; + +&timer12 { + status = "disabled"; +}; diff --git a/arch/arm/boot/dts/omap36xx-hs.dtsi 
b/arch/arm/boot/dts/omap36xx-hs.dtsi new file mode 100644 index 0000000..2c7febb --- /dev/null +++ b/arch/arm/boot/dts/omap36xx-hs.dtsi @@ -0,0 +1,16 @@ +/* Disabled modules for secure omaps */ + +#include "omap36xx.dtsi" + +/* Secure omaps have some devices inaccessible depending on the firmware */ +&aes { + status = "disabled"; +}; + +&sham { + status = "disabled"; +}; + +&timer12 { + status = "disabled"; +}; diff --git a/arch/arm/boot/dts/omap4-panda-common.dtsi b/arch/arm/boot/dts/omap4-panda-common.dtsi index 298e850..88c6a05 100644 --- a/arch/arm/boot/dts/omap4-panda-common.dtsi +++ b/arch/arm/boot/dts/omap4-panda-common.dtsi @@ -246,15 +246,6 @@ 0xf0 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c4_sda */ >; }; -}; - -&omap4_pmx_wkup { - led_wkgpio_pins: pinmux_leds_wkpins { - pinctrl-single,pins = < - 0x1a (PIN_OUTPUT | MUX_MODE3) /* gpio_wk7 */ - 0x1c (PIN_OUTPUT | MUX_MODE3) /* gpio_wk8 */ - >; - }; /* * wl12xx GPIO outputs for WLAN_EN, BT_EN, FM_EN, BT_WAKEUP @@ -274,7 +265,7 @@ pinctrl-single,pins = < 0x38 (PIN_INPUT | MUX_MODE3) /* gpmc_ncs2.gpio_52 */ 0x3a (PIN_INPUT | MUX_MODE3) /* gpmc_ncs3.gpio_53 */ - 0x108 (PIN_OUTPUT | MUX_MODE0) /* sdmmc5_clk.sdmmc5_clk */ + 0x108 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_clk.sdmmc5_clk */ 0x10a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_cmd.sdmmc5_cmd */ 0x10c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat0.sdmmc5_dat0 */ 0x10e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat1.sdmmc5_dat1 */ @@ -284,6 +275,15 @@ }; }; +&omap4_pmx_wkup { + led_wkgpio_pins: pinmux_leds_wkpins { + pinctrl-single,pins = < + 0x1a (PIN_OUTPUT | MUX_MODE3) /* gpio_wk7 */ + 0x1c (PIN_OUTPUT | MUX_MODE3) /* gpio_wk8 */ + >; + }; +}; + &i2c1 { pinctrl-names = "default"; pinctrl-0 = <&i2c1_pins>; diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts index 5fc3f43..dbc81fb 100644 --- a/arch/arm/boot/dts/omap4-sdp.dts +++ b/arch/arm/boot/dts/omap4-sdp.dts @@ -300,12 +300,12 @@ wl12xx_pins: pinmux_wl12xx_pins { pinctrl-single,pins = < 0x3a (PIN_INPUT | MUX_MODE3) /* gpmc_ncs3.gpio_53 */ - 0x108 (PIN_OUTPUT | MUX_MODE3) /* sdmmc5_clk.sdmmc5_clk */ - 0x10a (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_cmd.sdmmc5_cmd */ - 0x10c (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat0.sdmmc5_dat0 */ - 0x10e (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat1.sdmmc5_dat1 */ - 0x110 (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat2.sdmmc5_dat2 */ - 0x112 (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat3.sdmmc5_dat3 */ + 0x108 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_clk.sdmmc5_clk */ + 0x10a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_cmd.sdmmc5_cmd */ + 0x10c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat0.sdmmc5_dat0 */ + 0x10e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat1.sdmmc5_dat1 */ + 0x110 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat2.sdmmc5_dat2 */ + 0x112 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat3.sdmmc5_dat3 */ >; }; }; diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi index ee845fa..9987dd0e 100644 --- a/arch/arm/boot/dts/r8a7790.dtsi +++ b/arch/arm/boot/dts/r8a7790.dtsi @@ -87,9 +87,9 @@ interrupts = <1 9 0xf04>; }; - gpio0: gpio@ffc40000 { + gpio0: gpio@e6050000 { compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; - reg = <0 0xffc40000 0 0x2c>; + reg = <0 0xe6050000 0 0x50>; interrupt-parent = <&gic>; interrupts = <0 4 0x4>; #gpio-cells = <2>; @@ -99,9 +99,9 @@ interrupt-controller; }; - gpio1: gpio@ffc41000 { + gpio1: gpio@e6051000 { compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; - reg = <0 0xffc41000 0 0x2c>; + reg = <0 0xe6051000 0 
0x50>; interrupt-parent = <&gic>; interrupts = <0 5 0x4>; #gpio-cells = <2>; @@ -111,9 +111,9 @@ interrupt-controller; }; - gpio2: gpio@ffc42000 { + gpio2: gpio@e6052000 { compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; - reg = <0 0xffc42000 0 0x2c>; + reg = <0 0xe6052000 0 0x50>; interrupt-parent = <&gic>; interrupts = <0 6 0x4>; #gpio-cells = <2>; @@ -123,9 +123,9 @@ interrupt-controller; }; - gpio3: gpio@ffc43000 { + gpio3: gpio@e6053000 { compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; - reg = <0 0xffc43000 0 0x2c>; + reg = <0 0xe6053000 0 0x50>; interrupt-parent = <&gic>; interrupts = <0 7 0x4>; #gpio-cells = <2>; @@ -135,9 +135,9 @@ interrupt-controller; }; - gpio4: gpio@ffc44000 { + gpio4: gpio@e6054000 { compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; - reg = <0 0xffc44000 0 0x2c>; + reg = <0 0xe6054000 0 0x50>; interrupt-parent = <&gic>; interrupts = <0 8 0x4>; #gpio-cells = <2>; @@ -147,9 +147,9 @@ interrupt-controller; }; - gpio5: gpio@ffc45000 { + gpio5: gpio@e6055000 { compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; - reg = <0 0xffc45000 0 0x2c>; + reg = <0 0xe6055000 0 0x50>; interrupt-parent = <&gic>; interrupts = <0 9 0x4>; #gpio-cells = <2>; @@ -241,7 +241,7 @@ sdhi0: sdhi@ee100000 { compatible = "renesas,sdhi-r8a7790"; - reg = <0 0xee100000 0 0x100>; + reg = <0 0xee100000 0 0x200>; interrupt-parent = <&gic>; interrupts = <0 165 4>; cap-sd-highspeed; @@ -250,7 +250,7 @@ sdhi1: sdhi@ee120000 { compatible = "renesas,sdhi-r8a7790"; - reg = <0 0xee120000 0 0x100>; + reg = <0 0xee120000 0 0x200>; interrupt-parent = <&gic>; interrupts = <0 166 4>; cap-sd-highspeed; diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi index 6d09b8d..f936476 100644 --- a/arch/arm/boot/dts/socfpga.dtsi +++ b/arch/arm/boot/dts/socfpga.dtsi @@ -245,14 +245,14 @@ mpu_periph_clk: mpu_periph_clk { #clock-cells = <0>; - compatible = "altr,socfpga-gate-clk"; + compatible = "altr,socfpga-perip-clk"; clocks = <&mpuclk>; fixed-divider = <4>; }; mpu_l2_ram_clk: mpu_l2_ram_clk { #clock-cells = <0>; - compatible = "altr,socfpga-gate-clk"; + compatible = "altr,socfpga-perip-clk"; clocks = <&mpuclk>; fixed-divider = <2>; }; @@ -266,8 +266,9 @@ l3_main_clk: l3_main_clk { #clock-cells = <0>; - compatible = "altr,socfpga-gate-clk"; + compatible = "altr,socfpga-perip-clk"; clocks = <&mainclk>; + fixed-divider = <1>; }; l3_mp_clk: l3_mp_clk { diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi index 5247674..e674c94 100644 --- a/arch/arm/boot/dts/sun5i-a10s.dtsi +++ b/arch/arm/boot/dts/sun5i-a10s.dtsi @@ -332,5 +332,12 @@ clock-frequency = <100000>; status = "disabled"; }; + + timer@01c60000 { + compatible = "allwinner,sun5i-a13-hstimer"; + reg = <0x01c60000 0x1000>; + interrupts = <82>, <83>; + clocks = <&ahb_gates 28>; + }; }; }; diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi index ce8ef2a..1ccd75d 100644 --- a/arch/arm/boot/dts/sun5i-a13.dtsi +++ b/arch/arm/boot/dts/sun5i-a13.dtsi @@ -273,5 +273,12 @@ clock-frequency = <100000>; status = "disabled"; }; + + timer@01c60000 { + compatible = "allwinner,sun5i-a13-hstimer"; + reg = <0x01c60000 0x1000>; + interrupts = <82>, <83>; + clocks = <&ahb_gates 28>; + }; }; }; diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index c1751a6..7f5878c 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi @@ -193,7 +193,10 @@ pio: pinctrl@01c20800 { compatible = "allwinner,sun6i-a31-pinctrl"; reg 
= <0x01c20800 0x400>; - interrupts = <0 11 1>, <0 15 1>, <0 16 1>, <0 17 1>; + interrupts = <0 11 4>, + <0 15 4>, + <0 16 4>, + <0 17 4>; clocks = <&apb1_gates 5>; gpio-controller; interrupt-controller; @@ -212,11 +215,11 @@ timer@01c20c00 { compatible = "allwinner,sun4i-timer"; reg = <0x01c20c00 0xa0>; - interrupts = <0 18 1>, - <0 19 1>, - <0 20 1>, - <0 21 1>, - <0 22 1>; + interrupts = <0 18 4>, + <0 19 4>, + <0 20 4>, + <0 21 4>, + <0 22 4>; clocks = <&osc24M>; }; @@ -228,7 +231,7 @@ uart0: serial@01c28000 { compatible = "snps,dw-apb-uart"; reg = <0x01c28000 0x400>; - interrupts = <0 0 1>; + interrupts = <0 0 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb2_gates 16>; @@ -238,7 +241,7 @@ uart1: serial@01c28400 { compatible = "snps,dw-apb-uart"; reg = <0x01c28400 0x400>; - interrupts = <0 1 1>; + interrupts = <0 1 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb2_gates 17>; @@ -248,7 +251,7 @@ uart2: serial@01c28800 { compatible = "snps,dw-apb-uart"; reg = <0x01c28800 0x400>; - interrupts = <0 2 1>; + interrupts = <0 2 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb2_gates 18>; @@ -258,7 +261,7 @@ uart3: serial@01c28c00 { compatible = "snps,dw-apb-uart"; reg = <0x01c28c00 0x400>; - interrupts = <0 3 1>; + interrupts = <0 3 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb2_gates 19>; @@ -268,7 +271,7 @@ uart4: serial@01c29000 { compatible = "snps,dw-apb-uart"; reg = <0x01c29000 0x400>; - interrupts = <0 4 1>; + interrupts = <0 4 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb2_gates 20>; @@ -278,7 +281,7 @@ uart5: serial@01c29400 { compatible = "snps,dw-apb-uart"; reg = <0x01c29400 0x400>; - interrupts = <0 5 1>; + interrupts = <0 5 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb2_gates 21>; diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index e46cfed..0135039 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi @@ -170,7 +170,7 @@ emac: ethernet@01c0b000 { compatible = "allwinner,sun4i-emac"; reg = <0x01c0b000 0x1000>; - interrupts = <0 55 1>; + interrupts = <0 55 4>; clocks = <&ahb_gates 17>; status = "disabled"; }; @@ -186,7 +186,7 @@ pio: pinctrl@01c20800 { compatible = "allwinner,sun7i-a20-pinctrl"; reg = <0x01c20800 0x400>; - interrupts = <0 28 1>; + interrupts = <0 28 4>; clocks = <&apb0_gates 5>; gpio-controller; interrupt-controller; @@ -251,12 +251,12 @@ timer@01c20c00 { compatible = "allwinner,sun4i-timer"; reg = <0x01c20c00 0x90>; - interrupts = <0 22 1>, - <0 23 1>, - <0 24 1>, - <0 25 1>, - <0 67 1>, - <0 68 1>; + interrupts = <0 22 4>, + <0 23 4>, + <0 24 4>, + <0 25 4>, + <0 67 4>, + <0 68 4>; clocks = <&osc24M>; }; @@ -273,7 +273,7 @@ uart0: serial@01c28000 { compatible = "snps,dw-apb-uart"; reg = <0x01c28000 0x400>; - interrupts = <0 1 1>; + interrupts = <0 1 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb1_gates 16>; @@ -283,7 +283,7 @@ uart1: serial@01c28400 { compatible = "snps,dw-apb-uart"; reg = <0x01c28400 0x400>; - interrupts = <0 2 1>; + interrupts = <0 2 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb1_gates 17>; @@ -293,7 +293,7 @@ uart2: serial@01c28800 { compatible = "snps,dw-apb-uart"; reg = <0x01c28800 0x400>; - interrupts = <0 3 1>; + interrupts = <0 3 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb1_gates 18>; @@ -303,7 +303,7 @@ uart3: serial@01c28c00 { compatible = "snps,dw-apb-uart"; reg = <0x01c28c00 0x400>; - interrupts = <0 4 1>; + interrupts = <0 4 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb1_gates 
19>; @@ -313,7 +313,7 @@ uart4: serial@01c29000 { compatible = "snps,dw-apb-uart"; reg = <0x01c29000 0x400>; - interrupts = <0 17 1>; + interrupts = <0 17 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb1_gates 20>; @@ -323,7 +323,7 @@ uart5: serial@01c29400 { compatible = "snps,dw-apb-uart"; reg = <0x01c29400 0x400>; - interrupts = <0 18 1>; + interrupts = <0 18 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb1_gates 21>; @@ -333,7 +333,7 @@ uart6: serial@01c29800 { compatible = "snps,dw-apb-uart"; reg = <0x01c29800 0x400>; - interrupts = <0 19 1>; + interrupts = <0 19 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb1_gates 22>; @@ -343,7 +343,7 @@ uart7: serial@01c29c00 { compatible = "snps,dw-apb-uart"; reg = <0x01c29c00 0x400>; - interrupts = <0 20 1>; + interrupts = <0 20 4>; reg-shift = <2>; reg-io-width = <4>; clocks = <&apb1_gates 23>; @@ -353,7 +353,7 @@ i2c0: i2c@01c2ac00 { compatible = "allwinner,sun4i-i2c"; reg = <0x01c2ac00 0x400>; - interrupts = <0 7 1>; + interrupts = <0 7 4>; clocks = <&apb1_gates 0>; clock-frequency = <100000>; status = "disabled"; @@ -362,7 +362,7 @@ i2c1: i2c@01c2b000 { compatible = "allwinner,sun4i-i2c"; reg = <0x01c2b000 0x400>; - interrupts = <0 8 1>; + interrupts = <0 8 4>; clocks = <&apb1_gates 1>; clock-frequency = <100000>; status = "disabled"; @@ -371,7 +371,7 @@ i2c2: i2c@01c2b400 { compatible = "allwinner,sun4i-i2c"; reg = <0x01c2b400 0x400>; - interrupts = <0 9 1>; + interrupts = <0 9 4>; clocks = <&apb1_gates 2>; clock-frequency = <100000>; status = "disabled"; @@ -380,7 +380,7 @@ i2c3: i2c@01c2b800 { compatible = "allwinner,sun4i-i2c"; reg = <0x01c2b800 0x400>; - interrupts = <0 88 1>; + interrupts = <0 88 4>; clocks = <&apb1_gates 3>; clock-frequency = <100000>; status = "disabled"; @@ -389,12 +389,22 @@ i2c4: i2c@01c2bc00 { compatible = "allwinner,sun4i-i2c"; reg = <0x01c2bc00 0x400>; - interrupts = <0 89 1>; + interrupts = <0 89 4>; clocks = <&apb1_gates 15>; clock-frequency = <100000>; status = "disabled"; }; + hstimer@01c60000 { + compatible = "allwinner,sun7i-a20-hstimer"; + reg = <0x01c60000 0x1000>; + interrupts = <0 81 1>, + <0 82 1>, + <0 83 1>, + <0 84 1>; + clocks = <&ahb_gates 28>; + }; + gic: interrupt-controller@01c81000 { compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic"; reg = <0x01c81000 0x1000>, diff --git a/arch/arm/boot/dts/tegra20-colibri-512.dtsi b/arch/arm/boot/dts/tegra20-colibri-512.dtsi index d5c9bca..cbe89ff 100644 --- a/arch/arm/boot/dts/tegra20-colibri-512.dtsi +++ b/arch/arm/boot/dts/tegra20-colibri-512.dtsi @@ -268,8 +268,8 @@ reg = <3>; regulator-compatible = "sm2"; regulator-name = "vdd_sm2,vin_ldo*"; - regulator-min-microvolt = <3700000>; - regulator-max-microvolt = <3700000>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; regulator-always-on; }; diff --git a/arch/arm/configs/bcm_defconfig b/arch/arm/configs/bcm_defconfig index 287ac1d..bede511 100644 --- a/arch/arm/configs/bcm_defconfig +++ b/arch/arm/configs/bcm_defconfig @@ -126,3 +126,4 @@ CONFIG_CRC_ITU_T=y CONFIG_CRC7=y CONFIG_XZ_DEC=y CONFIG_AVERAGE=y +CONFIG_PINCTRL_CAPRI=y diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index 4a5903e..c1df4e9 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -69,6 +69,7 @@ CONFIG_KS8851=y CONFIG_SMSC911X=y CONFIG_STMMAC_ETH=y CONFIG_MDIO_SUN4I=y +CONFIG_TI_CPSW=y CONFIG_KEYBOARD_SPEAR=y CONFIG_SERIO_AMBAKMI=y CONFIG_SERIAL_8250=y @@ -133,12 +134,14 @@ CONFIG_USB_GPIO_VBUS=y 
CONFIG_USB_ISP1301=y CONFIG_USB_MXS_PHY=y CONFIG_MMC=y +CONFIG_MMC_BLOCK_MINORS=16 CONFIG_MMC_ARMMMCI=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_ESDHC_IMX=y CONFIG_MMC_SDHCI_TEGRA=y CONFIG_MMC_SDHCI_SPEAR=y +CONFIG_MMC_SDHCI_BCM_KONA=y CONFIG_MMC_OMAP=y CONFIG_MMC_OMAP_HS=y CONFIG_EDAC=y diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index 98a50c3..bfa80a1 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -173,6 +173,7 @@ CONFIG_MFD_PALMAS=y CONFIG_MFD_TPS65217=y CONFIG_MFD_TPS65910=y CONFIG_TWL6040_CORE=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_PALMAS=y CONFIG_REGULATOR_TPS65023=y CONFIG_REGULATOR_TPS6507X=y diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig index d57a85b..3e2259b 100644 --- a/arch/arm/configs/sunxi_defconfig +++ b/arch/arm/configs/sunxi_defconfig @@ -12,6 +12,9 @@ CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_INET=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set @@ -58,4 +61,8 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=y CONFIG_LEDS_TRIGGER_DEFAULT_ON=y CONFIG_COMMON_CLK_DEBUG=y # CONFIG_IOMMU_SUPPORT is not set +CONFIG_TMPFS=y +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y CONFIG_NLS=y +CONFIG_PRINTK_TIME=y diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig index ac632cc..c6ebc18 100644 --- a/arch/arm/configs/u8500_defconfig +++ b/arch/arm/configs/u8500_defconfig @@ -22,6 +22,7 @@ CONFIG_CMDLINE="root=/dev/ram0 console=ttyAMA2,115200n8" CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_IDLE=y +CONFIG_ARM_U8500_CPUIDLE=y CONFIG_VFP=y CONFIG_NEON=y CONFIG_PM_RUNTIME=y @@ -109,6 +110,8 @@ CONFIG_EXT2_FS_SECURITY=y CONFIG_EXT3_FS=y CONFIG_EXT4_FS=y CONFIG_VFAT_FS=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y # CONFIG_MISC_FILESYSTEMS is not set diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped index 64205d4..71e5fc7 100644 --- a/arch/arm/crypto/aesbs-core.S_shipped +++ b/arch/arm/crypto/aesbs-core.S_shipped @@ -58,7 +58,7 @@ # define VFP_ABI_FRAME 0 # define BSAES_ASM_EXTENDED_KEY # define XTS_CHAIN_TWEAK -# define __ARM_ARCH__ __LINUX_ARM_ARCH__ +# define __ARM_ARCH__ 7 #endif #ifdef __thumb__ diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl index f3d96d9..be068db 100644 --- a/arch/arm/crypto/bsaes-armv7.pl +++ b/arch/arm/crypto/bsaes-armv7.pl @@ -701,7 +701,7 @@ $code.=<<___; # define VFP_ABI_FRAME 0 # define BSAES_ASM_EXTENDED_KEY # define XTS_CHAIN_TWEAK -# define __ARM_ARCH__ __LINUX_ARM_ARCH__ +# define __ARM_ARCH__ 7 #endif #ifdef __thumb__ diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h index 60f15e2..2f59f74 100644 --- a/arch/arm/include/asm/barrier.h +++ b/arch/arm/include/asm/barrier.h @@ -59,6 +59,21 @@ #define smp_wmb() dmb(ishst) #endif +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ___p1; \ +}) + #define read_barrier_depends() do { } while(0) #define smp_read_barrier_depends() do { } while(0) diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 3c597c2..fbeb39c 100644 
--- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -329,7 +329,7 @@ extern void _memset_io(volatile void __iomem *, int, size_t); */ #define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) #define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) -#define ioremap_cached(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED) +#define ioremap_cache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED) #define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC) #define iounmap __arm_iounmap diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 9ecccc8..8756e4b 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -100,23 +100,19 @@ #define TASK_UNMAPPED_BASE UL(0x00000000) #endif -#ifndef PHYS_OFFSET -#define PHYS_OFFSET UL(CONFIG_DRAM_BASE) -#endif - #ifndef END_MEM #define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE) #endif #ifndef PAGE_OFFSET -#define PAGE_OFFSET (PHYS_OFFSET) +#define PAGE_OFFSET PLAT_PHYS_OFFSET #endif /* * The module can be at any place in ram in nommu mode. */ #define MODULES_END (END_MEM) -#define MODULES_VADDR (PHYS_OFFSET) +#define MODULES_VADDR PAGE_OFFSET #define XIP_VIRT_ADDR(physaddr) (physaddr) @@ -157,6 +153,16 @@ #endif #define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1) +/* + * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical + * memory. This is used for XIP and NoMMU kernels, or by kernels which + * have their own mach/memory.h. Assembly code must always use + * PLAT_PHYS_OFFSET and not PHYS_OFFSET. + */ +#ifndef PLAT_PHYS_OFFSET +#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET) +#endif + #ifndef __ASSEMBLY__ /* @@ -239,6 +245,8 @@ static inline unsigned long __phys_to_virt(phys_addr_t x) #else +#define PHYS_OFFSET PLAT_PHYS_OFFSET + static inline phys_addr_t __virt_to_phys(unsigned long x) { return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET; @@ -251,17 +259,6 @@ static inline unsigned long __phys_to_virt(phys_addr_t x) #endif #endif -#endif /* __ASSEMBLY__ */ - -#ifndef PHYS_OFFSET -#ifdef PLAT_PHYS_OFFSET -#define PHYS_OFFSET PLAT_PHYS_OFFSET -#else -#define PHYS_OFFSET UL(CONFIG_PHYS_OFFSET) -#endif -#endif - -#ifndef __ASSEMBLY__ /* * PFNs are used to describe any physical page; this means @@ -350,7 +347,8 @@ static inline __deprecated void *bus_to_virt(unsigned long x) #define ARCH_PFN_OFFSET PHYS_PFN_OFFSET #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) -#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) +#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \ + && pfn_valid(__pa(kaddr) >> PAGE_SHIFT) ) #endif diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index be956db..1571d12 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -61,7 +61,7 @@ extern void __pgd_error(const char *file, int line, pgd_t); * mapping to be mapped at. This is particularly important for * non-high vector CPUs. 
*/ -#define FIRST_USER_ADDRESS PAGE_SIZE +#define FIRST_USER_ADDRESS (PAGE_SIZE * 2) /* * Use TASK_SIZE as the ceiling argument for free_pgtables() and diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 141baa3..acabef1 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -15,7 +15,7 @@ #include <uapi/asm/unistd.h> -#define __NR_syscalls (380) +#define __NR_syscalls (384) #define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0) #define __ARCH_WANT_STAT64 diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h index 75579a9..3759cac 100644 --- a/arch/arm/include/asm/xen/page.h +++ b/arch/arm/include/asm/xen/page.h @@ -117,6 +117,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) return __set_phys_to_machine(pfn, mfn); } -#define xen_remap(cookie, size) ioremap_cached((cookie), (size)); +#define xen_remap(cookie, size) ioremap_cache((cookie), (size)); #endif /* _ASM_ARM_XEN_PAGE_H */ diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h index af33b44..fb5584d 100644 --- a/arch/arm/include/uapi/asm/unistd.h +++ b/arch/arm/include/uapi/asm/unistd.h @@ -406,6 +406,8 @@ #define __NR_process_vm_writev (__NR_SYSCALL_BASE+377) #define __NR_kcmp (__NR_SYSCALL_BASE+378) #define __NR_finit_module (__NR_SYSCALL_BASE+379) +#define __NR_sched_setattr (__NR_SYSCALL_BASE+380) +#define __NR_sched_getattr (__NR_SYSCALL_BASE+381) /* * This may need to be greater than __NR_last_syscall+1 in order to diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index c6ca7e3..166e945 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S @@ -389,6 +389,8 @@ CALL(sys_process_vm_writev) CALL(sys_kcmp) CALL(sys_finit_module) +/* 380 */ CALL(sys_sched_setattr) + CALL(sys_sched_getattr) #ifndef syscalls_counted .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls #define syscalls_counted diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c index 739c3df..34d5fd5 100644 --- a/arch/arm/kernel/devtree.c +++ b/arch/arm/kernel/devtree.c @@ -171,7 +171,7 @@ void __init arm_dt_init_cpu_maps(void) bool arch_match_cpu_phys_id(int cpu, u64 phys_id) { - return (phys_id & MPIDR_HWID_BITMASK) == cpu_logical_map(cpu); + return phys_id == cpu_logical_map(cpu); } static const void * __init arch_get_next_mach(const char *const **match) diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index 14235ba..716249c 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -68,7 +68,7 @@ ENTRY(stext) #ifdef CONFIG_ARM_MPU /* Calculate the size of a region covering just the kernel */ - ldr r5, =PHYS_OFFSET @ Region start: PHYS_OFFSET + ldr r5, =PLAT_PHYS_OFFSET @ Region start: PHYS_OFFSET ldr r6, =(_end) @ Cover whole kernel sub r6, r6, r5 @ Minimum size of region to map clz r6, r6 @ Region size must be 2^N... 
@@ -213,7 +213,7 @@ ENTRY(__setup_mpu) set_region_nr r0, #MPU_RAM_REGION isb /* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */ - ldr r0, =PHYS_OFFSET @ RAM starts at PHYS_OFFSET + ldr r0, =PLAT_PHYS_OFFSET @ RAM starts at PHYS_OFFSET ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL) setup_region r0, r5, r6, MPU_DATA_SIDE @ PHYS_OFFSET, shared, enabled diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 11d59b3..32f317e 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -110,7 +110,7 @@ ENTRY(stext) sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET) add r8, r8, r4 @ PHYS_OFFSET #else - ldr r8, =PHYS_OFFSET @ always constant in this case + ldr r8, =PLAT_PHYS_OFFSET @ always constant in this case #endif /* diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 57221e3..f0d180d 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -14,11 +14,12 @@ #include <asm/pgalloc.h> #include <asm/mmu_context.h> #include <asm/cacheflush.h> +#include <asm/fncpy.h> #include <asm/mach-types.h> #include <asm/smp_plat.h> #include <asm/system_misc.h> -extern const unsigned char relocate_new_kernel[]; +extern void relocate_new_kernel(void); extern const unsigned int relocate_new_kernel_size; extern unsigned long kexec_start_address; @@ -142,6 +143,8 @@ void machine_kexec(struct kimage *image) { unsigned long page_list; unsigned long reboot_code_buffer_phys; + unsigned long reboot_entry = (unsigned long)relocate_new_kernel; + unsigned long reboot_entry_phys; void *reboot_code_buffer; /* @@ -168,16 +171,16 @@ void machine_kexec(struct kimage *image) /* copy our kernel relocation code to the control code page */ - memcpy(reboot_code_buffer, - relocate_new_kernel, relocate_new_kernel_size); + reboot_entry = fncpy(reboot_code_buffer, + reboot_entry, + relocate_new_kernel_size); + reboot_entry_phys = (unsigned long)reboot_entry + + (reboot_code_buffer_phys - (unsigned long)reboot_code_buffer); - - flush_icache_range((unsigned long) reboot_code_buffer, - (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE); printk(KERN_INFO "Bye!\n"); if (kexec_reinit) kexec_reinit(); - soft_restart(reboot_code_buffer_phys); + soft_restart(reboot_entry_phys); } diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index bc3f2ef..789d846 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -99,10 +99,6 @@ int armpmu_event_set_period(struct perf_event *event) s64 period = hwc->sample_period; int ret = 0; - /* The period may have been changed by PERF_EVENT_IOC_PERIOD */ - if (unlikely(period != hwc->last_period)) - left = period - (hwc->last_period - left); - if (unlikely(left <= -period)) { left = period; local64_set(&hwc->period_left, left); diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index d85055c..20d553c 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -254,7 +254,7 @@ static int probe_current_pmu(struct arm_pmu *pmu) static int cpu_pmu_device_probe(struct platform_device *pdev) { const struct of_device_id *of_id; - int (*init_fn)(struct arm_pmu *); + const int (*init_fn)(struct arm_pmu *); struct device_node *node = pdev->dev.of_node; struct arm_pmu *pmu; int ret = -ENODEV; diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 94f6b05..92f7b15 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -404,6 +404,7 @@ EXPORT_SYMBOL(dump_fpu); unsigned long get_wchan(struct 
task_struct *p) { struct stackframe frame; + unsigned long stack_page; int count = 0; if (!p || p == current || p->state == TASK_RUNNING) return 0; @@ -412,9 +413,11 @@ unsigned long get_wchan(struct task_struct *p) frame.sp = thread_saved_sp(p); frame.lr = 0; /* recovered from the stack */ frame.pc = thread_saved_pc(p); + stack_page = (unsigned long)task_stack_page(p); do { - int ret = unwind_frame(&frame); - if (ret < 0) + if (frame.sp < stack_page || + frame.sp >= stack_page + THREAD_SIZE || + unwind_frame(&frame) < 0) return 0; if (!in_sched_functions(frame.pc)) return frame.pc; diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S index d0cdedf..9585896 100644 --- a/arch/arm/kernel/relocate_kernel.S +++ b/arch/arm/kernel/relocate_kernel.S @@ -2,10 +2,12 @@ * relocate_kernel.S - put the kernel image in place to boot */ +#include <linux/linkage.h> #include <asm/kexec.h> - .globl relocate_new_kernel -relocate_new_kernel: + .align 3 /* not needed for this code, but keeps fncpy() happy */ + +ENTRY(relocate_new_kernel) ldr r0,kexec_indirection_page ldr r1,kexec_start_address @@ -79,6 +81,8 @@ kexec_mach_type: kexec_boot_atags: .long 0x0 +ENDPROC(relocate_new_kernel) + relocate_new_kernel_end: .globl relocate_new_kernel_size diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 6a1b8a8..987a7f5 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -873,8 +873,6 @@ void __init setup_arch(char **cmdline_p) machine_desc = mdesc; machine_name = mdesc->name; - setup_dma_zone(mdesc); - if (mdesc->reboot_mode != REBOOT_HARD) reboot_mode = mdesc->reboot_mode; @@ -892,6 +890,7 @@ void __init setup_arch(char **cmdline_p) sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); early_paging_init(mdesc, lookup_processor_type(read_cpuid_id())); + setup_dma_zone(mdesc); sanity_check_meminfo(); arm_memblock_init(&meminfo, mdesc); diff --git a/arch/arm/kernel/sigreturn_codes.S b/arch/arm/kernel/sigreturn_codes.S index 3c5d0f2..b84d0cb 100644 --- a/arch/arm/kernel/sigreturn_codes.S +++ b/arch/arm/kernel/sigreturn_codes.S @@ -30,6 +30,27 @@ * snippets. */ +/* + * In CPU_THUMBONLY case kernel arm opcodes are not allowed. + * Note in this case codes skips those instructions but it uses .org + * directive to keep correct layout of sigreturn_codes array. + */ +#ifndef CONFIG_CPU_THUMBONLY +#define ARM_OK(code...) code +#else +#define ARM_OK(code...) 
+#endif + + .macro arm_slot n + .org sigreturn_codes + 12 * (\n) +ARM_OK( .arm ) + .endm + + .macro thumb_slot n + .org sigreturn_codes + 12 * (\n) + 8 + .thumb + .endm + #if __LINUX_ARM_ARCH__ <= 4 /* * Note we manually set minimally required arch that supports @@ -45,26 +66,27 @@ .global sigreturn_codes .type sigreturn_codes, #object - .arm + .align sigreturn_codes: /* ARM sigreturn syscall code snippet */ - mov r7, #(__NR_sigreturn - __NR_SYSCALL_BASE) - swi #(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE) + arm_slot 0 +ARM_OK( mov r7, #(__NR_sigreturn - __NR_SYSCALL_BASE) ) +ARM_OK( swi #(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE) ) /* Thumb sigreturn syscall code snippet */ - .thumb + thumb_slot 0 movs r7, #(__NR_sigreturn - __NR_SYSCALL_BASE) swi #0 /* ARM sigreturn_rt syscall code snippet */ - .arm - mov r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE) - swi #(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE) + arm_slot 1 +ARM_OK( mov r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE) ) +ARM_OK( swi #(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE) ) /* Thumb sigreturn_rt syscall code snippet */ - .thumb + thumb_slot 1 movs r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE) swi #0 @@ -74,7 +96,7 @@ sigreturn_codes: * it is thumb case or not, so we need additional * word after real last entry. */ - .arm + arm_slot 2 .space 4 .size sigreturn_codes, . - sigreturn_codes diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index 00f79e5..af4e8c8 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -31,7 +31,7 @@ int notrace unwind_frame(struct stackframe *frame) high = ALIGN(low, THREAD_SIZE); /* check current frame pointer is within bounds */ - if (fp < (low + 12) || fp + 4 >= high) + if (fp < low + 12 || fp > high - 4) return -EINVAL; /* restore the registers from the stack frame */ diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index dbf0923..4636d56 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -36,7 +36,13 @@ #include <asm/system_misc.h> #include <asm/opcodes.h> -static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; +static const char *handler[]= { + "prefetch abort", + "data abort", + "address exception", + "interrupt", + "undefined instruction", +}; void *vectors_page; @@ -425,9 +431,10 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) instr2 = __mem_to_opcode_thumb16(instr2); instr = __opcode_thumb32_compose(instr, instr2); } - } else if (get_user(instr, (u32 __user *)pc)) { + } else { + if (get_user(instr, (u32 __user *)pc)) + goto die_sig; instr = __mem_to_opcode_arm(instr); - goto die_sig; } if (call_undef_hook(regs, instr) == 0) @@ -509,9 +516,10 @@ static inline int __do_cache_op(unsigned long start, unsigned long end) { int ret; - unsigned long chunk = PAGE_SIZE; do { + unsigned long chunk = min(PAGE_SIZE, end - start); + if (signal_pending(current)) { struct thread_info *ti = current_thread_info(); diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 2a700e0..b18165c 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -17,6 +17,7 @@ */ #include <linux/cpu.h> +#include <linux/cpu_pm.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/kvm_host.h> @@ -853,6 +854,33 @@ static struct notifier_block hyp_init_cpu_nb = { .notifier_call = hyp_init_cpu_notify, }; +#ifdef CONFIG_CPU_PM +static int hyp_init_cpu_pm_notifier(struct notifier_block *self, + unsigned long cmd, + void *v) +{ + if (cmd == CPU_PM_EXIT) { + cpu_init_hyp_mode(NULL); + 
return NOTIFY_OK; + } + + return NOTIFY_DONE; +} + +static struct notifier_block hyp_init_cpu_pm_nb = { + .notifier_call = hyp_init_cpu_pm_notifier, +}; + +static void __init hyp_cpu_pm_init(void) +{ + cpu_pm_register_notifier(&hyp_init_cpu_pm_nb); +} +#else +static inline void hyp_cpu_pm_init(void) +{ +} +#endif + /** * Inits Hyp-mode on all online CPUs */ @@ -1013,6 +1041,8 @@ int kvm_arch_init(void *opaque) goto out_err; } + hyp_cpu_pm_init(); + kvm_coproc_table_init(); return 0; out_err: diff --git a/arch/arm/lib/delay-loop.S b/arch/arm/lib/delay-loop.S index 36b668d..bc1033b 100644 --- a/arch/arm/lib/delay-loop.S +++ b/arch/arm/lib/delay-loop.S @@ -40,6 +40,7 @@ ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0x7fffff06 /* * loops = r0 * HZ * loops_per_jiffy / 1000000 */ + .align 3 @ Delay routine ENTRY(__loop_delay) diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c index f607deb..bc7b363 100644 --- a/arch/arm/mach-at91/at91rm9200_time.c +++ b/arch/arm/mach-at91/at91rm9200_time.c @@ -174,7 +174,6 @@ clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev) static struct clock_event_device clkevt = { .name = "at91_tick", .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .shift = 32, .rating = 150, .set_next_event = clkevt32k_next_event, .set_mode = clkevt32k_mode, @@ -265,11 +264,9 @@ void __init at91rm9200_timer_init(void) at91_st_write(AT91_ST_RTMR, 1); /* Setup timer clockevent, with minimum of two ticks (important!!) */ - clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift); - clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt); - clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1; clkevt.cpumask = cpumask_of(0); - clockevents_register_device(&clkevt); + clockevents_config_and_register(&clkevt, AT91_SLOW_CLOCK, + 2, AT91_ST_ALMV); /* register clocksource */ clocksource_register_hz(&clk32k, AT91_SLOW_CLOCK); diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h index 3ed190c..c5101dc 100644 --- a/arch/arm/mach-at91/pm.h +++ b/arch/arm/mach-at91/pm.h @@ -16,7 +16,11 @@ #include <mach/at91_ramc.h> #include <mach/at91rm9200_sdramc.h> +#ifdef CONFIG_PM extern void at91_pm_set_standby(void (*at91_standby)(void)); +#else +static inline void at91_pm_set_standby(void (*at91_standby)(void)) { } +#endif /* * The AT91RM9200 goes into self-refresh mode with this command, and will diff --git a/arch/arm/mach-at91/sama5d3.c b/arch/arm/mach-at91/sama5d3.c index 3ea8642..a28873f 100644 --- a/arch/arm/mach-at91/sama5d3.c +++ b/arch/arm/mach-at91/sama5d3.c @@ -95,19 +95,19 @@ static struct clk twi0_clk = { .name = "twi0_clk", .pid = SAMA5D3_ID_TWI0, .type = CLK_TYPE_PERIPHERAL, - .div = AT91_PMC_PCR_DIV2, + .div = AT91_PMC_PCR_DIV8, }; static struct clk twi1_clk = { .name = "twi1_clk", .pid = SAMA5D3_ID_TWI1, .type = CLK_TYPE_PERIPHERAL, - .div = AT91_PMC_PCR_DIV2, + .div = AT91_PMC_PCR_DIV8, }; static struct clk twi2_clk = { .name = "twi2_clk", .pid = SAMA5D3_ID_TWI2, .type = CLK_TYPE_PERIPHERAL, - .div = AT91_PMC_PCR_DIV2, + .div = AT91_PMC_PCR_DIV8, }; static struct clk mmc0_clk = { .name = "mci0_clk", diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig index 9fe6d88..b1aa6a9 100644 --- a/arch/arm/mach-bcm/Kconfig +++ b/arch/arm/mach-bcm/Kconfig @@ -25,6 +25,7 @@ config ARCH_BCM_MOBILE select TICK_ONESHOT select CACHE_L2X0 select HAVE_ARM_ARCH_TIMER + select PINCTRL help This enables support for systems based on Broadcom mobile SoCs. 
It currently supports the 'BCM281XX' family, which includes diff --git a/arch/arm/mach-clps711x/devices.c b/arch/arm/mach-clps711x/devices.c index fb77d14..2001488 100644 --- a/arch/arm/mach-clps711x/devices.c +++ b/arch/arm/mach-clps711x/devices.c @@ -61,8 +61,29 @@ static void __init clps711x_add_syscon(void) &clps711x_syscon_res[i], 1); } +static const struct resource clps711x_uart1_res[] __initconst = { + DEFINE_RES_MEM(CLPS711X_PHYS_BASE + UARTDR1, SZ_128), + DEFINE_RES_IRQ(IRQ_UTXINT1), + DEFINE_RES_IRQ(IRQ_URXINT1), +}; + +static const struct resource clps711x_uart2_res[] __initconst = { + DEFINE_RES_MEM(CLPS711X_PHYS_BASE + UARTDR2, SZ_128), + DEFINE_RES_IRQ(IRQ_UTXINT2), + DEFINE_RES_IRQ(IRQ_URXINT2), +}; + +static void __init clps711x_add_uart(void) +{ + platform_device_register_simple("clps711x-uart", 0, clps711x_uart1_res, + ARRAY_SIZE(clps711x_uart1_res)); + platform_device_register_simple("clps711x-uart", 1, clps711x_uart2_res, + ARRAY_SIZE(clps711x_uart2_res)); +}; + void __init clps711x_devices_init(void) { clps711x_add_gpio(); clps711x_add_syscon(); + clps711x_add_uart(); } diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c index c46eccb..78829c5 100644 --- a/arch/arm/mach-davinci/devices-da8xx.c +++ b/arch/arm/mach-davinci/devices-da8xx.c @@ -487,7 +487,7 @@ int __init da8xx_register_emac(void) static struct resource da830_mcasp1_resources[] = { { - .name = "mcasp1", + .name = "mpu", .start = DAVINCI_DA830_MCASP1_REG_BASE, .end = DAVINCI_DA830_MCASP1_REG_BASE + (SZ_1K * 12) - 1, .flags = IORESOURCE_MEM, @@ -515,7 +515,7 @@ static struct platform_device da830_mcasp1_device = { static struct resource da850_mcasp_resources[] = { { - .name = "mcasp", + .name = "mpu", .start = DAVINCI_DA8XX_MCASP0_REG_BASE, .end = DAVINCI_DA8XX_MCASP0_REG_BASE + (SZ_1K * 12) - 1, .flags = IORESOURCE_MEM, diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c index ef9ff1f..6117fc6 100644 --- a/arch/arm/mach-davinci/dm355.c +++ b/arch/arm/mach-davinci/dm355.c @@ -641,6 +641,7 @@ static struct platform_device dm355_edma_device = { static struct resource dm355_asp1_resources[] = { { + .name = "mpu", .start = DAVINCI_ASP1_BASE, .end = DAVINCI_ASP1_BASE + SZ_8K - 1, .flags = IORESOURCE_MEM, @@ -906,7 +907,7 @@ static struct davinci_gpio_platform_data dm355_gpio_platform_data = { int __init dm355_gpio_register(void) { return davinci_gpio_register(dm355_gpio_resources, - sizeof(dm355_gpio_resources), + ARRAY_SIZE(dm355_gpio_resources), &dm355_gpio_platform_data); } /*----------------------------------------------------------------------*/ diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c index 1511a06..d7c6f85 100644 --- a/arch/arm/mach-davinci/dm365.c +++ b/arch/arm/mach-davinci/dm365.c @@ -720,7 +720,7 @@ static struct davinci_gpio_platform_data dm365_gpio_platform_data = { int __init dm365_gpio_register(void) { return davinci_gpio_register(dm365_gpio_resources, - sizeof(dm365_gpio_resources), + ARRAY_SIZE(dm365_gpio_resources), &dm365_gpio_platform_data); } @@ -942,6 +942,7 @@ static struct platform_device dm365_edma_device = { static struct resource dm365_asp_resources[] = { { + .name = "mpu", .start = DAVINCI_DM365_ASP0_BASE, .end = DAVINCI_DM365_ASP0_BASE + SZ_8K - 1, .flags = IORESOURCE_MEM, diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c index 143a321..3ce4799 100644 --- a/arch/arm/mach-davinci/dm644x.c +++ b/arch/arm/mach-davinci/dm644x.c @@ -572,6 +572,7 @@ static struct 
platform_device dm644x_edma_device = { /* DM6446 EVM uses ASP0; line-out is a pair of RCA jacks */ static struct resource dm644x_asp_resources[] = { { + .name = "mpu", .start = DAVINCI_ASP0_BASE, .end = DAVINCI_ASP0_BASE + SZ_8K - 1, .flags = IORESOURCE_MEM, @@ -792,7 +793,7 @@ static struct davinci_gpio_platform_data dm644_gpio_platform_data = { int __init dm644x_gpio_register(void) { return davinci_gpio_register(dm644_gpio_resources, - sizeof(dm644_gpio_resources), + ARRAY_SIZE(dm644_gpio_resources), &dm644_gpio_platform_data); } /*----------------------------------------------------------------------*/ diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c index 2a73f29..0e81fea65 100644 --- a/arch/arm/mach-davinci/dm646x.c +++ b/arch/arm/mach-davinci/dm646x.c @@ -621,7 +621,7 @@ static struct platform_device dm646x_edma_device = { static struct resource dm646x_mcasp0_resources[] = { { - .name = "mcasp0", + .name = "mpu", .start = DAVINCI_DM646X_MCASP0_REG_BASE, .end = DAVINCI_DM646X_MCASP0_REG_BASE + (SZ_1K << 1) - 1, .flags = IORESOURCE_MEM, @@ -641,7 +641,7 @@ static struct resource dm646x_mcasp0_resources[] = { static struct resource dm646x_mcasp1_resources[] = { { - .name = "mcasp1", + .name = "mpu", .start = DAVINCI_DM646X_MCASP1_REG_BASE, .end = DAVINCI_DM646X_MCASP1_REG_BASE + (SZ_1K << 1) - 1, .flags = IORESOURCE_MEM, @@ -769,7 +769,7 @@ static struct davinci_gpio_platform_data dm646x_gpio_platform_data = { int __init dm646x_gpio_register(void) { return davinci_gpio_register(dm646x_gpio_resources, - sizeof(dm646x_gpio_resources), + ARRAY_SIZE(dm646x_gpio_resources), &dm646x_gpio_platform_data); } /*----------------------------------------------------------------------*/ diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c index 2739ca2..e009168 100644 --- a/arch/arm/mach-footbridge/common.c +++ b/arch/arm/mach-footbridge/common.c @@ -15,6 +15,7 @@ #include <linux/init.h> #include <linux/io.h> #include <linux/spinlock.h> +#include <video/vga.h> #include <asm/pgtable.h> #include <asm/page.h> @@ -196,6 +197,8 @@ void __init footbridge_map_io(void) iotable_init(ebsa285_host_io_desc, ARRAY_SIZE(ebsa285_host_io_desc)); pci_map_io_early(__phys_to_pfn(DC21285_PCI_IO)); } + + vga_base = PCIMEM_BASE; } void footbridge_restart(enum reboot_mode mode, const char *cmd) diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c index 9ee78f7..782f6c7 100644 --- a/arch/arm/mach-footbridge/dc21285-timer.c +++ b/arch/arm/mach-footbridge/dc21285-timer.c @@ -96,11 +96,12 @@ static struct irqaction footbridge_timer_irq = { void __init footbridge_timer_init(void) { struct clock_event_device *ce = &ckevt_dc21285; + unsigned rate = DIV_ROUND_CLOSEST(mem_fclk_21285, 16); - clocksource_register_hz(&cksrc_dc21285, (mem_fclk_21285 + 8) / 16); + clocksource_register_hz(&cksrc_dc21285, rate); setup_irq(ce->irq, &footbridge_timer_irq); ce->cpumask = cpumask_of(smp_processor_id()); - clockevents_config_and_register(ce, mem_fclk_21285, 0x4, 0xffffff); + clockevents_config_and_register(ce, rate, 0x4, 0xffffff); } diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c index 3490a24..7c2fdae 100644 --- a/arch/arm/mach-footbridge/dc21285.c +++ b/arch/arm/mach-footbridge/dc21285.c @@ -18,7 +18,6 @@ #include <linux/irq.h> #include <linux/io.h> #include <linux/spinlock.h> -#include <video/vga.h> #include <asm/irq.h> #include <asm/mach/pci.h> @@ -291,7 +290,6 @@ void __init dc21285_preinit(void) int 
cfn_mode; pcibios_min_mem = 0x81000000; - vga_base = PCIMEM_BASE; mem_size = (unsigned int)high_memory - PAGE_OFFSET; for (mem_mask = 0x00100000; mem_mask < 0x10000000; mem_mask <<= 1) diff --git a/arch/arm/mach-footbridge/ebsa285.c b/arch/arm/mach-footbridge/ebsa285.c index b082435..1a7235f 100644 --- a/arch/arm/mach-footbridge/ebsa285.c +++ b/arch/arm/mach-footbridge/ebsa285.c @@ -30,21 +30,24 @@ static const struct { const char *name; const char *trigger; } ebsa285_leds[] = { - { "ebsa285:amber", "heartbeat", }, - { "ebsa285:green", "cpu0", }, + { "ebsa285:amber", "cpu0", }, + { "ebsa285:green", "heartbeat", }, { "ebsa285:red",}, }; +static unsigned char hw_led_state; + static void ebsa285_led_set(struct led_classdev *cdev, enum led_brightness b) { struct ebsa285_led *led = container_of(cdev, struct ebsa285_led, cdev); - if (b != LED_OFF) - *XBUS_LEDS |= led->mask; + if (b == LED_OFF) + hw_led_state |= led->mask; else - *XBUS_LEDS &= ~led->mask; + hw_led_state &= ~led->mask; + *XBUS_LEDS = hw_led_state; } static enum led_brightness ebsa285_led_get(struct led_classdev *cdev) @@ -52,18 +55,19 @@ static enum led_brightness ebsa285_led_get(struct led_classdev *cdev) struct ebsa285_led *led = container_of(cdev, struct ebsa285_led, cdev); - return (*XBUS_LEDS & led->mask) ? LED_FULL : LED_OFF; + return hw_led_state & led->mask ? LED_OFF : LED_FULL; } static int __init ebsa285_leds_init(void) { int i; - if (machine_is_ebsa285()) + if (!machine_is_ebsa285()) return -ENODEV; - /* 3 LEDS All ON */ - *XBUS_LEDS |= XBUS_LED_AMBER | XBUS_LED_GREEN | XBUS_LED_RED; + /* 3 LEDS all off */ + hw_led_state = XBUS_LED_AMBER | XBUS_LED_GREEN | XBUS_LED_RED; + *XBUS_LEDS = hw_led_state; for (i = 0; i < ARRAY_SIZE(ebsa285_leds); i++) { struct ebsa285_led *led; diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c index b3d7e56..c7de89b 100644 --- a/arch/arm/mach-highbank/highbank.c +++ b/arch/arm/mach-highbank/highbank.c @@ -17,12 +17,15 @@ #include <linux/clkdev.h> #include <linux/clocksource.h> #include <linux/dma-mapping.h> +#include <linux/input.h> #include <linux/io.h> #include <linux/irqchip.h> +#include <linux/mailbox.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/of_address.h> +#include <linux/reboot.h> #include <linux/amba/bus.h> #include <linux/platform_device.h> @@ -50,6 +53,7 @@ static void __init highbank_scu_map_io(void) static void highbank_l2x0_disable(void) { + outer_flush_all(); /* Disable PL310 L2 Cache controller */ highbank_smc1(0x102, 0x0); } @@ -130,6 +134,24 @@ static struct platform_device highbank_cpuidle_device = { .name = "cpuidle-calxeda", }; +static int hb_keys_notifier(struct notifier_block *nb, unsigned long event, void *data) +{ + u32 key = *(u32 *)data; + + if (event != 0x1000) + return 0; + + if (key == KEY_POWER) + orderly_poweroff(false); + else if (key == 0xffff) + ctrl_alt_del(); + + return 0; +} +static struct notifier_block hb_keys_nb = { + .notifier_call = hb_keys_notifier, +}; + static void __init highbank_init(void) { struct device_node *np; @@ -145,6 +167,8 @@ static void __init highbank_init(void) bus_register_notifier(&platform_bus_type, &highbank_platform_nb); bus_register_notifier(&amba_bustype, &highbank_amba_nb); + pl320_ipc_register_notifier(&hb_keys_nb); + of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); if (psci_ops.cpu_suspend) diff --git a/arch/arm/mach-ks8695/include/mach/gpio.h b/arch/arm/mach-ks8695/include/mach/gpio.h deleted file mode 100644 index 
f5fda36..0000000 --- a/arch/arm/mach-ks8695/include/mach/gpio.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * arch/arm/mach-ks8695/include/mach/gpio.h - * - * Copyright (C) 2006 Andrew Victor - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef __ASM_ARCH_GPIO_H_ -#define __ASM_ARCH_GPIO_H_ - -/* - * Map IRQ number to GPIO line. - */ -extern int irq_to_gpio(unsigned int irq); - -#endif diff --git a/arch/arm/mach-lpc32xx/include/mach/gpio-lpc32xx.h b/arch/arm/mach-lpc32xx/include/mach/gpio-lpc32xx.h deleted file mode 100644 index a544e96..0000000 --- a/arch/arm/mach-lpc32xx/include/mach/gpio-lpc32xx.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Author: Kevin Wells <kevin.wells@nxp.com> - * - * Copyright (C) 2010 NXP Semiconductors - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __MACH_GPIO_LPC32XX_H -#define __MACH_GPIO_LPC32XX_H - -/* - * Note! - * Muxed GP pins need to be setup to the GP state in the board level - * code prior to using this driver. - * GPI pins : 28xP3 group - * GPO pins : 24xP3 group - * GPIO pins: 8xP0 group, 24xP1 group, 13xP2 group, 6xP3 group - */ - -#define LPC32XX_GPIO_P0_MAX 8 -#define LPC32XX_GPIO_P1_MAX 24 -#define LPC32XX_GPIO_P2_MAX 13 -#define LPC32XX_GPIO_P3_MAX 6 -#define LPC32XX_GPI_P3_MAX 29 -#define LPC32XX_GPO_P3_MAX 24 - -#define LPC32XX_GPIO_P0_GRP 0 -#define LPC32XX_GPIO_P1_GRP (LPC32XX_GPIO_P0_GRP + LPC32XX_GPIO_P0_MAX) -#define LPC32XX_GPIO_P2_GRP (LPC32XX_GPIO_P1_GRP + LPC32XX_GPIO_P1_MAX) -#define LPC32XX_GPIO_P3_GRP (LPC32XX_GPIO_P2_GRP + LPC32XX_GPIO_P2_MAX) -#define LPC32XX_GPI_P3_GRP (LPC32XX_GPIO_P3_GRP + LPC32XX_GPIO_P3_MAX) -#define LPC32XX_GPO_P3_GRP (LPC32XX_GPI_P3_GRP + LPC32XX_GPI_P3_MAX) - -/* - * A specific GPIO can be selected with this macro - * ie, GPIO_05 can be selected with LPC32XX_GPIO(LPC32XX_GPIO_P3_GRP, 5) - * See the LPC32x0 User's guide for GPIO group numbers - */ -#define LPC32XX_GPIO(x, y) ((x) + (y)) - -#endif /* __MACH_GPIO_LPC32XX_H */ diff --git a/arch/arm/mach-lpc32xx/include/mach/gpio.h b/arch/arm/mach-lpc32xx/include/mach/gpio.h deleted file mode 100644 index 0052e7a..0000000 --- a/arch/arm/mach-lpc32xx/include/mach/gpio.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __MACH_GPIO_H -#define __MACH_GPIO_H - -#include "gpio-lpc32xx.h" - -#endif /* __MACH_GPIO_H */ diff --git a/arch/arm/mach-lpc32xx/phy3250.c b/arch/arm/mach-lpc32xx/phy3250.c index e54f87e..34932e0 100644 --- a/arch/arm/mach-lpc32xx/phy3250.c +++ b/arch/arm/mach-lpc32xx/phy3250.c @@ -36,6 +36,7 @@ #include <linux/clk.h> #include <linux/mtd/lpc32xx_slc.h> #include <linux/mtd/lpc32xx_mlc.h> +#include <linux/platform_data/gpio-lpc32xx.h> #include <asm/setup.h> #include <asm/mach-types.h> @@ -44,7 +45,6 @@ #include <mach/hardware.h> #include <mach/platform.h> #include <mach/board.h> -#include <mach/gpio-lpc32xx.h> #include "common.h" /* diff --git a/arch/arm/mach-mv78xx0/include/mach/gpio.h b/arch/arm/mach-mv78xx0/include/mach/gpio.h deleted file 
mode 100644 index 77e1b84..0000000 --- a/arch/arm/mach-mv78xx0/include/mach/gpio.h +++ /dev/null @@ -1,9 +0,0 @@ -/* - * arch/asm-arm/mach-mv78xx0/include/mach/gpio.h - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - */ - -#include <plat/gpio.h> diff --git a/arch/arm/mach-omap1/include/mach/usb.h b/arch/arm/mach-omap1/include/mach/usb.h index 45e5ac7..2c26305 100644 --- a/arch/arm/mach-omap1/include/mach/usb.h +++ b/arch/arm/mach-omap1/include/mach/usb.h @@ -8,43 +8,7 @@ #define is_usb0_device(config) 0 #endif -struct omap_usb_config { - /* Configure drivers according to the connectors on your board: - * - "A" connector (rectagular) - * ... for host/OHCI use, set "register_host". - * - "B" connector (squarish) or "Mini-B" - * ... for device/gadget use, set "register_dev". - * - "Mini-AB" connector (very similar to Mini-B) - * ... for OTG use as device OR host, initialize "otg" - */ - unsigned register_host:1; - unsigned register_dev:1; - u8 otg; /* port number, 1-based: usb1 == 2 */ - - u8 hmc_mode; - - /* implicitly true if otg: host supports remote wakeup? */ - u8 rwc; - - /* signaling pins used to talk to transceiver on usbN: - * 0 == usbN unused - * 2 == usb0-only, using internal transceiver - * 3 == 3 wire bidirectional - * 4 == 4 wire bidirectional - * 6 == 6 wire unidirectional (or TLL) - */ - u8 pins[3]; - - struct platform_device *udc_device; - struct platform_device *ohci_device; - struct platform_device *otg_device; - - u32 (*usb0_init)(unsigned nwires, unsigned is_device); - u32 (*usb1_init)(unsigned nwires); - u32 (*usb2_init)(unsigned nwires, unsigned alt_pingroup); - - int (*ocpi_enable)(void); -}; +#include <linux/platform_data/usb-omap1.h> void omap_otg_init(struct omap_usb_config *config); diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 1f25f3e..adcef40 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -19,11 +19,11 @@ secure-common = omap-smc.o omap-secure.o obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common) obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common) $(secure-common) -obj-$(CONFIG_ARCH_OMAP4) += prm44xx.o $(hwmod-common) $(secure-common) +obj-$(CONFIG_ARCH_OMAP4) += $(hwmod-common) $(secure-common) obj-$(CONFIG_SOC_AM33XX) += irq.o $(hwmod-common) -obj-$(CONFIG_SOC_OMAP5) += prm44xx.o $(hwmod-common) $(secure-common) +obj-$(CONFIG_SOC_OMAP5) += $(hwmod-common) $(secure-common) obj-$(CONFIG_SOC_AM43XX) += $(hwmod-common) $(secure-common) -obj-$(CONFIG_SOC_DRA7XX) += prm44xx.o $(hwmod-common) $(secure-common) +obj-$(CONFIG_SOC_DRA7XX) += $(hwmod-common) $(secure-common) ifneq ($(CONFIG_SND_OMAP_SOC_MCBSP),) obj-y += mcbsp.o diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c index 19f1652..8d972ff1 100644 --- a/arch/arm/mach-omap2/board-generic.c +++ b/arch/arm/mach-omap2/board-generic.c @@ -131,6 +131,24 @@ DT_MACHINE_START(OMAP3_GP_DT, "Generic OMAP3-GP (Flattened Device Tree)") .dt_compat = omap3_gp_boards_compat, .restart = omap3xxx_restart, MACHINE_END + +static const char *am3517_boards_compat[] __initdata = { + "ti,am3517", + NULL, +}; + +DT_MACHINE_START(AM3517_DT, "Generic AM3517 (Flattened Device Tree)") + .reserve = omap_reserve, + .map_io = omap3_map_io, + .init_early = am35xx_init_early, + .init_irq = omap_intc_of_init, + .handle_irq = omap3_intc_handle_irq, + .init_machine = 
omap_generic_init, + .init_late = omap3_init_late, + .init_time = omap3_gptimer_timer_init, + .dt_compat = am3517_boards_compat, + .restart = omap3xxx_restart, +MACHINE_END #endif #ifdef CONFIG_SOC_AM33XX diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c index 4ec8d82..44a59c3 100644 --- a/arch/arm/mach-omap2/board-ldp.c +++ b/arch/arm/mach-omap2/board-ldp.c @@ -242,12 +242,18 @@ static void __init ldp_display_init(void) static int ldp_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio) { + int res; + /* LCD enable GPIO */ ldp_lcd_pdata.enable_gpio = gpio + 7; /* Backlight enable GPIO */ ldp_lcd_pdata.backlight_gpio = gpio + 15; + res = platform_device_register(&ldp_lcd_device); + if (res) + pr_err("Unable to register LCD: %d\n", res); + return 0; } @@ -346,7 +352,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = { static struct platform_device *ldp_devices[] __initdata = { &ldp_gpio_keys_device, - &ldp_lcd_device, }; #ifdef CONFIG_OMAP_MUX diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index f7644fe..e30ef67 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h @@ -299,7 +299,6 @@ struct omap_sdrc_params; extern void omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0, struct omap_sdrc_params *sdrc_cs1); struct omap2_hsmmc_info; -extern int omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers); extern void omap_reserve(void); struct omap_hwmod; diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index a4e536b..4cf1655 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c @@ -32,7 +32,6 @@ #include "soc.h" #include "iomap.h" -#include "mux.h" #include "control.h" #include "display.h" #include "prm.h" @@ -102,35 +101,6 @@ static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initconst = { { "dss_hdmi", "omapdss_hdmi", -1 }, }; -static void __init omap4_tpd12s015_mux_pads(void) -{ - omap_mux_init_signal("hdmi_cec", - OMAP_PIN_INPUT_PULLUP); - omap_mux_init_signal("hdmi_ddc_scl", - OMAP_PIN_INPUT_PULLUP); - omap_mux_init_signal("hdmi_ddc_sda", - OMAP_PIN_INPUT_PULLUP); -} - -static void __init omap4_hdmi_mux_pads(enum omap_hdmi_flags flags) -{ - u32 reg; - u16 control_i2c_1; - - /* - * CONTROL_I2C_1: HDMI_DDC_SDA_PULLUPRESX (bit 28) and - * HDMI_DDC_SCL_PULLUPRESX (bit 24) are set to disable - * internal pull up resistor. 
- */ - if (flags & OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP) { - control_i2c_1 = OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_I2C_1; - reg = omap4_ctrl_pad_readl(control_i2c_1); - reg |= (OMAP4_HDMI_DDC_SDA_PULLUPRESX_MASK | - OMAP4_HDMI_DDC_SCL_PULLUPRESX_MASK); - omap4_ctrl_pad_writel(reg, control_i2c_1); - } -} - static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes) { u32 enable_mask, enable_shift; @@ -164,16 +134,6 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes) return 0; } -int __init omap_hdmi_init(enum omap_hdmi_flags flags) -{ - if (cpu_is_omap44xx()) { - omap4_hdmi_mux_pads(flags); - omap4_tpd12s015_mux_pads(); - } - - return 0; -} - static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask) { if (cpu_is_omap44xx()) diff --git a/arch/arm/mach-omap2/dss-common.c b/arch/arm/mach-omap2/dss-common.c index 365bfd3..dadccc9 100644 --- a/arch/arm/mach-omap2/dss-common.c +++ b/arch/arm/mach-omap2/dss-common.c @@ -223,7 +223,7 @@ void __init omap_4430sdp_display_init_of(void) static struct connector_dvi_platform_data omap3_igep2_dvi_connector_pdata = { .name = "dvi", .source = "tfp410.0", - .i2c_bus_num = 3, + .i2c_bus_num = 2, }; static struct platform_device omap3_igep2_dvi_connector_device = { diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c index 81de562..d24926e 100644 --- a/arch/arm/mach-omap2/gpmc.c +++ b/arch/arm/mach-omap2/gpmc.c @@ -1502,6 +1502,22 @@ static int gpmc_probe_generic_child(struct platform_device *pdev, } /* + * For some GPMC devices we still need to rely on the bootloader + * timings because the devices can be connected via FPGA. So far + * the list is smc91x on the omap2 SDP boards, and 8250 on zooms. + * REVISIT: Add timing support from slls644g.pdf and from the + * lan91c96 manual. + */ + if (of_device_is_compatible(child, "ns16550a") || + of_device_is_compatible(child, "smsc,lan91c94") || + of_device_is_compatible(child, "smsc,lan91c111")) { + dev_warn(&pdev->dev, + "%s using bootloader timings on CS%d\n", + child->name, cs); + goto no_timings; + } + + /* * FIXME: gpmc_cs_request() will map the CS to an arbitary * location in the gpmc address space. 
When booting with * device-tree we want the NOR flash to be mapped to the @@ -1529,6 +1545,7 @@ static int gpmc_probe_generic_child(struct platform_device *pdev, gpmc_read_timings_dt(child, &gpmc_t); gpmc_cs_set_timings(cs, &gpmc_t); +no_timings: if (of_platform_device_create(child, NULL, &pdev->dev)) return 0; @@ -1541,42 +1558,6 @@ err: return ret; } -/* - * REVISIT: Add timing support from slls644g.pdf - */ -static int gpmc_probe_8250(struct platform_device *pdev, - struct device_node *child) -{ - struct resource res; - unsigned long base; - int ret, cs; - - if (of_property_read_u32(child, "reg", &cs) < 0) { - dev_err(&pdev->dev, "%s has no 'reg' property\n", - child->full_name); - return -ENODEV; - } - - if (of_address_to_resource(child, 0, &res) < 0) { - dev_err(&pdev->dev, "%s has malformed 'reg' property\n", - child->full_name); - return -ENODEV; - } - - ret = gpmc_cs_request(cs, resource_size(&res), &base); - if (ret < 0) { - dev_err(&pdev->dev, "cannot request GPMC CS %d\n", cs); - return ret; - } - - if (of_platform_device_create(child, NULL, &pdev->dev)) - return 0; - - dev_err(&pdev->dev, "failed to create gpmc child %s\n", child->name); - - return -ENODEV; -} - static int gpmc_probe_dt(struct platform_device *pdev) { int ret; @@ -1618,10 +1599,9 @@ static int gpmc_probe_dt(struct platform_device *pdev) else if (of_node_cmp(child->name, "onenand") == 0) ret = gpmc_probe_onenand_child(pdev, child); else if (of_node_cmp(child->name, "ethernet") == 0 || - of_node_cmp(child->name, "nor") == 0) + of_node_cmp(child->name, "nor") == 0 || + of_node_cmp(child->name, "uart") == 0) ret = gpmc_probe_generic_child(pdev, child); - else if (of_node_cmp(child->name, "8250") == 0) - ret = gpmc_probe_8250(pdev, child); if (WARN(ret < 0, "%s: probing gpmc child %s failed\n", __func__, child->full_name)) diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h index 8cc7d33..3e97c6c 100644 --- a/arch/arm/mach-omap2/omap-secure.h +++ b/arch/arm/mach-omap2/omap-secure.h @@ -76,6 +76,13 @@ static inline void omap_barrier_reserve_memblock(void) { } #endif +#ifdef CONFIG_SOC_HAS_REALTIME_COUNTER void set_cntfreq(void); +#else +static inline void set_cntfreq(void) +{ +} +#endif + #endif /* __ASSEMBLER__ */ #endif /* OMAP_ARCH_OMAP_SECURE_H */ diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c index 5791143..c0ab9b2 100644 --- a/arch/arm/mach-omap2/omap4-common.c +++ b/arch/arm/mach-omap2/omap4-common.c @@ -35,7 +35,6 @@ #include "iomap.h" #include "common.h" #include "mmc.h" -#include "hsmmc.h" #include "prminst44xx.h" #include "prcm_mpu44xx.h" #include "omap4-sar-layout.h" @@ -163,6 +162,7 @@ void __iomem *omap4_get_l2cache_base(void) static void omap4_l2x0_disable(void) { + outer_flush_all(); /* Disable PL310 L2 Cache controller */ omap_smc1(0x102, 0x0); } @@ -284,59 +284,3 @@ skip_errata_init: omap_wakeupgen_init(); irqchip_init(); } - -#if defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE) -static int omap4_twl6030_hsmmc_late_init(struct device *dev) -{ - int irq = 0; - struct platform_device *pdev = container_of(dev, - struct platform_device, dev); - struct omap_mmc_platform_data *pdata = dev->platform_data; - - /* Setting MMC1 Card detect Irq */ - if (pdev->id == 0) { - irq = twl6030_mmc_card_detect_config(); - if (irq < 0) { - dev_err(dev, "%s: Error card detect config(%d)\n", - __func__, irq); - return irq; - } - pdata->slots[0].card_detect_irq = irq; - pdata->slots[0].card_detect = twl6030_mmc_card_detect; - } - return 
0; -} - -static __init void omap4_twl6030_hsmmc_set_late_init(struct device *dev) -{ - struct omap_mmc_platform_data *pdata; - - /* dev can be null if CONFIG_MMC_OMAP_HS is not set */ - if (!dev) { - pr_err("Failed %s\n", __func__); - return; - } - pdata = dev->platform_data; - pdata->init = omap4_twl6030_hsmmc_late_init; -} - -int __init omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers) -{ - struct omap2_hsmmc_info *c; - - omap_hsmmc_init(controllers); - for (c = controllers; c->mmc; c++) { - /* pdev can be null if CONFIG_MMC_OMAP_HS is not set */ - if (!c->pdev) - continue; - omap4_twl6030_hsmmc_set_late_init(&c->pdev->dev); - } - - return 0; -} -#else -int __init omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers) -{ - return 0; -} -#endif diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index 53f0735..e0a398c 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c @@ -183,6 +183,10 @@ static int omap_device_build_from_dt(struct platform_device *pdev) odbfd_exit1: kfree(hwmods); odbfd_exit: + /* if data/we are at fault.. load up a fail handler */ + if (ret) + pdev->dev.pm_domain = &omap_device_fail_pm_domain; + return ret; } @@ -604,6 +608,19 @@ static int _od_runtime_resume(struct device *dev) return pm_generic_runtime_resume(dev); } + +static int _od_fail_runtime_suspend(struct device *dev) +{ + dev_warn(dev, "%s: FIXME: missing hwmod/omap_dev info\n", __func__); + return -ENODEV; +} + +static int _od_fail_runtime_resume(struct device *dev) +{ + dev_warn(dev, "%s: FIXME: missing hwmod/omap_dev info\n", __func__); + return -ENODEV; +} + #endif #ifdef CONFIG_SUSPEND @@ -657,6 +674,13 @@ static int _od_resume_noirq(struct device *dev) #define _od_resume_noirq NULL #endif +struct dev_pm_domain omap_device_fail_pm_domain = { + .ops = { + SET_RUNTIME_PM_OPS(_od_fail_runtime_suspend, + _od_fail_runtime_resume, NULL) + } +}; + struct dev_pm_domain omap_device_pm_domain = { .ops = { SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume, diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h index 17ca1ae..78c02b3 100644 --- a/arch/arm/mach-omap2/omap_device.h +++ b/arch/arm/mach-omap2/omap_device.h @@ -29,6 +29,7 @@ #include "omap_hwmod.h" extern struct dev_pm_domain omap_device_pm_domain; +extern struct dev_pm_domain omap_device_fail_pm_domain; /* omap_device._state values */ #define OMAP_DEVICE_STATE_UNKNOWN 0 diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index e3f0eca..8a1b5e0 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -399,7 +399,7 @@ static int _set_clockactivity(struct omap_hwmod *oh, u8 clockact, u32 *v) } /** - * _set_softreset: set OCP_SYSCONFIG.CLOCKACTIVITY bits in @v + * _set_softreset: set OCP_SYSCONFIG.SOFTRESET bit in @v * @oh: struct omap_hwmod * * @v: pointer to register contents to modify * @@ -427,6 +427,36 @@ static int _set_softreset(struct omap_hwmod *oh, u32 *v) } /** + * _clear_softreset: clear OCP_SYSCONFIG.SOFTRESET bit in @v + * @oh: struct omap_hwmod * + * @v: pointer to register contents to modify + * + * Clear the SOFTRESET bit in @v for hwmod @oh. Returns -EINVAL upon + * error or 0 upon success. 
+ */ +static int _clear_softreset(struct omap_hwmod *oh, u32 *v) +{ + u32 softrst_mask; + + if (!oh->class->sysc || + !(oh->class->sysc->sysc_flags & SYSC_HAS_SOFTRESET)) + return -EINVAL; + + if (!oh->class->sysc->sysc_fields) { + WARN(1, + "omap_hwmod: %s: sysc_fields absent for sysconfig class\n", + oh->name); + return -EINVAL; + } + + softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift); + + *v &= ~softrst_mask; + + return 0; +} + +/** * _wait_softreset_complete - wait for an OCP softreset to complete * @oh: struct omap_hwmod * to wait on * @@ -785,6 +815,7 @@ static int _init_interface_clks(struct omap_hwmod *oh) pr_warning("omap_hwmod: %s: cannot clk_get interface_clk %s\n", oh->name, os->clk); ret = -EINVAL; + continue; } os->_clk = c; /* @@ -821,6 +852,7 @@ static int _init_opt_clks(struct omap_hwmod *oh) pr_warning("omap_hwmod: %s: cannot clk_get opt_clk %s\n", oh->name, oc->clk); ret = -EINVAL; + continue; } oc->_clk = c; /* @@ -1911,6 +1943,12 @@ static int _ocp_softreset(struct omap_hwmod *oh) ret = _set_softreset(oh, &v); if (ret) goto dis_opt_clks; + + _write_sysconfig(v, oh); + ret = _clear_softreset(oh, &v); + if (ret) + goto dis_opt_clks; + _write_sysconfig(v, oh); if (oh->class->sysc->srst_udelay) @@ -2326,38 +2364,80 @@ static int _shutdown(struct omap_hwmod *oh) return 0; } +static int of_dev_find_hwmod(struct device_node *np, + struct omap_hwmod *oh) +{ + int count, i, res; + const char *p; + + count = of_property_count_strings(np, "ti,hwmods"); + if (count < 1) + return -ENODEV; + + for (i = 0; i < count; i++) { + res = of_property_read_string_index(np, "ti,hwmods", + i, &p); + if (res) + continue; + if (!strcmp(p, oh->name)) { + pr_debug("omap_hwmod: dt %s[%i] uses hwmod %s\n", + np->name, i, oh->name); + return i; + } + } + + return -ENODEV; +} + /** * of_dev_hwmod_lookup - look up needed hwmod from dt blob * @np: struct device_node * * @oh: struct omap_hwmod * + * @index: index of the entry found + * @found: struct device_node * found or NULL * * Parse the dt blob and find out needed hwmod. Recursive function is * implemented to take care hierarchical dt blob parsing. - * Return: The device node on success or NULL on failure. + * Return: Returns 0 on success, -ENODEV when not found. 
*/ -static struct device_node *of_dev_hwmod_lookup(struct device_node *np, - struct omap_hwmod *oh) +static int of_dev_hwmod_lookup(struct device_node *np, + struct omap_hwmod *oh, + int *index, + struct device_node **found) { - struct device_node *np0 = NULL, *np1 = NULL; - const char *p; + struct device_node *np0 = NULL; + int res; + + res = of_dev_find_hwmod(np, oh); + if (res >= 0) { + *found = np; + *index = res; + return 0; + } for_each_child_of_node(np, np0) { - if (of_find_property(np0, "ti,hwmods", NULL)) { - p = of_get_property(np0, "ti,hwmods", NULL); - if (!strcmp(p, oh->name)) - return np0; - np1 = of_dev_hwmod_lookup(np0, oh); - if (np1) - return np1; + struct device_node *fc; + int i; + + res = of_dev_hwmod_lookup(np0, oh, &i, &fc); + if (res == 0) { + *found = fc; + *index = i; + return 0; } } - return NULL; + + *found = NULL; + *index = 0; + + return -ENODEV; } /** * _init_mpu_rt_base - populate the virtual address for a hwmod * @oh: struct omap_hwmod * to locate the virtual address * @data: (unused, caller should pass NULL) + * @index: index of the reg entry iospace in device tree * @np: struct device_node * of the IP block's device node in the DT data * * Cache the virtual address used by the MPU to access this IP block's @@ -2368,7 +2448,7 @@ static struct device_node *of_dev_hwmod_lookup(struct device_node *np, * -ENXIO on absent or invalid register target address space. */ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data, - struct device_node *np) + int index, struct device_node *np) { struct omap_hwmod_addr_space *mem; void __iomem *va_start = NULL; @@ -2390,13 +2470,17 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data, if (!np) return -ENXIO; - va_start = of_iomap(np, oh->mpu_rt_idx); + va_start = of_iomap(np, index + oh->mpu_rt_idx); } else { va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start); } if (!va_start) { - pr_err("omap_hwmod: %s: Could not ioremap\n", oh->name); + if (mem) + pr_err("omap_hwmod: %s: Could not ioremap\n", oh->name); + else + pr_err("omap_hwmod: %s: Missing dt reg%i for %s\n", + oh->name, index, np->full_name); return -ENXIO; } @@ -2422,17 +2506,29 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data, */ static int __init _init(struct omap_hwmod *oh, void *data) { - int r; + int r, index; struct device_node *np = NULL; if (oh->_state != _HWMOD_STATE_REGISTERED) return 0; - if (of_have_populated_dt()) - np = of_dev_hwmod_lookup(of_find_node_by_name(NULL, "ocp"), oh); + if (of_have_populated_dt()) { + struct device_node *bus; + + bus = of_find_node_by_name(NULL, "ocp"); + if (!bus) + return -ENODEV; + + r = of_dev_hwmod_lookup(bus, oh, &index, &np); + if (r) + pr_debug("omap_hwmod: %s missing dt data\n", oh->name); + else if (np && index) + pr_warn("omap_hwmod: %s using broken dt data from %s\n", + oh->name, np->name); + } if (oh->class->sysc) { - r = _init_mpu_rt_base(oh, NULL, np); + r = _init_mpu_rt_base(oh, NULL, index, np); if (r < 0) { WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n", oh->name); @@ -3169,6 +3265,11 @@ int omap_hwmod_softreset(struct omap_hwmod *oh) goto error; _write_sysconfig(v, oh); + ret = _clear_softreset(oh, &v); + if (ret) + goto error; + _write_sysconfig(v, oh); + error: return ret; } diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c index 56cebb0..d23c77f 100644 --- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c +++ 
b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c @@ -796,7 +796,7 @@ struct omap_hwmod omap2xxx_counter_32k_hwmod = { /* gpmc */ static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = { - { .irq = 20 }, + { .irq = 20 + OMAP_INTC_START, }, { .irq = -1 } }; @@ -841,7 +841,7 @@ static struct omap_hwmod_class omap2_rng_hwmod_class = { }; static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = { - { .irq = 52 }, + { .irq = 52 + OMAP_INTC_START, }, { .irq = -1 } }; diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index 9e56fab..4c3b1e6 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -1943,7 +1943,8 @@ static struct omap_hwmod_class_sysconfig omap3xxx_usb_host_hs_sysc = { .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | - SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE), + SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | + SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART), .sysc_fields = &omap_hwmod_sysc_type1, @@ -2021,15 +2022,7 @@ static struct omap_hwmod omap3xxx_usb_host_hs_hwmod = { * hence HWMOD_SWSUP_MSTANDBY */ - /* - * During system boot; If the hwmod framework resets the module - * the module will have smart idle settings; which can lead to deadlock - * (above Errata Id:i660); so, dont reset the module during boot; - * Use HWMOD_INIT_NO_RESET. - */ - - .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY | - HWMOD_INIT_NO_RESET, + .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY, }; /* @@ -2172,7 +2165,7 @@ static struct omap_hwmod_class omap3xxx_gpmc_hwmod_class = { }; static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = { - { .irq = 20 }, + { .irq = 20 + OMAP_INTC_START, }, { .irq = -1 } }; @@ -3006,7 +2999,7 @@ static struct omap_mmu_dev_attr mmu_isp_dev_attr = { static struct omap_hwmod omap3xxx_mmu_isp_hwmod; static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = { - { .irq = 24 }, + { .irq = 24 + OMAP_INTC_START, }, { .irq = -1 } }; @@ -3048,7 +3041,7 @@ static struct omap_mmu_dev_attr mmu_iva_dev_attr = { static struct omap_hwmod omap3xxx_mmu_iva_hwmod; static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = { - { .irq = 28 }, + { .irq = 28 + OMAP_INTC_START, }, { .irq = -1 } }; diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 1e5b12c..3318cae9 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -2937,7 +2937,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_usb_host_hs_sysc = { .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE | - SYSC_HAS_SOFTRESET), + SYSC_HAS_SOFTRESET | SYSC_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART | MSTANDBY_SMART_WKUP), @@ -3001,15 +3001,7 @@ static struct omap_hwmod omap44xx_usb_host_hs_hwmod = { * hence HWMOD_SWSUP_MSTANDBY */ - /* - * During system boot; If the hwmod framework resets the module - * the module will have smart idle settings; which can lead to deadlock - * (above Errata Id:i660); so, dont reset the module during boot; - * Use HWMOD_INIT_NO_RESET. 
- */ - - .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY | - HWMOD_INIT_NO_RESET, + .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY, }; /* diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c index 9e08d699..e297d62 100644 --- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c @@ -1544,7 +1544,8 @@ static struct omap_hwmod_class_sysconfig omap54xx_usb_host_hs_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS | - SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), + SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | + SYSC_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART | MSTANDBY_SMART_WKUP), @@ -1598,15 +1599,7 @@ static struct omap_hwmod omap54xx_usb_host_hs_hwmod = { * hence HWMOD_SWSUP_MSTANDBY */ - /* - * During system boot; If the hwmod framework resets the module - * the module will have smart idle settings; which can lead to deadlock - * (above Errata Id:i660); so, dont reset the module during boot; - * Use HWMOD_INIT_NO_RESET. - */ - - .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY | - HWMOD_INIT_NO_RESET, + .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY, .main_clk = "l3init_60m_fclk", .prcm = { .omap4 = { diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index db32d53..18f333c 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c @@ -1637,7 +1637,7 @@ static struct omap_hwmod dra7xx_uart1_hwmod = { .class = &dra7xx_uart_hwmod_class, .clkdm_name = "l4per_clkdm", .main_clk = "uart1_gfclk_mux", - .flags = HWMOD_SWSUP_SIDLE_ACT, + .flags = HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP2UART1_FLAGS, .prcm = { .omap4 = { .clkctrl_offs = DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET, diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index 10c7145..39f020c 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -139,6 +139,7 @@ struct of_dev_auxdata omap_auxdata_lookup[] __initdata = { static struct pdata_init pdata_quirks[] __initdata = { #ifdef CONFIG_ARCH_OMAP3 + { "nokia,omap3-n900", hsmmc2_internal_input_clk, }, { "nokia,omap3-n9", hsmmc2_internal_input_clk, }, { "nokia,omap3-n950", hsmmc2_internal_input_clk, }, { "isee,omap3-igep0020", omap3_igep0020_legacy_init, }, diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index 93b80e5..1f3770a 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c @@ -120,7 +120,7 @@ static void omap3_save_secure_ram_context(void) * will hang the system. 
*/ pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON); - ret = _omap_save_secure_sram((u32 *) + ret = _omap_save_secure_sram((u32 *)(unsigned long) __pa(omap3_secure_ram_storage)); pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state); /* Following is for error tracking, it should not happen */ diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c index e233dfc..93a2a6e 100644 --- a/arch/arm/mach-omap2/powerdomain.c +++ b/arch/arm/mach-omap2/powerdomain.c @@ -128,7 +128,8 @@ skip_voltdm: for (i = 0; i < pwrdm->banks; i++) pwrdm->ret_mem_off_counter[i] = 0; - arch_pwrdm->pwrdm_wait_transition(pwrdm); + if (arch_pwrdm && arch_pwrdm->pwrdm_wait_transition) + arch_pwrdm->pwrdm_wait_transition(pwrdm); pwrdm->state = pwrdm_read_pwrst(pwrdm); pwrdm->state_counter[pwrdm->state] = 1; diff --git a/arch/arm/mach-omap2/prm44xx_54xx.h b/arch/arm/mach-omap2/prm44xx_54xx.h index 7a97606..8d95aa5 100644 --- a/arch/arm/mach-omap2/prm44xx_54xx.h +++ b/arch/arm/mach-omap2/prm44xx_54xx.h @@ -43,7 +43,7 @@ extern void omap4_prm_vcvp_write(u32 val, u8 offset); extern u32 omap4_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset); #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \ - defined(CONFIG_SOC_DRA7XX) + defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM43XX) void omap44xx_prm_reconfigure_io_chain(void); #else static inline void omap44xx_prm_reconfigure_io_chain(void) diff --git a/arch/arm/mach-pxa/include/mach/lubbock.h b/arch/arm/mach-pxa/include/mach/lubbock.h index 2a086e8..958cd6af 100644 --- a/arch/arm/mach-pxa/include/mach/lubbock.h +++ b/arch/arm/mach-pxa/include/mach/lubbock.h @@ -10,6 +10,8 @@ * published by the Free Software Foundation. */ +#include <mach/irqs.h> + #define LUBBOCK_ETH_PHYS PXA_CS3_PHYS #define LUBBOCK_FPGA_PHYS PXA_CS2_PHYS diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c index 0d5dd64..263b152 100644 --- a/arch/arm/mach-pxa/reset.c +++ b/arch/arm/mach-pxa/reset.c @@ -13,6 +13,7 @@ #include <mach/regs-ost.h> #include <mach/reset.h> +#include <mach/smemc.h> unsigned int reset_status; EXPORT_SYMBOL(reset_status); @@ -81,6 +82,12 @@ static void do_hw_reset(void) writel_relaxed(OSSR_M3, OSSR); /* ... 
in 100 ms */ writel_relaxed(readl_relaxed(OSCR) + 368640, OSMR3); + /* + * SDRAM hangs on watchdog reset on Marvell PXA270 (erratum 71) + * we put SDRAM into self-refresh to prevent that + */ + while (1) + writel_relaxed(MDREFR_SLFRSH, MDREFR); } void pxa_restart(enum reboot_mode mode, const char *cmd) @@ -104,4 +111,3 @@ void pxa_restart(enum reboot_mode mode, const char *cmd) break; } } - diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index 0206b91..ef5557b 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c @@ -425,57 +425,57 @@ static struct platform_device tosa_power_device = { * Tosa Keyboard */ static const uint32_t tosakbd_keymap[] = { - KEY(0, 2, KEY_W), - KEY(0, 6, KEY_K), - KEY(0, 7, KEY_BACKSPACE), - KEY(0, 8, KEY_P), - KEY(1, 1, KEY_Q), - KEY(1, 2, KEY_E), - KEY(1, 3, KEY_T), - KEY(1, 4, KEY_Y), - KEY(1, 6, KEY_O), - KEY(1, 7, KEY_I), - KEY(1, 8, KEY_COMMA), - KEY(2, 1, KEY_A), - KEY(2, 2, KEY_D), - KEY(2, 3, KEY_G), - KEY(2, 4, KEY_U), - KEY(2, 6, KEY_L), - KEY(2, 7, KEY_ENTER), - KEY(2, 8, KEY_DOT), - KEY(3, 1, KEY_Z), - KEY(3, 2, KEY_C), - KEY(3, 3, KEY_V), - KEY(3, 4, KEY_J), - KEY(3, 5, TOSA_KEY_ADDRESSBOOK), - KEY(3, 6, TOSA_KEY_CANCEL), - KEY(3, 7, TOSA_KEY_CENTER), - KEY(3, 8, TOSA_KEY_OK), - KEY(3, 9, KEY_LEFTSHIFT), - KEY(4, 1, KEY_S), - KEY(4, 2, KEY_R), - KEY(4, 3, KEY_B), - KEY(4, 4, KEY_N), - KEY(4, 5, TOSA_KEY_CALENDAR), - KEY(4, 6, TOSA_KEY_HOMEPAGE), - KEY(4, 7, KEY_LEFTCTRL), - KEY(4, 8, TOSA_KEY_LIGHT), - KEY(4, 10, KEY_RIGHTSHIFT), - KEY(5, 1, KEY_TAB), - KEY(5, 2, KEY_SLASH), - KEY(5, 3, KEY_H), - KEY(5, 4, KEY_M), - KEY(5, 5, TOSA_KEY_MENU), - KEY(5, 7, KEY_UP), - KEY(5, 11, TOSA_KEY_FN), - KEY(6, 1, KEY_X), - KEY(6, 2, KEY_F), - KEY(6, 3, KEY_SPACE), - KEY(6, 4, KEY_APOSTROPHE), - KEY(6, 5, TOSA_KEY_MAIL), - KEY(6, 6, KEY_LEFT), - KEY(6, 7, KEY_DOWN), - KEY(6, 8, KEY_RIGHT), + KEY(0, 1, KEY_W), + KEY(0, 5, KEY_K), + KEY(0, 6, KEY_BACKSPACE), + KEY(0, 7, KEY_P), + KEY(1, 0, KEY_Q), + KEY(1, 1, KEY_E), + KEY(1, 2, KEY_T), + KEY(1, 3, KEY_Y), + KEY(1, 5, KEY_O), + KEY(1, 6, KEY_I), + KEY(1, 7, KEY_COMMA), + KEY(2, 0, KEY_A), + KEY(2, 1, KEY_D), + KEY(2, 2, KEY_G), + KEY(2, 3, KEY_U), + KEY(2, 5, KEY_L), + KEY(2, 6, KEY_ENTER), + KEY(2, 7, KEY_DOT), + KEY(3, 0, KEY_Z), + KEY(3, 1, KEY_C), + KEY(3, 2, KEY_V), + KEY(3, 3, KEY_J), + KEY(3, 4, TOSA_KEY_ADDRESSBOOK), + KEY(3, 5, TOSA_KEY_CANCEL), + KEY(3, 6, TOSA_KEY_CENTER), + KEY(3, 7, TOSA_KEY_OK), + KEY(3, 8, KEY_LEFTSHIFT), + KEY(4, 0, KEY_S), + KEY(4, 1, KEY_R), + KEY(4, 2, KEY_B), + KEY(4, 3, KEY_N), + KEY(4, 4, TOSA_KEY_CALENDAR), + KEY(4, 5, TOSA_KEY_HOMEPAGE), + KEY(4, 6, KEY_LEFTCTRL), + KEY(4, 7, TOSA_KEY_LIGHT), + KEY(4, 9, KEY_RIGHTSHIFT), + KEY(5, 0, KEY_TAB), + KEY(5, 1, KEY_SLASH), + KEY(5, 2, KEY_H), + KEY(5, 3, KEY_M), + KEY(5, 4, TOSA_KEY_MENU), + KEY(5, 6, KEY_UP), + KEY(5, 10, TOSA_KEY_FN), + KEY(6, 0, KEY_X), + KEY(6, 1, KEY_F), + KEY(6, 2, KEY_SPACE), + KEY(6, 3, KEY_APOSTROPHE), + KEY(6, 4, TOSA_KEY_MAIL), + KEY(6, 5, KEY_LEFT), + KEY(6, 6, KEY_DOWN), + KEY(6, 7, KEY_RIGHT), }; static struct matrix_keymap_data tosakbd_keymap_data = { diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig index 8f1d327..d876431 100644 --- a/arch/arm/mach-s3c24xx/Kconfig +++ b/arch/arm/mach-s3c24xx/Kconfig @@ -180,27 +180,6 @@ config CPU_LLSERIAL_S3C2440 Selected if there is an S3C2440 (or register compatible) serial low-level implementation needed -# gpio configurations - -config S3C24XX_GPIO_EXTRA - int - default 128 if S3C24XX_GPIO_EXTRA128 - default 
64 if S3C24XX_GPIO_EXTRA64 - default 16 if ARCH_H1940 - default 0 - -config S3C24XX_GPIO_EXTRA64 - bool - help - Add an extra 64 gpio numbers to the available GPIO pool. This is - available for boards that need extra gpios for external devices. - -config S3C24XX_GPIO_EXTRA128 - bool - help - Add an extra 128 gpio numbers to the available GPIO pool. This is - available for boards that need extra gpios for external devices. - config S3C24XX_PLL bool "Support CPUfreq changing of PLL frequency (EXPERIMENTAL)" depends on ARM_S3C24XX_CPUFREQ diff --git a/arch/arm/mach-s3c24xx/common-smdk.c b/arch/arm/mach-s3c24xx/common-smdk.c index 404444d..e9fbcc9 100644 --- a/arch/arm/mach-s3c24xx/common-smdk.c +++ b/arch/arm/mach-s3c24xx/common-smdk.c @@ -37,8 +37,8 @@ #include <asm/irq.h> #include <mach/regs-gpio.h> +#include <mach/gpio-samsung.h> #include <linux/platform_data/leds-s3c24xx.h> - #include <linux/platform_data/mtd-nand-s3c2410.h> #include <plat/gpio-cfg.h> diff --git a/arch/arm/mach-s3c24xx/h1940-bluetooth.c b/arch/arm/mach-s3c24xx/h1940-bluetooth.c index 5b98bfd..b4d14b8 100644 --- a/arch/arm/mach-s3c24xx/h1940-bluetooth.c +++ b/arch/arm/mach-s3c24xx/h1940-bluetooth.c @@ -19,8 +19,10 @@ #include <linux/gpio.h> #include <linux/rfkill.h> +#include <plat/gpio-cfg.h> #include <mach/hardware.h> #include <mach/regs-gpio.h> +#include <mach/gpio-samsung.h> #include "h1940.h" diff --git a/arch/arm/mach-s3c24xx/include/mach/gpio.h b/arch/arm/mach-s3c24xx/include/mach/gpio-samsung.h index 1459156..528fcdc 100644 --- a/arch/arm/mach-s3c24xx/include/mach/gpio.h +++ b/arch/arm/mach-s3c24xx/include/mach/gpio-samsung.h @@ -14,16 +14,8 @@ * devices that need GPIO. */ -#ifndef __MACH_GPIO_H -#define __MACH_GPIO_H __FILE__ - -#ifdef CONFIG_CPU_S3C244X -#define ARCH_NR_GPIOS (32 * 9 + CONFIG_S3C24XX_GPIO_EXTRA) -#elif defined(CONFIG_CPU_S3C2443) || defined(CONFIG_CPU_S3C2416) -#define ARCH_NR_GPIOS (32 * 12 + CONFIG_S3C24XX_GPIO_EXTRA) -#else -#define ARCH_NR_GPIOS (256 + CONFIG_S3C24XX_GPIO_EXTRA) -#endif +#ifndef GPIO_SAMSUNG_S3C24XX_H +#define GPIO_SAMSUNG_S3C24XX_H /* * GPIO sizes for various SoCs: @@ -31,17 +23,17 @@ * 2410 2412 2440 2443 2416 * 2442 * ---- ---- ---- ---- ---- - * A 23 22 25 16 25 - * B 11 11 11 11 9 - * C 16 15 16 16 16 + * A 23 22 25 16 27 + * B 11 11 11 11 11 + * C 16 16 16 16 16 * D 16 16 16 16 16 * E 16 16 16 16 16 * F 8 8 8 8 8 * G 16 16 16 16 8 - * H 11 11 9 15 15 + * H 11 11 11 15 15 * J -- -- 13 16 -- * K -- -- -- -- 16 - * L -- -- -- 15 7 + * L -- -- -- 15 14 * M -- -- -- 2 2 */ @@ -101,8 +93,6 @@ enum s3c_gpio_number { #define S3C2410_GPL(_nr) (S3C2410_GPIO_L_START + (_nr)) #define S3C2410_GPM(_nr) (S3C2410_GPIO_M_START + (_nr)) -#include <plat/gpio-cfg.h> - #ifdef CONFIG_CPU_S3C244X #define S3C_GPIO_END (S3C2410_GPJ(0) + 32) #elif defined(CONFIG_CPU_S3C2443) || defined(CONFIG_CPU_S3C2416) @@ -111,4 +101,4 @@ enum s3c_gpio_number { #define S3C_GPIO_END (S3C2410_GPH(0) + 32) #endif -#endif /* __MACH_GPIO_H */ +#endif /* GPIO_SAMSUNG_S3C24XX_H */ diff --git a/arch/arm/mach-s3c24xx/mach-amlm5900.c b/arch/arm/mach-s3c24xx/mach-amlm5900.c index e27b5c9..284ea1f 100644 --- a/arch/arm/mach-s3c24xx/mach-amlm5900.c +++ b/arch/arm/mach-s3c24xx/mach-amlm5900.c @@ -52,6 +52,7 @@ #include <plat/regs-serial.h> #include <mach/regs-lcd.h> #include <mach/regs-gpio.h> +#include <mach/gpio-samsung.h> #include <linux/platform_data/i2c-s3c2410.h> #include <plat/devs.h> diff --git a/arch/arm/mach-s3c24xx/mach-anubis.c b/arch/arm/mach-s3c24xx/mach-anubis.c index c1fb6c3..2a16f8f 100644 --- 
a/arch/arm/mach-s3c24xx/mach-anubis.c +++ b/arch/arm/mach-s3c24xx/mach-anubis.c @@ -35,6 +35,7 @@ #include <plat/regs-serial.h> #include <mach/regs-gpio.h> #include <mach/regs-lcd.h> +#include <mach/gpio-samsung.h> #include <linux/platform_data/mtd-nand-s3c2410.h> #include <linux/platform_data/i2c-s3c2410.h> diff --git a/arch/arm/mach-s3c24xx/mach-at2440evb.c b/arch/arm/mach-s3c24xx/mach-at2440evb.c index 6dfeeb7..6beab67 100644 --- a/arch/arm/mach-s3c24xx/mach-at2440evb.c +++ b/arch/arm/mach-s3c24xx/mach-at2440evb.c @@ -36,6 +36,7 @@ #include <plat/regs-serial.h> #include <mach/regs-gpio.h> #include <mach/regs-lcd.h> +#include <mach/gpio-samsung.h> #include <linux/platform_data/mtd-nand-s3c2410.h> #include <linux/platform_data/i2c-s3c2410.h> diff --git a/arch/arm/mach-s3c24xx/mach-bast.c b/arch/arm/mach-s3c24xx/mach-bast.c index 22d6ae9..981ba1e 100644 --- a/arch/arm/mach-s3c24xx/mach-bast.c +++ b/arch/arm/mach-s3c24xx/mach-bast.c @@ -48,6 +48,7 @@ #include <mach/hardware.h> #include <mach/regs-gpio.h> #include <mach/regs-lcd.h> +#include <mach/gpio-samsung.h> #include <plat/clock.h> #include <plat/cpu.h> diff --git a/arch/arm/mach-s3c24xx/mach-gta02.c b/arch/arm/mach-s3c24xx/mach-gta02.c index 13d8d07..d9170e9 100644 --- a/arch/arm/mach-s3c24xx/mach-gta02.c +++ b/arch/arm/mach-s3c24xx/mach-gta02.c @@ -75,6 +75,7 @@ #include <mach/hardware.h> #include <mach/regs-gpio.h> #include <mach/regs-irq.h> +#include <mach/gpio-samsung.h> #include <plat/cpu.h> #include <plat/devs.h> diff --git a/arch/arm/mach-s3c24xx/mach-h1940.c b/arch/arm/mach-s3c24xx/mach-h1940.c index 952b6a0..de08321 100644 --- a/arch/arm/mach-s3c24xx/mach-h1940.c +++ b/arch/arm/mach-s3c24xx/mach-h1940.c @@ -54,6 +54,7 @@ #include <mach/regs-clock.h> #include <mach/regs-gpio.h> #include <mach/regs-lcd.h> +#include <mach/gpio-samsung.h> #include <plat/clock.h> #include <plat/cpu.h> diff --git a/arch/arm/mach-s3c24xx/mach-jive.c b/arch/arm/mach-s3c24xx/mach-jive.c index 43c23e2..67cb8e9 100644 --- a/arch/arm/mach-s3c24xx/mach-jive.c +++ b/arch/arm/mach-s3c24xx/mach-jive.c @@ -38,6 +38,7 @@ #include <mach/regs-gpio.h> #include <mach/regs-lcd.h> #include <mach/fb.h> +#include <mach/gpio-samsung.h> #include <asm/mach-types.h> diff --git a/arch/arm/mach-s3c24xx/mach-mini2440.c b/arch/arm/mach-s3c24xx/mach-mini2440.c index 4a18d49..1f15597 100644 --- a/arch/arm/mach-s3c24xx/mach-mini2440.c +++ b/arch/arm/mach-s3c24xx/mach-mini2440.c @@ -42,6 +42,7 @@ #include <linux/platform_data/leds-s3c24xx.h> #include <mach/regs-lcd.h> #include <mach/irqs.h> +#include <mach/gpio-samsung.h> #include <linux/platform_data/mtd-nand-s3c2410.h> #include <linux/platform_data/i2c-s3c2410.h> #include <linux/platform_data/mmc-s3cmci.h> diff --git a/arch/arm/mach-s3c24xx/mach-n30.c b/arch/arm/mach-s3c24xx/mach-n30.c index 2cb46c3..997684f 100644 --- a/arch/arm/mach-s3c24xx/mach-n30.c +++ b/arch/arm/mach-s3c24xx/mach-n30.c @@ -36,6 +36,7 @@ #include <linux/platform_data/leds-s3c24xx.h> #include <mach/regs-gpio.h> #include <mach/regs-lcd.h> +#include <mach/gpio-samsung.h> #include <asm/mach/arch.h> #include <asm/mach/irq.h> diff --git a/arch/arm/mach-s3c24xx/mach-nexcoder.c b/arch/arm/mach-s3c24xx/mach-nexcoder.c index 01f4354..575d28c 100644 --- a/arch/arm/mach-s3c24xx/mach-nexcoder.c +++ b/arch/arm/mach-s3c24xx/mach-nexcoder.c @@ -37,6 +37,7 @@ //#include <asm/debug-ll.h> #include <mach/regs-gpio.h> +#include <mach/gpio-samsung.h> #include <plat/regs-serial.h> #include <linux/platform_data/i2c-s3c2410.h> diff --git a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c 
b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c index 45e7436..33afb91 100644 --- a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c +++ b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c @@ -20,6 +20,7 @@ #include <linux/i2c/tps65010.h> #include <plat/cpu-freq.h> +#include <mach/gpio-samsung.h> #define OSIRIS_GPIO_DVS S3C2410_GPB(5) diff --git a/arch/arm/mach-s3c24xx/mach-osiris.c b/arch/arm/mach-s3c24xx/mach-osiris.c index 58d6fbe..f84f2a4 100644 --- a/arch/arm/mach-s3c24xx/mach-osiris.c +++ b/arch/arm/mach-s3c24xx/mach-osiris.c @@ -50,6 +50,7 @@ #include <mach/hardware.h> #include <mach/regs-gpio.h> #include <mach/regs-lcd.h> +#include <mach/gpio-samsung.h> #include "common.h" #include "osiris.h" diff --git a/arch/arm/mach-s3c24xx/mach-qt2410.c b/arch/arm/mach-s3c24xx/mach-qt2410.c index f8feaea..b534b76 100644 --- a/arch/arm/mach-s3c24xx/mach-qt2410.c +++ b/arch/arm/mach-s3c24xx/mach-qt2410.c @@ -54,6 +54,7 @@ #include <linux/platform_data/mtd-nand-s3c2410.h> #include <linux/platform_data/usb-s3c2410_udc.h> #include <linux/platform_data/i2c-s3c2410.h> +#include <mach/gpio-samsung.h> #include <plat/gpio-cfg.h> #include <plat/devs.h> diff --git a/arch/arm/mach-s3c24xx/mach-rx1950.c b/arch/arm/mach-s3c24xx/mach-rx1950.c index 034b7fe..0a5456c 100644 --- a/arch/arm/mach-s3c24xx/mach-rx1950.c +++ b/arch/arm/mach-s3c24xx/mach-rx1950.c @@ -51,6 +51,7 @@ #include <mach/fb.h> #include <mach/regs-gpio.h> #include <mach/regs-lcd.h> +#include <mach/gpio-samsung.h> #include <plat/clock.h> #include <plat/cpu.h> @@ -58,6 +59,7 @@ #include <plat/pm.h> #include <plat/regs-serial.h> #include <plat/samsung-time.h> +#include <plat/gpio-cfg.h> #include "common.h" #include "h1940.h" diff --git a/arch/arm/mach-s3c24xx/mach-rx3715.c b/arch/arm/mach-s3c24xx/mach-rx3715.c index 3bc6231..b36edce 100644 --- a/arch/arm/mach-s3c24xx/mach-rx3715.c +++ b/arch/arm/mach-s3c24xx/mach-rx3715.c @@ -43,6 +43,7 @@ #include <mach/hardware.h> #include <mach/regs-gpio.h> #include <mach/regs-lcd.h> +#include <mach/gpio-samsung.h> #include <plat/clock.h> #include <plat/cpu.h> diff --git a/arch/arm/mach-s3c24xx/mach-smdk2413.c b/arch/arm/mach-s3c24xx/mach-smdk2413.c index c9d31ef..f5bc721 100644 --- a/arch/arm/mach-s3c24xx/mach-smdk2413.c +++ b/arch/arm/mach-s3c24xx/mach-smdk2413.c @@ -39,6 +39,7 @@ #include <linux/platform_data/usb-s3c2410_udc.h> #include <linux/platform_data/i2c-s3c2410.h> +#include <mach/gpio-samsung.h> #include <mach/fb.h> #include <plat/clock.h> diff --git a/arch/arm/mach-s3c24xx/mach-smdk2416.c b/arch/arm/mach-s3c24xx/mach-smdk2416.c index f88e672..12023ca 100644 --- a/arch/arm/mach-s3c24xx/mach-smdk2416.c +++ b/arch/arm/mach-s3c24xx/mach-smdk2416.c @@ -38,6 +38,7 @@ #include <mach/regs-gpio.h> #include <mach/regs-lcd.h> #include <mach/regs-s3c2443-clock.h> +#include <mach/gpio-samsung.h> #include <linux/platform_data/leds-s3c24xx.h> #include <linux/platform_data/i2c-s3c2410.h> diff --git a/arch/arm/mach-s3c24xx/mach-vr1000.c b/arch/arm/mach-s3c24xx/mach-vr1000.c index 42e7187..755df48 100644 --- a/arch/arm/mach-s3c24xx/mach-vr1000.c +++ b/arch/arm/mach-s3c24xx/mach-vr1000.c @@ -40,6 +40,7 @@ #include <mach/hardware.h> #include <mach/regs-gpio.h> +#include <mach/gpio-samsung.h> #include <plat/clock.h> #include <plat/cpu.h> diff --git a/arch/arm/mach-s3c24xx/pm-s3c2410.c b/arch/arm/mach-s3c24xx/pm-s3c2410.c index 2d82c4f..20e481d 100644 --- a/arch/arm/mach-s3c24xx/pm-s3c2410.c +++ b/arch/arm/mach-s3c24xx/pm-s3c2410.c @@ -33,7 +33,9 @@ #include <mach/hardware.h> #include <mach/regs-gpio.h> +#include <mach/gpio-samsung.h> +#include 
<plat/gpio-cfg.h> #include <plat/cpu.h> #include <plat/pm.h> diff --git a/arch/arm/mach-s3c24xx/pm.c b/arch/arm/mach-s3c24xx/pm.c index caa5b72..052ca23 100644 --- a/arch/arm/mach-s3c24xx/pm.c +++ b/arch/arm/mach-s3c24xx/pm.c @@ -39,6 +39,7 @@ #include <mach/regs-clock.h> #include <mach/regs-gpio.h> #include <mach/regs-irq.h> +#include <mach/gpio-samsung.h> #include <asm/mach/time.h> diff --git a/arch/arm/mach-s3c24xx/s3c2410.c b/arch/arm/mach-s3c24xx/s3c2410.c index 34676d1..ffb92cbc 100644 --- a/arch/arm/mach-s3c24xx/s3c2410.c +++ b/arch/arm/mach-s3c24xx/s3c2410.c @@ -30,6 +30,7 @@ #include <asm/mach/irq.h> #include <mach/hardware.h> +#include <mach/gpio-samsung.h> #include <asm/irq.h> #include <asm/system_misc.h> diff --git a/arch/arm/mach-s3c24xx/s3c2416.c b/arch/arm/mach-s3c24xx/s3c2416.c index 9ef3ccf..8e01b4f 100644 --- a/arch/arm/mach-s3c24xx/s3c2416.c +++ b/arch/arm/mach-s3c24xx/s3c2416.c @@ -42,6 +42,7 @@ #include <asm/mach/irq.h> #include <mach/hardware.h> +#include <mach/gpio-samsung.h> #include <asm/proc-fns.h> #include <asm/irq.h> #include <asm/system_misc.h> diff --git a/arch/arm/mach-s3c24xx/s3c2440.c b/arch/arm/mach-s3c24xx/s3c2440.c index 5f9d656..03d379f 100644 --- a/arch/arm/mach-s3c24xx/s3c2440.c +++ b/arch/arm/mach-s3c24xx/s3c2440.c @@ -29,6 +29,7 @@ #include <asm/mach/irq.h> #include <mach/hardware.h> +#include <mach/gpio-samsung.h> #include <asm/irq.h> #include <plat/devs.h> diff --git a/arch/arm/mach-s3c24xx/s3c2442.c b/arch/arm/mach-s3c24xx/s3c2442.c index 6819961..2c8adc0 100644 --- a/arch/arm/mach-s3c24xx/s3c2442.c +++ b/arch/arm/mach-s3c24xx/s3c2442.c @@ -37,6 +37,7 @@ #include <linux/io.h> #include <mach/hardware.h> +#include <mach/gpio-samsung.h> #include <linux/atomic.h> #include <asm/irq.h> diff --git a/arch/arm/mach-s3c24xx/s3c2443.c b/arch/arm/mach-s3c24xx/s3c2443.c index b6c7191..886c214 100644 --- a/arch/arm/mach-s3c24xx/s3c2443.c +++ b/arch/arm/mach-s3c24xx/s3c2443.c @@ -29,6 +29,7 @@ #include <asm/mach/irq.h> #include <mach/hardware.h> +#include <mach/gpio-samsung.h> #include <asm/irq.h> #include <asm/system_misc.h> diff --git a/arch/arm/mach-s3c24xx/setup-i2c.c b/arch/arm/mach-s3c24xx/setup-i2c.c index 7b4f333..1852696 100644 --- a/arch/arm/mach-s3c24xx/setup-i2c.c +++ b/arch/arm/mach-s3c24xx/setup-i2c.c @@ -19,6 +19,7 @@ struct platform_device; #include <linux/platform_data/i2c-s3c2410.h> #include <mach/hardware.h> #include <mach/regs-gpio.h> +#include <mach/gpio-samsung.h> void s3c_i2c0_cfg_gpio(struct platform_device *dev) { diff --git a/arch/arm/mach-s3c24xx/setup-sdhci-gpio.c b/arch/arm/mach-s3c24xx/setup-sdhci-gpio.c index f65cb3e..c99b0f6 100644 --- a/arch/arm/mach-s3c24xx/setup-sdhci-gpio.c +++ b/arch/arm/mach-s3c24xx/setup-sdhci-gpio.c @@ -20,6 +20,7 @@ #include <linux/gpio.h> #include <mach/regs-gpio.h> +#include <mach/gpio-samsung.h> #include <plat/gpio-cfg.h> void s3c2416_setup_sdhci0_cfg_gpio(struct platform_device *dev, int width) diff --git a/arch/arm/mach-s3c24xx/setup-ts.c b/arch/arm/mach-s3c24xx/setup-ts.c index 4e11aff..46466d2 100644 --- a/arch/arm/mach-s3c24xx/setup-ts.c +++ b/arch/arm/mach-s3c24xx/setup-ts.c @@ -15,7 +15,9 @@ struct platform_device; /* don't need the contents */ +#include <plat/gpio-cfg.h> #include <mach/hardware.h> +#include <mach/gpio-samsung.h> /** * s3c24xx_ts_cfg_gpio - configure gpio for s3c2410 systems diff --git a/arch/arm/mach-s3c24xx/simtec-usb.c b/arch/arm/mach-s3c24xx/simtec-usb.c index 2ed2e32..8dea917 100644 --- a/arch/arm/mach-s3c24xx/simtec-usb.c +++ b/arch/arm/mach-s3c24xx/simtec-usb.c @@ -29,6 
+29,7 @@ #include <asm/mach/irq.h> #include <mach/hardware.h> +#include <mach/gpio-samsung.h> #include <asm/irq.h> #include <linux/platform_data/usb-ohci-s3c2410.h> diff --git a/arch/arm/mach-s3c64xx/Kconfig b/arch/arm/mach-s3c64xx/Kconfig index 2cb8dc5..64f04e6 100644 --- a/arch/arm/mach-s3c64xx/Kconfig +++ b/arch/arm/mach-s3c64xx/Kconfig @@ -17,9 +17,10 @@ config CPU_S3C6410 help Enable S3C6410 CPU support -config S3C64XX_DMA - bool "S3C64XX DMA" - select S3C_DMA +config S3C64XX_PL080 + bool "S3C64XX DMA using generic PL08x driver" + select AMBA_PL08X + select SAMSUNG_DMADEV config S3C64XX_SETUP_SDHCI bool @@ -192,7 +193,6 @@ config SMDK6410_WM1190_EV1 select MFD_WM8350_I2C select REGULATOR select REGULATOR_WM8350 - select SAMSUNG_GPIO_EXTRA64 help The Wolfson Microelectronics 1190-EV1 is a WM835x based PMIC and audio daughtercard for the Samsung SMDK6410 reference @@ -208,7 +208,6 @@ config SMDK6410_WM1192_EV1 select MFD_WM831X_I2C select REGULATOR select REGULATOR_WM831X - select SAMSUNG_GPIO_EXTRA64 help The Wolfson Microelectronics 1192-EV1 is a WM831x based PMIC daughtercard for the Samsung SMDK6410 reference platform. @@ -294,7 +293,6 @@ config MACH_WLF_CRAGG_6410 select SAMSUNG_DEV_ADC select SAMSUNG_DEV_KEYPAD select SAMSUNG_DEV_PWM - select SAMSUNG_GPIO_EXTRA128 help Machine support for the Wolfson Cragganmore S3C6410 variant. diff --git a/arch/arm/mach-s3c64xx/Makefile b/arch/arm/mach-s3c64xx/Makefile index 6faedcf..58069a7 100644 --- a/arch/arm/mach-s3c64xx/Makefile +++ b/arch/arm/mach-s3c64xx/Makefile @@ -26,7 +26,7 @@ obj-$(CONFIG_CPU_IDLE) += cpuidle.o # DMA support -obj-$(CONFIG_S3C64XX_DMA) += dma.o +obj-$(CONFIG_S3C64XX_PL080) += pl080.o # Device support diff --git a/arch/arm/mach-s3c64xx/common.c b/arch/arm/mach-s3c64xx/common.c index 7a3ce4c..76ab595 100644 --- a/arch/arm/mach-s3c64xx/common.c +++ b/arch/arm/mach-s3c64xx/common.c @@ -41,6 +41,7 @@ #include <mach/map.h> #include <mach/hardware.h> #include <mach/regs-gpio.h> +#include <mach/gpio-samsung.h> #include <plat/cpu.h> #include <plat/devs.h> diff --git a/arch/arm/mach-s3c64xx/common.h b/arch/arm/mach-s3c64xx/common.h index bd3bd56..7043e7a 100644 --- a/arch/arm/mach-s3c64xx/common.h +++ b/arch/arm/mach-s3c64xx/common.h @@ -58,4 +58,9 @@ int __init s3c64xx_pm_late_initcall(void); static inline int s3c64xx_pm_late_initcall(void) { return 0; } #endif +#ifdef CONFIG_S3C64XX_PL080 +extern struct pl08x_platform_data s3c64xx_dma0_plat_data; +extern struct pl08x_platform_data s3c64xx_dma1_plat_data; +#endif + #endif /* __ARCH_ARM_MACH_S3C64XX_COMMON_H */ diff --git a/arch/arm/mach-s3c64xx/crag6410.h b/arch/arm/mach-s3c64xx/crag6410.h index 4c3c999..7bc6668 100644 --- a/arch/arm/mach-s3c64xx/crag6410.h +++ b/arch/arm/mach-s3c64xx/crag6410.h @@ -11,7 +11,7 @@ #ifndef MACH_CRAG6410_H #define MACH_CRAG6410_H -#include <linux/gpio.h> +#include <mach/gpio-samsung.h> #define GLENFARCLAS_PMIC_IRQ_BASE IRQ_BOARD_START diff --git a/arch/arm/mach-s3c64xx/dev-audio.c b/arch/arm/mach-s3c64xx/dev-audio.c index e367e87..ff780a8 100644 --- a/arch/arm/mach-s3c64xx/dev-audio.c +++ b/arch/arm/mach-s3c64xx/dev-audio.c @@ -22,6 +22,7 @@ #include <plat/devs.h> #include <linux/platform_data/asoc-s3c.h> #include <plat/gpio-cfg.h> +#include <mach/gpio-samsung.h> static int s3c64xx_i2s_cfg_gpio(struct platform_device *pdev) { diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c deleted file mode 100644 index 7e22c21..0000000 --- a/arch/arm/mach-s3c64xx/dma.c +++ /dev/null @@ -1,762 +0,0 @@ -/* linux/arch/arm/plat-s3c64xx/dma.c 
- * - * Copyright 2009 Openmoko, Inc. - * Copyright 2009 Simtec Electronics - * Ben Dooks <ben@simtec.co.uk> - * http://armlinux.simtec.co.uk/ - * - * S3C64XX DMA core - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. -*/ - -/* - * NOTE: Code in this file is not used when booting with Device Tree support. - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/interrupt.h> -#include <linux/dmapool.h> -#include <linux/device.h> -#include <linux/errno.h> -#include <linux/slab.h> -#include <linux/delay.h> -#include <linux/clk.h> -#include <linux/err.h> -#include <linux/io.h> -#include <linux/amba/pl080.h> -#include <linux/of.h> - -#include <mach/dma.h> -#include <mach/map.h> -#include <mach/irqs.h> - -#include "regs-sys.h" - -/* dma channel state information */ - -struct s3c64xx_dmac { - struct device dev; - struct clk *clk; - void __iomem *regs; - struct s3c2410_dma_chan *channels; - enum dma_ch chanbase; -}; - -/* pool to provide LLI buffers */ -static struct dma_pool *dma_pool; - -/* Debug configuration and code */ - -static unsigned char debug_show_buffs = 0; - -static void dbg_showchan(struct s3c2410_dma_chan *chan) -{ - pr_debug("DMA%d: %08x->%08x L %08x C %08x,%08x S %08x\n", - chan->number, - readl(chan->regs + PL080_CH_SRC_ADDR), - readl(chan->regs + PL080_CH_DST_ADDR), - readl(chan->regs + PL080_CH_LLI), - readl(chan->regs + PL080_CH_CONTROL), - readl(chan->regs + PL080S_CH_CONTROL2), - readl(chan->regs + PL080S_CH_CONFIG)); -} - -static void show_lli(struct pl080s_lli *lli) -{ - pr_debug("LLI[%p] %08x->%08x, NL %08x C %08x,%08x\n", - lli, lli->src_addr, lli->dst_addr, lli->next_lli, - lli->control0, lli->control1); -} - -static void dbg_showbuffs(struct s3c2410_dma_chan *chan) -{ - struct s3c64xx_dma_buff *ptr; - struct s3c64xx_dma_buff *end; - - pr_debug("DMA%d: buffs next %p, curr %p, end %p\n", - chan->number, chan->next, chan->curr, chan->end); - - ptr = chan->next; - end = chan->end; - - if (debug_show_buffs) { - for (; ptr != NULL; ptr = ptr->next) { - pr_debug("DMA%d: %08x ", - chan->number, ptr->lli_dma); - show_lli(ptr->lli); - } - } -} - -/* End of Debug */ - -static struct s3c2410_dma_chan *s3c64xx_dma_map_channel(unsigned int channel) -{ - struct s3c2410_dma_chan *chan; - unsigned int start, offs; - - start = 0; - - if (channel >= DMACH_PCM1_TX) - start = 8; - - for (offs = 0; offs < 8; offs++) { - chan = &s3c2410_chans[start + offs]; - if (!chan->in_use) - goto found; - } - - return NULL; - -found: - s3c_dma_chan_map[channel] = chan; - return chan; -} - -int s3c2410_dma_config(enum dma_ch channel, int xferunit) -{ - struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); - - if (chan == NULL) - return -EINVAL; - - switch (xferunit) { - case 1: - chan->hw_width = 0; - break; - case 2: - chan->hw_width = 1; - break; - case 4: - chan->hw_width = 2; - break; - default: - printk(KERN_ERR "%s: illegal width %d\n", __func__, xferunit); - return -EINVAL; - } - - return 0; -} -EXPORT_SYMBOL(s3c2410_dma_config); - -static void s3c64xx_dma_fill_lli(struct s3c2410_dma_chan *chan, - struct pl080s_lli *lli, - dma_addr_t data, int size) -{ - dma_addr_t src, dst; - u32 control0, control1; - - switch (chan->source) { - case DMA_FROM_DEVICE: - src = chan->dev_addr; - dst = data; - control0 = PL080_CONTROL_SRC_AHB2; - control0 |= PL080_CONTROL_DST_INCR; - break; - - case DMA_TO_DEVICE: - src = data; - dst = 
chan->dev_addr; - control0 = PL080_CONTROL_DST_AHB2; - control0 |= PL080_CONTROL_SRC_INCR; - break; - default: - BUG(); - } - - /* note, we do not currently setup any of the burst controls */ - - control1 = size >> chan->hw_width; /* size in no of xfers */ - control0 |= PL080_CONTROL_PROT_SYS; /* always in priv. mode */ - control0 |= PL080_CONTROL_TC_IRQ_EN; /* always fire IRQ */ - control0 |= (u32)chan->hw_width << PL080_CONTROL_DWIDTH_SHIFT; - control0 |= (u32)chan->hw_width << PL080_CONTROL_SWIDTH_SHIFT; - - lli->src_addr = src; - lli->dst_addr = dst; - lli->next_lli = 0; - lli->control0 = control0; - lli->control1 = control1; -} - -static void s3c64xx_lli_to_regs(struct s3c2410_dma_chan *chan, - struct pl080s_lli *lli) -{ - void __iomem *regs = chan->regs; - - pr_debug("%s: LLI %p => regs\n", __func__, lli); - show_lli(lli); - - writel(lli->src_addr, regs + PL080_CH_SRC_ADDR); - writel(lli->dst_addr, regs + PL080_CH_DST_ADDR); - writel(lli->next_lli, regs + PL080_CH_LLI); - writel(lli->control0, regs + PL080_CH_CONTROL); - writel(lli->control1, regs + PL080S_CH_CONTROL2); -} - -static int s3c64xx_dma_start(struct s3c2410_dma_chan *chan) -{ - struct s3c64xx_dmac *dmac = chan->dmac; - u32 config; - u32 bit = chan->bit; - - dbg_showchan(chan); - - pr_debug("%s: clearing interrupts\n", __func__); - - /* clear interrupts */ - writel(bit, dmac->regs + PL080_TC_CLEAR); - writel(bit, dmac->regs + PL080_ERR_CLEAR); - - pr_debug("%s: starting channel\n", __func__); - - config = readl(chan->regs + PL080S_CH_CONFIG); - config |= PL080_CONFIG_ENABLE; - config &= ~PL080_CONFIG_HALT; - - pr_debug("%s: writing config %08x\n", __func__, config); - writel(config, chan->regs + PL080S_CH_CONFIG); - - return 0; -} - -static int s3c64xx_dma_stop(struct s3c2410_dma_chan *chan) -{ - u32 config; - int timeout; - - pr_debug("%s: stopping channel\n", __func__); - - dbg_showchan(chan); - - config = readl(chan->regs + PL080S_CH_CONFIG); - config |= PL080_CONFIG_HALT; - writel(config, chan->regs + PL080S_CH_CONFIG); - - timeout = 1000; - do { - config = readl(chan->regs + PL080S_CH_CONFIG); - pr_debug("%s: %d - config %08x\n", __func__, timeout, config); - if (config & PL080_CONFIG_ACTIVE) - udelay(10); - else - break; - } while (--timeout > 0); - - if (config & PL080_CONFIG_ACTIVE) { - printk(KERN_ERR "%s: channel still active\n", __func__); - return -EFAULT; - } - - config = readl(chan->regs + PL080S_CH_CONFIG); - config &= ~PL080_CONFIG_ENABLE; - writel(config, chan->regs + PL080S_CH_CONFIG); - - return 0; -} - -static inline void s3c64xx_dma_bufffdone(struct s3c2410_dma_chan *chan, - struct s3c64xx_dma_buff *buf, - enum s3c2410_dma_buffresult result) -{ - if (chan->callback_fn != NULL) - (chan->callback_fn)(chan, buf->pw, 0, result); -} - -static void s3c64xx_dma_freebuff(struct s3c64xx_dma_buff *buff) -{ - dma_pool_free(dma_pool, buff->lli, buff->lli_dma); - kfree(buff); -} - -static int s3c64xx_dma_flush(struct s3c2410_dma_chan *chan) -{ - struct s3c64xx_dma_buff *buff, *next; - u32 config; - - dbg_showchan(chan); - - pr_debug("%s: flushing channel\n", __func__); - - config = readl(chan->regs + PL080S_CH_CONFIG); - config &= ~PL080_CONFIG_ENABLE; - writel(config, chan->regs + PL080S_CH_CONFIG); - - /* dump all the buffers associated with this channel */ - - for (buff = chan->curr; buff != NULL; buff = next) { - next = buff->next; - pr_debug("%s: buff %p (next %p)\n", __func__, buff, buff->next); - - s3c64xx_dma_bufffdone(chan, buff, S3C2410_RES_ABORT); - s3c64xx_dma_freebuff(buff); - } - - chan->curr = 
chan->next = chan->end = NULL; - - return 0; -} - -int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op) -{ - struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); - - WARN_ON(!chan); - if (!chan) - return -EINVAL; - - switch (op) { - case S3C2410_DMAOP_START: - return s3c64xx_dma_start(chan); - - case S3C2410_DMAOP_STOP: - return s3c64xx_dma_stop(chan); - - case S3C2410_DMAOP_FLUSH: - return s3c64xx_dma_flush(chan); - - /* believe PAUSE/RESUME are no-ops */ - case S3C2410_DMAOP_PAUSE: - case S3C2410_DMAOP_RESUME: - case S3C2410_DMAOP_STARTED: - case S3C2410_DMAOP_TIMEOUT: - return 0; - } - - return -ENOENT; -} -EXPORT_SYMBOL(s3c2410_dma_ctrl); - -/* s3c2410_dma_enque - * - */ - -int s3c2410_dma_enqueue(enum dma_ch channel, void *id, - dma_addr_t data, int size) -{ - struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); - struct s3c64xx_dma_buff *next; - struct s3c64xx_dma_buff *buff; - struct pl080s_lli *lli; - unsigned long flags; - int ret; - - WARN_ON(!chan); - if (!chan) - return -EINVAL; - - buff = kzalloc(sizeof(struct s3c64xx_dma_buff), GFP_ATOMIC); - if (!buff) { - printk(KERN_ERR "%s: no memory for buffer\n", __func__); - return -ENOMEM; - } - - lli = dma_pool_alloc(dma_pool, GFP_ATOMIC, &buff->lli_dma); - if (!lli) { - printk(KERN_ERR "%s: no memory for lli\n", __func__); - ret = -ENOMEM; - goto err_buff; - } - - pr_debug("%s: buff %p, dp %08x lli (%p, %08x) %d\n", - __func__, buff, data, lli, (u32)buff->lli_dma, size); - - buff->lli = lli; - buff->pw = id; - - s3c64xx_dma_fill_lli(chan, lli, data, size); - - local_irq_save(flags); - - if ((next = chan->next) != NULL) { - struct s3c64xx_dma_buff *end = chan->end; - struct pl080s_lli *endlli = end->lli; - - pr_debug("enquing onto channel\n"); - - end->next = buff; - endlli->next_lli = buff->lli_dma; - - if (chan->flags & S3C2410_DMAF_CIRCULAR) { - struct s3c64xx_dma_buff *curr = chan->curr; - lli->next_lli = curr->lli_dma; - } - - if (next == chan->curr) { - writel(buff->lli_dma, chan->regs + PL080_CH_LLI); - chan->next = buff; - } - - show_lli(endlli); - chan->end = buff; - } else { - pr_debug("enquing onto empty channel\n"); - - chan->curr = buff; - chan->next = buff; - chan->end = buff; - - s3c64xx_lli_to_regs(chan, lli); - } - - local_irq_restore(flags); - - show_lli(lli); - - dbg_showchan(chan); - dbg_showbuffs(chan); - return 0; - -err_buff: - kfree(buff); - return ret; -} - -EXPORT_SYMBOL(s3c2410_dma_enqueue); - - -int s3c2410_dma_devconfig(enum dma_ch channel, - enum dma_data_direction source, - unsigned long devaddr) -{ - struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); - u32 peripheral; - u32 config = 0; - - pr_debug("%s: channel %d, source %d, dev %08lx, chan %p\n", - __func__, channel, source, devaddr, chan); - - WARN_ON(!chan); - if (!chan) - return -EINVAL; - - peripheral = (chan->peripheral & 0xf); - chan->source = source; - chan->dev_addr = devaddr; - - pr_debug("%s: peripheral %d\n", __func__, peripheral); - - switch (source) { - case DMA_FROM_DEVICE: - config = 2 << PL080_CONFIG_FLOW_CONTROL_SHIFT; - config |= peripheral << PL080_CONFIG_SRC_SEL_SHIFT; - break; - case DMA_TO_DEVICE: - config = 1 << PL080_CONFIG_FLOW_CONTROL_SHIFT; - config |= peripheral << PL080_CONFIG_DST_SEL_SHIFT; - break; - default: - printk(KERN_ERR "%s: bad source\n", __func__); - return -EINVAL; - } - - /* allow TC and ERR interrupts */ - config |= PL080_CONFIG_TC_IRQ_MASK; - config |= PL080_CONFIG_ERR_IRQ_MASK; - - pr_debug("%s: config %08x\n", __func__, config); - - writel(config, 
chan->regs + PL080S_CH_CONFIG); - - return 0; -} -EXPORT_SYMBOL(s3c2410_dma_devconfig); - - -int s3c2410_dma_getposition(enum dma_ch channel, - dma_addr_t *src, dma_addr_t *dst) -{ - struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); - - WARN_ON(!chan); - if (!chan) - return -EINVAL; - - if (src != NULL) - *src = readl(chan->regs + PL080_CH_SRC_ADDR); - - if (dst != NULL) - *dst = readl(chan->regs + PL080_CH_DST_ADDR); - - return 0; -} -EXPORT_SYMBOL(s3c2410_dma_getposition); - -/* s3c2410_request_dma - * - * get control of an dma channel -*/ - -int s3c2410_dma_request(enum dma_ch channel, - struct s3c2410_dma_client *client, - void *dev) -{ - struct s3c2410_dma_chan *chan; - unsigned long flags; - - pr_debug("dma%d: s3c2410_request_dma: client=%s, dev=%p\n", - channel, client->name, dev); - - local_irq_save(flags); - - chan = s3c64xx_dma_map_channel(channel); - if (chan == NULL) { - local_irq_restore(flags); - return -EBUSY; - } - - dbg_showchan(chan); - - chan->client = client; - chan->in_use = 1; - chan->peripheral = channel; - chan->flags = 0; - - local_irq_restore(flags); - - /* need to setup */ - - pr_debug("%s: channel initialised, %p\n", __func__, chan); - - return chan->number | DMACH_LOW_LEVEL; -} - -EXPORT_SYMBOL(s3c2410_dma_request); - -/* s3c2410_dma_free - * - * release the given channel back to the system, will stop and flush - * any outstanding transfers, and ensure the channel is ready for the - * next claimant. - * - * Note, although a warning is currently printed if the freeing client - * info is not the same as the registrant's client info, the free is still - * allowed to go through. -*/ - -int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client) -{ - struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); - unsigned long flags; - - if (chan == NULL) - return -EINVAL; - - local_irq_save(flags); - - if (chan->client != client) { - printk(KERN_WARNING "dma%d: possible free from different client (channel %p, passed %p)\n", - channel, chan->client, client); - } - - /* sort out stopping and freeing the channel */ - - - chan->client = NULL; - chan->in_use = 0; - - if (!(channel & DMACH_LOW_LEVEL)) - s3c_dma_chan_map[channel] = NULL; - - local_irq_restore(flags); - - return 0; -} - -EXPORT_SYMBOL(s3c2410_dma_free); - -static irqreturn_t s3c64xx_dma_irq(int irq, void *pw) -{ - struct s3c64xx_dmac *dmac = pw; - struct s3c2410_dma_chan *chan; - enum s3c2410_dma_buffresult res; - u32 tcstat, errstat; - u32 bit; - int offs; - - tcstat = readl(dmac->regs + PL080_TC_STATUS); - errstat = readl(dmac->regs + PL080_ERR_STATUS); - - for (offs = 0, bit = 1; offs < 8; offs++, bit <<= 1) { - struct s3c64xx_dma_buff *buff; - - if (!(errstat & bit) && !(tcstat & bit)) - continue; - - chan = dmac->channels + offs; - res = S3C2410_RES_ERR; - - if (tcstat & bit) { - writel(bit, dmac->regs + PL080_TC_CLEAR); - res = S3C2410_RES_OK; - } - - if (errstat & bit) - writel(bit, dmac->regs + PL080_ERR_CLEAR); - - /* 'next' points to the buffer that is next to the - * currently active buffer. - * For CIRCULAR queues, 'next' will be same as 'curr' - * when 'end' is the active buffer. 
- */ - buff = chan->curr; - while (buff && buff != chan->next - && buff->next != chan->next) - buff = buff->next; - - if (!buff) - BUG(); - - if (buff == chan->next) - buff = chan->end; - - s3c64xx_dma_bufffdone(chan, buff, res); - - /* Free the node and update curr, if non-circular queue */ - if (!(chan->flags & S3C2410_DMAF_CIRCULAR)) { - chan->curr = buff->next; - s3c64xx_dma_freebuff(buff); - } - - /* Update 'next' */ - buff = chan->next; - if (chan->next == chan->end) { - chan->next = chan->curr; - if (!(chan->flags & S3C2410_DMAF_CIRCULAR)) - chan->end = NULL; - } else { - chan->next = buff->next; - } - } - - return IRQ_HANDLED; -} - -static struct bus_type dma_subsys = { - .name = "s3c64xx-dma", - .dev_name = "s3c64xx-dma", -}; - -static int s3c64xx_dma_init1(int chno, enum dma_ch chbase, - int irq, unsigned int base) -{ - struct s3c2410_dma_chan *chptr = &s3c2410_chans[chno]; - struct s3c64xx_dmac *dmac; - char clkname[16]; - void __iomem *regs; - void __iomem *regptr; - int err, ch; - - dmac = kzalloc(sizeof(struct s3c64xx_dmac), GFP_KERNEL); - if (!dmac) { - printk(KERN_ERR "%s: failed to alloc mem\n", __func__); - return -ENOMEM; - } - - dmac->dev.id = chno / 8; - dmac->dev.bus = &dma_subsys; - - err = device_register(&dmac->dev); - if (err) { - printk(KERN_ERR "%s: failed to register device\n", __func__); - goto err_alloc; - } - - regs = ioremap(base, 0x200); - if (!regs) { - printk(KERN_ERR "%s: failed to ioremap()\n", __func__); - err = -ENXIO; - goto err_dev; - } - - snprintf(clkname, sizeof(clkname), "dma%d", dmac->dev.id); - - dmac->clk = clk_get(NULL, clkname); - if (IS_ERR(dmac->clk)) { - printk(KERN_ERR "%s: failed to get clock %s\n", __func__, clkname); - err = PTR_ERR(dmac->clk); - goto err_map; - } - - clk_prepare_enable(dmac->clk); - - dmac->regs = regs; - dmac->chanbase = chbase; - dmac->channels = chptr; - - err = request_irq(irq, s3c64xx_dma_irq, 0, "DMA", dmac); - if (err < 0) { - printk(KERN_ERR "%s: failed to get irq\n", __func__); - goto err_clk; - } - - regptr = regs + PL080_Cx_BASE(0); - - for (ch = 0; ch < 8; ch++, chptr++) { - pr_debug("%s: registering DMA %d (%p)\n", - __func__, chno + ch, regptr); - - chptr->bit = 1 << ch; - chptr->number = chno + ch; - chptr->dmac = dmac; - chptr->regs = regptr; - regptr += PL080_Cx_STRIDE; - } - - /* for the moment, permanently enable the controller */ - writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG); - - printk(KERN_INFO "PL080: IRQ %d, at %p, channels %d..%d\n", - irq, regs, chno, chno+8); - - return 0; - -err_clk: - clk_disable_unprepare(dmac->clk); - clk_put(dmac->clk); -err_map: - iounmap(regs); -err_dev: - device_unregister(&dmac->dev); -err_alloc: - kfree(dmac); - return err; -} - -static int __init s3c64xx_dma_init(void) -{ - int ret; - - /* This driver is not supported when booting with device tree. 
*/ - if (of_have_populated_dt()) - return -ENODEV; - - printk(KERN_INFO "%s: Registering DMA channels\n", __func__); - - dma_pool = dma_pool_create("DMA-LLI", NULL, sizeof(struct pl080s_lli), 16, 0); - if (!dma_pool) { - printk(KERN_ERR "%s: failed to create pool\n", __func__); - return -ENOMEM; - } - - ret = subsys_system_register(&dma_subsys, NULL); - if (ret) { - printk(KERN_ERR "%s: failed to create subsys\n", __func__); - return -ENOMEM; - } - - /* Set all DMA configuration to be DMA, not SDMA */ - writel(0xffffff, S3C64XX_SDMA_SEL); - - /* Register standard DMA controllers */ - s3c64xx_dma_init1(0, DMACH_UART0, IRQ_DMA0, 0x75000000); - s3c64xx_dma_init1(8, DMACH_PCM1_TX, IRQ_DMA1, 0x75100000); - - return 0; -} - -arch_initcall(s3c64xx_dma_init); diff --git a/arch/arm/mach-s3c64xx/include/mach/dma.h b/arch/arm/mach-s3c64xx/include/mach/dma.h index fe1a98c..059b1fc 100644 --- a/arch/arm/mach-s3c64xx/include/mach/dma.h +++ b/arch/arm/mach-s3c64xx/include/mach/dma.h @@ -11,51 +11,48 @@ #ifndef __ASM_ARCH_DMA_H #define __ASM_ARCH_DMA_H __FILE__ -#define S3C_DMA_CHANNELS (16) +#define S3C64XX_DMA_CHAN(name) ((unsigned long)(name)) + +/* DMA0/SDMA0 */ +#define DMACH_UART0 S3C64XX_DMA_CHAN("uart0_tx") +#define DMACH_UART0_SRC2 S3C64XX_DMA_CHAN("uart0_rx") +#define DMACH_UART1 S3C64XX_DMA_CHAN("uart1_tx") +#define DMACH_UART1_SRC2 S3C64XX_DMA_CHAN("uart1_rx") +#define DMACH_UART2 S3C64XX_DMA_CHAN("uart2_tx") +#define DMACH_UART2_SRC2 S3C64XX_DMA_CHAN("uart2_rx") +#define DMACH_UART3 S3C64XX_DMA_CHAN("uart3_tx") +#define DMACH_UART3_SRC2 S3C64XX_DMA_CHAN("uart3_rx") +#define DMACH_PCM0_TX S3C64XX_DMA_CHAN("pcm0_tx") +#define DMACH_PCM0_RX S3C64XX_DMA_CHAN("pcm0_rx") +#define DMACH_I2S0_OUT S3C64XX_DMA_CHAN("i2s0_tx") +#define DMACH_I2S0_IN S3C64XX_DMA_CHAN("i2s0_rx") +#define DMACH_SPI0_TX S3C64XX_DMA_CHAN("spi0_tx") +#define DMACH_SPI0_RX S3C64XX_DMA_CHAN("spi0_rx") +#define DMACH_HSI_I2SV40_TX S3C64XX_DMA_CHAN("i2s2_tx") +#define DMACH_HSI_I2SV40_RX S3C64XX_DMA_CHAN("i2s2_rx") + +/* DMA1/SDMA1 */ +#define DMACH_PCM1_TX S3C64XX_DMA_CHAN("pcm1_tx") +#define DMACH_PCM1_RX S3C64XX_DMA_CHAN("pcm1_rx") +#define DMACH_I2S1_OUT S3C64XX_DMA_CHAN("i2s1_tx") +#define DMACH_I2S1_IN S3C64XX_DMA_CHAN("i2s1_rx") +#define DMACH_SPI1_TX S3C64XX_DMA_CHAN("spi1_tx") +#define DMACH_SPI1_RX S3C64XX_DMA_CHAN("spi1_rx") +#define DMACH_AC97_PCMOUT S3C64XX_DMA_CHAN("ac97_out") +#define DMACH_AC97_PCMIN S3C64XX_DMA_CHAN("ac97_in") +#define DMACH_AC97_MICIN S3C64XX_DMA_CHAN("ac97_mic") +#define DMACH_PWM S3C64XX_DMA_CHAN("pwm") +#define DMACH_IRDA S3C64XX_DMA_CHAN("irda") +#define DMACH_EXTERNAL S3C64XX_DMA_CHAN("external") +#define DMACH_SECURITY_RX S3C64XX_DMA_CHAN("sec_rx") +#define DMACH_SECURITY_TX S3C64XX_DMA_CHAN("sec_tx") -/* see mach-s3c2410/dma.h for notes on dma channel numbers */ - -/* Note, for the S3C64XX architecture we keep the DMACH_ - * defines in the order they are allocated to [S]DMA0/[S]DMA1 - * so that is easy to do DHACH_ -> DMA controller conversion - */ enum dma_ch { - /* DMA0/SDMA0 */ - DMACH_UART0 = 0, - DMACH_UART0_SRC2, - DMACH_UART1, - DMACH_UART1_SRC2, - DMACH_UART2, - DMACH_UART2_SRC2, - DMACH_UART3, - DMACH_UART3_SRC2, - DMACH_PCM0_TX, - DMACH_PCM0_RX, - DMACH_I2S0_OUT, - DMACH_I2S0_IN, - DMACH_SPI0_TX, - DMACH_SPI0_RX, - DMACH_HSI_I2SV40_TX, - DMACH_HSI_I2SV40_RX, + DMACH_MAX = 32 +}; - /* DMA1/SDMA1 */ - DMACH_PCM1_TX = 16, - DMACH_PCM1_RX, - DMACH_I2S1_OUT, - DMACH_I2S1_IN, - DMACH_SPI1_TX, - DMACH_SPI1_RX, - DMACH_AC97_PCMOUT, - DMACH_AC97_PCMIN, - DMACH_AC97_MICIN, - DMACH_PWM, - 
DMACH_IRDA, - DMACH_EXTERNAL, - DMACH_RES1, - DMACH_RES2, - DMACH_SECURITY_RX, /* SDMA1 only */ - DMACH_SECURITY_TX, /* SDMA1 only */ - DMACH_MAX /* the end */ +struct s3c2410_dma_client { + char *name; }; static inline bool samsung_dma_has_circular(void) @@ -65,67 +62,10 @@ static inline bool samsung_dma_has_circular(void) static inline bool samsung_dma_is_dmadev(void) { - return false; + return true; } -#define S3C2410_DMAF_CIRCULAR (1 << 0) - -#include <plat/dma.h> - -#define DMACH_LOW_LEVEL (1<<28) /* use this to specifiy hardware ch no */ - -struct s3c64xx_dma_buff; - -/** s3c64xx_dma_buff - S3C64XX DMA buffer descriptor - * @next: Pointer to next buffer in queue or ring. - * @pw: Client provided identifier - * @lli: Pointer to hardware descriptor this buffer is associated with. - * @lli_dma: Hardare address of the descriptor. - */ -struct s3c64xx_dma_buff { - struct s3c64xx_dma_buff *next; - - void *pw; - struct pl080s_lli *lli; - dma_addr_t lli_dma; -}; - -struct s3c64xx_dmac; - -struct s3c2410_dma_chan { - unsigned char number; /* number of this dma channel */ - unsigned char in_use; /* channel allocated */ - unsigned char bit; /* bit for enable/disable/etc */ - unsigned char hw_width; - unsigned char peripheral; - - unsigned int flags; - enum dma_data_direction source; - - - dma_addr_t dev_addr; - - struct s3c2410_dma_client *client; - struct s3c64xx_dmac *dmac; /* pointer to controller */ - - void __iomem *regs; - - /* cdriver callbacks */ - s3c2410_dma_cbfn_t callback_fn; /* buffer done callback */ - s3c2410_dma_opfn_t op_fn; /* channel op callback */ - - /* buffer list and information */ - struct s3c64xx_dma_buff *curr; /* current dma buffer */ - struct s3c64xx_dma_buff *next; /* next buffer to load */ - struct s3c64xx_dma_buff *end; /* end of queue */ - - /* note, when channel is running in circular mode, curr is the - * first buffer enqueued, end is the last and curr is where the - * last buffer-done event is set-at. The buffers are not freed - * and the last buffer hardware descriptor points back to the - * first. - */ -}; -#include <plat/dma-core.h> +#include <linux/amba/pl08x.h> +#include <plat/dma-ops.h> #endif /* __ASM_ARCH_IRQ_H */ diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio.h b/arch/arm/mach-s3c64xx/include/mach/gpio-samsung.h index 8b540c4..9c81fac 100644 --- a/arch/arm/mach-s3c64xx/include/mach/gpio.h +++ b/arch/arm/mach-s3c64xx/include/mach/gpio-samsung.h @@ -1,5 +1,4 @@ -/* arch/arm/mach-s3c6400/include/mach/gpio.h - * +/* * Copyright 2008 Openmoko, Inc. * Copyright 2008 Simtec Electronics * http://armlinux.simtec.co.uk/ @@ -12,6 +11,9 @@ * published by the Free Software Foundation. 
*/ +#ifndef GPIO_SAMSUNG_S3C64XX_H +#define GPIO_SAMSUNG_S3C64XX_H + /* GPIO bank sizes */ #define S3C64XX_GPIO_A_NR (8) #define S3C64XX_GPIO_B_NR (7) @@ -88,6 +90,5 @@ enum s3c_gpio_number { /* define the number of gpios we need to the one after the GPQ() range */ #define GPIO_BOARD_START (S3C64XX_GPQ(S3C64XX_GPIO_Q_NR) + 1) -#define BOARD_NR_GPIOS (16 + CONFIG_SAMSUNG_GPIO_EXTRA) +#endif /* GPIO_SAMSUNG_S3C64XX_H */ -#define ARCH_NR_GPIOS (GPIO_BOARD_START + BOARD_NR_GPIOS) diff --git a/arch/arm/mach-s3c64xx/mach-anw6410.c b/arch/arm/mach-s3c64xx/mach-anw6410.c index d266dd5..ddeb0e5 100644 --- a/arch/arm/mach-s3c64xx/mach-anw6410.c +++ b/arch/arm/mach-s3c64xx/mach-anw6410.c @@ -49,6 +49,7 @@ #include <plat/devs.h> #include <plat/cpu.h> #include <mach/regs-gpio.h> +#include <mach/gpio-samsung.h> #include <plat/samsung-time.h> #include "common.h" diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c index 758e31b..3df3c37 100644 --- a/arch/arm/mach-s3c64xx/mach-crag6410.c +++ b/arch/arm/mach-s3c64xx/mach-crag6410.c @@ -48,8 +48,8 @@ #include <video/samsung_fimd.h> #include <mach/hardware.h> #include <mach/map.h> - #include <mach/regs-gpio.h> +#include <mach/gpio-samsung.h> #include <plat/regs-serial.h> #include <plat/fb.h> diff --git a/arch/arm/mach-s3c64xx/mach-hmt.c b/arch/arm/mach-s3c64xx/mach-hmt.c index 614a03a..0431016 100644 --- a/arch/arm/mach-s3c64xx/mach-hmt.c +++ b/arch/arm/mach-s3c64xx/mach-hmt.c @@ -35,6 +35,7 @@ #include <plat/regs-serial.h> #include <linux/platform_data/i2c-s3c2410.h> +#include <mach/gpio-samsung.h> #include <plat/fb.h> #include <linux/platform_data/mtd-nand-s3c2410.h> diff --git a/arch/arm/mach-s3c64xx/mach-mini6410.c b/arch/arm/mach-s3c64xx/mach-mini6410.c index 58d46a3..8c84d34 100644 --- a/arch/arm/mach-s3c64xx/mach-mini6410.c +++ b/arch/arm/mach-s3c64xx/mach-mini6410.c @@ -30,6 +30,7 @@ #include <mach/map.h> #include <mach/regs-gpio.h> +#include <mach/gpio-samsung.h> #include <plat/adc.h> #include <plat/cpu.h> diff --git a/arch/arm/mach-s3c64xx/mach-real6410.c b/arch/arm/mach-s3c64xx/mach-real6410.c index 8bed37b..5152026 100644 --- a/arch/arm/mach-s3c64xx/mach-real6410.c +++ b/arch/arm/mach-s3c64xx/mach-real6410.c @@ -31,6 +31,7 @@ #include <mach/map.h> #include <mach/regs-gpio.h> +#include <mach/gpio-samsung.h> #include <plat/adc.h> #include <plat/cpu.h> diff --git a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c index 7eb9a10..2fddf38 100644 --- a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c +++ b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c @@ -8,8 +8,6 @@ * published by the Free Software Foundation. 
*/ -#include <linux/clk-provider.h> -#include <linux/irqchip.h> #include <linux/of_platform.h> #include <asm/mach/arch.h> @@ -48,15 +46,9 @@ static void __init s3c64xx_dt_map_io(void) panic("SoC is not S3C64xx!"); } -static void __init s3c64xx_dt_init_irq(void) -{ - of_clk_init(NULL); - samsung_wdt_reset_of_init(); - irqchip_init(); -}; - static void __init s3c64xx_dt_init_machine(void) { + samsung_wdt_reset_of_init(); of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); } @@ -79,7 +71,6 @@ DT_MACHINE_START(S3C6400_DT, "Samsung S3C64xx (Flattened Device Tree)") /* Maintainer: Tomasz Figa <tomasz.figa@gmail.com> */ .dt_compat = s3c64xx_dt_compat, .map_io = s3c64xx_dt_map_io, - .init_irq = s3c64xx_dt_init_irq, .init_machine = s3c64xx_dt_init_machine, .restart = s3c64xx_dt_restart, MACHINE_END diff --git a/arch/arm/mach-s3c64xx/mach-smartq.c b/arch/arm/mach-s3c64xx/mach-smartq.c index a6b338f..5629df9 100644 --- a/arch/arm/mach-s3c64xx/mach-smartq.c +++ b/arch/arm/mach-s3c64xx/mach-smartq.c @@ -25,6 +25,7 @@ #include <mach/map.h> #include <mach/regs-gpio.h> +#include <mach/gpio-samsung.h> #include <plat/clock.h> #include <plat/cpu.h> diff --git a/arch/arm/mach-s3c64xx/mach-smartq5.c b/arch/arm/mach-s3c64xx/mach-smartq5.c index 8aca5da..dec4c08 100644 --- a/arch/arm/mach-s3c64xx/mach-smartq5.c +++ b/arch/arm/mach-s3c64xx/mach-smartq5.c @@ -23,6 +23,7 @@ #include <video/samsung_fimd.h> #include <mach/map.h> #include <mach/regs-gpio.h> +#include <mach/gpio-samsung.h> #include <plat/cpu.h> #include <plat/devs.h> diff --git a/arch/arm/mach-s3c64xx/mach-smartq7.c b/arch/arm/mach-s3c64xx/mach-smartq7.c index a052e10..27b3220 100644 --- a/arch/arm/mach-s3c64xx/mach-smartq7.c +++ b/arch/arm/mach-s3c64xx/mach-smartq7.c @@ -23,6 +23,7 @@ #include <video/samsung_fimd.h> #include <mach/map.h> #include <mach/regs-gpio.h> +#include <mach/gpio-samsung.h> #include <plat/cpu.h> #include <plat/devs.h> diff --git a/arch/arm/mach-s3c64xx/mach-smdk6400.c b/arch/arm/mach-s3c64xx/mach-smdk6400.c index 27381cf..150f55f 100644 --- a/arch/arm/mach-s3c64xx/mach-smdk6400.c +++ b/arch/arm/mach-s3c64xx/mach-smdk6400.c @@ -35,6 +35,7 @@ #include <plat/devs.h> #include <plat/cpu.h> #include <linux/platform_data/i2c-s3c2410.h> +#include <mach/gpio-samsung.h> #include <plat/samsung-time.h> #include "common.h" diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c index d5ea938..43261d2 100644 --- a/arch/arm/mach-s3c64xx/mach-smdk6410.c +++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c @@ -57,6 +57,7 @@ #include <plat/regs-serial.h> #include <mach/regs-gpio.h> +#include <mach/gpio-samsung.h> #include <linux/platform_data/ata-samsung_cf.h> #include <linux/platform_data/i2c-s3c2410.h> #include <plat/fb.h> diff --git a/arch/arm/mach-s3c64xx/pl080.c b/arch/arm/mach-s3c64xx/pl080.c new file mode 100644 index 0000000..901a984 --- /dev/null +++ b/arch/arm/mach-s3c64xx/pl080.c @@ -0,0 +1,244 @@ +/* + * Samsung's S3C64XX generic DMA support using amba-pl08x driver. + * + * Copyright (c) 2013 Tomasz Figa <tomasz.figa@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/kernel.h> +#include <linux/amba/bus.h> +#include <linux/amba/pl080.h> +#include <linux/amba/pl08x.h> +#include <linux/of.h> + +#include <mach/irqs.h> +#include <mach/map.h> + +#include "regs-sys.h" + +static int pl08x_get_xfer_signal(const struct pl08x_channel_data *cd) +{ + return cd->min_signal; +} + +static void pl08x_put_xfer_signal(const struct pl08x_channel_data *cd, int ch) +{ +} + +/* + * DMA0 + */ + +static struct pl08x_channel_data s3c64xx_dma0_info[] = { + { + .bus_id = "uart0_tx", + .min_signal = 0, + .max_signal = 0, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "uart0_rx", + .min_signal = 1, + .max_signal = 1, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "uart1_tx", + .min_signal = 2, + .max_signal = 2, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "uart1_rx", + .min_signal = 3, + .max_signal = 3, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "uart2_tx", + .min_signal = 4, + .max_signal = 4, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "uart2_rx", + .min_signal = 5, + .max_signal = 5, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "uart3_tx", + .min_signal = 6, + .max_signal = 6, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "uart3_rx", + .min_signal = 7, + .max_signal = 7, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "pcm0_tx", + .min_signal = 8, + .max_signal = 8, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "pcm0_rx", + .min_signal = 9, + .max_signal = 9, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "i2s0_tx", + .min_signal = 10, + .max_signal = 10, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "i2s0_rx", + .min_signal = 11, + .max_signal = 11, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "spi0_tx", + .min_signal = 12, + .max_signal = 12, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "spi0_rx", + .min_signal = 13, + .max_signal = 13, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "i2s2_tx", + .min_signal = 14, + .max_signal = 14, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "i2s2_rx", + .min_signal = 15, + .max_signal = 15, + .periph_buses = PL08X_AHB2, + } +}; + +struct pl08x_platform_data s3c64xx_dma0_plat_data = { + .memcpy_channel = { + .bus_id = "memcpy", + .cctl_memcpy = + (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT | + PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT | + PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | + PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | + PL080_CONTROL_PROT_BUFF | PL080_CONTROL_PROT_CACHE | + PL080_CONTROL_PROT_SYS), + }, + .lli_buses = PL08X_AHB1, + .mem_buses = PL08X_AHB1, + .get_xfer_signal = pl08x_get_xfer_signal, + .put_xfer_signal = pl08x_put_xfer_signal, + .slave_channels = s3c64xx_dma0_info, + .num_slave_channels = ARRAY_SIZE(s3c64xx_dma0_info), +}; + +static AMBA_AHB_DEVICE(s3c64xx_dma0, "dma-pl080s.0", 0, + 0x75000000, {IRQ_DMA0}, &s3c64xx_dma0_plat_data); + +/* + * DMA1 + */ + +static struct pl08x_channel_data s3c64xx_dma1_info[] = { + { + .bus_id = "pcm1_tx", + .min_signal = 0, + .max_signal = 0, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "pcm1_rx", + .min_signal = 1, + .max_signal = 1, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "i2s1_tx", + .min_signal = 2, + .max_signal = 2, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "i2s1_rx", + .min_signal = 3, + .max_signal = 3, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "spi1_tx", + .min_signal = 4, + .max_signal = 4, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "spi1_rx", + .min_signal = 5, + .max_signal = 5, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "ac97_out", + 
.min_signal = 6, + .max_signal = 6, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "ac97_in", + .min_signal = 7, + .max_signal = 7, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "ac97_mic", + .min_signal = 8, + .max_signal = 8, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "pwm", + .min_signal = 9, + .max_signal = 9, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "irda", + .min_signal = 10, + .max_signal = 10, + .periph_buses = PL08X_AHB2, + }, { + .bus_id = "external", + .min_signal = 11, + .max_signal = 11, + .periph_buses = PL08X_AHB2, + }, +}; + +struct pl08x_platform_data s3c64xx_dma1_plat_data = { + .memcpy_channel = { + .bus_id = "memcpy", + .cctl_memcpy = + (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT | + PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT | + PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | + PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | + PL080_CONTROL_PROT_BUFF | PL080_CONTROL_PROT_CACHE | + PL080_CONTROL_PROT_SYS), + }, + .lli_buses = PL08X_AHB1, + .mem_buses = PL08X_AHB1, + .get_xfer_signal = pl08x_get_xfer_signal, + .put_xfer_signal = pl08x_put_xfer_signal, + .slave_channels = s3c64xx_dma1_info, + .num_slave_channels = ARRAY_SIZE(s3c64xx_dma1_info), +}; + +static AMBA_AHB_DEVICE(s3c64xx_dma1, "dma-pl080s.1", 0, + 0x75100000, {IRQ_DMA1}, &s3c64xx_dma1_plat_data); + +static int __init s3c64xx_pl080_init(void) +{ + /* Set all DMA configuration to be DMA, not SDMA */ + writel(0xffffff, S3C64XX_SDMA_SEL); + + if (of_have_populated_dt()) + return 0; + + amba_device_register(&s3c64xx_dma0_device, &iomem_resource); + amba_device_register(&s3c64xx_dma1_device, &iomem_resource); + + return 0; +} +arch_initcall(s3c64xx_pl080_init); diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c index 8cdb824..b5a6698 100644 --- a/arch/arm/mach-s3c64xx/pm.c +++ b/arch/arm/mach-s3c64xx/pm.c @@ -28,6 +28,7 @@ #include <mach/regs-gpio.h> #include <mach/regs-clock.h> +#include <mach/gpio-samsung.h> #include "regs-gpio-memport.h" #include "regs-modem.h" diff --git a/arch/arm/mach-s3c64xx/setup-fb-24bpp.c b/arch/arm/mach-s3c64xx/setup-fb-24bpp.c index 2cf8002..9d17bff 100644 --- a/arch/arm/mach-s3c64xx/setup-fb-24bpp.c +++ b/arch/arm/mach-s3c64xx/setup-fb-24bpp.c @@ -19,6 +19,7 @@ #include <plat/fb.h> #include <plat/gpio-cfg.h> +#include <mach/gpio-samsung.h> void s3c64xx_fb_gpio_setup_24bpp(void) { diff --git a/arch/arm/mach-s3c64xx/setup-i2c0.c b/arch/arm/mach-s3c64xx/setup-i2c0.c index 40666ba..4b8c1cf 100644 --- a/arch/arm/mach-s3c64xx/setup-i2c0.c +++ b/arch/arm/mach-s3c64xx/setup-i2c0.c @@ -20,6 +20,7 @@ struct platform_device; /* don't need the contents */ #include <linux/platform_data/i2c-s3c2410.h> #include <plat/gpio-cfg.h> +#include <mach/gpio-samsung.h> void s3c_i2c0_cfg_gpio(struct platform_device *dev) { diff --git a/arch/arm/mach-s3c64xx/setup-i2c1.c b/arch/arm/mach-s3c64xx/setup-i2c1.c index 3fdb24c..cd1df71 100644 --- a/arch/arm/mach-s3c64xx/setup-i2c1.c +++ b/arch/arm/mach-s3c64xx/setup-i2c1.c @@ -20,6 +20,7 @@ struct platform_device; /* don't need the contents */ #include <linux/platform_data/i2c-s3c2410.h> #include <plat/gpio-cfg.h> +#include <mach/gpio-samsung.h> void s3c_i2c1_cfg_gpio(struct platform_device *dev) { diff --git a/arch/arm/mach-s3c64xx/setup-ide.c b/arch/arm/mach-s3c64xx/setup-ide.c index 648d8b8..689fb72 100644 --- a/arch/arm/mach-s3c64xx/setup-ide.c +++ b/arch/arm/mach-s3c64xx/setup-ide.c @@ -17,6 +17,7 @@ #include <mach/map.h> #include <mach/regs-clock.h> #include <plat/gpio-cfg.h> +#include <mach/gpio-samsung.h> #include 
<linux/platform_data/ata-samsung_cf.h> void s3c64xx_ide_setup_gpio(void) diff --git a/arch/arm/mach-s3c64xx/setup-keypad.c b/arch/arm/mach-s3c64xx/setup-keypad.c index 1d4d0ee..6ad9a89 100644 --- a/arch/arm/mach-s3c64xx/setup-keypad.c +++ b/arch/arm/mach-s3c64xx/setup-keypad.c @@ -13,6 +13,7 @@ #include <linux/gpio.h> #include <plat/gpio-cfg.h> #include <plat/keypad.h> +#include <mach/gpio-samsung.h> void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols) { diff --git a/arch/arm/mach-s3c64xx/setup-sdhci-gpio.c b/arch/arm/mach-s3c64xx/setup-sdhci-gpio.c index 6eac071..f426b7a 100644 --- a/arch/arm/mach-s3c64xx/setup-sdhci-gpio.c +++ b/arch/arm/mach-s3c64xx/setup-sdhci-gpio.c @@ -20,6 +20,7 @@ #include <plat/gpio-cfg.h> #include <plat/sdhci.h> +#include <mach/gpio-samsung.h> void s3c64xx_setup_sdhci0_cfg_gpio(struct platform_device *dev, int width) { diff --git a/arch/arm/mach-s3c64xx/setup-spi.c b/arch/arm/mach-s3c64xx/setup-spi.c index 4dc5345..5fd1a31 100644 --- a/arch/arm/mach-s3c64xx/setup-spi.c +++ b/arch/arm/mach-s3c64xx/setup-spi.c @@ -10,6 +10,7 @@ #include <linux/gpio.h> #include <plat/gpio-cfg.h> +#include <mach/gpio-samsung.h> #ifdef CONFIG_S3C64XX_DEV_SPI0 int s3c64xx_spi0_cfg_gpio(void) diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c index 958e3cb..8ea87bd 100644 --- a/arch/arm/mach-shmobile/board-armadillo800eva.c +++ b/arch/arm/mach-shmobile/board-armadillo800eva.c @@ -483,7 +483,7 @@ static struct platform_device lcdc0_device = { .id = 0, .dev = { .platform_data = &lcdc0_info, - .coherent_dma_mask = ~0, + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; @@ -580,7 +580,7 @@ static struct platform_device hdmi_lcdc_device = { .id = 1, .dev = { .platform_data = &hdmi_lcdc_info, - .coherent_dma_mask = ~0, + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; @@ -614,6 +614,11 @@ static struct regulator_consumer_supply fixed3v3_power_consumers[] = { REGULATOR_SUPPLY("vqmmc", "sh_mmcif"), }; +/* Fixed 3.3V regulator used by LCD backlight */ +static struct regulator_consumer_supply fixed5v0_power_consumers[] = { + REGULATOR_SUPPLY("power", "pwm-backlight.0"), +}; + /* Fixed 3.3V regulator to be used by SDHI0 */ static struct regulator_consumer_supply vcc_sdhi0_consumers[] = { REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), @@ -1196,6 +1201,8 @@ static void __init eva_init(void) regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers, ARRAY_SIZE(fixed3v3_power_consumers), 3300000); + regulator_register_always_on(3, "fixed-5.0V", fixed5v0_power_consumers, + ARRAY_SIZE(fixed5v0_power_consumers), 5000000); pinctrl_register_mappings(eva_pinctrl_map, ARRAY_SIZE(eva_pinctrl_map)); pwm_add_table(pwm_lookup, ARRAY_SIZE(pwm_lookup)); diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c index 38611526..3c4995a 100644 --- a/arch/arm/mach-shmobile/board-bockw.c +++ b/arch/arm/mach-shmobile/board-bockw.c @@ -679,7 +679,7 @@ static void __init bockw_init(void) .id = i, .data = &rsnd_card_info[i], .size_data = sizeof(struct asoc_simple_card_info), - .dma_mask = ~0, + .dma_mask = DMA_BIT_MASK(32), }; platform_device_register_full(&cardinfo); diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c index fe689b7..bc40b85 100644 --- a/arch/arm/mach-shmobile/board-kzm9g.c +++ b/arch/arm/mach-shmobile/board-kzm9g.c @@ -334,7 +334,7 @@ static struct platform_device lcdc_device = { .resource = lcdc_resources, .dev = { .platform_data = &lcdc_info, - 
.coherent_dma_mask = ~0, + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c index a8d3ce6..e0406fd 100644 --- a/arch/arm/mach-shmobile/board-lager.c +++ b/arch/arm/mach-shmobile/board-lager.c @@ -245,7 +245,9 @@ static void __init lager_init(void) { lager_add_standard_devices(); - phy_register_fixup_for_id("r8a7790-ether-ff:01", lager_ksz8041_fixup); + if (IS_ENABLED(CONFIG_PHYLIB)) + phy_register_fixup_for_id("r8a7790-ether-ff:01", + lager_ksz8041_fixup); } static const char * const lager_boards_compat_dt[] __initconst = { diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index af06753..e721d2c 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c @@ -409,7 +409,7 @@ static struct platform_device lcdc_device = { .resource = lcdc_resources, .dev = { .platform_data = &lcdc_info, - .coherent_dma_mask = ~0, + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; @@ -499,7 +499,7 @@ static struct platform_device hdmi_lcdc_device = { .id = 1, .dev = { .platform_data = &hdmi_lcdc_info, - .coherent_dma_mask = ~0, + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; diff --git a/arch/arm/mach-socfpga/Kconfig b/arch/arm/mach-socfpga/Kconfig index 037100a..aee77f0 100644 --- a/arch/arm/mach-socfpga/Kconfig +++ b/arch/arm/mach-socfpga/Kconfig @@ -10,6 +10,7 @@ config ARCH_SOCFPGA select GENERIC_CLOCKEVENTS select GPIO_PL061 if GPIOLIB select HAVE_ARM_SCU + select HAVE_ARM_TWD if SMP select HAVE_SMP select MFD_SYSCON select SPARSE_IRQ diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig index c9e72c8..bce0d42 100644 --- a/arch/arm/mach-sunxi/Kconfig +++ b/arch/arm/mach-sunxi/Kconfig @@ -12,3 +12,4 @@ config ARCH_SUNXI select PINCTRL_SUNXI select SPARSE_IRQ select SUN4I_TIMER + select SUN5I_HSTIMER diff --git a/arch/arm/mach-tegra/board-paz00.c b/arch/arm/mach-tegra/board-paz00.c index 06f0240..e4dec9f 100644 --- a/arch/arm/mach-tegra/board-paz00.c +++ b/arch/arm/mach-tegra/board-paz00.c @@ -18,6 +18,7 @@ */ #include <linux/platform_device.h> +#include <linux/gpio/driver.h> #include <linux/rfkill-gpio.h> #include "board.h" @@ -36,7 +37,17 @@ static struct platform_device wifi_rfkill_device = { }, }; +static struct gpiod_lookup_table wifi_gpio_lookup = { + .dev_id = "rfkill_gpio", + .table = { + GPIO_LOOKUP_IDX("tegra-gpio", 25, NULL, 0, 0), + GPIO_LOOKUP_IDX("tegra-gpio", 85, NULL, 1, 0), + { }, + }, +}; + void __init tegra_paz00_wifikill_init(void) { + gpiod_add_lookup_table(&wifi_gpio_lookup); platform_device_register(&wifi_rfkill_device); } diff --git a/arch/arm/mach-tegra/fuse.c b/arch/arm/mach-tegra/fuse.c index d4639c5..3a9c1f1 100644 --- a/arch/arm/mach-tegra/fuse.c +++ b/arch/arm/mach-tegra/fuse.c @@ -198,10 +198,12 @@ void __init tegra_init_fuse(void) switch (tegra_chip_id) { case TEGRA20: tegra20_fuse_init_randomness(); + break; case TEGRA30: case TEGRA114: default: tegra30_fuse_init_randomness(); + break; } pr_info("Tegra Revision: %s SKU: %d CPU Process: %d Core Process: %d\n", @@ -209,13 +211,3 @@ void __init tegra_init_fuse(void) tegra_sku_id, tegra_cpu_process_id, tegra_core_process_id); } - -unsigned long long tegra_chip_uid(void) -{ - unsigned long long lo, hi; - - lo = tegra_fuse_readl(FUSE_UID_LOW); - hi = tegra_fuse_readl(FUSE_UID_HIGH); - return (hi << 32ull) | lo; -} -EXPORT_SYMBOL(tegra_chip_uid); diff --git a/arch/arm/mach-ux500/board-mop500-audio.c b/arch/arm/mach-ux500/board-mop500-audio.c index 
154e15f..43d6cb8 100644 --- a/arch/arm/mach-ux500/board-mop500-audio.c +++ b/arch/arm/mach-ux500/board-mop500-audio.c @@ -31,7 +31,7 @@ static struct stedma40_chan_cfg msp0_dma_tx = { }; struct msp_i2s_platform_data msp0_platform_data = { - .id = MSP_I2S_0, + .id = 0, .msp_i2s_dma_rx = &msp0_dma_rx, .msp_i2s_dma_tx = &msp0_dma_tx, }; @@ -49,7 +49,7 @@ static struct stedma40_chan_cfg msp1_dma_tx = { }; struct msp_i2s_platform_data msp1_platform_data = { - .id = MSP_I2S_1, + .id = 1, .msp_i2s_dma_rx = NULL, .msp_i2s_dma_tx = &msp1_dma_tx, }; @@ -69,13 +69,13 @@ static struct stedma40_chan_cfg msp2_dma_tx = { }; struct msp_i2s_platform_data msp2_platform_data = { - .id = MSP_I2S_2, + .id = 2, .msp_i2s_dma_rx = &msp2_dma_rx, .msp_i2s_dma_tx = &msp2_dma_tx, }; struct msp_i2s_platform_data msp3_platform_data = { - .id = MSP_I2S_3, + .id = 3, .msp_i2s_dma_rx = &msp1_dma_rx, .msp_i2s_dma_tx = NULL, }; diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c index 2e85c1e..12c7e5c 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ b/arch/arm/mach-ux500/cpu-db8500.c @@ -140,6 +140,10 @@ static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = { /* Requires call-back bindings. */ OF_DEV_AUXDATA("arm,cortex-a9-pmu", 0, "arm-pmu", &db8500_pmu_platdata), /* Requires DMA bindings. */ + OF_DEV_AUXDATA("arm,pl18x", 0x80126000, "sdi0", &mop500_sdi0_data), + OF_DEV_AUXDATA("arm,pl18x", 0x80118000, "sdi1", &mop500_sdi1_data), + OF_DEV_AUXDATA("arm,pl18x", 0x80005000, "sdi2", &mop500_sdi2_data), + OF_DEV_AUXDATA("arm,pl18x", 0x80114000, "sdi4", &mop500_sdi4_data), OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80123000, "ux500-msp-i2s.0", &msp0_platform_data), OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80124000, diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c index 033d34d..c26ef5b 100644 --- a/arch/arm/mach-vexpress/spc.c +++ b/arch/arm/mach-vexpress/spc.c @@ -53,6 +53,11 @@ #define A15_BX_ADDR0 0x68 #define A7_BX_ADDR0 0x78 +/* SPC CPU/cluster reset statue */ +#define STANDBYWFI_STAT 0x3c +#define STANDBYWFI_STAT_A15_CPU_MASK(cpu) (1 << (cpu)) +#define STANDBYWFI_STAT_A7_CPU_MASK(cpu) (1 << (3 + (cpu))) + /* SPC system config interface registers */ #define SYSCFG_WDATA 0x70 #define SYSCFG_RDATA 0x74 @@ -213,6 +218,41 @@ void ve_spc_powerdown(u32 cluster, bool enable) writel_relaxed(enable, info->baseaddr + pwdrn_reg); } +static u32 standbywfi_cpu_mask(u32 cpu, u32 cluster) +{ + return cluster_is_a15(cluster) ? + STANDBYWFI_STAT_A15_CPU_MASK(cpu) + : STANDBYWFI_STAT_A7_CPU_MASK(cpu); +} + +/** + * ve_spc_cpu_in_wfi(u32 cpu, u32 cluster) + * + * @cpu: mpidr[7:0] bitfield describing CPU affinity level within cluster + * @cluster: mpidr[15:8] bitfield describing cluster affinity level + * + * @return: non-zero if and only if the specified CPU is in WFI + * + * Take care when interpreting the result of this function: a CPU might + * be in WFI temporarily due to idle, and is not necessarily safely + * parked. 
+ */ +int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster) +{ + int ret; + u32 mask = standbywfi_cpu_mask(cpu, cluster); + + if (cluster >= MAX_CLUSTERS) + return 1; + + ret = readl_relaxed(info->baseaddr + STANDBYWFI_STAT); + + pr_debug("%s: PCFGREG[0x%X] = 0x%08X, mask = 0x%X\n", + __func__, STANDBYWFI_STAT, ret, mask); + + return ret & mask; +} + static int ve_spc_get_performance(int cluster, u32 *freq) { struct ve_spc_opp *opps = info->opps[cluster]; diff --git a/arch/arm/mach-vexpress/spc.h b/arch/arm/mach-vexpress/spc.h index dbd44c3..793d065 100644 --- a/arch/arm/mach-vexpress/spc.h +++ b/arch/arm/mach-vexpress/spc.h @@ -20,5 +20,6 @@ void ve_spc_global_wakeup_irq(bool set); void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set); void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr); void ve_spc_powerdown(u32 cluster, bool enable); +int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster); #endif diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c index 05a364c50..29e7785 100644 --- a/arch/arm/mach-vexpress/tc2_pm.c +++ b/arch/arm/mach-vexpress/tc2_pm.c @@ -12,6 +12,7 @@ * published by the Free Software Foundation. */ +#include <linux/delay.h> #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> @@ -32,11 +33,17 @@ #include "spc.h" /* SCC conf registers */ +#define RESET_CTRL 0x018 +#define RESET_A15_NCORERESET(cpu) (1 << (2 + (cpu))) +#define RESET_A7_NCORERESET(cpu) (1 << (16 + (cpu))) + #define A15_CONF 0x400 #define A7_CONF 0x500 #define SYS_INFO 0x700 #define SPC_BASE 0xb00 +static void __iomem *scc; + /* * We can't use regular spinlocks. In the switcher case, it is possible * for an outbound CPU to call power_down() after its inbound counterpart @@ -190,6 +197,55 @@ static void tc2_pm_power_down(void) tc2_pm_down(0); } +static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster) +{ + u32 mask = cluster ? + RESET_A7_NCORERESET(cpu) + : RESET_A15_NCORERESET(cpu); + + return !(readl_relaxed(scc + RESET_CTRL) & mask); +} + +#define POLL_MSEC 10 +#define TIMEOUT_MSEC 1000 + +static int tc2_pm_power_down_finish(unsigned int cpu, unsigned int cluster) +{ + unsigned tries; + + pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); + BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER); + + for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) { + /* + * Only examine the hardware state if the target CPU has + * caught up at least as far as tc2_pm_down(): + */ + if (ACCESS_ONCE(tc2_pm_use_count[cpu][cluster]) == 0) { + pr_debug("%s(cpu=%u, cluster=%u): RESET_CTRL = 0x%08X\n", + __func__, cpu, cluster, + readl_relaxed(scc + RESET_CTRL)); + + /* + * We need the CPU to reach WFI, but the power + * controller may put the cluster in reset and + * power it off as soon as that happens, before + * we have a chance to see STANDBYWFI. 
+ * + * So we need to check for both conditions: + */ + if (tc2_core_in_reset(cpu, cluster) || + ve_spc_cpu_in_wfi(cpu, cluster)) + return 0; /* success: the CPU is halted */ + } + + /* Otherwise, wait and retry: */ + msleep(POLL_MSEC); + } + + return -ETIMEDOUT; /* timeout */ +} + static void tc2_pm_suspend(u64 residency) { unsigned int mpidr, cpu, cluster; @@ -232,10 +288,11 @@ static void tc2_pm_powered_up(void) } static const struct mcpm_platform_ops tc2_pm_power_ops = { - .power_up = tc2_pm_power_up, - .power_down = tc2_pm_power_down, - .suspend = tc2_pm_suspend, - .powered_up = tc2_pm_powered_up, + .power_up = tc2_pm_power_up, + .power_down = tc2_pm_power_down, + .power_down_finish = tc2_pm_power_down_finish, + .suspend = tc2_pm_suspend, + .powered_up = tc2_pm_powered_up, }; static bool __init tc2_pm_usage_count_init(void) @@ -269,7 +326,6 @@ static void __naked tc2_pm_power_up_setup(unsigned int affinity_level) static int __init tc2_pm_init(void) { int ret, irq; - void __iomem *scc; u32 a15_cluster_id, a7_cluster_id, sys_info; struct device_node *np; diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 79f8b39..f61a570 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -9,6 +9,7 @@ * * DMA uncached mapping support. */ +#include <linux/bootmem.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/gfp.h> @@ -157,6 +158,44 @@ struct dma_map_ops arm_coherent_dma_ops = { }; EXPORT_SYMBOL(arm_coherent_dma_ops); +static int __dma_supported(struct device *dev, u64 mask, bool warn) +{ + unsigned long max_dma_pfn; + + /* + * If the mask allows for more memory than we can address, + * and we actually have that much memory, then we must + * indicate that DMA to this device is not supported. + */ + if (sizeof(mask) != sizeof(dma_addr_t) && + mask > (dma_addr_t)~0 && + dma_to_pfn(dev, ~0) < max_pfn) { + if (warn) { + dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n", + mask); + dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n"); + } + return 0; + } + + max_dma_pfn = min(max_pfn, arm_dma_pfn_limit); + + /* + * Translate the device's DMA mask to a PFN limit. This + * PFN number includes the page which we can DMA to. + */ + if (dma_to_pfn(dev, mask) < max_dma_pfn) { + if (warn) + dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n", + mask, + dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1, + max_dma_pfn + 1); + return 0; + } + + return 1; +} + static u64 get_coherent_dma_mask(struct device *dev) { u64 mask = (u64)DMA_BIT_MASK(32); @@ -173,32 +212,8 @@ static u64 get_coherent_dma_mask(struct device *dev) return 0; } - /* - * If the mask allows for more memory than we can address, - * and we actually have that much memory, then fail the - * allocation. - */ - if (sizeof(mask) != sizeof(dma_addr_t) && - mask > (dma_addr_t)~0 && - dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) { - dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n", - mask); - dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n"); - return 0; - } - - /* - * Now check that the mask, when translated to a PFN, - * fits within the allowable addresses which we can - * allocate. 
- */ - if (dma_to_pfn(dev, mask) < arm_dma_pfn_limit) { - dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n", - mask, - dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1, - arm_dma_pfn_limit + 1); + if (!__dma_supported(dev, mask, true)) return 0; - } } return mask; @@ -1027,28 +1042,7 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, */ int dma_supported(struct device *dev, u64 mask) { - unsigned long limit; - - /* - * If the mask allows for more memory than we can address, - * and we actually have that much memory, then we must - * indicate that DMA to this device is not supported. - */ - if (sizeof(mask) != sizeof(dma_addr_t) && - mask > (dma_addr_t)~0 && - dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) - return 0; - - /* - * Translate the device's DMA mask to a PFN limit. This - * PFN number includes the page which we can DMA to. - */ - limit = dma_to_pfn(dev, mask); - - if (limit < arm_dma_pfn_limit) - return 0; - - return 1; + return __dma_supported(dev, mask, false); } EXPORT_SYMBOL(dma_supported); diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 6d5ba9a..3387e60 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -175,16 +175,16 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page) unsigned long i; if (cache_is_vipt_nonaliasing()) { for (i = 0; i < (1 << compound_order(page)); i++) { - void *addr = kmap_atomic(page); + void *addr = kmap_atomic(page + i); __cpuc_flush_dcache_area(addr, PAGE_SIZE); kunmap_atomic(addr); } } else { for (i = 0; i < (1 << compound_order(page)); i++) { - void *addr = kmap_high_get(page); + void *addr = kmap_high_get(page + i); if (addr) { __cpuc_flush_dcache_area(addr, PAGE_SIZE); - kunmap_high(page); + kunmap_high(page + i); } } } diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index d27158c3..5e85ed3 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -146,7 +146,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; - info.low_limit = PAGE_SIZE; + info.low_limit = FIRST_USER_ADDRESS; info.high_limit = mm->mmap_base; info.align_mask = do_align ? 
(PAGE_MASK & (SHMLBA - 1)) : 0; info.align_offset = pgoff << PAGE_SHIFT; diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c index 0acb089..1046b37 100644 --- a/arch/arm/mm/pgd.c +++ b/arch/arm/mm/pgd.c @@ -87,7 +87,8 @@ pgd_t *pgd_alloc(struct mm_struct *mm) init_pud = pud_offset(init_pgd, 0); init_pmd = pmd_offset(init_pud, 0); init_pte = pte_offset_map(init_pmd, 0); - set_pte_ext(new_pte, *init_pte, 0); + set_pte_ext(new_pte + 0, init_pte[0], 0); + set_pte_ext(new_pte + 1, init_pte[1], 0); pte_unmap(init_pte); pte_unmap(new_pte); } diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 9ed155a..271b5e9 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -641,10 +641,10 @@ load_ind: emit(ARM_MUL(r_A, r_A, r_X), ctx); break; case BPF_S_ALU_DIV_K: - /* current k == reciprocal_value(userspace k) */ + if (k == 1) + break; emit_mov_i(r_scratch, k, ctx); - /* A = top 32 bits of the product */ - emit(ARM_UMULL(r_scratch, r_A, r_A, r_scratch), ctx); + emit_udiv(r_A, r_A, r_scratch, ctx); break; case BPF_S_ALU_DIV_X: update_on_xread(ctx); diff --git a/arch/arm/plat-omap/include/plat/dmtimer.h b/arch/arm/plat-omap/include/plat/dmtimer.h index fb92abb..2861b15 100644 --- a/arch/arm/plat-omap/include/plat/dmtimer.h +++ b/arch/arm/plat-omap/include/plat/dmtimer.h @@ -336,8 +336,11 @@ static inline void __omap_dm_timer_enable_posted(struct omap_dm_timer *timer) if (timer->posted) return; - if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) + if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) { + timer->posted = OMAP_TIMER_NONPOSTED; + __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0, 0); return; + } __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, OMAP_TIMER_CTRL_POSTED, 0); diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c index 99a3590..ac07e87 100644 --- a/arch/arm/plat-samsung/devs.c +++ b/arch/arm/plat-samsung/devs.c @@ -1468,6 +1468,8 @@ void __init s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi0_cfg_gpio; #if defined(CONFIG_PL330_DMA) pd.filter = pl330_filter; +#elif defined(CONFIG_S3C64XX_PL080) + pd.filter = pl08x_filter_id; #elif defined(CONFIG_S3C24XX_DMAC) pd.filter = s3c24xx_dma_filter; #endif @@ -1509,8 +1511,10 @@ void __init s3c64xx_spi1_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, pd.num_cs = num_cs; pd.src_clk_nr = src_clk_nr; pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi1_cfg_gpio; -#ifdef CONFIG_PL330_DMA +#if defined(CONFIG_PL330_DMA) pd.filter = pl330_filter; +#elif defined(CONFIG_S3C64XX_PL080) + pd.filter = pl08x_filter_id; #endif s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi1); @@ -1550,8 +1554,10 @@ void __init s3c64xx_spi2_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, pd.num_cs = num_cs; pd.src_clk_nr = src_clk_nr; pd.cfg_gpio = (cfg_gpio) ? 
cfg_gpio : s3c64xx_spi2_cfg_gpio; -#ifdef CONFIG_PL330_DMA +#if defined(CONFIG_PL330_DMA) pd.filter = pl330_filter; +#elif defined(CONFIG_S3C64XX_PL080) + pd.filter = pl08x_filter_id; #endif s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi2); diff --git a/arch/arm/plat-samsung/dma-ops.c b/arch/arm/plat-samsung/dma-ops.c index ec0d731..886326e 100644 --- a/arch/arm/plat-samsung/dma-ops.c +++ b/arch/arm/plat-samsung/dma-ops.c @@ -18,6 +18,12 @@ #include <mach/dma.h> +#if defined(CONFIG_PL330_DMA) +#define dma_filter pl330_filter +#elif defined(CONFIG_S3C64XX_PL080) +#define dma_filter pl08x_filter_id +#endif + static unsigned samsung_dmadev_request(enum dma_ch dma_ch, struct samsung_dma_req *param, struct device *dev, char *ch_name) @@ -30,7 +36,7 @@ static unsigned samsung_dmadev_request(enum dma_ch dma_ch, if (dev->of_node) return (unsigned)dma_request_slave_channel(dev, ch_name); else - return (unsigned)dma_request_channel(mask, pl330_filter, + return (unsigned)dma_request_channel(mask, dma_filter, (void *)dma_ch); } diff --git a/arch/arm/plat-samsung/pm-gpio.c b/arch/arm/plat-samsung/pm-gpio.c index a8de3cf..dd4c15d0 100644 --- a/arch/arm/plat-samsung/pm-gpio.c +++ b/arch/arm/plat-samsung/pm-gpio.c @@ -19,6 +19,10 @@ #include <linux/io.h> #include <linux/gpio.h> +#if defined(CONFIG_ARCH_S3C24XX) || defined(CONFIG_ARCH_S3C64XX) +#include <mach/gpio-samsung.h> +#endif + #include <plat/gpio-core.h> #include <plat/pm.h> diff --git a/arch/arm/plat-samsung/setup-camif.c b/arch/arm/plat-samsung/setup-camif.c index e01bf76..72d8edb 100644 --- a/arch/arm/plat-samsung/setup-camif.c +++ b/arch/arm/plat-samsung/setup-camif.c @@ -10,6 +10,7 @@ #include <linux/gpio.h> #include <plat/gpio-cfg.h> +#include <mach/gpio-samsung.h> /* Number of camera port pins, without FIELD */ #define S3C_CAMIF_NUM_GPIOS 13 diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index 83e4f95..8550123 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -96,7 +96,7 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr, struct remap_data *info = data; struct page *page = info->pages[info->index++]; unsigned long pfn = page_to_pfn(page); - pte_t pte = pfn_pte(pfn, info->prot); + pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot)); if (map_foreign_page(pfn, info->fgmfn, info->domid)) return -EFAULT; @@ -224,10 +224,10 @@ static int __init xen_guest_init(void) } if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res)) return 0; - xen_hvm_resume_frames = res.start >> PAGE_SHIFT; + xen_hvm_resume_frames = res.start; xen_events_irq = irq_of_parse_and_map(node, 0); pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n", - version, xen_events_irq, xen_hvm_resume_frames); + version, xen_events_irq, (xen_hvm_resume_frames >> PAGE_SHIFT)); xen_domain_type = XEN_HVM_DOMAIN; xen_setup_features(); diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c index 23732cd..b31ee1b2 100644 --- a/arch/arm/xen/p2m.c +++ b/arch/arm/xen/p2m.c @@ -25,8 +25,9 @@ struct xen_p2m_entry { struct rb_node rbnode_phys; }; -rwlock_t p2m_lock; +static rwlock_t p2m_lock; struct rb_root phys_to_mach = RB_ROOT; +EXPORT_SYMBOL_GPL(phys_to_mach); static struct rb_root mach_to_phys = RB_ROOT; static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new) @@ -200,7 +201,7 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) } EXPORT_SYMBOL_GPL(__set_phys_to_machine); -int p2m_init(void) +static int p2m_init(void) { rwlock_init(&p2m_lock); return 0; diff --git 
a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 88c8b6c1..dd4327f 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -2,6 +2,7 @@ config ARM64 def_bool y select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE select ARCH_USE_CMPXCHG_LOCKREF + select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_WANT_OPTIONAL_GPIOLIB select ARCH_WANT_COMPAT_IPC_PARSE_VERSION select ARCH_WANT_FRAME_POINTERS @@ -11,19 +12,27 @@ config ARM64 select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS select COMMON_CLK + select CPU_PM if (SUSPEND || CPU_IDLE) + select DCACHE_WORD_ACCESS select GENERIC_CLOCKEVENTS + select GENERIC_CLOCKEVENTS_BROADCAST if SMP select GENERIC_IOMAP select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW select GENERIC_SCHED_CLOCK select GENERIC_SMP_IDLE_THREAD + select GENERIC_STRNCPY_FROM_USER + select GENERIC_STRNLEN_USER select GENERIC_TIME_VSYSCALL select HARDIRQS_SW_RESEND + select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_TRACEHOOK select HAVE_DEBUG_BUGVERBOSE select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_API_DEBUG select HAVE_DMA_ATTRS + select HAVE_DMA_CONTIGUOUS + select HAVE_EFFICIENT_UNALIGNED_ACCESS select HAVE_GENERIC_DMA_COHERENT select HAVE_HW_BREAKPOINT if PERF_EVENTS select HAVE_MEMBLOCK @@ -159,8 +168,7 @@ config NR_CPUS range 2 32 depends on SMP # These have to remain sorted largest to smallest - default "8" if ARCH_XGENE - default "4" + default "8" config HOTPLUG_CPU bool "Support for hot-pluggable CPUs" @@ -276,6 +284,24 @@ config SYSVIPC_COMPAT endmenu +menu "Power management options" + +source "kernel/power/Kconfig" + +config ARCH_SUSPEND_POSSIBLE + def_bool y + +config ARM64_CPU_SUSPEND + def_bool PM_SLEEP + +endmenu + +menu "CPU Power Management" + +source "drivers/cpuidle/Kconfig" + +endmenu + source "net/Kconfig" source "drivers/Kconfig" diff --git a/arch/arm64/boot/dts/foundation-v8.dts b/arch/arm64/boot/dts/foundation-v8.dts index 84fcc50..4a06090 100644 --- a/arch/arm64/boot/dts/foundation-v8.dts +++ b/arch/arm64/boot/dts/foundation-v8.dts @@ -6,6 +6,8 @@ /dts-v1/; +/memreserve/ 0x80000000 0x00010000; + / { model = "Foundation-v8A"; compatible = "arm,foundation-aarch64", "arm,vexpress"; @@ -222,7 +224,7 @@ virtio_block@0130000 { compatible = "virtio,mmio"; - reg = <0x130000 0x1000>; + reg = <0x130000 0x200>; interrupts = <42>; }; }; diff --git a/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi index b45e5f3..2f2ecd2 100644 --- a/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi +++ b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi @@ -183,6 +183,12 @@ clocks = <&v2m_oscclk1>, <&v2m_clk24mhz>; clock-names = "clcdclk", "apb_pclk"; }; + + virtio_block@0130000 { + compatible = "virtio,mmio"; + reg = <0x130000 0x200>; + interrupts = <42>; + }; }; v2m_fixed_3v3: fixedregulator@0 { diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index 519f89f..d0ff25d 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -26,7 +26,6 @@ generic-y += mman.h generic-y += msgbuf.h generic-y += mutex.h generic-y += pci.h -generic-y += percpu.h generic-y += poll.h generic-y += posix_types.h generic-y += resource.h diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index d4a6333..78e20ba 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -35,10 +35,60 @@ #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() + +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); 
\ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ___p1; \ +}) + #else + #define smp_mb() asm volatile("dmb ish" : : : "memory") #define smp_rmb() asm volatile("dmb ishld" : : : "memory") #define smp_wmb() asm volatile("dmb ishst" : : : "memory") + +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + switch (sizeof(*p)) { \ + case 4: \ + asm volatile ("stlr %w1, %0" \ + : "=Q" (*p) : "r" (v) : "memory"); \ + break; \ + case 8: \ + asm volatile ("stlr %1, %0" \ + : "=Q" (*p) : "r" (v) : "memory"); \ + break; \ + } \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1; \ + compiletime_assert_atomic_type(*p); \ + switch (sizeof(*p)) { \ + case 4: \ + asm volatile ("ldar %w0, %1" \ + : "=r" (___p1) : "Q" (*p) : "memory"); \ + break; \ + case 8: \ + asm volatile ("ldar %0, %1" \ + : "=r" (___p1) : "Q" (*p) : "memory"); \ + break; \ + } \ + ___p1; \ +}) + #endif #define read_barrier_depends() do { } while(0) diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index 3914c0d..56166d7 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -158,17 +158,23 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, return ret; } -#define cmpxchg(ptr,o,n) \ - ((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \ - (unsigned long)(o), \ - (unsigned long)(n), \ - sizeof(*(ptr)))) - -#define cmpxchg_local(ptr,o,n) \ - ((__typeof__(*(ptr)))__cmpxchg((ptr), \ - (unsigned long)(o), \ - (unsigned long)(n), \ - sizeof(*(ptr)))) +#define cmpxchg(ptr, o, n) \ +({ \ + __typeof__(*(ptr)) __ret; \ + __ret = (__typeof__(*(ptr))) \ + __cmpxchg_mb((ptr), (unsigned long)(o), (unsigned long)(n), \ + sizeof(*(ptr))); \ + __ret; \ +}) + +#define cmpxchg_local(ptr, o, n) \ +({ \ + __typeof__(*(ptr)) __ret; \ + __ret = (__typeof__(*(ptr))) \ + __cmpxchg((ptr), (unsigned long)(o), \ + (unsigned long)(n), sizeof(*(ptr))); \ + __ret; \ +}) #define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n)) #define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n)) diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h index c4cdb5e..15241307 100644 --- a/arch/arm64/include/asm/cpu_ops.h +++ b/arch/arm64/include/asm/cpu_ops.h @@ -39,6 +39,9 @@ struct device_node; * from the cpu to be killed. * @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the * cpu being killed. + * @cpu_suspend: Suspends a cpu and saves the required context. May fail owing + * to wrong parameters or error conditions. Called from the + * CPU being suspended. Must be called with IRQs disabled. 
*/ struct cpu_operations { const char *name; @@ -50,6 +53,9 @@ struct cpu_operations { int (*cpu_disable)(unsigned int cpu); void (*cpu_die)(unsigned int cpu); #endif +#ifdef CONFIG_ARM64_CPU_SUSPEND + int (*cpu_suspend)(unsigned long); +#endif }; extern const struct cpu_operations *cpu_ops[NR_CPUS]; diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 5fe138e..c404fb0 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -16,23 +16,23 @@ #ifndef __ASM_CPUTYPE_H #define __ASM_CPUTYPE_H -#define ID_MIDR_EL1 "midr_el1" -#define ID_MPIDR_EL1 "mpidr_el1" -#define ID_CTR_EL0 "ctr_el0" - -#define ID_AA64PFR0_EL1 "id_aa64pfr0_el1" -#define ID_AA64DFR0_EL1 "id_aa64dfr0_el1" -#define ID_AA64AFR0_EL1 "id_aa64afr0_el1" -#define ID_AA64ISAR0_EL1 "id_aa64isar0_el1" -#define ID_AA64MMFR0_EL1 "id_aa64mmfr0_el1" - #define INVALID_HWID ULONG_MAX #define MPIDR_HWID_BITMASK 0xff00ffffff +#define MPIDR_LEVEL_BITS_SHIFT 3 +#define MPIDR_LEVEL_BITS (1 << MPIDR_LEVEL_BITS_SHIFT) +#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1) + +#define MPIDR_LEVEL_SHIFT(level) \ + (((1 << level) >> 1) << MPIDR_LEVEL_BITS_SHIFT) + +#define MPIDR_AFFINITY_LEVEL(mpidr, level) \ + ((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK) + #define read_cpuid(reg) ({ \ u64 __val; \ - asm("mrs %0, " reg : "=r" (__val)); \ + asm("mrs %0, " #reg : "=r" (__val)); \ __val; \ }) @@ -54,12 +54,12 @@ */ static inline u32 __attribute_const__ read_cpuid_id(void) { - return read_cpuid(ID_MIDR_EL1); + return read_cpuid(MIDR_EL1); } static inline u64 __attribute_const__ read_cpuid_mpidr(void) { - return read_cpuid(ID_MPIDR_EL1); + return read_cpuid(MPIDR_EL1); } static inline unsigned int __attribute_const__ read_cpuid_implementor(void) @@ -74,7 +74,7 @@ static inline unsigned int __attribute_const__ read_cpuid_part_number(void) static inline u32 __attribute_const__ read_cpuid_cachetype(void) { - return read_cpuid(ID_CTR_EL0); + return read_cpuid(CTR_EL0); } #endif /* __ASSEMBLY__ */ diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index a2232d0..6231479 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h @@ -62,6 +62,27 @@ struct task_struct; #define DBG_ARCH_ID_RESERVED 0 /* In case of ptrace ABI updates. */ +#define DBG_HOOK_HANDLED 0 +#define DBG_HOOK_ERROR 1 + +struct step_hook { + struct list_head node; + int (*fn)(struct pt_regs *regs, unsigned int esr); +}; + +void register_step_hook(struct step_hook *hook); +void unregister_step_hook(struct step_hook *hook); + +struct break_hook { + struct list_head node; + u32 esr_val; + u32 esr_mask; + int (*fn)(struct pt_regs *regs, unsigned int esr); +}; + +void register_break_hook(struct break_hook *hook); +void unregister_break_hook(struct break_hook *hook); + u8 debug_monitors_arch(void); void enable_debug_monitors(enum debug_el el); diff --git a/arch/arm64/include/asm/dma-contiguous.h b/arch/arm64/include/asm/dma-contiguous.h new file mode 100644 index 0000000..d6aacb6 --- /dev/null +++ b/arch/arm64/include/asm/dma-contiguous.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _ASM_DMA_CONTIGUOUS_H +#define _ASM_DMA_CONTIGUOUS_H + +#ifdef __KERNEL__ +#ifdef CONFIG_DMA_CMA + +#include <linux/types.h> +#include <asm-generic/dma-contiguous.h> + +static inline void +dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { } + +#endif +#endif + +#endif diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index c582fa3..78cc3ab 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h @@ -30,6 +30,7 @@ " cbnz %w3, 1b\n" \ "3:\n" \ " .pushsection .fixup,\"ax\"\n" \ +" .align 2\n" \ "4: mov %w0, %w5\n" \ " b 3b\n" \ " .popsection\n" \ diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h index 990c051..ae4801d 100644 --- a/arch/arm64/include/asm/hardirq.h +++ b/arch/arm64/include/asm/hardirq.h @@ -20,7 +20,7 @@ #include <linux/threads.h> #include <asm/irq.h> -#define NR_IPI 4 +#define NR_IPI 5 typedef struct { unsigned int __softirq_pending; diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h new file mode 100644 index 0000000..c44ad39 --- /dev/null +++ b/arch/arm64/include/asm/insn.h @@ -0,0 +1,108 @@ +/* + * Copyright (C) 2013 Huawei Ltd. + * Author: Jiang Liu <liuj97@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_INSN_H +#define __ASM_INSN_H +#include <linux/types.h> + +/* A64 instructions are always 32 bits. 
*/ +#define AARCH64_INSN_SIZE 4 + +/* + * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a + * Section C3.1 "A64 instruction index by encoding": + * AArch64 main encoding table + * Bit position + * 28 27 26 25 Encoding Group + * 0 0 - - Unallocated + * 1 0 0 - Data processing, immediate + * 1 0 1 - Branch, exception generation and system instructions + * - 1 - 0 Loads and stores + * - 1 0 1 Data processing - register + * 0 1 1 1 Data processing - SIMD and floating point + * 1 1 1 1 Data processing - SIMD and floating point + * "-" means "don't care" + */ +enum aarch64_insn_encoding_class { + AARCH64_INSN_CLS_UNKNOWN, /* UNALLOCATED */ + AARCH64_INSN_CLS_DP_IMM, /* Data processing - immediate */ + AARCH64_INSN_CLS_DP_REG, /* Data processing - register */ + AARCH64_INSN_CLS_DP_FPSIMD, /* Data processing - SIMD and FP */ + AARCH64_INSN_CLS_LDST, /* Loads and stores */ + AARCH64_INSN_CLS_BR_SYS, /* Branch, exception generation and + * system instructions */ +}; + +enum aarch64_insn_hint_op { + AARCH64_INSN_HINT_NOP = 0x0 << 5, + AARCH64_INSN_HINT_YIELD = 0x1 << 5, + AARCH64_INSN_HINT_WFE = 0x2 << 5, + AARCH64_INSN_HINT_WFI = 0x3 << 5, + AARCH64_INSN_HINT_SEV = 0x4 << 5, + AARCH64_INSN_HINT_SEVL = 0x5 << 5, +}; + +enum aarch64_insn_imm_type { + AARCH64_INSN_IMM_ADR, + AARCH64_INSN_IMM_26, + AARCH64_INSN_IMM_19, + AARCH64_INSN_IMM_16, + AARCH64_INSN_IMM_14, + AARCH64_INSN_IMM_12, + AARCH64_INSN_IMM_9, + AARCH64_INSN_IMM_MAX +}; + +enum aarch64_insn_branch_type { + AARCH64_INSN_BRANCH_NOLINK, + AARCH64_INSN_BRANCH_LINK, +}; + +#define __AARCH64_INSN_FUNCS(abbr, mask, val) \ +static __always_inline bool aarch64_insn_is_##abbr(u32 code) \ +{ return (code & (mask)) == (val); } \ +static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \ +{ return (val); } + +__AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000) +__AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000) +__AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001) +__AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002) +__AARCH64_INSN_FUNCS(smc, 0xFFE0001F, 0xD4000003) +__AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000) +__AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F) + +#undef __AARCH64_INSN_FUNCS + +bool aarch64_insn_is_nop(u32 insn); + +int aarch64_insn_read(void *addr, u32 *insnp); +int aarch64_insn_write(void *addr, u32 insn); +enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn); +u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, + u32 insn, u64 imm); +u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, + enum aarch64_insn_branch_type type); +u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op); +u32 aarch64_insn_gen_nop(void); + +bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn); + +int aarch64_insn_patch_text_nosync(void *addr, u32 insn); +int aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt); +int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt); + +#endif /* __ASM_INSN_H */ diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h index aa11943..b2fcfbc 100644 --- a/arch/arm64/include/asm/irqflags.h +++ b/arch/arm64/include/asm/irqflags.h @@ -56,6 +56,9 @@ static inline void arch_local_irq_disable(void) #define local_fiq_enable() asm("msr daifclr, #1" : : : "memory") #define local_fiq_disable() asm("msr daifset, #1" : : : "memory") +#define local_async_enable() asm("msr daifclr, #4" : : : "memory") +#define local_async_disable() asm("msr daifset, #4" : : : "memory") + /* * Save the current interrupt enable state. 
*/ diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h new file mode 100644 index 0000000..076a1c7 --- /dev/null +++ b/arch/arm64/include/asm/jump_label.h @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2013 Huawei Ltd. + * Author: Jiang Liu <liuj97@gmail.com> + * + * Based on arch/arm/include/asm/jump_label.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_JUMP_LABEL_H +#define __ASM_JUMP_LABEL_H +#include <linux/types.h> +#include <asm/insn.h> + +#ifdef __KERNEL__ + +#define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE + +static __always_inline bool arch_static_branch(struct static_key *key) +{ + asm goto("1: nop\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".align 3\n\t" + ".quad 1b, %l[l_yes], %c0\n\t" + ".popsection\n\t" + : : "i"(key) : : l_yes); + + return false; +l_yes: + return true; +} + +#endif /* __KERNEL__ */ + +typedef u64 jump_label_t; + +struct jump_entry { + jump_label_t code; + jump_label_t target; + jump_label_t key; +}; + +#endif /* __ASM_JUMP_LABEL_H */ diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 3776217..9dc5dc3 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -146,8 +146,7 @@ static inline void *phys_to_virt(phys_addr_t x) #define ARCH_PFN_OFFSET PHYS_PFN_OFFSET #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) -#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \ - ((void *)(kaddr) < (void *)high_memory)) +#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) #endif diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h new file mode 100644 index 0000000..13fb0b3 --- /dev/null +++ b/arch/arm64/include/asm/percpu.h @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2013 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_PERCPU_H +#define __ASM_PERCPU_H + +static inline void set_my_cpu_offset(unsigned long off) +{ + asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory"); +} + +static inline unsigned long __my_cpu_offset(void) +{ + unsigned long off; + register unsigned long *sp asm ("sp"); + + /* + * We want to allow caching the value, so avoid using volatile and + * instead use a fake stack read to hazard against barrier(). 
+ */ + asm("mrs %0, tpidr_el1" : "=r" (off) : "Q" (*sp)); + + return off; +} +#define __my_cpu_offset __my_cpu_offset() + +#include <asm-generic/percpu.h> + +#endif /* __ASM_PERCPU_H */ diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index 755f861..b1d2e26 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -43,7 +43,7 @@ * Section */ #define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0) -#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 2) +#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 58) #define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */ #define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */ #define PMD_SECT_S (_AT(pmdval_t, 3) << 8) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 17bd3af..7f2b60a 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -25,10 +25,11 @@ * Software defined PTE bits definition. */ #define PTE_VALID (_AT(pteval_t, 1) << 0) -#define PTE_PROT_NONE (_AT(pteval_t, 1) << 2) /* only when !PTE_VALID */ -#define PTE_FILE (_AT(pteval_t, 1) << 3) /* only when !pte_present() */ +#define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */ #define PTE_DIRTY (_AT(pteval_t, 1) << 55) #define PTE_SPECIAL (_AT(pteval_t, 1) << 56) + /* bit 57 for PMD_SECT_SPLITTING */ +#define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */ /* * VMALLOC and SPARSEMEM_VMEMMAP ranges. @@ -254,7 +255,7 @@ static inline int has_transparent_hugepage(void) #define pgprot_noncached(prot) \ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE)) #define pgprot_writecombine(prot) \ - __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE)) + __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC)) #define pgprot_dmacoherent(prot) \ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC)) #define __HAVE_PHYS_MEM_ACCESS_PROT @@ -357,18 +358,20 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; /* * Encode and decode a swap entry: - * bits 0, 2: present (must both be zero) - * bit 3: PTE_FILE - * bits 4-8: swap type - * bits 9-63: swap offset + * bits 0-1: present (must be zero) + * bit 2: PTE_FILE + * bits 3-8: swap type + * bits 9-57: swap offset */ -#define __SWP_TYPE_SHIFT 4 +#define __SWP_TYPE_SHIFT 3 #define __SWP_TYPE_BITS 6 +#define __SWP_OFFSET_BITS 49 #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) +#define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1) #define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) -#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT) +#define __swp_offset(x) (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK) #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) @@ -382,15 +385,15 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; /* * Encode and decode a file entry: - * bits 0, 2: present (must both be zero) - * bit 3: PTE_FILE - * bits 4-63: file offset / PAGE_SIZE + * bits 0-1: present (must be zero) + * bit 2: PTE_FILE + * bits 3-57: file offset / PAGE_SIZE */ #define pte_file(pte) (pte_val(pte) & PTE_FILE) -#define pte_to_pgoff(x) (pte_val(x) >> 4) -#define pgoff_to_pte(x) __pte(((x) << 4) | PTE_FILE) +#define pte_to_pgoff(x) (pte_val(x) >> 3) +#define pgoff_to_pte(x) __pte(((x) << 3) | PTE_FILE) -#define 
PTE_FILE_MAX_BITS 60 +#define PTE_FILE_MAX_BITS 55 extern int kern_addr_valid(unsigned long addr); diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h index 7cdf466..0c657bb 100644 --- a/arch/arm64/include/asm/proc-fns.h +++ b/arch/arm64/include/asm/proc-fns.h @@ -26,11 +26,14 @@ #include <asm/page.h> struct mm_struct; +struct cpu_suspend_ctx; extern void cpu_cache_off(void); extern void cpu_do_idle(void); extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); +extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr); +extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); #include <asm/memory.h> diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h index ed43a0d..59e2823 100644 --- a/arch/arm64/include/asm/smp_plat.h +++ b/arch/arm64/include/asm/smp_plat.h @@ -21,6 +21,19 @@ #include <asm/types.h> +struct mpidr_hash { + u64 mask; + u32 shift_aff[4]; + u32 bits; +}; + +extern struct mpidr_hash mpidr_hash; + +static inline u32 mpidr_hash_size(void) +{ + return 1 << mpidr_hash.bits; +} + /* * Logical CPU mapping. */ diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h new file mode 100644 index 0000000..e9c149c --- /dev/null +++ b/arch/arm64/include/asm/suspend.h @@ -0,0 +1,27 @@ +#ifndef __ASM_SUSPEND_H +#define __ASM_SUSPEND_H + +#define NR_CTX_REGS 11 + +/* + * struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on + * the stack, which must be 16-byte aligned on v8 + */ +struct cpu_suspend_ctx { + /* + * This struct must be kept in sync with + * cpu_do_{suspend/resume} in mm/proc.S + */ + u64 ctx_regs[NR_CTX_REGS]; + u64 sp; +} __aligned(16); + +struct sleep_save_sp { + phys_addr_t *save_ptr_stash; + phys_addr_t save_ptr_stash_phys; +}; + +extern void cpu_resume(void); +extern int cpu_suspend(unsigned long); + +#endif diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 7ecc2b2..6c0f684 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -100,6 +100,7 @@ static inline void set_fs(mm_segment_t fs) }) #define access_ok(type, addr, size) __range_ok(addr, size) +#define user_addr_max get_fs /* * The "__xxx" versions of the user access functions do not verify the address @@ -240,9 +241,6 @@ extern unsigned long __must_check __copy_to_user(void __user *to, const void *fr extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n); extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); -extern unsigned long __must_check __strncpy_from_user(char *to, const char __user *from, unsigned long count); -extern unsigned long __must_check __strnlen_user(const char __user *s, long n); - static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) { if (access_ok(VERIFY_READ, from, n)) @@ -276,24 +274,9 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo return n; } -static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count) -{ - long res = -EFAULT; - if (access_ok(VERIFY_READ, src, 1)) - res = __strncpy_from_user(dst, src, count); - return res; -} - -#define strlen_user(s) strnlen_user(s, ~0UL >> 1) +extern long strncpy_from_user(char *dest, const char __user *src, long count); -static inline long __must_check strnlen_user(const char __user *s, long n) 
-{ - unsigned long res = 0; - - if (__addr_ok(s)) - res = __strnlen_user(s, n); - - return res; -} +extern __must_check long strlen_user(const char __user *str); +extern __must_check long strnlen_user(const char __user *str, long n); #endif /* __ASM_UACCESS_H */ diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h new file mode 100644 index 0000000..aab5bf0 --- /dev/null +++ b/arch/arm64/include/asm/word-at-a-time.h @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2013 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_WORD_AT_A_TIME_H +#define __ASM_WORD_AT_A_TIME_H + +#ifndef __AARCH64EB__ + +#include <linux/kernel.h> + +struct word_at_a_time { + const unsigned long one_bits, high_bits; +}; + +#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } + +static inline unsigned long has_zero(unsigned long a, unsigned long *bits, + const struct word_at_a_time *c) +{ + unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; + *bits = mask; + return mask; +} + +#define prep_zero_mask(a, bits, c) (bits) + +static inline unsigned long create_zero_mask(unsigned long bits) +{ + bits = (bits - 1) & ~bits; + return bits >> 7; +} + +static inline unsigned long find_zero(unsigned long mask) +{ + return fls64(mask) >> 3; +} + +#define zero_bytemask(mask) (mask) + +#else /* __AARCH64EB__ */ +#include <asm-generic/word-at-a-time.h> +#endif + +/* + * Load an unaligned word from kernel space. + * + * In the (very unlikely) case of the word being a page-crosser + * and the next page not being mapped, take the exception and + * return zeroes in the non-existing part. 
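A short sketch of how the helpers above are chained by the generic word-at-a-time string walkers; the wrapper function is illustrative, not part of this patch:

        static inline int first_nul_byte(unsigned long word)
        {
                const struct word_at_a_time c = WORD_AT_A_TIME_CONSTANTS;
                unsigned long bits;

                if (!has_zero(word, &bits, &c))
                        return -1;                      /* no NUL in this word */

                bits = prep_zero_mask(word, bits, &c);
                bits = create_zero_mask(bits);
                return find_zero(bits);                 /* byte index, 0..7 */
        }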
+ */ +static inline unsigned long load_unaligned_zeropad(const void *addr) +{ + unsigned long ret, offset; + + /* Load word from unaligned pointer addr */ + asm( + "1: ldr %0, %3\n" + "2:\n" + " .pushsection .fixup,\"ax\"\n" + " .align 2\n" + "3: and %1, %2, #0x7\n" + " bic %2, %2, #0x7\n" + " ldr %0, [%2]\n" + " lsl %1, %1, #0x3\n" +#ifndef __AARCH64EB__ + " lsr %0, %0, %1\n" +#else + " lsl %0, %0, %1\n" +#endif + " b 2b\n" + " .popsection\n" + " .pushsection __ex_table,\"a\"\n" + " .align 3\n" + " .quad 1b, 3b\n" + " .popsection" + : "=&r" (ret), "=&r" (offset) + : "r" (addr), "Q" (*(unsigned long *)addr)); + + return ret; +} + +#endif /* __ASM_WORD_AT_A_TIME_H */ diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h index 2820f1a..dde3fc9 100644 --- a/arch/arm64/include/asm/xen/page-coherent.h +++ b/arch/arm64/include/asm/xen/page-coherent.h @@ -23,25 +23,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { - __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); } static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { - __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); } static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { - __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); } static inline void xen_dma_sync_single_for_device(struct device *hwdev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { - __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); } #endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */ diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h index 9b12476..73cf0f5 100644 --- a/arch/arm64/include/uapi/asm/hwcap.h +++ b/arch/arm64/include/uapi/asm/hwcap.h @@ -22,6 +22,10 @@ #define HWCAP_FP (1 << 0) #define HWCAP_ASIMD (1 << 1) #define HWCAP_EVTSTRM (1 << 2) - +#define HWCAP_AES (1 << 3) +#define HWCAP_PMULL (1 << 4) +#define HWCAP_SHA1 (1 << 5) +#define HWCAP_SHA2 (1 << 6) +#define HWCAP_CRC32 (1 << 7) #endif /* _UAPI__ASM_HWCAP_H */ diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 5ba2fd4..2d4554b 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -9,7 +9,7 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ entry-fpsimd.o process.o ptrace.o setup.o signal.o \ sys.o stacktrace.o time.o traps.o io.o vdso.o \ - hyp-stub.o psci.o cpu_ops.o + hyp-stub.o psci.o cpu_ops.o insn.o arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ sys_compat.o @@ -18,6 +18,8 @@ arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o arm64-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o +arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND) += sleep.o suspend.o +arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-y += $(arm64-obj-y) vdso/ obj-m += $(arm64-obj-m) diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c index e7ee770..338b568 100644 --- a/arch/arm64/kernel/arm64ksyms.c +++ b/arch/arm64/kernel/arm64ksyms.c @@ -29,13 +29,10 @@ #include <asm/checksum.h> - /* user mem (segment) */ 
-EXPORT_SYMBOL(__strnlen_user); -EXPORT_SYMBOL(__strncpy_from_user); - EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(clear_page); + /* user mem (segment) */ EXPORT_SYMBOL(__copy_from_user); EXPORT_SYMBOL(__copy_to_user); EXPORT_SYMBOL(__clear_user); diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 666e231..646f888 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -25,6 +25,8 @@ #include <asm/thread_info.h> #include <asm/memory.h> #include <asm/cputable.h> +#include <asm/smp_plat.h> +#include <asm/suspend.h> #include <asm/vdso_datapage.h> #include <linux/kbuild.h> @@ -138,5 +140,14 @@ int main(void) DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base)); #endif +#ifdef CONFIG_ARM64_CPU_SUSPEND + DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx)); + DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp)); + DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask)); + DEFINE(MPIDR_HASH_SHIFTS, offsetof(struct mpidr_hash, shift_aff)); + DEFINE(SLEEP_SAVE_SP_SZ, sizeof(struct sleep_save_sp)); + DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys)); + DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash)); +#endif return 0; } diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 6a0a9b1..636ba8b 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -187,6 +187,48 @@ static void clear_regs_spsr_ss(struct pt_regs *regs) regs->pstate = spsr; } +/* EL1 Single Step Handler hooks */ +static LIST_HEAD(step_hook); +DEFINE_RWLOCK(step_hook_lock); + +void register_step_hook(struct step_hook *hook) +{ + write_lock(&step_hook_lock); + list_add(&hook->node, &step_hook); + write_unlock(&step_hook_lock); +} + +void unregister_step_hook(struct step_hook *hook) +{ + write_lock(&step_hook_lock); + list_del(&hook->node); + write_unlock(&step_hook_lock); +} + +/* + * Call registered single step handers + * There is no Syndrome info to check for determining the handler. + * So we call all the registered handlers, until the right handler is + * found which returns zero. + */ +static int call_step_hook(struct pt_regs *regs, unsigned int esr) +{ + struct step_hook *hook; + int retval = DBG_HOOK_ERROR; + + read_lock(&step_hook_lock); + + list_for_each_entry(hook, &step_hook, node) { + retval = hook->fn(regs, esr); + if (retval == DBG_HOOK_HANDLED) + break; + } + + read_unlock(&step_hook_lock); + + return retval; +} + static int single_step_handler(unsigned long addr, unsigned int esr, struct pt_regs *regs) { @@ -214,7 +256,9 @@ static int single_step_handler(unsigned long addr, unsigned int esr, */ user_rewind_single_step(current); } else { - /* TODO: route to KGDB */ + if (call_step_hook(regs, esr) == DBG_HOOK_HANDLED) + return 0; + pr_warning("Unexpected kernel single-step exception at EL1\n"); /* * Re-enable stepping since we know that we will be @@ -226,11 +270,53 @@ static int single_step_handler(unsigned long addr, unsigned int esr, return 0; } +/* + * Breakpoint handler is re-entrant as another breakpoint can + * hit within breakpoint handler, especically in kprobes. + * Use reader/writer locks instead of plain spinlock. 
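A sketch of how a client such as an in-kernel debugger would attach to the single-step hooks introduced above; the handler body and init function are illustrative:

        static int my_step_fn(struct pt_regs *regs, unsigned int esr)
        {
                /* claim the exception only if it belongs to this client,
                 * otherwise return DBG_HOOK_ERROR so other hooks get a look */
                return DBG_HOOK_HANDLED;
        }

        static struct step_hook my_step_hook = {
                .fn = my_step_fn,
        };

        static int __init my_debugger_init(void)
        {
                register_step_hook(&my_step_hook);      /* paired with unregister_step_hook() */
                return 0;
        }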
+ */ +static LIST_HEAD(break_hook); +DEFINE_RWLOCK(break_hook_lock); + +void register_break_hook(struct break_hook *hook) +{ + write_lock(&break_hook_lock); + list_add(&hook->node, &break_hook); + write_unlock(&break_hook_lock); +} + +void unregister_break_hook(struct break_hook *hook) +{ + write_lock(&break_hook_lock); + list_del(&hook->node); + write_unlock(&break_hook_lock); +} + +static int call_break_hook(struct pt_regs *regs, unsigned int esr) +{ + struct break_hook *hook; + int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL; + + read_lock(&break_hook_lock); + list_for_each_entry(hook, &break_hook, node) + if ((esr & hook->esr_mask) == hook->esr_val) + fn = hook->fn; + read_unlock(&break_hook_lock); + + return fn ? fn(regs, esr) : DBG_HOOK_ERROR; +} + static int brk_handler(unsigned long addr, unsigned int esr, struct pt_regs *regs) { siginfo_t info; + if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED) + return 0; + + pr_warn("unexpected brk exception at %lx, esr=0x%x\n", + (long)instruction_pointer(regs), esr); + if (!user_mode(regs)) return -EFAULT; @@ -248,7 +334,8 @@ static int brk_handler(unsigned long addr, unsigned int esr, int aarch32_break_handler(struct pt_regs *regs) { siginfo_t info; - unsigned int instr; + u32 arm_instr; + u16 thumb_instr; bool bp = false; void __user *pc = (void __user *)instruction_pointer(regs); @@ -257,18 +344,21 @@ int aarch32_break_handler(struct pt_regs *regs) if (compat_thumb_mode(regs)) { /* get 16-bit Thumb instruction */ - get_user(instr, (u16 __user *)pc); - if (instr == AARCH32_BREAK_THUMB2_LO) { + get_user(thumb_instr, (u16 __user *)pc); + thumb_instr = le16_to_cpu(thumb_instr); + if (thumb_instr == AARCH32_BREAK_THUMB2_LO) { /* get second half of 32-bit Thumb-2 instruction */ - get_user(instr, (u16 __user *)(pc + 2)); - bp = instr == AARCH32_BREAK_THUMB2_HI; + get_user(thumb_instr, (u16 __user *)(pc + 2)); + thumb_instr = le16_to_cpu(thumb_instr); + bp = thumb_instr == AARCH32_BREAK_THUMB2_HI; } else { - bp = instr == AARCH32_BREAK_THUMB; + bp = thumb_instr == AARCH32_BREAK_THUMB; } } else { /* 32-bit ARM instruction */ - get_user(instr, (u32 __user *)pc); - bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM; + get_user(arm_instr, (u32 __user *)pc); + arm_instr = le32_to_cpu(arm_instr); + bp = (arm_instr & ~0xf0000000) == AARCH32_BREAK_ARM; } if (!bp) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index e116614..39ac630 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -288,6 +288,8 @@ el1_dbg: /* * Debug exception handling */ + cmp x24, #ESR_EL1_EC_BRK64 // if BRK64 + cinc x24, x24, eq // set bit '0' tbz x24, #0, el1_inv // EL1 only mrs x0, far_el1 mov x2, sp // struct pt_regs @@ -309,15 +311,12 @@ el1_irq: #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off #endif + + irq_handler + #ifdef CONFIG_PREEMPT get_thread_info tsk ldr w24, [tsk, #TI_PREEMPT] // get preempt count - add w0, w24, #1 // increment it - str w0, [tsk, #TI_PREEMPT] -#endif - irq_handler -#ifdef CONFIG_PREEMPT - str w24, [tsk, #TI_PREEMPT] // restore preempt count cbnz w24, 1f // preempt count != 0 ldr x0, [tsk, #TI_FLAGS] // get flags tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? 
@@ -507,22 +506,10 @@ el0_irq_naked: #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off #endif - get_thread_info tsk -#ifdef CONFIG_PREEMPT - ldr w24, [tsk, #TI_PREEMPT] // get preempt count - add w23, w24, #1 // increment it - str w23, [tsk, #TI_PREEMPT] -#endif + irq_handler -#ifdef CONFIG_PREEMPT - ldr w0, [tsk, #TI_PREEMPT] - str w24, [tsk, #TI_PREEMPT] - cmp w0, w23 - b.eq 1f - mov x1, #0 - str x1, [x1] // BUG -1: -#endif + get_thread_info tsk + #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_on #endif diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index bb785d2..4aef42a 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -17,6 +17,7 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/cpu_pm.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/sched.h> @@ -113,6 +114,39 @@ EXPORT_SYMBOL(kernel_neon_end); #endif /* CONFIG_KERNEL_MODE_NEON */ +#ifdef CONFIG_CPU_PM +static int fpsimd_cpu_pm_notifier(struct notifier_block *self, + unsigned long cmd, void *v) +{ + switch (cmd) { + case CPU_PM_ENTER: + if (current->mm) + fpsimd_save_state(¤t->thread.fpsimd_state); + break; + case CPU_PM_EXIT: + if (current->mm) + fpsimd_load_state(¤t->thread.fpsimd_state); + break; + case CPU_PM_ENTER_FAILED: + default: + return NOTIFY_DONE; + } + return NOTIFY_OK; +} + +static struct notifier_block fpsimd_cpu_pm_notifier_block = { + .notifier_call = fpsimd_cpu_pm_notifier, +}; + +static void fpsimd_pm_init(void) +{ + cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block); +} + +#else +static inline void fpsimd_pm_init(void) { } +#endif /* CONFIG_CPU_PM */ + /* * FP/SIMD support code initialisation. */ @@ -131,6 +165,8 @@ static int __init fpsimd_init(void) else elf_hwcap |= HWCAP_ASIMD; + fpsimd_pm_init(); + return 0; } late_initcall(fpsimd_init); diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 7009387..0b281ff 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -282,8 +282,9 @@ ENDPROC(secondary_holding_pen) * be used where CPUs are brought online dynamically by the kernel. */ ENTRY(secondary_entry) - bl __calc_phys_offset // x2=phys offset bl el2_setup // Drop to EL1 + bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET + bl set_cpu_boot_mode_flag b secondary_startup ENDPROC(secondary_entry) @@ -481,8 +482,6 @@ ENDPROC(__create_page_tables) .type __switch_data, %object __switch_data: .quad __mmap_switched - .quad __data_loc // x4 - .quad _data // x5 .quad __bss_start // x6 .quad _end // x7 .quad processor_id // x4 @@ -497,15 +496,7 @@ __switch_data: __mmap_switched: adr x3, __switch_data + 8 - ldp x4, x5, [x3], #16 ldp x6, x7, [x3], #16 - cmp x4, x5 // Copy data segment if needed -1: ccmp x5, x6, #4, ne - b.eq 2f - ldr x16, [x4], #8 - str x16, [x5], #8 - b 1b -2: 1: cmp x6, x7 b.hs 2f str xzr, [x6], #8 // Clear BSS diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index ff516f6..f17f581 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -20,6 +20,7 @@ #define pr_fmt(fmt) "hw-breakpoint: " fmt +#include <linux/cpu_pm.h> #include <linux/errno.h> #include <linux/hw_breakpoint.h> #include <linux/perf_event.h> @@ -169,15 +170,68 @@ static enum debug_el debug_exception_level(int privilege) } } -/* - * Install a perf counter breakpoint. 
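For context, the CPU_PM notifications consumed by the fpsimd code above are raised by the platform's low-power entry path; a hedged sketch of that caller side, with the power-down helper purely illustrative:

        static void my_platform_power_down(void) { /* firmware call, WFI, ... */ }

        static int my_enter_lowpower(void)
        {
                int ret = cpu_pm_enter();       /* CPU_PM_ENTER: fpsimd state saved */
                if (ret)
                        return ret;

                my_platform_power_down();

                cpu_pm_exit();                  /* CPU_PM_EXIT: fpsimd state restored */
                return 0;
        }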
+enum hw_breakpoint_ops { + HW_BREAKPOINT_INSTALL, + HW_BREAKPOINT_UNINSTALL, + HW_BREAKPOINT_RESTORE +}; + +/** + * hw_breakpoint_slot_setup - Find and setup a perf slot according to + * operations + * + * @slots: pointer to array of slots + * @max_slots: max number of slots + * @bp: perf_event to setup + * @ops: operation to be carried out on the slot + * + * Return: + * slot index on success + * -ENOSPC if no slot is available/matches + * -EINVAL on wrong operations parameter */ -int arch_install_hw_breakpoint(struct perf_event *bp) +static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots, + struct perf_event *bp, + enum hw_breakpoint_ops ops) +{ + int i; + struct perf_event **slot; + + for (i = 0; i < max_slots; ++i) { + slot = &slots[i]; + switch (ops) { + case HW_BREAKPOINT_INSTALL: + if (!*slot) { + *slot = bp; + return i; + } + break; + case HW_BREAKPOINT_UNINSTALL: + if (*slot == bp) { + *slot = NULL; + return i; + } + break; + case HW_BREAKPOINT_RESTORE: + if (*slot == bp) + return i; + break; + default: + pr_warn_once("Unhandled hw breakpoint ops %d\n", ops); + return -EINVAL; + } + } + return -ENOSPC; +} + +static int hw_breakpoint_control(struct perf_event *bp, + enum hw_breakpoint_ops ops) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); - struct perf_event **slot, **slots; + struct perf_event **slots; struct debug_info *debug_info = ¤t->thread.debug; int i, max_slots, ctrl_reg, val_reg, reg_enable; + enum debug_el dbg_el = debug_exception_level(info->ctrl.privilege); u32 ctrl; if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { @@ -196,67 +250,54 @@ int arch_install_hw_breakpoint(struct perf_event *bp) reg_enable = !debug_info->wps_disabled; } - for (i = 0; i < max_slots; ++i) { - slot = &slots[i]; - - if (!*slot) { - *slot = bp; - break; - } - } - - if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot")) - return -ENOSPC; + i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops); - /* Ensure debug monitors are enabled at the correct exception level. */ - enable_debug_monitors(debug_exception_level(info->ctrl.privilege)); + if (WARN_ONCE(i < 0, "Can't find any breakpoint slot")) + return i; - /* Setup the address register. */ - write_wb_reg(val_reg, i, info->address); + switch (ops) { + case HW_BREAKPOINT_INSTALL: + /* + * Ensure debug monitors are enabled at the correct exception + * level. + */ + enable_debug_monitors(dbg_el); + /* Fall through */ + case HW_BREAKPOINT_RESTORE: + /* Setup the address register. */ + write_wb_reg(val_reg, i, info->address); + + /* Setup the control register. */ + ctrl = encode_ctrl_reg(info->ctrl); + write_wb_reg(ctrl_reg, i, + reg_enable ? ctrl | 0x1 : ctrl & ~0x1); + break; + case HW_BREAKPOINT_UNINSTALL: + /* Reset the control register. */ + write_wb_reg(ctrl_reg, i, 0); - /* Setup the control register. */ - ctrl = encode_ctrl_reg(info->ctrl); - write_wb_reg(ctrl_reg, i, reg_enable ? ctrl | 0x1 : ctrl & ~0x1); + /* + * Release the debug monitors for the correct exception + * level. + */ + disable_debug_monitors(dbg_el); + break; + } return 0; } -void arch_uninstall_hw_breakpoint(struct perf_event *bp) +/* + * Install a perf counter breakpoint. 
+ */ +int arch_install_hw_breakpoint(struct perf_event *bp) { - struct arch_hw_breakpoint *info = counter_arch_bp(bp); - struct perf_event **slot, **slots; - int i, max_slots, base; - - if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { - /* Breakpoint */ - base = AARCH64_DBG_REG_BCR; - slots = this_cpu_ptr(bp_on_reg); - max_slots = core_num_brps; - } else { - /* Watchpoint */ - base = AARCH64_DBG_REG_WCR; - slots = this_cpu_ptr(wp_on_reg); - max_slots = core_num_wrps; - } - - /* Remove the breakpoint. */ - for (i = 0; i < max_slots; ++i) { - slot = &slots[i]; - - if (*slot == bp) { - *slot = NULL; - break; - } - } - - if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot")) - return; - - /* Reset the control register. */ - write_wb_reg(base, i, 0); + return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL); +} - /* Release the debug monitors for the correct exception level. */ - disable_debug_monitors(debug_exception_level(info->ctrl.privilege)); +void arch_uninstall_hw_breakpoint(struct perf_event *bp) +{ + hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL); } static int get_hbp_len(u8 hbp_len) @@ -806,18 +847,36 @@ void hw_breakpoint_thread_switch(struct task_struct *next) /* * CPU initialisation. */ -static void reset_ctrl_regs(void *unused) +static void hw_breakpoint_reset(void *unused) { int i; - - for (i = 0; i < core_num_brps; ++i) { - write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL); - write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL); + struct perf_event **slots; + /* + * When a CPU goes through cold-boot, it does not have any installed + * slot, so it is safe to share the same function for restoring and + * resetting breakpoints; when a CPU is hotplugged in, it goes + * through the slots, which are all empty, hence it just resets control + * and value for debug registers. + * When this function is triggered on warm-boot through a CPU PM + * notifier some slots might be initialized; if so they are + * reprogrammed according to the debug slots content. + */ + for (slots = this_cpu_ptr(bp_on_reg), i = 0; i < core_num_brps; ++i) { + if (slots[i]) { + hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE); + } else { + write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL); + write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL); + } } - for (i = 0; i < core_num_wrps; ++i) { - write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL); - write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL); + for (slots = this_cpu_ptr(wp_on_reg), i = 0; i < core_num_wrps; ++i) { + if (slots[i]) { + hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE); + } else { + write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL); + write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL); + } } } @@ -827,7 +886,7 @@ static int hw_breakpoint_reset_notify(struct notifier_block *self, { int cpu = (long)hcpu; if (action == CPU_ONLINE) - smp_call_function_single(cpu, reset_ctrl_regs, NULL, 1); + smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1); return NOTIFY_OK; } @@ -835,6 +894,14 @@ static struct notifier_block hw_breakpoint_reset_nb = { .notifier_call = hw_breakpoint_reset_notify, }; +#ifdef CONFIG_ARM64_CPU_SUSPEND +extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)); +#else +static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) +{ +} +#endif + /* * One-time initialisation. */ @@ -850,8 +917,8 @@ static int __init arch_hw_breakpoint_init(void) * Reset the breakpoint resources. We assume that a halting * debugger will leave the world in a nice state for us. 
*/ - smp_call_function(reset_ctrl_regs, NULL, 1); - reset_ctrl_regs(NULL); + smp_call_function(hw_breakpoint_reset, NULL, 1); + hw_breakpoint_reset(NULL); /* Register debug fault handlers. */ hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP, @@ -861,6 +928,8 @@ static int __init arch_hw_breakpoint_init(void) /* Register hotplug notifier. */ register_cpu_notifier(&hw_breakpoint_reset_nb); + /* Register cpu_suspend hw breakpoint restore hook */ + cpu_suspend_set_dbg_restorer(hw_breakpoint_reset); return 0; } diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c new file mode 100644 index 0000000..92f3683 --- /dev/null +++ b/arch/arm64/kernel/insn.c @@ -0,0 +1,304 @@ +/* + * Copyright (C) 2013 Huawei Ltd. + * Author: Jiang Liu <liuj97@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#include <linux/bitops.h> +#include <linux/compiler.h> +#include <linux/kernel.h> +#include <linux/smp.h> +#include <linux/stop_machine.h> +#include <linux/uaccess.h> +#include <asm/cacheflush.h> +#include <asm/insn.h> + +static int aarch64_insn_encoding_class[] = { + AARCH64_INSN_CLS_UNKNOWN, + AARCH64_INSN_CLS_UNKNOWN, + AARCH64_INSN_CLS_UNKNOWN, + AARCH64_INSN_CLS_UNKNOWN, + AARCH64_INSN_CLS_LDST, + AARCH64_INSN_CLS_DP_REG, + AARCH64_INSN_CLS_LDST, + AARCH64_INSN_CLS_DP_FPSIMD, + AARCH64_INSN_CLS_DP_IMM, + AARCH64_INSN_CLS_DP_IMM, + AARCH64_INSN_CLS_BR_SYS, + AARCH64_INSN_CLS_BR_SYS, + AARCH64_INSN_CLS_LDST, + AARCH64_INSN_CLS_DP_REG, + AARCH64_INSN_CLS_LDST, + AARCH64_INSN_CLS_DP_FPSIMD, +}; + +enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn) +{ + return aarch64_insn_encoding_class[(insn >> 25) & 0xf]; +} + +/* NOP is an alias of HINT */ +bool __kprobes aarch64_insn_is_nop(u32 insn) +{ + if (!aarch64_insn_is_hint(insn)) + return false; + + switch (insn & 0xFE0) { + case AARCH64_INSN_HINT_YIELD: + case AARCH64_INSN_HINT_WFE: + case AARCH64_INSN_HINT_WFI: + case AARCH64_INSN_HINT_SEV: + case AARCH64_INSN_HINT_SEVL: + return false; + default: + return true; + } +} + +/* + * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always + * little-endian. 
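A small usage sketch for the decoders above; 0xd503201f is the canonical A64 NOP encoding:

        static void insn_decode_example(void)
        {
                u32 insn = 0xd503201f;          /* A64 NOP (HINT #0) */

                if (aarch64_get_insn_class(insn) == AARCH64_INSN_CLS_BR_SYS &&
                    aarch64_insn_is_nop(insn))
                        pr_info("instruction is a NOP\n");
        }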
+ */ +int __kprobes aarch64_insn_read(void *addr, u32 *insnp) +{ + int ret; + u32 val; + + ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE); + if (!ret) + *insnp = le32_to_cpu(val); + + return ret; +} + +int __kprobes aarch64_insn_write(void *addr, u32 insn) +{ + insn = cpu_to_le32(insn); + return probe_kernel_write(addr, &insn, AARCH64_INSN_SIZE); +} + +static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn) +{ + if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS) + return false; + + return aarch64_insn_is_b(insn) || + aarch64_insn_is_bl(insn) || + aarch64_insn_is_svc(insn) || + aarch64_insn_is_hvc(insn) || + aarch64_insn_is_smc(insn) || + aarch64_insn_is_brk(insn) || + aarch64_insn_is_nop(insn); +} + +/* + * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a + * Section B2.6.5 "Concurrent modification and execution of instructions": + * Concurrent modification and execution of instructions can lead to the + * resulting instruction performing any behavior that can be achieved by + * executing any sequence of instructions that can be executed from the + * same Exception level, except where the instruction before modification + * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC, + * or SMC instruction. + */ +bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn) +{ + return __aarch64_insn_hotpatch_safe(old_insn) && + __aarch64_insn_hotpatch_safe(new_insn); +} + +int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn) +{ + u32 *tp = addr; + int ret; + + /* A64 instructions must be word aligned */ + if ((uintptr_t)tp & 0x3) + return -EINVAL; + + ret = aarch64_insn_write(tp, insn); + if (ret == 0) + flush_icache_range((uintptr_t)tp, + (uintptr_t)tp + AARCH64_INSN_SIZE); + + return ret; +} + +struct aarch64_insn_patch { + void **text_addrs; + u32 *new_insns; + int insn_cnt; + atomic_t cpu_count; +}; + +static int __kprobes aarch64_insn_patch_text_cb(void *arg) +{ + int i, ret = 0; + struct aarch64_insn_patch *pp = arg; + + /* The first CPU becomes master */ + if (atomic_inc_return(&pp->cpu_count) == 1) { + for (i = 0; ret == 0 && i < pp->insn_cnt; i++) + ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i], + pp->new_insns[i]); + /* + * aarch64_insn_patch_text_nosync() calls flush_icache_range(), + * which ends with "dsb; isb" pair guaranteeing global + * visibility. + */ + atomic_set(&pp->cpu_count, -1); + } else { + while (atomic_read(&pp->cpu_count) != -1) + cpu_relax(); + isb(); + } + + return ret; +} + +int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt) +{ + struct aarch64_insn_patch patch = { + .text_addrs = addrs, + .new_insns = insns, + .insn_cnt = cnt, + .cpu_count = ATOMIC_INIT(0), + }; + + if (cnt <= 0) + return -EINVAL; + + return stop_machine(aarch64_insn_patch_text_cb, &patch, + cpu_online_mask); +} + +int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt) +{ + int ret; + u32 insn; + + /* Unsafe to patch multiple instructions without synchronizaiton */ + if (cnt == 1) { + ret = aarch64_insn_read(addrs[0], &insn); + if (ret) + return ret; + + if (aarch64_insn_hotpatch_safe(insn, insns[0])) { + /* + * ARMv8 architecture doesn't guarantee all CPUs see + * the new instruction after returning from function + * aarch64_insn_patch_text_nosync(). So send IPIs to + * all other CPUs to achieve instruction + * synchronization. 
+ */ + ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]); + kick_all_cpus_sync(); + return ret; + } + } + + return aarch64_insn_patch_text_sync(addrs, insns, cnt); +} + +u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, + u32 insn, u64 imm) +{ + u32 immlo, immhi, lomask, himask, mask; + int shift; + + switch (type) { + case AARCH64_INSN_IMM_ADR: + lomask = 0x3; + himask = 0x7ffff; + immlo = imm & lomask; + imm >>= 2; + immhi = imm & himask; + imm = (immlo << 24) | (immhi); + mask = (lomask << 24) | (himask); + shift = 5; + break; + case AARCH64_INSN_IMM_26: + mask = BIT(26) - 1; + shift = 0; + break; + case AARCH64_INSN_IMM_19: + mask = BIT(19) - 1; + shift = 5; + break; + case AARCH64_INSN_IMM_16: + mask = BIT(16) - 1; + shift = 5; + break; + case AARCH64_INSN_IMM_14: + mask = BIT(14) - 1; + shift = 5; + break; + case AARCH64_INSN_IMM_12: + mask = BIT(12) - 1; + shift = 10; + break; + case AARCH64_INSN_IMM_9: + mask = BIT(9) - 1; + shift = 12; + break; + default: + pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n", + type); + return 0; + } + + /* Update the immediate field. */ + insn &= ~(mask << shift); + insn |= (imm & mask) << shift; + + return insn; +} + +u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, + enum aarch64_insn_branch_type type) +{ + u32 insn; + long offset; + + /* + * PC: A 64-bit Program Counter holding the address of the current + * instruction. A64 instructions must be word-aligned. + */ + BUG_ON((pc & 0x3) || (addr & 0x3)); + + /* + * B/BL support [-128M, 128M) offset + * ARM64 virtual address arrangement guarantees all kernel and module + * texts are within +/-128M. + */ + offset = ((long)addr - (long)pc); + BUG_ON(offset < -SZ_128M || offset >= SZ_128M); + + if (type == AARCH64_INSN_BRANCH_LINK) + insn = aarch64_insn_get_bl_value(); + else + insn = aarch64_insn_get_b_value(); + + return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn, + offset >> 2); +} + +u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op) +{ + return aarch64_insn_get_hint_value() | op; +} + +u32 __kprobes aarch64_insn_gen_nop(void) +{ + return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP); +} diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c new file mode 100644 index 0000000..263a166 --- /dev/null +++ b/arch/arm64/kernel/jump_label.c @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2013 Huawei Ltd. + * Author: Jiang Liu <liuj97@gmail.com> + * + * Based on arch/arm/kernel/jump_label.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
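Putting the insn.c helpers together, a hedged sketch of turning a known NOP site into a direct branch; both pointers are illustrative, and the jump-label code that follows is the in-tree user of exactly this sequence:

        static int patch_nop_to_branch(void *site, void *target)
        {
                u32 insn = aarch64_insn_gen_branch_imm((unsigned long)site,
                                                       (unsigned long)target,
                                                       AARCH64_INSN_BRANCH_NOLINK);

                /*
                 * NOP -> B is a hotpatch-safe pair, so this takes the
                 * stop_machine()-free path and only kicks the other CPUs.
                 */
                return aarch64_insn_patch_text(&site, &insn, 1);
        }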
+ */ +#include <linux/kernel.h> +#include <linux/jump_label.h> +#include <asm/insn.h> + +#ifdef HAVE_JUMP_LABEL + +static void __arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type, + bool is_static) +{ + void *addr = (void *)entry->code; + u32 insn; + + if (type == JUMP_LABEL_ENABLE) { + insn = aarch64_insn_gen_branch_imm(entry->code, + entry->target, + AARCH64_INSN_BRANCH_NOLINK); + } else { + insn = aarch64_insn_gen_nop(); + } + + if (is_static) + aarch64_insn_patch_text_nosync(addr, insn); + else + aarch64_insn_patch_text(&addr, &insn, 1); +} + +void arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type) +{ + __arch_jump_label_transform(entry, type, false); +} + +void arch_jump_label_transform_static(struct jump_entry *entry, + enum jump_label_type type) +{ + __arch_jump_label_transform(entry, type, true); +} + +#endif /* HAVE_JUMP_LABEL */ diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index e2ad0d8..1eb1cc9 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -25,6 +25,10 @@ #include <linux/mm.h> #include <linux/moduleloader.h> #include <linux/vmalloc.h> +#include <asm/insn.h> + +#define AARCH64_INSN_IMM_MOVNZ AARCH64_INSN_IMM_MAX +#define AARCH64_INSN_IMM_MOVK AARCH64_INSN_IMM_16 void *module_alloc(unsigned long size) { @@ -94,28 +98,18 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len) return 0; } -enum aarch64_imm_type { - INSN_IMM_MOVNZ, - INSN_IMM_MOVK, - INSN_IMM_ADR, - INSN_IMM_26, - INSN_IMM_19, - INSN_IMM_16, - INSN_IMM_14, - INSN_IMM_12, - INSN_IMM_9, -}; - -static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm) +static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val, + int lsb, enum aarch64_insn_imm_type imm_type) { - u32 immlo, immhi, lomask, himask, mask; - int shift; + u64 imm, limit = 0; + s64 sval; + u32 insn = le32_to_cpu(*(u32 *)place); - /* The instruction stream is always little endian. */ - insn = le32_to_cpu(insn); + sval = do_reloc(op, place, val); + sval >>= lsb; + imm = sval & 0xffff; - switch (type) { - case INSN_IMM_MOVNZ: + if (imm_type == AARCH64_INSN_IMM_MOVNZ) { /* * For signed MOVW relocations, we have to manipulate the * instruction encoding depending on whether or not the @@ -134,70 +128,12 @@ static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm) */ imm = ~imm; } - case INSN_IMM_MOVK: - mask = BIT(16) - 1; - shift = 5; - break; - case INSN_IMM_ADR: - lomask = 0x3; - himask = 0x7ffff; - immlo = imm & lomask; - imm >>= 2; - immhi = imm & himask; - imm = (immlo << 24) | (immhi); - mask = (lomask << 24) | (himask); - shift = 5; - break; - case INSN_IMM_26: - mask = BIT(26) - 1; - shift = 0; - break; - case INSN_IMM_19: - mask = BIT(19) - 1; - shift = 5; - break; - case INSN_IMM_16: - mask = BIT(16) - 1; - shift = 5; - break; - case INSN_IMM_14: - mask = BIT(14) - 1; - shift = 5; - break; - case INSN_IMM_12: - mask = BIT(12) - 1; - shift = 10; - break; - case INSN_IMM_9: - mask = BIT(9) - 1; - shift = 12; - break; - default: - pr_err("encode_insn_immediate: unknown immediate encoding %d\n", - type); - return 0; + imm_type = AARCH64_INSN_IMM_MOVK; } - /* Update the immediate field. 
*/ - insn &= ~(mask << shift); - insn |= (imm & mask) << shift; - - return cpu_to_le32(insn); -} - -static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val, - int lsb, enum aarch64_imm_type imm_type) -{ - u64 imm, limit = 0; - s64 sval; - u32 insn = *(u32 *)place; - - sval = do_reloc(op, place, val); - sval >>= lsb; - imm = sval & 0xffff; - /* Update the instruction with the new encoding. */ - *(u32 *)place = encode_insn_immediate(imm_type, insn, imm); + insn = aarch64_insn_encode_immediate(imm_type, insn, imm); + *(u32 *)place = cpu_to_le32(insn); /* Shift out the immediate field. */ sval >>= 16; @@ -206,9 +142,9 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val, * For unsigned immediates, the overflow check is straightforward. * For signed immediates, the sign bit is actually the bit past the * most significant bit of the field. - * The INSN_IMM_16 immediate type is unsigned. + * The AARCH64_INSN_IMM_16 immediate type is unsigned. */ - if (imm_type != INSN_IMM_16) { + if (imm_type != AARCH64_INSN_IMM_16) { sval++; limit++; } @@ -221,11 +157,11 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val, } static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val, - int lsb, int len, enum aarch64_imm_type imm_type) + int lsb, int len, enum aarch64_insn_imm_type imm_type) { u64 imm, imm_mask; s64 sval; - u32 insn = *(u32 *)place; + u32 insn = le32_to_cpu(*(u32 *)place); /* Calculate the relocation value. */ sval = do_reloc(op, place, val); @@ -236,7 +172,8 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val, imm = sval & imm_mask; /* Update the instruction's immediate field. */ - *(u32 *)place = encode_insn_immediate(imm_type, insn, imm); + insn = aarch64_insn_encode_immediate(imm_type, insn, imm); + *(u32 *)place = cpu_to_le32(insn); /* * Extract the upper value bits (including the sign bit) and @@ -318,125 +255,125 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, overflow_check = false; case R_AARCH64_MOVW_UABS_G0: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0, - INSN_IMM_16); + AARCH64_INSN_IMM_16); break; case R_AARCH64_MOVW_UABS_G1_NC: overflow_check = false; case R_AARCH64_MOVW_UABS_G1: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16, - INSN_IMM_16); + AARCH64_INSN_IMM_16); break; case R_AARCH64_MOVW_UABS_G2_NC: overflow_check = false; case R_AARCH64_MOVW_UABS_G2: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32, - INSN_IMM_16); + AARCH64_INSN_IMM_16); break; case R_AARCH64_MOVW_UABS_G3: /* We're using the top bits so we can't overflow. 
*/ overflow_check = false; ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48, - INSN_IMM_16); + AARCH64_INSN_IMM_16); break; case R_AARCH64_MOVW_SABS_G0: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0, - INSN_IMM_MOVNZ); + AARCH64_INSN_IMM_MOVNZ); break; case R_AARCH64_MOVW_SABS_G1: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16, - INSN_IMM_MOVNZ); + AARCH64_INSN_IMM_MOVNZ); break; case R_AARCH64_MOVW_SABS_G2: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32, - INSN_IMM_MOVNZ); + AARCH64_INSN_IMM_MOVNZ); break; case R_AARCH64_MOVW_PREL_G0_NC: overflow_check = false; ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0, - INSN_IMM_MOVK); + AARCH64_INSN_IMM_MOVK); break; case R_AARCH64_MOVW_PREL_G0: ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0, - INSN_IMM_MOVNZ); + AARCH64_INSN_IMM_MOVNZ); break; case R_AARCH64_MOVW_PREL_G1_NC: overflow_check = false; ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16, - INSN_IMM_MOVK); + AARCH64_INSN_IMM_MOVK); break; case R_AARCH64_MOVW_PREL_G1: ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16, - INSN_IMM_MOVNZ); + AARCH64_INSN_IMM_MOVNZ); break; case R_AARCH64_MOVW_PREL_G2_NC: overflow_check = false; ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32, - INSN_IMM_MOVK); + AARCH64_INSN_IMM_MOVK); break; case R_AARCH64_MOVW_PREL_G2: ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32, - INSN_IMM_MOVNZ); + AARCH64_INSN_IMM_MOVNZ); break; case R_AARCH64_MOVW_PREL_G3: /* We're using the top bits so we can't overflow. */ overflow_check = false; ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48, - INSN_IMM_MOVNZ); + AARCH64_INSN_IMM_MOVNZ); break; /* Immediate instruction relocations. */ case R_AARCH64_LD_PREL_LO19: ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19, - INSN_IMM_19); + AARCH64_INSN_IMM_19); break; case R_AARCH64_ADR_PREL_LO21: ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21, - INSN_IMM_ADR); + AARCH64_INSN_IMM_ADR); break; case R_AARCH64_ADR_PREL_PG_HI21_NC: overflow_check = false; case R_AARCH64_ADR_PREL_PG_HI21: ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21, - INSN_IMM_ADR); + AARCH64_INSN_IMM_ADR); break; case R_AARCH64_ADD_ABS_LO12_NC: case R_AARCH64_LDST8_ABS_LO12_NC: overflow_check = false; ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12, - INSN_IMM_12); + AARCH64_INSN_IMM_12); break; case R_AARCH64_LDST16_ABS_LO12_NC: overflow_check = false; ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11, - INSN_IMM_12); + AARCH64_INSN_IMM_12); break; case R_AARCH64_LDST32_ABS_LO12_NC: overflow_check = false; ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10, - INSN_IMM_12); + AARCH64_INSN_IMM_12); break; case R_AARCH64_LDST64_ABS_LO12_NC: overflow_check = false; ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9, - INSN_IMM_12); + AARCH64_INSN_IMM_12); break; case R_AARCH64_LDST128_ABS_LO12_NC: overflow_check = false; ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8, - INSN_IMM_12); + AARCH64_INSN_IMM_12); break; case R_AARCH64_TSTBR14: ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14, - INSN_IMM_14); + AARCH64_INSN_IMM_14); break; case R_AARCH64_CONDBR19: ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19, - INSN_IMM_19); + AARCH64_INSN_IMM_19); break; case R_AARCH64_JUMP26: case R_AARCH64_CALL26: ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26, - INSN_IMM_26); + AARCH64_INSN_IMM_26); break; default: diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 0e63c98..5b1cd79 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -22,6 +22,7 @@ #include <linux/bitmap.h> 
#include <linux/interrupt.h> +#include <linux/irq.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/perf_event.h> @@ -363,26 +364,53 @@ validate_group(struct perf_event *event) } static void +armpmu_disable_percpu_irq(void *data) +{ + unsigned int irq = *(unsigned int *)data; + disable_percpu_irq(irq); +} + +static void armpmu_release_hardware(struct arm_pmu *armpmu) { - int i, irq, irqs; + int irq; + unsigned int i, irqs; struct platform_device *pmu_device = armpmu->plat_device; irqs = min(pmu_device->num_resources, num_possible_cpus()); + if (!irqs) + return; - for (i = 0; i < irqs; ++i) { - if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs)) - continue; - irq = platform_get_irq(pmu_device, i); - if (irq >= 0) - free_irq(irq, armpmu); + irq = platform_get_irq(pmu_device, 0); + if (irq <= 0) + return; + + if (irq_is_percpu(irq)) { + on_each_cpu(armpmu_disable_percpu_irq, &irq, 1); + free_percpu_irq(irq, &cpu_hw_events); + } else { + for (i = 0; i < irqs; ++i) { + if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs)) + continue; + irq = platform_get_irq(pmu_device, i); + if (irq > 0) + free_irq(irq, armpmu); + } } } +static void +armpmu_enable_percpu_irq(void *data) +{ + unsigned int irq = *(unsigned int *)data; + enable_percpu_irq(irq, IRQ_TYPE_NONE); +} + static int armpmu_reserve_hardware(struct arm_pmu *armpmu) { - int i, err, irq, irqs; + int err, irq; + unsigned int i, irqs; struct platform_device *pmu_device = armpmu->plat_device; if (!pmu_device) { @@ -391,39 +419,59 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu) } irqs = min(pmu_device->num_resources, num_possible_cpus()); - if (irqs < 1) { + if (!irqs) { pr_err("no irqs for PMUs defined\n"); return -ENODEV; } - for (i = 0; i < irqs; ++i) { - err = 0; - irq = platform_get_irq(pmu_device, i); - if (irq < 0) - continue; + irq = platform_get_irq(pmu_device, 0); + if (irq <= 0) { + pr_err("failed to get valid irq for PMU device\n"); + return -ENODEV; + } - /* - * If we have a single PMU interrupt that we can't shift, - * assume that we're running on a uniprocessor machine and - * continue. Otherwise, continue without this interrupt. - */ - if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { - pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", - irq, i); - continue; - } + if (irq_is_percpu(irq)) { + err = request_percpu_irq(irq, armpmu->handle_irq, + "arm-pmu", &cpu_hw_events); - err = request_irq(irq, armpmu->handle_irq, - IRQF_NOBALANCING, - "arm-pmu", armpmu); if (err) { - pr_err("unable to request IRQ%d for ARM PMU counters\n", - irq); + pr_err("unable to request percpu IRQ%d for ARM PMU counters\n", + irq); armpmu_release_hardware(armpmu); return err; } - cpumask_set_cpu(i, &armpmu->active_irqs); + on_each_cpu(armpmu_enable_percpu_irq, &irq, 1); + } else { + for (i = 0; i < irqs; ++i) { + err = 0; + irq = platform_get_irq(pmu_device, i); + if (irq <= 0) + continue; + + /* + * If we have a single PMU interrupt that we can't shift, + * assume that we're running on a uniprocessor machine and + * continue. Otherwise, continue without this interrupt. 
+ */ + if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { + pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", + irq, i); + continue; + } + + err = request_irq(irq, armpmu->handle_irq, + IRQF_NOBALANCING, + "arm-pmu", armpmu); + if (err) { + pr_err("unable to request IRQ%d for ARM PMU counters\n", + irq); + armpmu_release_hardware(armpmu); + return err; + } + + cpumask_set_cpu(i, &armpmu->active_irqs); + } } return 0; diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index de17c89..248a15d 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -33,6 +33,7 @@ #include <linux/kallsyms.h> #include <linux/init.h> #include <linux/cpu.h> +#include <linux/cpuidle.h> #include <linux/elfcore.h> #include <linux/pm.h> #include <linux/tick.h> @@ -98,8 +99,10 @@ void arch_cpu_idle(void) * This should do all the clock switching and wait for interrupt * tricks */ - cpu_do_idle(); - local_irq_enable(); + if (cpuidle_idle_call()) { + cpu_do_idle(); + local_irq_enable(); + } } #ifdef CONFIG_HOTPLUG_CPU @@ -308,6 +311,7 @@ struct task_struct *__switch_to(struct task_struct *prev, unsigned long get_wchan(struct task_struct *p) { struct stackframe frame; + unsigned long stack_page; int count = 0; if (!p || p == current || p->state == TASK_RUNNING) return 0; @@ -315,9 +319,11 @@ unsigned long get_wchan(struct task_struct *p) frame.fp = thread_saved_fp(p); frame.sp = thread_saved_sp(p); frame.pc = thread_saved_pc(p); + stack_page = (unsigned long)task_stack_page(p); do { - int ret = unwind_frame(&frame); - if (ret < 0) + if (frame.sp < stack_page || + frame.sp >= stack_page + THREAD_SIZE || + unwind_frame(&frame)) return 0; if (!in_sched_functions(frame.pc)) return frame.pc; diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index fecdbf7..6a8928b 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -214,31 +214,29 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type, { int err, len, type, disabled = !ctrl.enabled; - if (disabled) { - len = 0; - type = HW_BREAKPOINT_EMPTY; - } else { - err = arch_bp_generic_fields(ctrl, &len, &type); - if (err) - return err; - - switch (note_type) { - case NT_ARM_HW_BREAK: - if ((type & HW_BREAKPOINT_X) != type) - return -EINVAL; - break; - case NT_ARM_HW_WATCH: - if ((type & HW_BREAKPOINT_RW) != type) - return -EINVAL; - break; - default: + attr->disabled = disabled; + if (disabled) + return 0; + + err = arch_bp_generic_fields(ctrl, &len, &type); + if (err) + return err; + + switch (note_type) { + case NT_ARM_HW_BREAK: + if ((type & HW_BREAKPOINT_X) != type) return -EINVAL; - } + break; + case NT_ARM_HW_WATCH: + if ((type & HW_BREAKPOINT_RW) != type) + return -EINVAL; + break; + default: + return -EINVAL; } attr->bp_len = len; attr->bp_type = type; - attr->disabled = disabled; return 0; } @@ -636,28 +634,27 @@ static int compat_gpr_get(struct task_struct *target, for (i = 0; i < num_regs; ++i) { unsigned int idx = start + i; - void *reg; + compat_ulong_t reg; switch (idx) { case 15: - reg = (void *)&task_pt_regs(target)->pc; + reg = task_pt_regs(target)->pc; break; case 16: - reg = (void *)&task_pt_regs(target)->pstate; + reg = task_pt_regs(target)->pstate; break; case 17: - reg = (void *)&task_pt_regs(target)->orig_x0; + reg = task_pt_regs(target)->orig_x0; break; default: - reg = (void *)&task_pt_regs(target)->regs[idx]; + reg = task_pt_regs(target)->regs[idx]; } - ret = copy_to_user(ubuf, reg, sizeof(compat_ulong_t)); - + ret = copy_to_user(ubuf, ®, sizeof(reg)); if 
(ret) break; - else - ubuf += sizeof(compat_ulong_t); + + ubuf += sizeof(reg); } return ret; @@ -685,28 +682,28 @@ static int compat_gpr_set(struct task_struct *target, for (i = 0; i < num_regs; ++i) { unsigned int idx = start + i; - void *reg; + compat_ulong_t reg; + + ret = copy_from_user(®, ubuf, sizeof(reg)); + if (ret) + return ret; + + ubuf += sizeof(reg); switch (idx) { case 15: - reg = (void *)&newregs.pc; + newregs.pc = reg; break; case 16: - reg = (void *)&newregs.pstate; + newregs.pstate = reg; break; case 17: - reg = (void *)&newregs.orig_x0; + newregs.orig_x0 = reg; break; default: - reg = (void *)&newregs.regs[idx]; + newregs.regs[idx] = reg; } - ret = copy_from_user(reg, ubuf, sizeof(compat_ulong_t)); - - if (ret) - goto out; - else - ubuf += sizeof(compat_ulong_t); } if (valid_user_regs(&newregs.user_regs)) @@ -714,7 +711,6 @@ static int compat_gpr_set(struct task_struct *target, else ret = -EINVAL; -out: return ret; } diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 0bc5e4c..c8e9eff 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -108,20 +108,95 @@ void __init early_print(const char *str, ...) printk("%s", buf); } +void __init smp_setup_processor_id(void) +{ + /* + * clear __my_cpu_offset on boot CPU to avoid hang caused by + * using percpu variable early, for example, lockdep will + * access percpu variable inside lock_release + */ + set_my_cpu_offset(0); +} + bool arch_match_cpu_phys_id(int cpu, u64 phys_id) { return phys_id == cpu_logical_map(cpu); } +struct mpidr_hash mpidr_hash; +#ifdef CONFIG_SMP +/** + * smp_build_mpidr_hash - Pre-compute shifts required at each affinity + * level in order to build a linear index from an + * MPIDR value. Resulting algorithm is a collision + * free hash carried out through shifting and ORing + */ +static void __init smp_build_mpidr_hash(void) +{ + u32 i, affinity, fs[4], bits[4], ls; + u64 mask = 0; + /* + * Pre-scan the list of MPIDRS and filter out bits that do + * not contribute to affinity levels, ie they never toggle. + */ + for_each_possible_cpu(i) + mask |= (cpu_logical_map(i) ^ cpu_logical_map(0)); + pr_debug("mask of set bits %#llx\n", mask); + /* + * Find and stash the last and first bit set at all affinity levels to + * check how many bits are required to represent them. + */ + for (i = 0; i < 4; i++) { + affinity = MPIDR_AFFINITY_LEVEL(mask, i); + /* + * Find the MSB bit and LSB bits position + * to determine how many bits are required + * to express the affinity level. + */ + ls = fls(affinity); + fs[i] = affinity ? ffs(affinity) - 1 : 0; + bits[i] = ls - fs[i]; + } + /* + * An index can be created from the MPIDR_EL1 by isolating the + * significant bits at each affinity level and by shifting + * them in order to compress the 32 bits values space to a + * compressed set of values. This is equivalent to hashing + * the MPIDR_EL1 through shifting and ORing. It is a collision free + * hash though not minimal since some levels might contain a number + * of CPUs that is not an exact power of 2 and their bit + * representation might contain holes, eg MPIDR_EL1[7:0] = {0x2, 0x80}. 
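A worked example of the resulting hash, assuming a hypothetical 4-CPU system whose MPIDR_EL1 values are {0x0, 0x1, 0x100, 0x101} (the shift_aff assignments follow just below):

        /*
         * mask = 0x101                   (only Aff0[0] and Aff1[0] ever toggle)
         * bits[0] = 1, fs[0] = 0   =>   shift_aff[0] = 0 + 0     = 0
         * bits[1] = 1, fs[1] = 0   =>   shift_aff[1] = 8 + 0 - 1 = 7
         * index = ((mpidr & 0x001) >> 0) | ((mpidr & 0x100) >> 7)
         * 0x000 -> 0, 0x001 -> 1, 0x100 -> 2, 0x101 -> 3   (collision free;
         * mpidr_hash.bits = 2, so mpidr_hash_size() = 4)
         */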
+ */ + mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0]; + mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0]; + mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] - + (bits[1] + bits[0]); + mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) + + fs[3] - (bits[2] + bits[1] + bits[0]); + mpidr_hash.mask = mask; + mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0]; + pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n", + mpidr_hash.shift_aff[0], + mpidr_hash.shift_aff[1], + mpidr_hash.shift_aff[2], + mpidr_hash.shift_aff[3], + mpidr_hash.mask, + mpidr_hash.bits); + /* + * 4x is an arbitrary value used to warn on a hash table much bigger + * than expected on most systems. + */ + if (mpidr_hash_size() > 4 * num_possible_cpus()) + pr_warn("Large number of MPIDR hash buckets detected\n"); + __flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash)); +} +#endif + static void __init setup_processor(void) { struct cpu_info *cpu_info; + u64 features, block; - /* - * locate processor in the list of supported processor - * types. The linker builds this table for us from the - * entries in arch/arm/mm/proc.S - */ cpu_info = lookup_processor_type(read_cpuid_id()); if (!cpu_info) { printk("CPU configuration botched (ID %08x), unable to continue.\n", @@ -136,6 +211,37 @@ static void __init setup_processor(void) sprintf(init_utsname()->machine, ELF_PLATFORM); elf_hwcap = 0; + + /* + * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks. + * The blocks we test below represent incremental functionality + * for non-negative values. Negative values are reserved. + */ + features = read_cpuid(ID_AA64ISAR0_EL1); + block = (features >> 4) & 0xf; + if (!(block & 0x8)) { + switch (block) { + default: + case 2: + elf_hwcap |= HWCAP_PMULL; + case 1: + elf_hwcap |= HWCAP_AES; + case 0: + break; + } + } + + block = (features >> 8) & 0xf; + if (block && !(block & 0x8)) + elf_hwcap |= HWCAP_SHA1; + + block = (features >> 12) & 0xf; + if (block && !(block & 0x8)) + elf_hwcap |= HWCAP_SHA2; + + block = (features >> 16) & 0xf; + if (block && !(block & 0x8)) + elf_hwcap |= HWCAP_CRC32; } static void __init setup_machine_fdt(phys_addr_t dt_phys) @@ -205,6 +311,11 @@ u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID }; void __init setup_arch(char **cmdline_p) { + /* + * Unmask asynchronous aborts early to catch possible system errors. + */ + local_async_enable(); + setup_processor(); setup_machine_fdt(__fdt_pointer); @@ -231,6 +342,7 @@ void __init setup_arch(char **cmdline_p) cpu_read_bootcpu_ops(); #ifdef CONFIG_SMP smp_init_cpus(); + smp_build_mpidr_hash(); #endif #ifdef CONFIG_VT @@ -270,6 +382,11 @@ static const char *hwcap_str[] = { "fp", "asimd", "evtstrm", + "aes", + "pmull", + "sha1", + "sha2", + "crc32", NULL }; diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S new file mode 100644 index 0000000..b192572 --- /dev/null +++ b/arch/arm64/kernel/sleep.S @@ -0,0 +1,184 @@ +#include <linux/errno.h> +#include <linux/linkage.h> +#include <asm/asm-offsets.h> +#include <asm/assembler.h> + + .text +/* + * Implementation of MPIDR_EL1 hash algorithm through shifting + * and OR'ing. 
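The HWCAP bits detected in setup_processor() above are advertised to userspace through the ELF auxiliary vector; a minimal consumer sketch (userspace C, not part of this patch):

        #include <sys/auxv.h>
        #include <asm/hwcap.h>          /* HWCAP_AES, HWCAP_PMULL, HWCAP_CRC32, ... */

        int have_hw_aes(void)
        {
                return !!(getauxval(AT_HWCAP) & HWCAP_AES);
        }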
+ * + * @dst: register containing hash result + * @rs0: register containing affinity level 0 bit shift + * @rs1: register containing affinity level 1 bit shift + * @rs2: register containing affinity level 2 bit shift + * @rs3: register containing affinity level 3 bit shift + * @mpidr: register containing MPIDR_EL1 value + * @mask: register containing MPIDR mask + * + * Pseudo C-code: + * + *u32 dst; + * + *compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 rs3, u64 mpidr, u64 mask) { + * u32 aff0, aff1, aff2, aff3; + * u64 mpidr_masked = mpidr & mask; + * aff0 = mpidr_masked & 0xff; + * aff1 = mpidr_masked & 0xff00; + * aff2 = mpidr_masked & 0xff0000; + * aff2 = mpidr_masked & 0xff00000000; + * dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2 | aff3 >> rs3); + *} + * Input registers: rs0, rs1, rs2, rs3, mpidr, mask + * Output register: dst + * Note: input and output registers must be disjoint register sets + (eg: a macro instance with mpidr = x1 and dst = x1 is invalid) + */ + .macro compute_mpidr_hash dst, rs0, rs1, rs2, rs3, mpidr, mask + and \mpidr, \mpidr, \mask // mask out MPIDR bits + and \dst, \mpidr, #0xff // mask=aff0 + lsr \dst ,\dst, \rs0 // dst=aff0>>rs0 + and \mask, \mpidr, #0xff00 // mask = aff1 + lsr \mask ,\mask, \rs1 + orr \dst, \dst, \mask // dst|=(aff1>>rs1) + and \mask, \mpidr, #0xff0000 // mask = aff2 + lsr \mask ,\mask, \rs2 + orr \dst, \dst, \mask // dst|=(aff2>>rs2) + and \mask, \mpidr, #0xff00000000 // mask = aff3 + lsr \mask ,\mask, \rs3 + orr \dst, \dst, \mask // dst|=(aff3>>rs3) + .endm +/* + * Save CPU state for a suspend. This saves callee registers, and allocates + * space on the kernel stack to save the CPU specific registers + some + * other data for resume. + * + * x0 = suspend finisher argument + */ +ENTRY(__cpu_suspend) + stp x29, lr, [sp, #-96]! + stp x19, x20, [sp,#16] + stp x21, x22, [sp,#32] + stp x23, x24, [sp,#48] + stp x25, x26, [sp,#64] + stp x27, x28, [sp,#80] + mov x2, sp + sub sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx + mov x1, sp + /* + * x1 now points to struct cpu_suspend_ctx allocated on the stack + */ + str x2, [x1, #CPU_CTX_SP] + ldr x2, =sleep_save_sp + ldr x2, [x2, #SLEEP_SAVE_SP_VIRT] +#ifdef CONFIG_SMP + mrs x7, mpidr_el1 + ldr x9, =mpidr_hash + ldr x10, [x9, #MPIDR_HASH_MASK] + /* + * Following code relies on the struct mpidr_hash + * members size. + */ + ldp w3, w4, [x9, #MPIDR_HASH_SHIFTS] + ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)] + compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10 + add x2, x2, x8, lsl #3 +#endif + bl __cpu_suspend_finisher + /* + * Never gets here, unless suspend fails. 
+ * Successful cpu_suspend should return from cpu_resume, returning + * through this code path is considered an error + * If the return value is set to 0 force x0 = -EOPNOTSUPP + * to make sure a proper error condition is propagated + */ + cmp x0, #0 + mov x3, #-EOPNOTSUPP + csel x0, x3, x0, eq + add sp, sp, #CPU_SUSPEND_SZ // rewind stack pointer + ldp x19, x20, [sp, #16] + ldp x21, x22, [sp, #32] + ldp x23, x24, [sp, #48] + ldp x25, x26, [sp, #64] + ldp x27, x28, [sp, #80] + ldp x29, lr, [sp], #96 + ret +ENDPROC(__cpu_suspend) + .ltorg + +/* + * x0 must contain the sctlr value retrieved from restored context + */ +ENTRY(cpu_resume_mmu) + ldr x3, =cpu_resume_after_mmu + msr sctlr_el1, x0 // restore sctlr_el1 + isb + br x3 // global jump to virtual address +ENDPROC(cpu_resume_mmu) +cpu_resume_after_mmu: + mov x0, #0 // return zero on success + ldp x19, x20, [sp, #16] + ldp x21, x22, [sp, #32] + ldp x23, x24, [sp, #48] + ldp x25, x26, [sp, #64] + ldp x27, x28, [sp, #80] + ldp x29, lr, [sp], #96 + ret +ENDPROC(cpu_resume_after_mmu) + + .data +ENTRY(cpu_resume) + bl el2_setup // if in EL2 drop to EL1 cleanly +#ifdef CONFIG_SMP + mrs x1, mpidr_el1 + adr x4, mpidr_hash_ptr + ldr x5, [x4] + add x8, x4, x5 // x8 = struct mpidr_hash phys address + /* retrieve mpidr_hash members to compute the hash */ + ldr x2, [x8, #MPIDR_HASH_MASK] + ldp w3, w4, [x8, #MPIDR_HASH_SHIFTS] + ldp w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)] + compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2 + /* x7 contains hash index, let's use it to grab context pointer */ +#else + mov x7, xzr +#endif + adr x0, sleep_save_sp + ldr x0, [x0, #SLEEP_SAVE_SP_PHYS] + ldr x0, [x0, x7, lsl #3] + /* load sp from context */ + ldr x2, [x0, #CPU_CTX_SP] + adr x1, sleep_idmap_phys + /* load physical address of identity map page table in x1 */ + ldr x1, [x1] + mov sp, x2 + /* + * cpu_do_resume expects x0 to contain context physical address + * pointer and x1 to contain physical address of 1:1 page tables + */ + bl cpu_do_resume // PC relative jump, MMU off + b cpu_resume_mmu // Resume MMU, never returns +ENDPROC(cpu_resume) + + .align 3 +mpidr_hash_ptr: + /* + * offset of mpidr_hash symbol from current location + * used to obtain run-time mpidr_hash address with MMU off + */ + .quad mpidr_hash - . +/* + * physical address of identity mapped page tables + */ + .type sleep_idmap_phys, #object +ENTRY(sleep_idmap_phys) + .quad 0 +/* + * struct sleep_save_sp { + * phys_addr_t *save_ptr_stash; + * phys_addr_t save_ptr_stash_phys; + * }; + */ + .type sleep_save_sp, #object +ENTRY(sleep_save_sp) + .space SLEEP_SAVE_SP_SZ // struct sleep_save_sp diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index a5aeefa..1b7617a 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -61,6 +61,7 @@ enum ipi_msg_type { IPI_CALL_FUNC, IPI_CALL_FUNC_SINGLE, IPI_CPU_STOP, + IPI_TIMER, }; /* @@ -122,8 +123,6 @@ asmlinkage void secondary_start_kernel(void) struct mm_struct *mm = &init_mm; unsigned int cpu = smp_processor_id(); - printk("CPU%u: Booted secondary processor\n", cpu); - /* * All kernel threads share the same mm context; grab a * reference and switch to it. @@ -132,6 +131,9 @@ asmlinkage void secondary_start_kernel(void) current->active_mm = mm; cpumask_set_cpu(cpu, mm_cpumask(mm)); + set_my_cpu_offset(per_cpu_offset(smp_processor_id())); + printk("CPU%u: Booted secondary processor\n", cpu); + /* * TTBR0 is only used for the identity mapping at this stage. Make it * point to zero page to avoid speculatively fetching new entries. 
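
The set_my_cpu_offset() calls added above (and the early clearing in smp_setup_processor_id()) matter because every per-CPU access is resolved as the address of the per-CPU template plus the current CPU's offset, kept in a dedicated system register on arm64. A rough user-space model of the idea, with made-up names and a plain variable standing in for that register:

#include <stdio.h>

#define NR_CPUS	4

static long percpu_area[NR_CPUS][16];	/* one copy of the per-CPU data per CPU */
static long my_cpu_offset;		/* stands in for the per-CPU offset register */

/* illustrative only, not the kernel's this_cpu_ptr() implementation */
#define this_cpu_slot(slot) (&percpu_area[0][slot] + my_cpu_offset / sizeof(long))

int main(void)
{
	/* boot CPU: offset 0, so very early accesses (e.g. lockdep) stay valid */
	my_cpu_offset = 0;
	*this_cpu_slot(3) = 42;

	/* a secondary CPU switches to its own offset before touching per-CPU data */
	my_cpu_offset = 2 * sizeof(percpu_area[0]);
	*this_cpu_slot(3) = 7;

	printf("cpu0 slot3 = %ld, cpu2 slot3 = %ld\n",
	       percpu_area[0][3], percpu_area[2][3]);
	return 0;
}
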
@@ -160,6 +162,7 @@ asmlinkage void secondary_start_kernel(void) local_irq_enable(); local_fiq_enable(); + local_async_enable(); /* * OK, it's off to the idle thread for us @@ -270,6 +273,7 @@ void __init smp_cpus_done(unsigned int max_cpus) void __init smp_prepare_boot_cpu(void) { + set_my_cpu_offset(per_cpu_offset(smp_processor_id())); } static void (*smp_cross_call)(const struct cpumask *, unsigned int); @@ -446,6 +450,7 @@ static const char *ipi_types[NR_IPI] = { S(IPI_CALL_FUNC, "Function call interrupts"), S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"), S(IPI_CPU_STOP, "CPU stop interrupts"), + S(IPI_TIMER, "Timer broadcast interrupts"), }; void show_ipi_list(struct seq_file *p, int prec) @@ -531,6 +536,14 @@ void handle_IPI(int ipinr, struct pt_regs *regs) irq_exit(); break; +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST + case IPI_TIMER: + irq_enter(); + tick_receive_broadcast(); + irq_exit(); + break; +#endif + default: pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr); break; @@ -543,6 +556,13 @@ void smp_send_reschedule(int cpu) smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); } +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST +void tick_broadcast(const struct cpumask *mask) +{ + smp_cross_call(mask, IPI_TIMER); +} +#endif + void smp_send_stop(void) { unsigned long timeout; diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index d25459f..c3b6c63 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -43,7 +43,7 @@ int unwind_frame(struct stackframe *frame) low = frame->sp; high = ALIGN(low, THREAD_SIZE); - if (fp < low || fp > high || fp & 0xf) + if (fp < low || fp > high - 0x18 || fp & 0xf) return -EINVAL; frame->sp = fp + 0x10; diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c new file mode 100644 index 0000000..430344e --- /dev/null +++ b/arch/arm64/kernel/suspend.c @@ -0,0 +1,132 @@ +#include <linux/slab.h> +#include <asm/cacheflush.h> +#include <asm/cpu_ops.h> +#include <asm/debug-monitors.h> +#include <asm/pgtable.h> +#include <asm/memory.h> +#include <asm/smp_plat.h> +#include <asm/suspend.h> +#include <asm/tlbflush.h> + +extern int __cpu_suspend(unsigned long); +/* + * This is called by __cpu_suspend() to save the state, and do whatever + * flushing is required to ensure that when the CPU goes to sleep we have + * the necessary data available when the caches are not searched. + * + * @arg: Argument to pass to suspend operations + * @ptr: CPU context virtual address + * @save_ptr: address of the location where the context physical address + * must be saved + */ +int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr, + phys_addr_t *save_ptr) +{ + int cpu = smp_processor_id(); + + *save_ptr = virt_to_phys(ptr); + + cpu_do_suspend(ptr); + /* + * Only flush the context that must be retrieved with the MMU + * off. VA primitives ensure the flush is applied to all + * cache levels so context is pushed to DRAM. + */ + __flush_dcache_area(ptr, sizeof(*ptr)); + __flush_dcache_area(save_ptr, sizeof(*save_ptr)); + + return cpu_ops[cpu]->cpu_suspend(arg); +} + +/* + * This hook is provided so that cpu_suspend code can restore HW + * breakpoints as early as possible in the resume path, before reenabling + * debug exceptions. Code cannot be run from a CPU PM notifier since by the + * time the notifier runs debug exceptions might have been enabled already, + * with HW breakpoints registers content still in an unknown state. 
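
A hypothetical user of the restore hook described above would register its routine once at init time, before any suspend/resume cycle. The function names below are invented for the sketch; only cpu_suspend_set_dbg_restorer() comes from the patch, and its prototype is repeated here merely so the fragment is self-contained.

#include <linux/init.h>

extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *));

/* reprogram breakpoint/watchpoint registers to a known-good state */
static void my_hw_breakpoint_reset(void *unused)
{
	/* ... arch-specific debug register writes would go here ... */
}

static int __init my_debug_init(void)
{
	cpu_suspend_set_dbg_restorer(my_hw_breakpoint_reset);
	return 0;
}
early_initcall(my_debug_init);
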
+ */ +void (*hw_breakpoint_restore)(void *); +void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) +{ + /* Prevent multiple restore hook initializations */ + if (WARN_ON(hw_breakpoint_restore)) + return; + hw_breakpoint_restore = hw_bp_restore; +} + +/** + * cpu_suspend + * + * @arg: argument to pass to the finisher function + */ +int cpu_suspend(unsigned long arg) +{ + struct mm_struct *mm = current->active_mm; + int ret, cpu = smp_processor_id(); + unsigned long flags; + + /* + * If cpu_ops have not been registered or suspend + * has not been initialized, cpu_suspend call fails early. + */ + if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend) + return -EOPNOTSUPP; + + /* + * From this point debug exceptions are disabled to prevent + * updates to mdscr register (saved and restored along with + * general purpose registers) from kernel debuggers. + */ + local_dbg_save(flags); + + /* + * mm context saved on the stack, it will be restored when + * the cpu comes out of reset through the identity mapped + * page tables, so that the thread address space is properly + * set-up on function return. + */ + ret = __cpu_suspend(arg); + if (ret == 0) { + cpu_switch_mm(mm->pgd, mm); + flush_tlb_all(); + /* + * Restore HW breakpoint registers to sane values + * before debug exceptions are possibly reenabled + * through local_dbg_restore. + */ + if (hw_breakpoint_restore) + hw_breakpoint_restore(NULL); + } + + /* + * Restore pstate flags. OS lock and mdscr have been already + * restored, so from this point onwards, debugging is fully + * renabled if it was enabled when core started shutdown. + */ + local_dbg_restore(flags); + + return ret; +} + +extern struct sleep_save_sp sleep_save_sp; +extern phys_addr_t sleep_idmap_phys; + +static int cpu_suspend_init(void) +{ + void *ctx_ptr; + + /* ctx_ptr is an array of physical addresses */ + ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(phys_addr_t), GFP_KERNEL); + + if (WARN_ON(!ctx_ptr)) + return -ENOMEM; + + sleep_save_sp.save_ptr_stash = ctx_ptr; + sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr); + sleep_idmap_phys = virt_to_phys(idmap_pg_dir); + __flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp)); + __flush_dcache_area(&sleep_idmap_phys, sizeof(sleep_idmap_phys)); + + return 0; +} +early_initcall(cpu_suspend_init); diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 5161ad9..4ba7a55 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -99,17 +99,14 @@ SECTIONS . 
= ALIGN(PAGE_SIZE); _data = .; - __data_loc = _data - LOAD_OFFSET; _sdata = .; RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE) _edata = .; - _edata_loc = __data_loc + SIZEOF(.data); BSS_SECTION(0, 0, 0) _end = .; STABS_DEBUG - .comment 0 : { *(.comment) } } /* diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 59acc0e..328ce1a9 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -1,6 +1,4 @@ -lib-y := bitops.o delay.o \ - strncpy_from_user.o strnlen_user.o clear_user.o \ - copy_from_user.o copy_to_user.o copy_in_user.o \ - copy_page.o clear_page.o \ - memchr.o memcpy.o memmove.o memset.o \ +lib-y := bitops.o clear_user.o delay.o copy_from_user.o \ + copy_to_user.o copy_in_user.o copy_page.o \ + clear_page.o memchr.o memcpy.o memmove.o memset.o \ strchr.o strrchr.o diff --git a/arch/arm64/lib/strncpy_from_user.S b/arch/arm64/lib/strncpy_from_user.S deleted file mode 100644 index 56e448a..0000000 --- a/arch/arm64/lib/strncpy_from_user.S +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Based on arch/arm/lib/strncpy_from_user.S - * - * Copyright (C) 1995-2000 Russell King - * Copyright (C) 2012 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ - -#include <linux/linkage.h> -#include <asm/assembler.h> -#include <asm/errno.h> - - .text - .align 5 - -/* - * Copy a string from user space to kernel space. - * x0 = dst, x1 = src, x2 = byte length - * returns the number of characters copied (strlen of copied string), - * -EFAULT on exception, or "len" if we fill the whole buffer - */ -ENTRY(__strncpy_from_user) - mov x4, x1 -1: subs x2, x2, #1 - bmi 2f -USER(9f, ldrb w3, [x1], #1 ) - strb w3, [x0], #1 - cbnz w3, 1b - sub x1, x1, #1 // take NUL character out of count -2: sub x0, x1, x4 - ret -ENDPROC(__strncpy_from_user) - - .section .fixup,"ax" - .align 0 -9: strb wzr, [x0] // null terminate - mov x0, #-EFAULT - ret - .previous diff --git a/arch/arm64/lib/strnlen_user.S b/arch/arm64/lib/strnlen_user.S deleted file mode 100644 index 7f7b176..0000000 --- a/arch/arm64/lib/strnlen_user.S +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Based on arch/arm/lib/strnlen_user.S - * - * Copyright (C) 1995-2000 Russell King - * Copyright (C) 2012 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. 
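
Returning to the cpu_suspend() entry point added in suspend.c above: a platform power-management path would call it with an opaque argument that only the registered cpu_ops ->cpu_suspend() method interprets. The caller below is purely hypothetical and the argument value is made up.

#include <linux/printk.h>
#include <asm/suspend.h>	/* assumed location of the cpu_suspend() prototype */

#define MY_SLEEP_STATE	1	/* made-up value, meaningful only to the cpu_ops backend */

static int my_platform_enter_lowpower(void)
{
	int ret;

	/* saves CPU context, calls cpu_ops[cpu]->cpu_suspend(arg), and on
	 * success resumes through cpu_resume with the MMU back on */
	ret = cpu_suspend(MY_SLEEP_STATE);
	if (ret)
		pr_err("cpu_suspend failed: %d\n", ret);
	return ret;
}
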
- */ - -#include <linux/linkage.h> -#include <asm/assembler.h> -#include <asm/errno.h> - - .text - .align 5 - -/* Prototype: unsigned long __strnlen_user(const char *str, long n) - * Purpose : get length of a string in user memory - * Params : str - address of string in user memory - * Returns : length of string *including terminator* - * or zero on exception, or n if too long - */ -ENTRY(__strnlen_user) - mov x2, x0 -1: subs x1, x1, #1 - b.mi 2f -USER(9f, ldrb w3, [x0], #1 ) - cbnz w3, 1b -2: sub x0, x0, x2 - ret -ENDPROC(__strnlen_user) - - .section .fixup,"ax" - .align 0 -9: mov x0, #0 - ret - .previous diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 4bd7579..45b5ab5 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -21,6 +21,7 @@ #include <linux/export.h> #include <linux/slab.h> #include <linux/dma-mapping.h> +#include <linux/dma-contiguous.h> #include <linux/vmalloc.h> #include <linux/swiotlb.h> @@ -33,17 +34,47 @@ static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags, struct dma_attrs *attrs) { + if (dev == NULL) { + WARN_ONCE(1, "Use an actual device structure for DMA allocation\n"); + return NULL; + } + if (IS_ENABLED(CONFIG_ZONE_DMA32) && dev->coherent_dma_mask <= DMA_BIT_MASK(32)) flags |= GFP_DMA32; - return swiotlb_alloc_coherent(dev, size, dma_handle, flags); + if (IS_ENABLED(CONFIG_DMA_CMA)) { + struct page *page; + + page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, + get_order(size)); + if (!page) + return NULL; + + *dma_handle = phys_to_dma(dev, page_to_phys(page)); + return page_address(page); + } else { + return swiotlb_alloc_coherent(dev, size, dma_handle, flags); + } } static void arm64_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) { - swiotlb_free_coherent(dev, size, vaddr, dma_handle); + if (dev == NULL) { + WARN_ONCE(1, "Use an actual device structure for DMA allocation\n"); + return; + } + + if (IS_ENABLED(CONFIG_DMA_CMA)) { + phys_addr_t paddr = dma_to_phys(dev, dma_handle); + + dma_release_from_contiguous(dev, + phys_to_page(paddr), + size >> PAGE_SHIFT); + } else { + swiotlb_free_coherent(dev, size, vaddr, dma_handle); + } } static struct dma_map_ops arm64_swiotlb_dma_ops = { diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 0cb8742..d0b4c2e 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -30,6 +30,7 @@ #include <linux/memblock.h> #include <linux/sort.h> #include <linux/of_fdt.h> +#include <linux/dma-contiguous.h> #include <asm/sections.h> #include <asm/setup.h> @@ -159,6 +160,8 @@ void __init arm64_memblock_init(void) memblock_reserve(base, size); } + dma_contiguous_reserve(0); + memblock_allow_resize(); memblock_dump_all(); } diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 421b99f..bed1f1d 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -80,6 +80,75 @@ ENTRY(cpu_do_idle) ret ENDPROC(cpu_do_idle) +#ifdef CONFIG_ARM64_CPU_SUSPEND +/** + * cpu_do_suspend - save CPU registers context + * + * x0: virtual address of context pointer + */ +ENTRY(cpu_do_suspend) + mrs x2, tpidr_el0 + mrs x3, tpidrro_el0 + mrs x4, contextidr_el1 + mrs x5, mair_el1 + mrs x6, cpacr_el1 + mrs x7, ttbr1_el1 + mrs x8, tcr_el1 + mrs x9, vbar_el1 + mrs x10, mdscr_el1 + mrs x11, oslsr_el1 + mrs x12, sctlr_el1 + stp x2, x3, [x0] + stp x4, x5, [x0, #16] + stp x6, x7, [x0, #32] + stp x8, x9, [x0, #48] + stp x10, x11, [x0, #64] + str x12, [x0, #80] + 
ret +ENDPROC(cpu_do_suspend) + +/** + * cpu_do_resume - restore CPU register context + * + * x0: Physical address of context pointer + * x1: ttbr0_el1 to be restored + * + * Returns: + * sctlr_el1 value in x0 + */ +ENTRY(cpu_do_resume) + /* + * Invalidate local tlb entries before turning on MMU + */ + tlbi vmalle1 + ldp x2, x3, [x0] + ldp x4, x5, [x0, #16] + ldp x6, x7, [x0, #32] + ldp x8, x9, [x0, #48] + ldp x10, x11, [x0, #64] + ldr x12, [x0, #80] + msr tpidr_el0, x2 + msr tpidrro_el0, x3 + msr contextidr_el1, x4 + msr mair_el1, x5 + msr cpacr_el1, x6 + msr ttbr0_el1, x1 + msr ttbr1_el1, x7 + msr tcr_el1, x8 + msr vbar_el1, x9 + msr mdscr_el1, x10 + /* + * Restore oslsr_el1 by writing oslar_el1 + */ + ubfx x11, x11, #1, #1 + msr oslar_el1, x11 + mov x0, x12 + dsb nsh // Make sure local tlb invalidation completed + isb + ret +ENDPROC(cpu_do_resume) +#endif + /* * cpu_switch_mm(pgd_phys, tsk) * @@ -111,12 +180,12 @@ ENTRY(__cpu_setup) bl __flush_dcache_all mov lr, x28 ic iallu // I+BTB cache invalidate + tlbi vmalle1is // invalidate I + D TLBs dsb sy mov x0, #3 << 20 msr cpacr_el1, x0 // Enable FP/ASIMD msr mdscr_el1, xzr // Reset mdscr_el1 - tlbi vmalle1is // invalidate I + D TLBs /* * Memory region attributes for LPAE: * diff --git a/arch/avr32/boards/favr-32/setup.c b/arch/avr32/boards/favr-32/setup.c index 7b1f2cd..1f12149 100644 --- a/arch/avr32/boards/favr-32/setup.c +++ b/arch/avr32/boards/favr-32/setup.c @@ -298,8 +298,10 @@ static int __init set_abdac_rate(struct platform_device *pdev) */ retval = clk_round_rate(pll1, CONFIG_BOARD_FAVR32_ABDAC_RATE * 256 * 16); - if (retval < 0) + if (retval <= 0) { + retval = -EINVAL; goto out_abdac; + } retval = clk_set_rate(pll1, retval); if (retval != 0) diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig index d5aff36..4733e38 100644 --- a/arch/avr32/configs/atngw100_defconfig +++ b/arch/avr32/configs/atngw100_defconfig @@ -59,7 +59,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y diff --git a/arch/avr32/configs/atngw100_evklcd100_defconfig b/arch/avr32/configs/atngw100_evklcd100_defconfig index 4abcf43..1be0ee3 100644 --- a/arch/avr32/configs/atngw100_evklcd100_defconfig +++ b/arch/avr32/configs/atngw100_evklcd100_defconfig @@ -61,7 +61,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y diff --git a/arch/avr32/configs/atngw100_evklcd101_defconfig b/arch/avr32/configs/atngw100_evklcd101_defconfig index 18f3fa0..796e536 100644 --- a/arch/avr32/configs/atngw100_evklcd101_defconfig +++ b/arch/avr32/configs/atngw100_evklcd101_defconfig @@ -60,7 +60,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y diff --git a/arch/avr32/configs/atngw100_mrmt_defconfig b/arch/avr32/configs/atngw100_mrmt_defconfig index 06e389c..9a57da4 100644 --- a/arch/avr32/configs/atngw100_mrmt_defconfig +++ b/arch/avr32/configs/atngw100_mrmt_defconfig @@ -48,7 +48,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y 
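
The favr-32 fix above addresses a common clk API pitfall: clk_round_rate() can return 0 (no usable rate) as well as a negative error code, and neither must be fed into clk_set_rate() or returned as success. A small sketch of the corrected pattern, with a placeholder clock handle:

#include <linux/clk.h>
#include <linux/errno.h>

/* illustrative helper, not taken from the patch */
static int set_rounded_rate(struct clk *clk, unsigned long target_hz)
{
	long rounded = clk_round_rate(clk, target_hz);

	if (rounded <= 0)		/* 0: nothing suitable, < 0: error */
		return -EINVAL;

	return clk_set_rate(clk, rounded);
}
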
CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y diff --git a/arch/avr32/configs/atngw100mkii_defconfig b/arch/avr32/configs/atngw100mkii_defconfig index 2518a13..97fe1b3 100644 --- a/arch/avr32/configs/atngw100mkii_defconfig +++ b/arch/avr32/configs/atngw100mkii_defconfig @@ -59,7 +59,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y diff --git a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig index 245ef6b..a176d24 100644 --- a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig +++ b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig @@ -62,7 +62,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y diff --git a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig index fa6cbac..d1bf6dcf 100644 --- a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig +++ b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig @@ -61,7 +61,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y diff --git a/arch/avr32/configs/atstk1002_defconfig b/arch/avr32/configs/atstk1002_defconfig index bbd5131..2813dd2 100644 --- a/arch/avr32/configs/atstk1002_defconfig +++ b/arch/avr32/configs/atstk1002_defconfig @@ -53,7 +53,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y diff --git a/arch/avr32/configs/atstk1003_defconfig b/arch/avr32/configs/atstk1003_defconfig index c1cd726..f8ff3a3 100644 --- a/arch/avr32/configs/atstk1003_defconfig +++ b/arch/avr32/configs/atstk1003_defconfig @@ -42,7 +42,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y diff --git a/arch/avr32/configs/atstk1004_defconfig b/arch/avr32/configs/atstk1004_defconfig index 754ae56..992228e 100644 --- a/arch/avr32/configs/atstk1004_defconfig +++ b/arch/avr32/configs/atstk1004_defconfig @@ -42,7 +42,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y diff --git a/arch/avr32/configs/atstk1006_defconfig b/arch/avr32/configs/atstk1006_defconfig index 58589d8..b8e698b 100644 --- a/arch/avr32/configs/atstk1006_defconfig +++ b/arch/avr32/configs/atstk1006_defconfig @@ -54,7 +54,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y diff --git a/arch/avr32/configs/favr-32_defconfig b/arch/avr32/configs/favr-32_defconfig index c90fbf6..07bed3f 100644 --- a/arch/avr32/configs/favr-32_defconfig +++ b/arch/avr32/configs/favr-32_defconfig @@ -58,7 +58,6 @@ 
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y diff --git a/arch/avr32/configs/hammerhead_defconfig b/arch/avr32/configs/hammerhead_defconfig index ba7c31e..18db853 100644 --- a/arch/avr32/configs/hammerhead_defconfig +++ b/arch/avr32/configs/hammerhead_defconfig @@ -58,7 +58,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y diff --git a/arch/avr32/configs/merisc_defconfig b/arch/avr32/configs/merisc_defconfig index 65de443..91df6b2 100644 --- a/arch/avr32/configs/merisc_defconfig +++ b/arch/avr32/configs/merisc_defconfig @@ -46,7 +46,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_FW_LOADER is not set CONFIG_MTD=y CONFIG_MTD_CONCAT=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y diff --git a/arch/avr32/configs/mimc200_defconfig b/arch/avr32/configs/mimc200_defconfig index 0a8bfdc..d630e08 100644 --- a/arch/avr32/configs/mimc200_defconfig +++ b/arch/avr32/configs/mimc200_defconfig @@ -49,7 +49,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y diff --git a/arch/avr32/include/asm/barrier.h b/arch/avr32/include/asm/barrier.h index 0961275..7151007 100644 --- a/arch/avr32/include/asm/barrier.h +++ b/arch/avr32/include/asm/barrier.h @@ -8,22 +8,15 @@ #ifndef __ASM_AVR32_BARRIER_H #define __ASM_AVR32_BARRIER_H -#define nop() asm volatile("nop") - -#define mb() asm volatile("" : : : "memory") -#define rmb() mb() -#define wmb() asm volatile("sync 0" : : : "memory") -#define read_barrier_depends() do { } while(0) -#define set_mb(var, value) do { var = value; mb(); } while(0) +/* + * Weirdest thing ever.. no full barrier, but it has a write barrier! 
+ */ +#define wmb() asm volatile("sync 0" : : : "memory") #ifdef CONFIG_SMP # error "The AVR32 port does not support SMP" -#else -# define smp_mb() barrier() -# define smp_rmb() barrier() -# define smp_wmb() barrier() -# define smp_read_barrier_depends() do { } while(0) #endif +#include <asm-generic/barrier.h> #endif /* __ASM_AVR32_BARRIER_H */ diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c index 12f828a..d0f771b 100644 --- a/arch/avr32/kernel/time.c +++ b/arch/avr32/kernel/time.c @@ -59,7 +59,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id) static struct irqaction timer_irqaction = { .handler = timer_interrupt, /* Oprofile uses the same irq as the timer, so allow it to be shared */ - .flags = IRQF_TIMER | IRQF_DISABLED | IRQF_SHARED, + .flags = IRQF_TIMER | IRQF_SHARED, .name = "avr32_comparator", }; diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c index 32d680e..db19084 100644 --- a/arch/avr32/mach-at32ap/pm.c +++ b/arch/avr32/mach-at32ap/pm.c @@ -181,7 +181,7 @@ static const struct platform_suspend_ops avr32_pm_ops = { .enter = avr32_pm_enter, }; -static unsigned long avr32_pm_offset(void *symbol) +static unsigned long __init avr32_pm_offset(void *symbol) { extern u8 pm_exception[]; diff --git a/arch/blackfin/include/asm/barrier.h b/arch/blackfin/include/asm/barrier.h index ebb1895..19283a1 100644 --- a/arch/blackfin/include/asm/barrier.h +++ b/arch/blackfin/include/asm/barrier.h @@ -23,26 +23,10 @@ # define rmb() do { barrier(); smp_check_barrier(); } while (0) # define wmb() do { barrier(); smp_mark_barrier(); } while (0) # define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0) -#else -# define mb() barrier() -# define rmb() barrier() -# define wmb() barrier() -# define read_barrier_depends() do { } while (0) #endif -#else /* !CONFIG_SMP */ - -#define mb() barrier() -#define rmb() barrier() -#define wmb() barrier() -#define read_barrier_depends() do { } while (0) - #endif /* !CONFIG_SMP */ -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define set_mb(var, value) do { var = value; mb(); } while (0) -#define smp_read_barrier_depends() read_barrier_depends() +#include <asm-generic/barrier.h> #endif /* _BLACKFIN_BARRIER_H */ diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild index b06caf6..199b1a9 100644 --- a/arch/cris/include/asm/Kbuild +++ b/arch/cris/include/asm/Kbuild @@ -3,6 +3,7 @@ header-y += arch-v10/ header-y += arch-v32/ +generic-y += barrier.h generic-y += clkdev.h generic-y += exec.h generic-y += kvm_para.h diff --git a/arch/cris/include/asm/barrier.h b/arch/cris/include/asm/barrier.h deleted file mode 100644 index 198ad7f..0000000 --- a/arch/cris/include/asm/barrier.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef __ASM_CRIS_BARRIER_H -#define __ASM_CRIS_BARRIER_H - -#define nop() __asm__ __volatile__ ("nop"); - -#define barrier() __asm__ __volatile__("": : :"memory") -#define mb() barrier() -#define rmb() mb() -#define wmb() mb() -#define read_barrier_depends() do { } while(0) -#define set_mb(var, value) do { var = value; mb(); } while (0) - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while(0) -#endif - -#endif /* __ASM_CRIS_BARRIER_H */ diff --git a/arch/frv/include/asm/barrier.h b/arch/frv/include/asm/barrier.h 
index 06776ad..abbef47 100644 --- a/arch/frv/include/asm/barrier.h +++ b/arch/frv/include/asm/barrier.h @@ -17,13 +17,7 @@ #define mb() asm volatile ("membar" : : :"memory") #define rmb() asm volatile ("membar" : : :"memory") #define wmb() asm volatile ("membar" : : :"memory") -#define read_barrier_depends() do { } while (0) -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do {} while(0) -#define set_mb(var, value) \ - do { var = (value); barrier(); } while (0) +#include <asm-generic/barrier.h> #endif /* _ASM_BARRIER_H */ diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index 67c3450..ada843c 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild @@ -2,6 +2,7 @@ header-y += ucontext.h generic-y += auxvec.h +generic-y += barrier.h generic-y += bug.h generic-y += bugs.h generic-y += clkdev.h diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h index 8a64ff2..7aae4cb 100644 --- a/arch/hexagon/include/asm/atomic.h +++ b/arch/hexagon/include/asm/atomic.h @@ -160,8 +160,12 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) #define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0) #define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0) - #define atomic_inc_return(v) (atomic_add_return(1, v)) #define atomic_dec_return(v) (atomic_sub_return(1, v)) +#define smp_mb__before_atomic_dec() barrier() +#define smp_mb__after_atomic_dec() barrier() +#define smp_mb__before_atomic_inc() barrier() +#define smp_mb__after_atomic_inc() barrier() + #endif diff --git a/arch/hexagon/include/asm/barrier.h b/arch/hexagon/include/asm/barrier.h index 1041a8e..4e863da 100644 --- a/arch/hexagon/include/asm/barrier.h +++ b/arch/hexagon/include/asm/barrier.h @@ -29,10 +29,6 @@ #define smp_read_barrier_depends() barrier() #define smp_wmb() barrier() #define smp_mb() barrier() -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() /* Set a value and use a memory barrier. Used by the scheduler somewhere. */ #define set_mb(var, value) \ diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 4e4119b..a8c3a11 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -147,9 +147,6 @@ config PARAVIRT over full virtualization. However, when run without a hypervisor the kernel is theoretically slower and slightly larger. - -source "arch/ia64/xen/Kconfig" - endif choice @@ -175,7 +172,6 @@ config IA64_GENERIC SGI-SN2 For SGI Altix systems SGI-UV For SGI UV systems Ski-simulator For the HP simulator <http://www.hpl.hp.com/research/linux/ski/> - Xen-domU For xen domU system If you don't know what to do, choose "generic". @@ -231,14 +227,6 @@ config IA64_HP_SIM bool "Ski-simulator" select SWIOTLB -config IA64_XEN_GUEST - bool "Xen guest" - select SWIOTLB - depends on XEN - help - Build a kernel that runs on Xen guest domain. At this moment only - 16KB page size in supported. 
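
Stepping back from the ia64 hunks for a moment: the barrier-header conversions above (alpha, avr32, blackfin, cris, frv, hexagon) all work the same way, because asm-generic/barrier.h only supplies the macros an architecture has not already defined before including it. A simplified sketch of that fallback idiom (not the verbatim generic header):

/* the arch header defines only what it needs, e.g. avr32 keeps its wmb() ... */

/* ... and the generic header fills in the rest roughly like this: */
#ifndef mb
#define mb()	barrier()
#endif

#ifndef rmb
#define rmb()	mb()
#endif

#ifndef wmb
#define wmb()	mb()
#endif

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif

#ifndef set_mb
#define set_mb(var, value)	do { (var) = (value); mb(); } while (0)
#endif
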
- endchoice choice diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index be7bfa1..f37238f 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile @@ -51,11 +51,9 @@ core-$(CONFIG_IA64_DIG_VTD) += arch/ia64/dig/ core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/ core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/ core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/ -core-$(CONFIG_IA64_XEN_GUEST) += arch/ia64/dig/ core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/ core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/ core-$(CONFIG_KVM) += arch/ia64/kvm/ -core-$(CONFIG_XEN) += arch/ia64/xen/ drivers-$(CONFIG_PCI) += arch/ia64/pci/ drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/ diff --git a/arch/ia64/configs/xen_domu_defconfig b/arch/ia64/configs/xen_domu_defconfig deleted file mode 100644 index b025acf..0000000 --- a/arch/ia64/configs/xen_domu_defconfig +++ /dev/null @@ -1,199 +0,0 @@ -CONFIG_EXPERIMENTAL=y -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_BUF_SHIFT=20 -CONFIG_SYSFS_DEPRECATED_V2=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_KALLSYMS_ALL=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -# CONFIG_BLK_DEV_BSG is not set -CONFIG_PARAVIRT_GUEST=y -CONFIG_IA64_XEN_GUEST=y -CONFIG_MCKINLEY=y -CONFIG_IA64_CYCLONE=y -CONFIG_SMP=y -CONFIG_NR_CPUS=16 -CONFIG_HOTPLUG_CPU=y -CONFIG_PERMIT_BSP_REMOVE=y -CONFIG_FORCE_CPEI_RETARGET=y -CONFIG_IA64_MCA_RECOVERY=y -CONFIG_PERFMON=y -CONFIG_IA64_PALINFO=y -CONFIG_KEXEC=y -CONFIG_EFI_VARS=y -CONFIG_BINFMT_MISC=m -CONFIG_ACPI_PROCFS=y -CONFIG_ACPI_BUTTON=m -CONFIG_ACPI_FAN=m -CONFIG_ACPI_PROCESSOR=m -CONFIG_ACPI_CONTAINER=m -CONFIG_HOTPLUG_PCI=y -CONFIG_HOTPLUG_PCI_ACPI=m -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_ARPD=y -CONFIG_SYN_COOKIES=y -# CONFIG_INET_LRO is not set -# CONFIG_IPV6 is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_NBD=m -CONFIG_BLK_DEV_RAM=y -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_CMD64X=y -CONFIG_BLK_DEV_PIIX=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_ST=m -CONFIG_BLK_DEV_SR=m -CONFIG_CHR_DEV_SG=m -CONFIG_SCSI_SYM53C8XX_2=y -CONFIG_SCSI_QLOGIC_1280=y -CONFIG_MD=y -CONFIG_BLK_DEV_MD=m -CONFIG_MD_LINEAR=m -CONFIG_MD_RAID0=m -CONFIG_MD_RAID1=m -CONFIG_MD_MULTIPATH=m -CONFIG_BLK_DEV_DM=m -CONFIG_DM_CRYPT=m -CONFIG_DM_SNAPSHOT=m -CONFIG_DM_MIRROR=m -CONFIG_DM_ZERO=m -CONFIG_FUSION=y -CONFIG_FUSION_SPI=y -CONFIG_FUSION_FC=y -CONFIG_FUSION_CTL=y -CONFIG_NETDEVICES=y -CONFIG_DUMMY=m -CONFIG_NET_ETHERNET=y -CONFIG_NET_TULIP=y -CONFIG_TULIP=m -CONFIG_NET_PCI=y -CONFIG_NET_VENDOR_INTEL=y -CONFIG_E100=m -CONFIG_E1000=y -CONFIG_TIGON3=y -CONFIG_NETCONSOLE=y -# CONFIG_SERIO_SERPORT is not set -CONFIG_GAMEPORT=m -CONFIG_SERIAL_NONSTANDARD=y -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_NR_UARTS=6 -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_SHARE_IRQ=y -# CONFIG_HW_RANDOM is not set -CONFIG_EFI_RTC=y -CONFIG_RAW_DRIVER=m -CONFIG_HPET=y -CONFIG_AGP=m -CONFIG_DRM=m -CONFIG_DRM_TDFX=m -CONFIG_DRM_R128=m -CONFIG_DRM_RADEON=m -CONFIG_DRM_MGA=m -CONFIG_DRM_SIS=m -CONFIG_HID_GYRATION=y -CONFIG_HID_NTRIG=y -CONFIG_HID_PANTHERLORD=y -CONFIG_HID_PETALYNX=y -CONFIG_HID_SAMSUNG=y -CONFIG_HID_SONY=y -CONFIG_HID_SUNPLUS=y -CONFIG_HID_TOPSEED=y -CONFIG_USB=y -CONFIG_USB_DEVICEFS=y -CONFIG_USB_EHCI_HCD=m -CONFIG_USB_OHCI_HCD=m -CONFIG_USB_UHCI_HCD=y -CONFIG_USB_STORAGE=m -CONFIG_EXT2_FS=y 
-CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT2_FS_POSIX_ACL=y -CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y -CONFIG_REISERFS_FS=y -CONFIG_REISERFS_FS_XATTR=y -CONFIG_REISERFS_FS_POSIX_ACL=y -CONFIG_REISERFS_FS_SECURITY=y -CONFIG_XFS_FS=y -CONFIG_AUTOFS_FS=y -CONFIG_AUTOFS4_FS=y -CONFIG_ISO9660_FS=m -CONFIG_JOLIET=y -CONFIG_UDF_FS=m -CONFIG_VFAT_FS=y -CONFIG_NTFS_FS=m -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_HUGETLBFS=y -CONFIG_NFS_FS=m -CONFIG_NFS_V3=y -CONFIG_NFS_V4=y -CONFIG_NFSD=m -CONFIG_NFSD_V4=y -CONFIG_SMB_FS=m -CONFIG_SMB_NLS_DEFAULT=y -CONFIG_CIFS=m -CONFIG_PARTITION_ADVANCED=y -CONFIG_SGI_PARTITION=y -CONFIG_EFI_PARTITION=y -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_CODEPAGE_737=m -CONFIG_NLS_CODEPAGE_775=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_CODEPAGE_852=m -CONFIG_NLS_CODEPAGE_855=m -CONFIG_NLS_CODEPAGE_857=m -CONFIG_NLS_CODEPAGE_860=m -CONFIG_NLS_CODEPAGE_861=m -CONFIG_NLS_CODEPAGE_862=m -CONFIG_NLS_CODEPAGE_863=m -CONFIG_NLS_CODEPAGE_864=m -CONFIG_NLS_CODEPAGE_865=m -CONFIG_NLS_CODEPAGE_866=m -CONFIG_NLS_CODEPAGE_869=m -CONFIG_NLS_CODEPAGE_936=m -CONFIG_NLS_CODEPAGE_950=m -CONFIG_NLS_CODEPAGE_932=m -CONFIG_NLS_CODEPAGE_949=m -CONFIG_NLS_CODEPAGE_874=m -CONFIG_NLS_ISO8859_8=m -CONFIG_NLS_CODEPAGE_1250=m -CONFIG_NLS_CODEPAGE_1251=m -CONFIG_NLS_ISO8859_1=y -CONFIG_NLS_ISO8859_2=m -CONFIG_NLS_ISO8859_3=m -CONFIG_NLS_ISO8859_4=m -CONFIG_NLS_ISO8859_5=m -CONFIG_NLS_ISO8859_6=m -CONFIG_NLS_ISO8859_7=m -CONFIG_NLS_ISO8859_9=m -CONFIG_NLS_ISO8859_13=m -CONFIG_NLS_ISO8859_14=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_KOI8_R=m -CONFIG_NLS_KOI8_U=m -CONFIG_NLS_UTF8=m -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_MUTEXES=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_IA64_GRANULE_16MB=y -CONFIG_CRYPTO_ECB=m -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_MD5=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h index faa1bf0..d651102 100644 --- a/arch/ia64/include/asm/acpi.h +++ b/arch/ia64/include/asm/acpi.h @@ -111,8 +111,6 @@ static inline const char *acpi_get_sysname (void) return "uv"; # elif defined (CONFIG_IA64_DIG) return "dig"; -# elif defined (CONFIG_IA64_XEN_GUEST) - return "xen"; # elif defined(CONFIG_IA64_DIG_VTD) return "dig_vtd"; # else diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h index 60576e0..d0a69aa 100644 --- a/arch/ia64/include/asm/barrier.h +++ b/arch/ia64/include/asm/barrier.h @@ -45,14 +45,37 @@ # define smp_rmb() rmb() # define smp_wmb() wmb() # define smp_read_barrier_depends() read_barrier_depends() + #else + # define smp_mb() barrier() # define smp_rmb() barrier() # define smp_wmb() barrier() # define smp_read_barrier_depends() do { } while(0) + #endif /* + * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no + * need for asm trickery! + */ + +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ___p1; \ +}) + +/* * XXX check on this ---I suspect what Linus really wants here is * acquire vs release semantics but we can't discuss this stuff with * Linus just yet. Grrr... 
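
The smp_store_release()/smp_load_acquire() pair added to the ia64 header above is the usual publish/consume idiom; ia64 gets away with a plain compiler barrier because, as the comment notes, volatile stores and loads are already emitted as st.rel and ld.acq. A hypothetical producer/consumer sketch using the new primitives (kernel-style fragment, names made up):

static int payload;
static int ready;

/* producer: make the payload visible before the flag */
static void publish(int value)
{
	payload = value;
	smp_store_release(&ready, 1);
}

/* consumer: observe the flag before reading the payload */
static int consume(void)
{
	if (smp_load_acquire(&ready))
		return payload;
	return -1;	/* not published yet */
}
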
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h index 2d1ad4b1..9c39bdf 100644 --- a/arch/ia64/include/asm/machvec.h +++ b/arch/ia64/include/asm/machvec.h @@ -113,8 +113,6 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *); # include <asm/machvec_sn2.h> # elif defined (CONFIG_IA64_SGI_UV) # include <asm/machvec_uv.h> -# elif defined (CONFIG_IA64_XEN_GUEST) -# include <asm/machvec_xen.h> # elif defined (CONFIG_IA64_GENERIC) # ifdef MACHVEC_PLATFORM_HEADER diff --git a/arch/ia64/include/asm/machvec_xen.h b/arch/ia64/include/asm/machvec_xen.h deleted file mode 100644 index 8b8bd0e..0000000 --- a/arch/ia64/include/asm/machvec_xen.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef _ASM_IA64_MACHVEC_XEN_h -#define _ASM_IA64_MACHVEC_XEN_h - -extern ia64_mv_setup_t dig_setup; -extern ia64_mv_cpu_init_t xen_cpu_init; -extern ia64_mv_irq_init_t xen_irq_init; -extern ia64_mv_send_ipi_t xen_platform_send_ipi; - -/* - * This stuff has dual use! - * - * For a generic kernel, the macros are used to initialize the - * platform's machvec structure. When compiling a non-generic kernel, - * the macros are used directly. - */ -#define ia64_platform_name "xen" -#define platform_setup dig_setup -#define platform_cpu_init xen_cpu_init -#define platform_irq_init xen_irq_init -#define platform_send_ipi xen_platform_send_ipi - -#endif /* _ASM_IA64_MACHVEC_XEN_h */ diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h index 61c7b17..092f1c9 100644 --- a/arch/ia64/include/asm/meminit.h +++ b/arch/ia64/include/asm/meminit.h @@ -18,7 +18,6 @@ * - crash dumping code reserved region * - Kernel memory map built from EFI memory map * - ELF core header - * - xen start info if CONFIG_XEN * * More could be added if necessary */ diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h index b149b88..b53518a 100644 --- a/arch/ia64/include/asm/paravirt.h +++ b/arch/ia64/include/asm/paravirt.h @@ -75,7 +75,6 @@ void *paravirt_get_gate_section(void); #ifdef CONFIG_PARAVIRT_GUEST #define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0 -#define PARAVIRT_HYPERVISOR_TYPE_XEN 1 #ifndef __ASSEMBLY__ diff --git a/arch/ia64/include/asm/pvclock-abi.h b/arch/ia64/include/asm/pvclock-abi.h index 44ef9ef..42b233b 100644 --- a/arch/ia64/include/asm/pvclock-abi.h +++ b/arch/ia64/include/asm/pvclock-abi.h @@ -11,7 +11,7 @@ /* * These structs MUST NOT be changed. * They are the ABI between hypervisor and guest OS. - * Both Xen and KVM are using this. + * KVM is using this. * * pvclock_vcpu_time_info holds the system time and the tsc timestamp * of the last update. So the guest can use the tsc delta to get a diff --git a/arch/ia64/include/asm/sync_bitops.h b/arch/ia64/include/asm/sync_bitops.h deleted file mode 100644 index 593c12e..0000000 --- a/arch/ia64/include/asm/sync_bitops.h +++ /dev/null @@ -1,51 +0,0 @@ -#ifndef _ASM_IA64_SYNC_BITOPS_H -#define _ASM_IA64_SYNC_BITOPS_H - -/* - * Copyright (C) 2008 Isaku Yamahata <yamahata at valinux co jp> - * - * Based on synch_bitops.h which Dan Magenhaimer wrote. - * - * bit operations which provide guaranteed strong synchronisation - * when communicating with Xen or other guest OSes running on other CPUs. 
- */ - -static inline void sync_set_bit(int nr, volatile void *addr) -{ - set_bit(nr, addr); -} - -static inline void sync_clear_bit(int nr, volatile void *addr) -{ - clear_bit(nr, addr); -} - -static inline void sync_change_bit(int nr, volatile void *addr) -{ - change_bit(nr, addr); -} - -static inline int sync_test_and_set_bit(int nr, volatile void *addr) -{ - return test_and_set_bit(nr, addr); -} - -static inline int sync_test_and_clear_bit(int nr, volatile void *addr) -{ - return test_and_clear_bit(nr, addr); -} - -static inline int sync_test_and_change_bit(int nr, volatile void *addr) -{ - return test_and_change_bit(nr, addr); -} - -static inline int sync_test_bit(int nr, const volatile void *addr) -{ - return test_bit(nr, addr); -} - -#define sync_cmpxchg(ptr, old, new) \ - ((__typeof__(*(ptr)))cmpxchg_acq((ptr), (old), (new))) - -#endif /* _ASM_IA64_SYNC_BITOPS_H */ diff --git a/arch/ia64/include/asm/xen/events.h b/arch/ia64/include/asm/xen/events.h deleted file mode 100644 index baa74c8..0000000 --- a/arch/ia64/include/asm/xen/events.h +++ /dev/null @@ -1,41 +0,0 @@ -/****************************************************************************** - * arch/ia64/include/asm/xen/events.h - * - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ -#ifndef _ASM_IA64_XEN_EVENTS_H -#define _ASM_IA64_XEN_EVENTS_H - -enum ipi_vector { - XEN_RESCHEDULE_VECTOR, - XEN_IPI_VECTOR, - XEN_CMCP_VECTOR, - XEN_CPEP_VECTOR, - - XEN_NR_IPIS, -}; - -static inline int xen_irqs_disabled(struct pt_regs *regs) -{ - return !(ia64_psr(regs)->i); -} - -#define irq_ctx_init(cpu) do { } while (0) - -#endif /* _ASM_IA64_XEN_EVENTS_H */ diff --git a/arch/ia64/include/asm/xen/hypercall.h b/arch/ia64/include/asm/xen/hypercall.h deleted file mode 100644 index ed28bcd..0000000 --- a/arch/ia64/include/asm/xen/hypercall.h +++ /dev/null @@ -1,265 +0,0 @@ -/****************************************************************************** - * hypercall.h - * - * Linux-specific hypervisor handling. 
- * - * Copyright (c) 2002-2004, K A Fraser - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation; or, when distributed - * separately from the Linux kernel or incorporated into other - * software packages, subject to the following license: - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this source file (the "Software"), to deal in the Software without - * restriction, including without limitation the rights to use, copy, modify, - * merge, publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - */ - -#ifndef _ASM_IA64_XEN_HYPERCALL_H -#define _ASM_IA64_XEN_HYPERCALL_H - -#include <xen/interface/xen.h> -#include <xen/interface/physdev.h> -#include <xen/interface/sched.h> -#include <asm/xen/xcom_hcall.h> -struct xencomm_handle; -extern unsigned long __hypercall(unsigned long a1, unsigned long a2, - unsigned long a3, unsigned long a4, - unsigned long a5, unsigned long cmd); - -/* - * Assembler stubs for hyper-calls. 
- */ - -#define _hypercall0(type, name) \ -({ \ - long __res; \ - __res = __hypercall(0, 0, 0, 0, 0, __HYPERVISOR_##name);\ - (type)__res; \ -}) - -#define _hypercall1(type, name, a1) \ -({ \ - long __res; \ - __res = __hypercall((unsigned long)a1, \ - 0, 0, 0, 0, __HYPERVISOR_##name); \ - (type)__res; \ -}) - -#define _hypercall2(type, name, a1, a2) \ -({ \ - long __res; \ - __res = __hypercall((unsigned long)a1, \ - (unsigned long)a2, \ - 0, 0, 0, __HYPERVISOR_##name); \ - (type)__res; \ -}) - -#define _hypercall3(type, name, a1, a2, a3) \ -({ \ - long __res; \ - __res = __hypercall((unsigned long)a1, \ - (unsigned long)a2, \ - (unsigned long)a3, \ - 0, 0, __HYPERVISOR_##name); \ - (type)__res; \ -}) - -#define _hypercall4(type, name, a1, a2, a3, a4) \ -({ \ - long __res; \ - __res = __hypercall((unsigned long)a1, \ - (unsigned long)a2, \ - (unsigned long)a3, \ - (unsigned long)a4, \ - 0, __HYPERVISOR_##name); \ - (type)__res; \ -}) - -#define _hypercall5(type, name, a1, a2, a3, a4, a5) \ -({ \ - long __res; \ - __res = __hypercall((unsigned long)a1, \ - (unsigned long)a2, \ - (unsigned long)a3, \ - (unsigned long)a4, \ - (unsigned long)a5, \ - __HYPERVISOR_##name); \ - (type)__res; \ -}) - - -static inline int -xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg) -{ - return _hypercall2(int, sched_op, cmd, arg); -} - -static inline long -HYPERVISOR_set_timer_op(u64 timeout) -{ - unsigned long timeout_hi = (unsigned long)(timeout >> 32); - unsigned long timeout_lo = (unsigned long)timeout; - return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi); -} - -static inline int -xencomm_arch_hypercall_multicall(struct xencomm_handle *call_list, - int nr_calls) -{ - return _hypercall2(int, multicall, call_list, nr_calls); -} - -static inline int -xencomm_arch_hypercall_memory_op(unsigned int cmd, struct xencomm_handle *arg) -{ - return _hypercall2(int, memory_op, cmd, arg); -} - -static inline int -xencomm_arch_hypercall_event_channel_op(int cmd, struct xencomm_handle *arg) -{ - return _hypercall2(int, event_channel_op, cmd, arg); -} - -static inline int -xencomm_arch_hypercall_xen_version(int cmd, struct xencomm_handle *arg) -{ - return _hypercall2(int, xen_version, cmd, arg); -} - -static inline int -xencomm_arch_hypercall_console_io(int cmd, int count, - struct xencomm_handle *str) -{ - return _hypercall3(int, console_io, cmd, count, str); -} - -static inline int -xencomm_arch_hypercall_physdev_op(int cmd, struct xencomm_handle *arg) -{ - return _hypercall2(int, physdev_op, cmd, arg); -} - -static inline int -xencomm_arch_hypercall_grant_table_op(unsigned int cmd, - struct xencomm_handle *uop, - unsigned int count) -{ - return _hypercall3(int, grant_table_op, cmd, uop, count); -} - -int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count); - -extern int xencomm_arch_hypercall_suspend(struct xencomm_handle *arg); - -static inline int -xencomm_arch_hypercall_callback_op(int cmd, struct xencomm_handle *arg) -{ - return _hypercall2(int, callback_op, cmd, arg); -} - -static inline long -xencomm_arch_hypercall_vcpu_op(int cmd, int cpu, void *arg) -{ - return _hypercall3(long, vcpu_op, cmd, cpu, arg); -} - -static inline int -HYPERVISOR_physdev_op(int cmd, void *arg) -{ - switch (cmd) { - case PHYSDEVOP_eoi: - return _hypercall1(int, ia64_fast_eoi, - ((struct physdev_eoi *)arg)->irq); - default: - return xencomm_hypercall_physdev_op(cmd, arg); - } -} - -static inline long -xencomm_arch_hypercall_opt_feature(struct xencomm_handle *arg) -{ - return 
_hypercall1(long, opt_feature, arg); -} - -/* for balloon driver */ -#define HYPERVISOR_update_va_mapping(va, new_val, flags) (0) - -/* Use xencomm to do hypercalls. */ -#define HYPERVISOR_sched_op xencomm_hypercall_sched_op -#define HYPERVISOR_event_channel_op xencomm_hypercall_event_channel_op -#define HYPERVISOR_callback_op xencomm_hypercall_callback_op -#define HYPERVISOR_multicall xencomm_hypercall_multicall -#define HYPERVISOR_xen_version xencomm_hypercall_xen_version -#define HYPERVISOR_console_io xencomm_hypercall_console_io -#define HYPERVISOR_memory_op xencomm_hypercall_memory_op -#define HYPERVISOR_suspend xencomm_hypercall_suspend -#define HYPERVISOR_vcpu_op xencomm_hypercall_vcpu_op -#define HYPERVISOR_opt_feature xencomm_hypercall_opt_feature - -/* to compile gnttab_copy_grant_page() in drivers/xen/core/gnttab.c */ -#define HYPERVISOR_mmu_update(req, count, success_count, domid) ({ BUG(); 0; }) - -static inline int -HYPERVISOR_shutdown( - unsigned int reason) -{ - struct sched_shutdown sched_shutdown = { - .reason = reason - }; - - int rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown); - - return rc; -} - -/* for netfront.c, netback.c */ -#define MULTI_UVMFLAGS_INDEX 0 /* XXX any value */ - -static inline void -MULTI_update_va_mapping( - struct multicall_entry *mcl, unsigned long va, - pte_t new_val, unsigned long flags) -{ - mcl->op = __HYPERVISOR_update_va_mapping; - mcl->result = 0; -} - -static inline void -MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd, - void *uop, unsigned int count) -{ - mcl->op = __HYPERVISOR_grant_table_op; - mcl->args[0] = cmd; - mcl->args[1] = (unsigned long)uop; - mcl->args[2] = count; -} - -static inline void -MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req, - int count, int *success_count, domid_t domid) -{ - mcl->op = __HYPERVISOR_mmu_update; - mcl->args[0] = (unsigned long)req; - mcl->args[1] = count; - mcl->args[2] = (unsigned long)success_count; - mcl->args[3] = domid; -} - -#endif /* _ASM_IA64_XEN_HYPERCALL_H */ diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h deleted file mode 100644 index 67455c2..0000000 --- a/arch/ia64/include/asm/xen/hypervisor.h +++ /dev/null @@ -1,61 +0,0 @@ -/****************************************************************************** - * hypervisor.h - * - * Linux-specific hypervisor handling. - * - * Copyright (c) 2002-2004, K A Fraser - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation; or, when distributed - * separately from the Linux kernel or incorporated into other - * software packages, subject to the following license: - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this source file (the "Software"), to deal in the Software without - * restriction, including without limitation the rights to use, copy, modify, - * merge, publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - */ - -#ifndef _ASM_IA64_XEN_HYPERVISOR_H -#define _ASM_IA64_XEN_HYPERVISOR_H - -#include <linux/err.h> -#include <xen/interface/xen.h> -#include <xen/interface/version.h> /* to compile feature.c */ -#include <xen/features.h> /* to comiple xen-netfront.c */ -#include <xen/xen.h> -#include <asm/xen/hypercall.h> - -#ifdef CONFIG_XEN -extern struct shared_info *HYPERVISOR_shared_info; -extern struct start_info *xen_start_info; - -void __init xen_setup_vcpu_info_placement(void); -void force_evtchn_callback(void); - -/* for drivers/xen/balloon/balloon.c */ -#ifdef CONFIG_XEN_SCRUB_PAGES -#define scrub_pages(_p, _n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT) -#else -#define scrub_pages(_p, _n) ((void)0) -#endif - -/* For setup_arch() in arch/ia64/kernel/setup.c */ -void xen_ia64_enable_opt_feature(void); -#endif - -#endif /* _ASM_IA64_XEN_HYPERVISOR_H */ diff --git a/arch/ia64/include/asm/xen/inst.h b/arch/ia64/include/asm/xen/inst.h deleted file mode 100644 index c53a476..0000000 --- a/arch/ia64/include/asm/xen/inst.h +++ /dev/null @@ -1,486 +0,0 @@ -/****************************************************************************** - * arch/ia64/include/asm/xen/inst.h - * - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#include <asm/xen/privop.h> - -#define ia64_ivt xen_ivt -#define DO_SAVE_MIN XEN_DO_SAVE_MIN - -#define __paravirt_switch_to xen_switch_to -#define __paravirt_leave_syscall xen_leave_syscall -#define __paravirt_work_processed_syscall xen_work_processed_syscall -#define __paravirt_leave_kernel xen_leave_kernel -#define __paravirt_pending_syscall_end xen_work_pending_syscall_end -#define __paravirt_work_processed_syscall_target \ - xen_work_processed_syscall - -#define paravirt_fsyscall_table xen_fsyscall_table -#define paravirt_fsys_bubble_down xen_fsys_bubble_down - -#define MOV_FROM_IFA(reg) \ - movl reg = XSI_IFA; \ - ;; \ - ld8 reg = [reg] - -#define MOV_FROM_ITIR(reg) \ - movl reg = XSI_ITIR; \ - ;; \ - ld8 reg = [reg] - -#define MOV_FROM_ISR(reg) \ - movl reg = XSI_ISR; \ - ;; \ - ld8 reg = [reg] - -#define MOV_FROM_IHA(reg) \ - movl reg = XSI_IHA; \ - ;; \ - ld8 reg = [reg] - -#define MOV_FROM_IPSR(pred, reg) \ -(pred) movl reg = XSI_IPSR; \ - ;; \ -(pred) ld8 reg = [reg] - -#define MOV_FROM_IIM(reg) \ - movl reg = XSI_IIM; \ - ;; \ - ld8 reg = [reg] - -#define MOV_FROM_IIP(reg) \ - movl reg = XSI_IIP; \ - ;; \ - ld8 reg = [reg] - -.macro __MOV_FROM_IVR reg, clob - .ifc "\reg", "r8" - XEN_HYPER_GET_IVR - .exitm - .endif - .ifc "\clob", "r8" - XEN_HYPER_GET_IVR - ;; - mov \reg = r8 - .exitm - .endif - - mov \clob = r8 - ;; - XEN_HYPER_GET_IVR - ;; - mov \reg = r8 - ;; - mov r8 = \clob -.endm -#define MOV_FROM_IVR(reg, clob) __MOV_FROM_IVR reg, clob - -.macro __MOV_FROM_PSR pred, reg, clob - .ifc "\reg", "r8" - (\pred) XEN_HYPER_GET_PSR; - .exitm - .endif - .ifc "\clob", "r8" - (\pred) XEN_HYPER_GET_PSR - ;; - (\pred) mov \reg = r8 - .exitm - .endif - - (\pred) mov \clob = r8 - (\pred) XEN_HYPER_GET_PSR - ;; - (\pred) mov \reg = r8 - (\pred) mov r8 = \clob -.endm -#define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob - -/* assuming ar.itc is read with interrupt disabled. 
*/ -#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \ -(pred) movl clob = XSI_ITC_OFFSET; \ - ;; \ -(pred) ld8 clob = [clob]; \ -(pred) mov reg = ar.itc; \ - ;; \ -(pred) add reg = reg, clob; \ - ;; \ -(pred) movl clob = XSI_ITC_LAST; \ - ;; \ -(pred) ld8 clob = [clob]; \ - ;; \ -(pred) cmp.geu.unc pred_clob, p0 = clob, reg; \ - ;; \ -(pred_clob) add reg = 1, clob; \ - ;; \ -(pred) movl clob = XSI_ITC_LAST; \ - ;; \ -(pred) st8 [clob] = reg - - -#define MOV_TO_IFA(reg, clob) \ - movl clob = XSI_IFA; \ - ;; \ - st8 [clob] = reg \ - -#define MOV_TO_ITIR(pred, reg, clob) \ -(pred) movl clob = XSI_ITIR; \ - ;; \ -(pred) st8 [clob] = reg - -#define MOV_TO_IHA(pred, reg, clob) \ -(pred) movl clob = XSI_IHA; \ - ;; \ -(pred) st8 [clob] = reg - -#define MOV_TO_IPSR(pred, reg, clob) \ -(pred) movl clob = XSI_IPSR; \ - ;; \ -(pred) st8 [clob] = reg; \ - ;; - -#define MOV_TO_IFS(pred, reg, clob) \ -(pred) movl clob = XSI_IFS; \ - ;; \ -(pred) st8 [clob] = reg; \ - ;; - -#define MOV_TO_IIP(reg, clob) \ - movl clob = XSI_IIP; \ - ;; \ - st8 [clob] = reg - -.macro ____MOV_TO_KR kr, reg, clob0, clob1 - .ifc "\clob0", "r9" - .error "clob0 \clob0 must not be r9" - .endif - .ifc "\clob1", "r8" - .error "clob1 \clob1 must not be r8" - .endif - - .ifnc "\reg", "r9" - .ifnc "\clob1", "r9" - mov \clob1 = r9 - .endif - mov r9 = \reg - .endif - .ifnc "\clob0", "r8" - mov \clob0 = r8 - .endif - mov r8 = \kr - ;; - XEN_HYPER_SET_KR - - .ifnc "\reg", "r9" - .ifnc "\clob1", "r9" - mov r9 = \clob1 - .endif - .endif - .ifnc "\clob0", "r8" - mov r8 = \clob0 - .endif -.endm - -.macro __MOV_TO_KR kr, reg, clob0, clob1 - .ifc "\clob0", "r9" - ____MOV_TO_KR \kr, \reg, \clob1, \clob0 - .exitm - .endif - .ifc "\clob1", "r8" - ____MOV_TO_KR \kr, \reg, \clob1, \clob0 - .exitm - .endif - - ____MOV_TO_KR \kr, \reg, \clob0, \clob1 -.endm - -#define MOV_TO_KR(kr, reg, clob0, clob1) \ - __MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1 - - -.macro __ITC_I pred, reg, clob - .ifc "\reg", "r8" - (\pred) XEN_HYPER_ITC_I - .exitm - .endif - .ifc "\clob", "r8" - (\pred) mov r8 = \reg - ;; - (\pred) XEN_HYPER_ITC_I - .exitm - .endif - - (\pred) mov \clob = r8 - (\pred) mov r8 = \reg - ;; - (\pred) XEN_HYPER_ITC_I - ;; - (\pred) mov r8 = \clob - ;; -.endm -#define ITC_I(pred, reg, clob) __ITC_I pred, reg, clob - -.macro __ITC_D pred, reg, clob - .ifc "\reg", "r8" - (\pred) XEN_HYPER_ITC_D - ;; - .exitm - .endif - .ifc "\clob", "r8" - (\pred) mov r8 = \reg - ;; - (\pred) XEN_HYPER_ITC_D - ;; - .exitm - .endif - - (\pred) mov \clob = r8 - (\pred) mov r8 = \reg - ;; - (\pred) XEN_HYPER_ITC_D - ;; - (\pred) mov r8 = \clob - ;; -.endm -#define ITC_D(pred, reg, clob) __ITC_D pred, reg, clob - -.macro __ITC_I_AND_D pred_i, pred_d, reg, clob - .ifc "\reg", "r8" - (\pred_i)XEN_HYPER_ITC_I - ;; - (\pred_d)XEN_HYPER_ITC_D - ;; - .exitm - .endif - .ifc "\clob", "r8" - mov r8 = \reg - ;; - (\pred_i)XEN_HYPER_ITC_I - ;; - (\pred_d)XEN_HYPER_ITC_D - ;; - .exitm - .endif - - mov \clob = r8 - mov r8 = \reg - ;; - (\pred_i)XEN_HYPER_ITC_I - ;; - (\pred_d)XEN_HYPER_ITC_D - ;; - mov r8 = \clob - ;; -.endm -#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \ - __ITC_I_AND_D pred_i, pred_d, reg, clob - -.macro __THASH pred, reg0, reg1, clob - .ifc "\reg0", "r8" - (\pred) mov r8 = \reg1 - (\pred) XEN_HYPER_THASH - .exitm - .endc - .ifc "\reg1", "r8" - (\pred) XEN_HYPER_THASH - ;; - (\pred) mov \reg0 = r8 - ;; - .exitm - .endif - .ifc "\clob", "r8" - (\pred) mov r8 = \reg1 - (\pred) XEN_HYPER_THASH - ;; - (\pred) mov \reg0 = r8 - ;; - .exitm - .endif - - (\pred) mov 
\clob = r8 - (\pred) mov r8 = \reg1 - (\pred) XEN_HYPER_THASH - ;; - (\pred) mov \reg0 = r8 - (\pred) mov r8 = \clob - ;; -.endm -#define THASH(pred, reg0, reg1, clob) __THASH pred, reg0, reg1, clob - -#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \ - mov clob0 = 1; \ - movl clob1 = XSI_PSR_IC; \ - ;; \ - st4 [clob1] = clob0 \ - ;; - -#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \ - ;; \ - srlz.d; \ - mov clob1 = 1; \ - movl clob0 = XSI_PSR_IC; \ - ;; \ - st4 [clob0] = clob1 - -#define RSM_PSR_IC(clob) \ - movl clob = XSI_PSR_IC; \ - ;; \ - st4 [clob] = r0; \ - ;; - -/* pred will be clobbered */ -#define MASK_TO_PEND_OFS (-1) -#define SSM_PSR_I(pred, pred_clob, clob) \ -(pred) movl clob = XSI_PSR_I_ADDR \ - ;; \ -(pred) ld8 clob = [clob] \ - ;; \ - /* if (pred) vpsr.i = 1 */ \ - /* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask)=0 */ \ -(pred) st1 [clob] = r0, MASK_TO_PEND_OFS \ - ;; \ - /* if (vcpu->vcpu_info->evtchn_upcall_pending) */ \ -(pred) ld1 clob = [clob] \ - ;; \ -(pred) cmp.ne.unc pred_clob, p0 = clob, r0 \ - ;; \ -(pred_clob)XEN_HYPER_SSM_I /* do areal ssm psr.i */ - -#define RSM_PSR_I(pred, clob0, clob1) \ - movl clob0 = XSI_PSR_I_ADDR; \ - mov clob1 = 1; \ - ;; \ - ld8 clob0 = [clob0]; \ - ;; \ -(pred) st1 [clob0] = clob1 - -#define RSM_PSR_I_IC(clob0, clob1, clob2) \ - movl clob0 = XSI_PSR_I_ADDR; \ - movl clob1 = XSI_PSR_IC; \ - ;; \ - ld8 clob0 = [clob0]; \ - mov clob2 = 1; \ - ;; \ - /* note: clears both vpsr.i and vpsr.ic! */ \ - st1 [clob0] = clob2; \ - st4 [clob1] = r0; \ - ;; - -#define RSM_PSR_DT \ - XEN_HYPER_RSM_PSR_DT - -#define RSM_PSR_BE_I(clob0, clob1) \ - RSM_PSR_I(p0, clob0, clob1); \ - rum psr.be - -#define SSM_PSR_DT_AND_SRLZ_I \ - XEN_HYPER_SSM_PSR_DT - -#define BSW_0(clob0, clob1, clob2) \ - ;; \ - /* r16-r31 all now hold bank1 values */ \ - mov clob2 = ar.unat; \ - movl clob0 = XSI_BANK1_R16; \ - movl clob1 = XSI_BANK1_R16 + 8; \ - ;; \ -.mem.offset 0, 0; st8.spill [clob0] = r16, 16; \ -.mem.offset 8, 0; st8.spill [clob1] = r17, 16; \ - ;; \ -.mem.offset 0, 0; st8.spill [clob0] = r18, 16; \ -.mem.offset 8, 0; st8.spill [clob1] = r19, 16; \ - ;; \ -.mem.offset 0, 0; st8.spill [clob0] = r20, 16; \ -.mem.offset 8, 0; st8.spill [clob1] = r21, 16; \ - ;; \ -.mem.offset 0, 0; st8.spill [clob0] = r22, 16; \ -.mem.offset 8, 0; st8.spill [clob1] = r23, 16; \ - ;; \ -.mem.offset 0, 0; st8.spill [clob0] = r24, 16; \ -.mem.offset 8, 0; st8.spill [clob1] = r25, 16; \ - ;; \ -.mem.offset 0, 0; st8.spill [clob0] = r26, 16; \ -.mem.offset 8, 0; st8.spill [clob1] = r27, 16; \ - ;; \ -.mem.offset 0, 0; st8.spill [clob0] = r28, 16; \ -.mem.offset 8, 0; st8.spill [clob1] = r29, 16; \ - ;; \ -.mem.offset 0, 0; st8.spill [clob0] = r30, 16; \ -.mem.offset 8, 0; st8.spill [clob1] = r31, 16; \ - ;; \ - mov clob1 = ar.unat; \ - movl clob0 = XSI_B1NAT; \ - ;; \ - st8 [clob0] = clob1; \ - mov ar.unat = clob2; \ - movl clob0 = XSI_BANKNUM; \ - ;; \ - st4 [clob0] = r0 - - - /* FIXME: THIS CODE IS NOT NaT SAFE! 
*/ -#define XEN_BSW_1(clob) \ - mov clob = ar.unat; \ - movl r30 = XSI_B1NAT; \ - ;; \ - ld8 r30 = [r30]; \ - mov r31 = 1; \ - ;; \ - mov ar.unat = r30; \ - movl r30 = XSI_BANKNUM; \ - ;; \ - st4 [r30] = r31; \ - movl r30 = XSI_BANK1_R16; \ - movl r31 = XSI_BANK1_R16+8; \ - ;; \ - ld8.fill r16 = [r30], 16; \ - ld8.fill r17 = [r31], 16; \ - ;; \ - ld8.fill r18 = [r30], 16; \ - ld8.fill r19 = [r31], 16; \ - ;; \ - ld8.fill r20 = [r30], 16; \ - ld8.fill r21 = [r31], 16; \ - ;; \ - ld8.fill r22 = [r30], 16; \ - ld8.fill r23 = [r31], 16; \ - ;; \ - ld8.fill r24 = [r30], 16; \ - ld8.fill r25 = [r31], 16; \ - ;; \ - ld8.fill r26 = [r30], 16; \ - ld8.fill r27 = [r31], 16; \ - ;; \ - ld8.fill r28 = [r30], 16; \ - ld8.fill r29 = [r31], 16; \ - ;; \ - ld8.fill r30 = [r30]; \ - ld8.fill r31 = [r31]; \ - ;; \ - mov ar.unat = clob - -#define BSW_1(clob0, clob1) XEN_BSW_1(clob1) - - -#define COVER \ - XEN_HYPER_COVER - -#define RFI \ - XEN_HYPER_RFI; \ - dv_serialize_data diff --git a/arch/ia64/include/asm/xen/interface.h b/arch/ia64/include/asm/xen/interface.h deleted file mode 100644 index e88c5de..0000000 --- a/arch/ia64/include/asm/xen/interface.h +++ /dev/null @@ -1,363 +0,0 @@ -/****************************************************************************** - * arch-ia64/hypervisor-if.h - * - * Guest OS interface to IA64 Xen. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * - * Copyright by those who contributed. 
(in alphabetical order) - * - * Anthony Xu <anthony.xu@intel.com> - * Eddie Dong <eddie.dong@intel.com> - * Fred Yang <fred.yang@intel.com> - * Kevin Tian <kevin.tian@intel.com> - * Alex Williamson <alex.williamson@hp.com> - * Chris Wright <chrisw@sous-sol.org> - * Christian Limpach <Christian.Limpach@cl.cam.ac.uk> - * Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com> - * Hollis Blanchard <hollisb@us.ibm.com> - * Isaku Yamahata <yamahata@valinux.co.jp> - * Jan Beulich <jbeulich@novell.com> - * John Levon <john.levon@sun.com> - * Kazuhiro Suzuki <kaz@jp.fujitsu.com> - * Keir Fraser <keir.fraser@citrix.com> - * Kouya Shimura <kouya@jp.fujitsu.com> - * Masaki Kanno <kanno.masaki@jp.fujitsu.com> - * Matt Chapman <matthewc@hp.com> - * Matthew Chapman <matthewc@hp.com> - * Samuel Thibault <samuel.thibault@eu.citrix.com> - * Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com> - * Tristan Gingold <tgingold@free.fr> - * Tsunehisa Doi <Doi.Tsunehisa@jp.fujitsu.com> - * Yutaka Ezaki <yutaka.ezaki@jp.fujitsu.com> - * Zhang Xin <xing.z.zhang@intel.com> - * Zhang xiantao <xiantao.zhang@intel.com> - * dan.magenheimer@hp.com - * ian.pratt@cl.cam.ac.uk - * michael.fetterman@cl.cam.ac.uk - */ - -#ifndef _ASM_IA64_XEN_INTERFACE_H -#define _ASM_IA64_XEN_INTERFACE_H - -#define __DEFINE_GUEST_HANDLE(name, type) \ - typedef struct { type *p; } __guest_handle_ ## name - -#define DEFINE_GUEST_HANDLE_STRUCT(name) \ - __DEFINE_GUEST_HANDLE(name, struct name) -#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name) -#define GUEST_HANDLE(name) __guest_handle_ ## name -#define GUEST_HANDLE_64(name) GUEST_HANDLE(name) -#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0) - -#ifndef __ASSEMBLY__ -/* Explicitly size integers that represent pfns in the public interface - * with Xen so that we could have one ABI that works for 32 and 64 bit - * guests. */ -typedef unsigned long xen_pfn_t; -typedef unsigned long xen_ulong_t; -/* Guest handles for primitive C types. */ -__DEFINE_GUEST_HANDLE(uchar, unsigned char); -__DEFINE_GUEST_HANDLE(uint, unsigned int); -__DEFINE_GUEST_HANDLE(ulong, unsigned long); - -DEFINE_GUEST_HANDLE(char); -DEFINE_GUEST_HANDLE(int); -DEFINE_GUEST_HANDLE(long); -DEFINE_GUEST_HANDLE(void); -DEFINE_GUEST_HANDLE(uint64_t); -DEFINE_GUEST_HANDLE(uint32_t); - -DEFINE_GUEST_HANDLE(xen_pfn_t); -#define PRI_xen_pfn "lx" -#endif - -/* Arch specific VIRQs definition */ -#define VIRQ_ITC VIRQ_ARCH_0 /* V. Virtual itc timer */ -#define VIRQ_MCA_CMC VIRQ_ARCH_1 /* MCA cmc interrupt */ -#define VIRQ_MCA_CPE VIRQ_ARCH_2 /* MCA cpe interrupt */ - -/* Maximum number of virtual CPUs in multi-processor guests. */ -/* keep sizeof(struct shared_page) <= PAGE_SIZE. - * this is checked in arch/ia64/xen/hypervisor.c. 
*/ -#define MAX_VIRT_CPUS 64 - -#ifndef __ASSEMBLY__ - -#define INVALID_MFN (~0UL) - -union vac { - unsigned long value; - struct { - int a_int:1; - int a_from_int_cr:1; - int a_to_int_cr:1; - int a_from_psr:1; - int a_from_cpuid:1; - int a_cover:1; - int a_bsw:1; - long reserved:57; - }; -}; - -union vdc { - unsigned long value; - struct { - int d_vmsw:1; - int d_extint:1; - int d_ibr_dbr:1; - int d_pmc:1; - int d_to_pmd:1; - int d_itm:1; - long reserved:58; - }; -}; - -struct mapped_regs { - union vac vac; - union vdc vdc; - unsigned long virt_env_vaddr; - unsigned long reserved1[29]; - unsigned long vhpi; - unsigned long reserved2[95]; - union { - unsigned long vgr[16]; - unsigned long bank1_regs[16]; /* bank1 regs (r16-r31) - when bank0 active */ - }; - union { - unsigned long vbgr[16]; - unsigned long bank0_regs[16]; /* bank0 regs (r16-r31) - when bank1 active */ - }; - unsigned long vnat; - unsigned long vbnat; - unsigned long vcpuid[5]; - unsigned long reserved3[11]; - unsigned long vpsr; - unsigned long vpr; - unsigned long reserved4[76]; - union { - unsigned long vcr[128]; - struct { - unsigned long dcr; /* CR0 */ - unsigned long itm; - unsigned long iva; - unsigned long rsv1[5]; - unsigned long pta; /* CR8 */ - unsigned long rsv2[7]; - unsigned long ipsr; /* CR16 */ - unsigned long isr; - unsigned long rsv3; - unsigned long iip; - unsigned long ifa; - unsigned long itir; - unsigned long iipa; - unsigned long ifs; - unsigned long iim; /* CR24 */ - unsigned long iha; - unsigned long rsv4[38]; - unsigned long lid; /* CR64 */ - unsigned long ivr; - unsigned long tpr; - unsigned long eoi; - unsigned long irr[4]; - unsigned long itv; /* CR72 */ - unsigned long pmv; - unsigned long cmcv; - unsigned long rsv5[5]; - unsigned long lrr0; /* CR80 */ - unsigned long lrr1; - unsigned long rsv6[46]; - }; - }; - union { - unsigned long reserved5[128]; - struct { - unsigned long precover_ifs; - unsigned long unat; /* not sure if this is needed - until NaT arch is done */ - int interrupt_collection_enabled; /* virtual psr.ic */ - - /* virtual interrupt deliverable flag is - * evtchn_upcall_mask in shared info area now. - * interrupt_mask_addr is the address - * of evtchn_upcall_mask for current vcpu - */ - unsigned char *interrupt_mask_addr; - int pending_interruption; - unsigned char vpsr_pp; - unsigned char vpsr_dfh; - unsigned char hpsr_dfh; - unsigned char hpsr_mfh; - unsigned long reserved5_1[4]; - int metaphysical_mode; /* 1 = use metaphys mapping - 0 = use virtual */ - int banknum; /* 0 or 1, which virtual - register bank is active */ - unsigned long rrs[8]; /* region registers */ - unsigned long krs[8]; /* kernel registers */ - unsigned long tmp[16]; /* temp registers - (e.g. for hyperprivops) */ - - /* itc paravirtualization - * vAR.ITC = mAR.ITC + itc_offset - * itc_last is one which was lastly passed to - * the guest OS in order to prevent it from - * going backwords. - */ - unsigned long itc_offset; - unsigned long itc_last; - }; - }; -}; - -struct arch_vcpu_info { - /* nothing */ -}; - -/* - * This structure is used for magic page in domain pseudo physical address - * space and the result of XENMEM_machine_memory_map. - * As the XENMEM_machine_memory_map result, - * xen_memory_map::nr_entries indicates the size in bytes - * including struct xen_ia64_memmap_info. Not the number of entries. 
- */ -struct xen_ia64_memmap_info { - uint64_t efi_memmap_size; /* size of EFI memory map */ - uint64_t efi_memdesc_size; /* size of an EFI memory map - * descriptor */ - uint32_t efi_memdesc_version; /* memory descriptor version */ - void *memdesc[0]; /* array of efi_memory_desc_t */ -}; - -struct arch_shared_info { - /* PFN of the start_info page. */ - unsigned long start_info_pfn; - - /* Interrupt vector for event channel. */ - int evtchn_vector; - - /* PFN of memmap_info page */ - unsigned int memmap_info_num_pages; /* currently only = 1 case is - supported. */ - unsigned long memmap_info_pfn; - - uint64_t pad[31]; -}; - -struct xen_callback { - unsigned long ip; -}; -typedef struct xen_callback xen_callback_t; - -#endif /* !__ASSEMBLY__ */ - -#include <asm/pvclock-abi.h> - -/* Size of the shared_info area (this is not related to page size). */ -#define XSI_SHIFT 14 -#define XSI_SIZE (1 << XSI_SHIFT) -/* Log size of mapped_regs area (64 KB - only 4KB is used). */ -#define XMAPPEDREGS_SHIFT 12 -#define XMAPPEDREGS_SIZE (1 << XMAPPEDREGS_SHIFT) -/* Offset of XASI (Xen arch shared info) wrt XSI_BASE. */ -#define XMAPPEDREGS_OFS XSI_SIZE - -/* Hyperprivops. */ -#define HYPERPRIVOP_START 0x1 -#define HYPERPRIVOP_RFI (HYPERPRIVOP_START + 0x0) -#define HYPERPRIVOP_RSM_DT (HYPERPRIVOP_START + 0x1) -#define HYPERPRIVOP_SSM_DT (HYPERPRIVOP_START + 0x2) -#define HYPERPRIVOP_COVER (HYPERPRIVOP_START + 0x3) -#define HYPERPRIVOP_ITC_D (HYPERPRIVOP_START + 0x4) -#define HYPERPRIVOP_ITC_I (HYPERPRIVOP_START + 0x5) -#define HYPERPRIVOP_SSM_I (HYPERPRIVOP_START + 0x6) -#define HYPERPRIVOP_GET_IVR (HYPERPRIVOP_START + 0x7) -#define HYPERPRIVOP_GET_TPR (HYPERPRIVOP_START + 0x8) -#define HYPERPRIVOP_SET_TPR (HYPERPRIVOP_START + 0x9) -#define HYPERPRIVOP_EOI (HYPERPRIVOP_START + 0xa) -#define HYPERPRIVOP_SET_ITM (HYPERPRIVOP_START + 0xb) -#define HYPERPRIVOP_THASH (HYPERPRIVOP_START + 0xc) -#define HYPERPRIVOP_PTC_GA (HYPERPRIVOP_START + 0xd) -#define HYPERPRIVOP_ITR_D (HYPERPRIVOP_START + 0xe) -#define HYPERPRIVOP_GET_RR (HYPERPRIVOP_START + 0xf) -#define HYPERPRIVOP_SET_RR (HYPERPRIVOP_START + 0x10) -#define HYPERPRIVOP_SET_KR (HYPERPRIVOP_START + 0x11) -#define HYPERPRIVOP_FC (HYPERPRIVOP_START + 0x12) -#define HYPERPRIVOP_GET_CPUID (HYPERPRIVOP_START + 0x13) -#define HYPERPRIVOP_GET_PMD (HYPERPRIVOP_START + 0x14) -#define HYPERPRIVOP_GET_EFLAG (HYPERPRIVOP_START + 0x15) -#define HYPERPRIVOP_SET_EFLAG (HYPERPRIVOP_START + 0x16) -#define HYPERPRIVOP_RSM_BE (HYPERPRIVOP_START + 0x17) -#define HYPERPRIVOP_GET_PSR (HYPERPRIVOP_START + 0x18) -#define HYPERPRIVOP_SET_RR0_TO_RR4 (HYPERPRIVOP_START + 0x19) -#define HYPERPRIVOP_MAX (0x1a) - -/* Fast and light hypercalls. */ -#define __HYPERVISOR_ia64_fast_eoi __HYPERVISOR_arch_1 - -/* Xencomm macros. */ -#define XENCOMM_INLINE_MASK 0xf800000000000000UL -#define XENCOMM_INLINE_FLAG 0x8000000000000000UL - -#ifndef __ASSEMBLY__ - -/* - * Optimization features. - * The hypervisor may do some special optimizations for guests. This hypercall - * can be used to switch on/of these special optimizations. - */ -#define __HYPERVISOR_opt_feature 0x700UL - -#define XEN_IA64_OPTF_OFF 0x0 -#define XEN_IA64_OPTF_ON 0x1 - -/* - * If this feature is switched on, the hypervisor inserts the - * tlb entries without calling the guests traphandler. - * This is useful in guests using region 7 for identity mapping - * like the linux kernel does. - */ -#define XEN_IA64_OPTF_IDENT_MAP_REG7 1 - -/* Identity mapping of region 4 addresses in HVM. 
*/ -#define XEN_IA64_OPTF_IDENT_MAP_REG4 2 - -/* Identity mapping of region 5 addresses in HVM. */ -#define XEN_IA64_OPTF_IDENT_MAP_REG5 3 - -#define XEN_IA64_OPTF_IDENT_MAP_NOT_SET (0) - -struct xen_ia64_opt_feature { - unsigned long cmd; /* Which feature */ - unsigned char on; /* Switch feature on/off */ - union { - struct { - /* The page protection bit mask of the pte. - * This will be or'ed with the pte. */ - unsigned long pgprot; - unsigned long key; /* A protection key for itir.*/ - }; - }; -}; - -#endif /* __ASSEMBLY__ */ - -#endif /* _ASM_IA64_XEN_INTERFACE_H */ diff --git a/arch/ia64/include/asm/xen/irq.h b/arch/ia64/include/asm/xen/irq.h deleted file mode 100644 index a904509..0000000 --- a/arch/ia64/include/asm/xen/irq.h +++ /dev/null @@ -1,44 +0,0 @@ -/****************************************************************************** - * arch/ia64/include/asm/xen/irq.h - * - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#ifndef _ASM_IA64_XEN_IRQ_H -#define _ASM_IA64_XEN_IRQ_H - -/* - * The flat IRQ space is divided into two regions: - * 1. A one-to-one mapping of real physical IRQs. This space is only used - * if we have physical device-access privilege. This region is at the - * start of the IRQ space so that existing device drivers do not need - * to be modified to translate physical IRQ numbers into our IRQ space. - * 3. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These - * are bound using the provided bind/unbind functions. - */ - -#define XEN_PIRQ_BASE 0 -#define XEN_NR_PIRQS 256 - -#define XEN_DYNIRQ_BASE (XEN_PIRQ_BASE + XEN_NR_PIRQS) -#define XEN_NR_DYNIRQS (NR_CPUS * 8) - -#define XEN_NR_IRQS (XEN_NR_PIRQS + XEN_NR_DYNIRQS) - -#endif /* _ASM_IA64_XEN_IRQ_H */ diff --git a/arch/ia64/include/asm/xen/minstate.h b/arch/ia64/include/asm/xen/minstate.h deleted file mode 100644 index 00cf03e..0000000 --- a/arch/ia64/include/asm/xen/minstate.h +++ /dev/null @@ -1,143 +0,0 @@ - -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE -/* read ar.itc in advance, and use it before leaving bank 0 */ -#define XEN_ACCOUNT_GET_STAMP \ - MOV_FROM_ITC(pUStk, p6, r20, r2); -#else -#define XEN_ACCOUNT_GET_STAMP -#endif - -/* - * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves - * the minimum state necessary that allows us to turn psr.ic back - * on. 
- * - * Assumed state upon entry: - * psr.ic: off - * r31: contains saved predicates (pr) - * - * Upon exit, the state is as follows: - * psr.ic: off - * r2 = points to &pt_regs.r16 - * r8 = contents of ar.ccv - * r9 = contents of ar.csd - * r10 = contents of ar.ssd - * r11 = FPSR_DEFAULT - * r12 = kernel sp (kernel virtual address) - * r13 = points to current task_struct (kernel virtual address) - * p15 = TRUE if psr.i is set in cr.ipsr - * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15: - * preserved - * CONFIG_XEN note: p6/p7 are not preserved - * - * Note that psr.ic is NOT turned on by this macro. This is so that - * we can pass interruption state as arguments to a handler. - */ -#define XEN_DO_SAVE_MIN(__COVER,SAVE_IFS,EXTRA,WORKAROUND) \ - mov r16=IA64_KR(CURRENT); /* M */ \ - mov r27=ar.rsc; /* M */ \ - mov r20=r1; /* A */ \ - mov r25=ar.unat; /* M */ \ - MOV_FROM_IPSR(p0,r29); /* M */ \ - MOV_FROM_IIP(r28); /* M */ \ - mov r21=ar.fpsr; /* M */ \ - mov r26=ar.pfs; /* I */ \ - __COVER; /* B;; (or nothing) */ \ - adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \ - ;; \ - ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \ - st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \ - adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \ - /* switch from user to kernel RBS: */ \ - ;; \ - invala; /* M */ \ - /* SAVE_IFS;*/ /* see xen special handling below */ \ - cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \ - ;; \ -(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \ - ;; \ -(pUStk) mov.m r24=ar.rnat; \ -(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \ -(pKStk) mov r1=sp; /* get sp */ \ - ;; \ -(pUStk) lfetch.fault.excl.nt1 [r22]; \ -(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \ -(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \ - ;; \ -(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \ -(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \ - ;; \ -(pUStk) mov r18=ar.bsp; \ -(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \ - adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \ - adds r16=PT(CR_IPSR),r1; \ - ;; \ - lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \ - st8 [r16]=r29; /* save cr.ipsr */ \ - ;; \ - lfetch.fault.excl.nt1 [r17]; \ - tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \ - mov r29=b0 \ - ;; \ - WORKAROUND; \ - adds r16=PT(R8),r1; /* initialize first base pointer */ \ - adds r17=PT(R9),r1; /* initialize second base pointer */ \ -(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \ - ;; \ -.mem.offset 0,0; st8.spill [r16]=r8,16; \ -.mem.offset 8,0; st8.spill [r17]=r9,16; \ - ;; \ -.mem.offset 0,0; st8.spill [r16]=r10,24; \ - movl r8=XSI_PRECOVER_IFS; \ -.mem.offset 8,0; st8.spill [r17]=r11,24; \ - ;; \ - /* xen special handling for possibly lazy cover */ \ - /* SAVE_MIN case in dispatch_ia32_handler: mov r30=r0 */ \ - ld8 r30=[r8]; \ -(pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \ - st8 [r16]=r28,16; /* save cr.iip */ \ - ;; \ - st8 [r17]=r30,16; /* save cr.ifs */ \ - mov r8=ar.ccv; \ - mov r9=ar.csd; \ - mov r10=ar.ssd; \ - movl r11=FPSR_DEFAULT; /* L-unit */ \ - ;; \ - st8 [r16]=r25,16; /* save ar.unat */ \ - st8 [r17]=r26,16; /* save ar.pfs */ \ - shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \ - ;; \ - st8 [r16]=r27,16; /* save ar.rsc */ \ -(pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \ -(pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \ - ;; /* 
avoid RAW on r16 & r17 */ \ -(pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \ - st8 [r17]=r31,16; /* save predicates */ \ -(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \ - ;; \ - st8 [r16]=r29,16; /* save b0 */ \ - st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \ - cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \ - ;; \ -.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \ -.mem.offset 8,0; st8.spill [r17]=r12,16; \ - adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \ - ;; \ -.mem.offset 0,0; st8.spill [r16]=r13,16; \ -.mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \ - mov r13=IA64_KR(CURRENT); /* establish `current' */ \ - ;; \ -.mem.offset 0,0; st8.spill [r16]=r15,16; \ -.mem.offset 8,0; st8.spill [r17]=r14,16; \ - ;; \ -.mem.offset 0,0; st8.spill [r16]=r2,16; \ -.mem.offset 8,0; st8.spill [r17]=r3,16; \ - XEN_ACCOUNT_GET_STAMP \ - adds r2=IA64_PT_REGS_R16_OFFSET,r1; \ - ;; \ - EXTRA; \ - movl r1=__gp; /* establish kernel global pointer */ \ - ;; \ - ACCOUNT_SYS_ENTER \ - BSW_1(r3,r14); /* switch back to bank 1 (must be last in insn group) */ \ - ;; diff --git a/arch/ia64/include/asm/xen/page-coherent.h b/arch/ia64/include/asm/xen/page-coherent.h deleted file mode 100644 index 96e42f9..0000000 --- a/arch/ia64/include/asm/xen/page-coherent.h +++ /dev/null @@ -1,38 +0,0 @@ -#ifndef _ASM_IA64_XEN_PAGE_COHERENT_H -#define _ASM_IA64_XEN_PAGE_COHERENT_H - -#include <asm/page.h> -#include <linux/dma-attrs.h> -#include <linux/dma-mapping.h> - -static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags, - struct dma_attrs *attrs) -{ - void *vstart = (void*)__get_free_pages(flags, get_order(size)); - *dma_handle = virt_to_phys(vstart); - return vstart; -} - -static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - free_pages((unsigned long) cpu_addr, get_order(size)); -} - -static inline void xen_dma_map_page(struct device *hwdev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) { } - -static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, - size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) { } - -static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, - dma_addr_t handle, size_t size, enum dma_data_direction dir) { } - -static inline void xen_dma_sync_single_for_device(struct device *hwdev, - dma_addr_t handle, size_t size, enum dma_data_direction dir) { } - -#endif /* _ASM_IA64_XEN_PAGE_COHERENT_H */ diff --git a/arch/ia64/include/asm/xen/page.h b/arch/ia64/include/asm/xen/page.h deleted file mode 100644 index 03441a78..0000000 --- a/arch/ia64/include/asm/xen/page.h +++ /dev/null @@ -1,65 +0,0 @@ -/****************************************************************************** - * arch/ia64/include/asm/xen/page.h - * - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#ifndef _ASM_IA64_XEN_PAGE_H -#define _ASM_IA64_XEN_PAGE_H - -#define INVALID_P2M_ENTRY (~0UL) - -static inline unsigned long mfn_to_pfn(unsigned long mfn) -{ - return mfn; -} - -static inline unsigned long pfn_to_mfn(unsigned long pfn) -{ - return pfn; -} - -#define phys_to_machine_mapping_valid(_x) (1) - -static inline void *mfn_to_virt(unsigned long mfn) -{ - return __va(mfn << PAGE_SHIFT); -} - -static inline unsigned long virt_to_mfn(void *virt) -{ - return __pa(virt) >> PAGE_SHIFT; -} - -/* for tpmfront.c */ -static inline unsigned long virt_to_machine(void *virt) -{ - return __pa(virt); -} - -static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) -{ - /* nothing */ -} - -#define pte_mfn(_x) pte_pfn(_x) -#define mfn_pte(_x, _y) __pte_ma(0) /* unmodified use */ -#define __pte_ma(_x) ((pte_t) {(_x)}) /* unmodified use */ - -#endif /* _ASM_IA64_XEN_PAGE_H */ diff --git a/arch/ia64/include/asm/xen/patchlist.h b/arch/ia64/include/asm/xen/patchlist.h deleted file mode 100644 index eae944e..0000000 --- a/arch/ia64/include/asm/xen/patchlist.h +++ /dev/null @@ -1,38 +0,0 @@ -/****************************************************************************** - * arch/ia64/include/asm/xen/patchlist.h - * - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#define __paravirt_start_gate_fsyscall_patchlist \ - __xen_start_gate_fsyscall_patchlist -#define __paravirt_end_gate_fsyscall_patchlist \ - __xen_end_gate_fsyscall_patchlist -#define __paravirt_start_gate_brl_fsys_bubble_down_patchlist \ - __xen_start_gate_brl_fsys_bubble_down_patchlist -#define __paravirt_end_gate_brl_fsys_bubble_down_patchlist \ - __xen_end_gate_brl_fsys_bubble_down_patchlist -#define __paravirt_start_gate_vtop_patchlist \ - __xen_start_gate_vtop_patchlist -#define __paravirt_end_gate_vtop_patchlist \ - __xen_end_gate_vtop_patchlist -#define __paravirt_start_gate_mckinley_e9_patchlist \ - __xen_start_gate_mckinley_e9_patchlist -#define __paravirt_end_gate_mckinley_e9_patchlist \ - __xen_end_gate_mckinley_e9_patchlist diff --git a/arch/ia64/include/asm/xen/privop.h b/arch/ia64/include/asm/xen/privop.h deleted file mode 100644 index fb4ec5e..0000000 --- a/arch/ia64/include/asm/xen/privop.h +++ /dev/null @@ -1,135 +0,0 @@ -#ifndef _ASM_IA64_XEN_PRIVOP_H -#define _ASM_IA64_XEN_PRIVOP_H - -/* - * Copyright (C) 2005 Hewlett-Packard Co - * Dan Magenheimer <dan.magenheimer@hp.com> - * - * Paravirtualizations of privileged operations for Xen/ia64 - * - * - * inline privop and paravirt_alt support - * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - */ - -#ifndef __ASSEMBLY__ -#include <linux/types.h> /* arch-ia64.h requires uint64_t */ -#endif -#include <asm/xen/interface.h> - -/* At 1 MB, before per-cpu space but still addressable using addl instead - of movl. */ -#define XSI_BASE 0xfffffffffff00000 - -/* Address of mapped regs. 
*/ -#define XMAPPEDREGS_BASE (XSI_BASE + XSI_SIZE) - -#ifdef __ASSEMBLY__ -#define XEN_HYPER_RFI break HYPERPRIVOP_RFI -#define XEN_HYPER_RSM_PSR_DT break HYPERPRIVOP_RSM_DT -#define XEN_HYPER_SSM_PSR_DT break HYPERPRIVOP_SSM_DT -#define XEN_HYPER_COVER break HYPERPRIVOP_COVER -#define XEN_HYPER_ITC_D break HYPERPRIVOP_ITC_D -#define XEN_HYPER_ITC_I break HYPERPRIVOP_ITC_I -#define XEN_HYPER_SSM_I break HYPERPRIVOP_SSM_I -#define XEN_HYPER_GET_IVR break HYPERPRIVOP_GET_IVR -#define XEN_HYPER_THASH break HYPERPRIVOP_THASH -#define XEN_HYPER_ITR_D break HYPERPRIVOP_ITR_D -#define XEN_HYPER_SET_KR break HYPERPRIVOP_SET_KR -#define XEN_HYPER_GET_PSR break HYPERPRIVOP_GET_PSR -#define XEN_HYPER_SET_RR0_TO_RR4 break HYPERPRIVOP_SET_RR0_TO_RR4 - -#define XSI_IFS (XSI_BASE + XSI_IFS_OFS) -#define XSI_PRECOVER_IFS (XSI_BASE + XSI_PRECOVER_IFS_OFS) -#define XSI_IFA (XSI_BASE + XSI_IFA_OFS) -#define XSI_ISR (XSI_BASE + XSI_ISR_OFS) -#define XSI_IIM (XSI_BASE + XSI_IIM_OFS) -#define XSI_ITIR (XSI_BASE + XSI_ITIR_OFS) -#define XSI_PSR_I_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS) -#define XSI_PSR_IC (XSI_BASE + XSI_PSR_IC_OFS) -#define XSI_IPSR (XSI_BASE + XSI_IPSR_OFS) -#define XSI_IIP (XSI_BASE + XSI_IIP_OFS) -#define XSI_B1NAT (XSI_BASE + XSI_B1NATS_OFS) -#define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS) -#define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS) -#define XSI_IHA (XSI_BASE + XSI_IHA_OFS) -#define XSI_ITC_OFFSET (XSI_BASE + XSI_ITC_OFFSET_OFS) -#define XSI_ITC_LAST (XSI_BASE + XSI_ITC_LAST_OFS) -#endif - -#ifndef __ASSEMBLY__ - -/************************************************/ -/* Instructions paravirtualized for correctness */ -/************************************************/ - -/* "fc" and "thash" are privilege-sensitive instructions, meaning they - * may have different semantics depending on whether they are executed - * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't - * be allowed to execute directly, lest incorrect semantics result. */ -extern void xen_fc(void *addr); -extern unsigned long xen_thash(unsigned long addr); - -/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag" - * is not currently used (though it may be in a long-format VHPT system!) - * and the semantics of cover only change if psr.ic is off which is very - * rare (and currently non-existent outside of assembly code */ - -/* There are also privilege-sensitive registers. These registers are - * readable at any privilege level but only writable at PL0. */ -extern unsigned long xen_get_cpuid(int index); -extern unsigned long xen_get_pmd(int index); - -#ifndef ASM_SUPPORTED -extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */ -extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */ -#endif - -/************************************************/ -/* Instructions paravirtualized for performance */ -/************************************************/ - -/* Xen uses memory-mapped virtual privileged registers for access to many - * performance-sensitive privileged registers. Some, like the processor - * status register (psr), are broken up into multiple memory locations. - * Others, like "pend", are abstractions based on privileged registers. - * "Pend" is guaranteed to be set if reading cr.ivr would return a - * (non-spurious) interrupt. 
*/ -#define XEN_MAPPEDREGS ((struct mapped_regs *)XMAPPEDREGS_BASE) - -#define XSI_PSR_I \ - (*XEN_MAPPEDREGS->interrupt_mask_addr) -#define xen_get_virtual_psr_i() \ - (!XSI_PSR_I) -#define xen_set_virtual_psr_i(_val) \ - ({ XSI_PSR_I = (uint8_t)(_val) ? 0 : 1; }) -#define xen_set_virtual_psr_ic(_val) \ - ({ XEN_MAPPEDREGS->interrupt_collection_enabled = _val ? 1 : 0; }) -#define xen_get_virtual_pend() \ - (*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1)) - -#ifndef ASM_SUPPORTED -/* Although all privileged operations can be left to trap and will - * be properly handled by Xen, some are frequent enough that we use - * hyperprivops for performance. */ -extern unsigned long xen_get_psr(void); -extern unsigned long xen_get_ivr(void); -extern unsigned long xen_get_tpr(void); -extern void xen_hyper_ssm_i(void); -extern void xen_set_itm(unsigned long); -extern void xen_set_tpr(unsigned long); -extern void xen_eoi(unsigned long); -extern unsigned long xen_get_rr(unsigned long index); -extern void xen_set_rr(unsigned long index, unsigned long val); -extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1, - unsigned long val2, unsigned long val3, - unsigned long val4); -extern void xen_set_kr(unsigned long index, unsigned long val); -extern void xen_ptcga(unsigned long addr, unsigned long size); -#endif /* !ASM_SUPPORTED */ - -#endif /* !__ASSEMBLY__ */ - -#endif /* _ASM_IA64_XEN_PRIVOP_H */ diff --git a/arch/ia64/include/asm/xen/xcom_hcall.h b/arch/ia64/include/asm/xen/xcom_hcall.h deleted file mode 100644 index 20b2950..0000000 --- a/arch/ia64/include/asm/xen/xcom_hcall.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (C) 2006 Tristan Gingold <tristan.gingold@bull.net>, Bull SAS - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _ASM_IA64_XEN_XCOM_HCALL_H -#define _ASM_IA64_XEN_XCOM_HCALL_H - -/* These function creates inline or mini descriptor for the parameters and - calls the corresponding xencomm_arch_hypercall_X. - Architectures should defines HYPERVISOR_xxx as xencomm_hypercall_xxx unless - they want to use their own wrapper. 
*/ -extern int xencomm_hypercall_console_io(int cmd, int count, char *str); - -extern int xencomm_hypercall_event_channel_op(int cmd, void *op); - -extern int xencomm_hypercall_xen_version(int cmd, void *arg); - -extern int xencomm_hypercall_physdev_op(int cmd, void *op); - -extern int xencomm_hypercall_grant_table_op(unsigned int cmd, void *op, - unsigned int count); - -extern int xencomm_hypercall_sched_op(int cmd, void *arg); - -extern int xencomm_hypercall_multicall(void *call_list, int nr_calls); - -extern int xencomm_hypercall_callback_op(int cmd, void *arg); - -extern int xencomm_hypercall_memory_op(unsigned int cmd, void *arg); - -extern int xencomm_hypercall_suspend(unsigned long srec); - -extern long xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg); - -extern long xencomm_hypercall_opt_feature(void *arg); - -#endif /* _ASM_IA64_XEN_XCOM_HCALL_H */ diff --git a/arch/ia64/include/asm/xen/xencomm.h b/arch/ia64/include/asm/xen/xencomm.h deleted file mode 100644 index cded677..0000000 --- a/arch/ia64/include/asm/xen/xencomm.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _ASM_IA64_XEN_XENCOMM_H -#define _ASM_IA64_XEN_XENCOMM_H - -#include <xen/xencomm.h> -#include <asm/pgtable.h> - -/* Must be called before any hypercall. */ -extern void xencomm_initialize(void); -extern int xencomm_is_initialized(void); - -/* Check if virtual contiguity means physical contiguity - * where the passed address is a pointer value in virtual address. 
- * On ia64, identity mapping area in region 7 or the piece of region 5 - * that is mapped by itr[IA64_TR_KERNEL]/dtr[IA64_TR_KERNEL] - */ -static inline int xencomm_is_phys_contiguous(unsigned long addr) -{ - return (PAGE_OFFSET <= addr && - addr < (PAGE_OFFSET + (1UL << IA64_MAX_PHYS_BITS))) || - (KERNEL_START <= addr && - addr < KERNEL_START + KERNEL_TR_PAGE_SIZE); -} - -#endif /* _ASM_IA64_XEN_XENCOMM_H */ diff --git a/arch/ia64/include/uapi/asm/break.h b/arch/ia64/include/uapi/asm/break.h index e90c40e..f034020 100644 --- a/arch/ia64/include/uapi/asm/break.h +++ b/arch/ia64/include/uapi/asm/break.h @@ -20,13 +20,4 @@ */ #define __IA64_BREAK_SYSCALL 0x100000 -/* - * Xen specific break numbers: - */ -#define __IA64_XEN_HYPERCALL 0x1000 -/* [__IA64_XEN_HYPERPRIVOP_START, __IA64_XEN_HYPERPRIVOP_MAX] is used - for xen hyperprivops */ -#define __IA64_XEN_HYPERPRIVOP_START 0x1 -#define __IA64_XEN_HYPERPRIVOP_MAX 0x1a - #endif /* _ASM_IA64_BREAK_H */ diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 59d52e3..bfa1931 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -53,7 +53,6 @@ #include <asm/numa.h> #include <asm/sal.h> #include <asm/cyclone.h> -#include <asm/xen/hypervisor.h> #define BAD_MADT_ENTRY(entry, end) ( \ (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ @@ -120,8 +119,6 @@ acpi_get_sysname(void) return "uv"; else return "sn2"; - } else if (xen_pv_domain() && !strcmp(hdr->oem_id, "XEN")) { - return "xen"; } #ifdef CONFIG_INTEL_IOMMU diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c index 46c9e30..60ef83e 100644 --- a/arch/ia64/kernel/asm-offsets.c +++ b/arch/ia64/kernel/asm-offsets.c @@ -16,9 +16,6 @@ #include <asm/sigcontext.h> #include <asm/mca.h> -#include <asm/xen/interface.h> -#include <asm/xen/hypervisor.h> - #include "../kernel/sigframe.h" #include "../kernel/fsyscall_gtod_data.h" @@ -290,33 +287,4 @@ void foo(void) DEFINE(IA64_ITC_LASTCYCLE_OFFSET, offsetof (struct itc_jitter_data_t, itc_lastcycle)); -#ifdef CONFIG_XEN - BLANK(); - - DEFINE(XEN_NATIVE_ASM, XEN_NATIVE); - DEFINE(XEN_PV_DOMAIN_ASM, XEN_PV_DOMAIN); - -#define DEFINE_MAPPED_REG_OFS(sym, field) \ - DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(struct mapped_regs, field))) - - DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr); - DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr); - DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip); - DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs); - DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs); - DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr); - DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa); - DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa); - DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim); - DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha); - DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir); - DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled); - DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum); - DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]); - DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]); - DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat); - DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat); - DEFINE_MAPPED_REG_OFS(XSI_ITC_OFFSET_OFS, itc_offset); - DEFINE_MAPPED_REG_OFS(XSI_ITC_LAST_OFS, itc_last); -#endif /* CONFIG_XEN */ } diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S index 991ca33..e6f80fc 100644 --- a/arch/ia64/kernel/head.S +++ b/arch/ia64/kernel/head.S @@ -416,8 +416,6 @@ start_ap: default_setup_hook = 0 // Currently nothing needs to be done. 
- .weak xen_setup_hook - .global hypervisor_type hypervisor_type: data8 PARAVIRT_HYPERVISOR_TYPE_DEFAULT @@ -426,7 +424,6 @@ hypervisor_type: hypervisor_setup_hooks: data8 default_setup_hook - data8 xen_setup_hook num_hypervisor_hooks = (. - hypervisor_setup_hooks) / 8 .previous diff --git a/arch/ia64/kernel/nr-irqs.c b/arch/ia64/kernel/nr-irqs.c index ee56457..f6769cd 100644 --- a/arch/ia64/kernel/nr-irqs.c +++ b/arch/ia64/kernel/nr-irqs.c @@ -10,15 +10,11 @@ #include <linux/kbuild.h> #include <linux/threads.h> #include <asm/native/irq.h> -#include <asm/xen/irq.h> void foo(void) { union paravirt_nr_irqs_max { char ia64_native_nr_irqs[IA64_NATIVE_NR_IRQS]; -#ifdef CONFIG_XEN - char xen_nr_irqs[XEN_NR_IRQS]; -#endif }; DEFINE(NR_IRQS, sizeof (union paravirt_nr_irqs_max)); diff --git a/arch/ia64/kernel/paravirt_inst.h b/arch/ia64/kernel/paravirt_inst.h index 64d6d81..1ad7512 100644 --- a/arch/ia64/kernel/paravirt_inst.h +++ b/arch/ia64/kernel/paravirt_inst.h @@ -22,9 +22,6 @@ #ifdef __IA64_ASM_PARAVIRTUALIZED_PVCHECK #include <asm/native/pvchk_inst.h> -#elif defined(__IA64_ASM_PARAVIRTUALIZED_XEN) -#include <asm/xen/inst.h> -#include <asm/xen/minstate.h> #else #include <asm/native/inst.h> #endif diff --git a/arch/ia64/kernel/paravirt_patchlist.h b/arch/ia64/kernel/paravirt_patchlist.h index 0684aa6..67cffc36 100644 --- a/arch/ia64/kernel/paravirt_patchlist.h +++ b/arch/ia64/kernel/paravirt_patchlist.h @@ -20,9 +20,5 @@ * */ -#if defined(__IA64_GATE_PARAVIRTUALIZED_XEN) -#include <asm/xen/patchlist.h> -#else #include <asm/native/patchlist.h> -#endif diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 0ccb28f..84f8a52 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -182,12 +182,6 @@ SECTIONS { __start_gate_section = .; *(.data..gate) __stop_gate_section = .; -#ifdef CONFIG_XEN - . = ALIGN(PAGE_SIZE); - __xen_start_gate_section = .; - *(.data..gate.xen) - __xen_stop_gate_section = .; -#endif } /* * make sure the gate page doesn't expose diff --git a/arch/ia64/xen/Kconfig b/arch/ia64/xen/Kconfig deleted file mode 100644 index 5d8a06b..0000000 --- a/arch/ia64/xen/Kconfig +++ /dev/null @@ -1,25 +0,0 @@ -# -# This Kconfig describes xen/ia64 options -# - -config XEN - bool "Xen hypervisor support" - default y - depends on PARAVIRT && MCKINLEY && IA64_PAGE_SIZE_16KB - select XEN_XENCOMM - select NO_IDLE_HZ - # followings are required to save/restore. - select ARCH_SUSPEND_POSSIBLE - select SUSPEND - select PM_SLEEP - help - Enable Xen hypervisor support. Resulting kernel runs - both as a guest OS on Xen and natively on hardware. - -config XEN_XENCOMM - depends on XEN - bool - -config NO_IDLE_HZ - depends on XEN - bool diff --git a/arch/ia64/xen/Makefile b/arch/ia64/xen/Makefile deleted file mode 100644 index e6f4a0a..0000000 --- a/arch/ia64/xen/Makefile +++ /dev/null @@ -1,37 +0,0 @@ -# -# Makefile for Xen components -# - -obj-y := hypercall.o xenivt.o xensetup.o xen_pv_ops.o irq_xen.o \ - hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o \ - gate-data.o - -obj-$(CONFIG_IA64_GENERIC) += machvec.o - -# The gate DSO image is built using a special linker script. -include $(srctree)/arch/ia64/kernel/Makefile.gate - -# tell compiled for xen -CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_XEN -AFLAGS_gate.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN -D__IA64_GATE_PARAVIRTUALIZED_XEN - -# use same file of native. 
-$(obj)/gate.o: $(src)/../kernel/gate.S FORCE - $(call if_changed_dep,as_o_S) -$(obj)/gate.lds: $(src)/../kernel/gate.lds.S FORCE - $(call if_changed_dep,cpp_lds_S) - - -AFLAGS_xenivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN - -# xen multi compile -ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S fsys.S -ASM_PARAVIRT_OBJS = $(addprefix xen-,$(ASM_PARAVIRT_MULTI_COMPILE_SRCS:.S=.o)) -obj-y += $(ASM_PARAVIRT_OBJS) -define paravirtualized_xen -AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_XEN -endef -$(foreach o,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_xen,$(o)))) - -$(obj)/xen-%.o: $(src)/../kernel/%.S FORCE - $(call if_changed_dep,as_o_S) diff --git a/arch/ia64/xen/gate-data.S b/arch/ia64/xen/gate-data.S deleted file mode 100644 index 6f95b6b..0000000 --- a/arch/ia64/xen/gate-data.S +++ /dev/null @@ -1,3 +0,0 @@ - .section .data..gate.xen, "aw" - - .incbin "arch/ia64/xen/gate.so" diff --git a/arch/ia64/xen/grant-table.c b/arch/ia64/xen/grant-table.c deleted file mode 100644 index c182813..0000000 --- a/arch/ia64/xen/grant-table.c +++ /dev/null @@ -1,94 +0,0 @@ -/****************************************************************************** - * arch/ia64/xen/grant-table.c - * - * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#include <linux/module.h> -#include <linux/vmalloc.h> -#include <linux/slab.h> -#include <linux/mm.h> - -#include <xen/interface/xen.h> -#include <xen/interface/memory.h> -#include <xen/grant_table.h> - -#include <asm/xen/hypervisor.h> - -/**************************************************************************** - * grant table hack - * cmd: GNTTABOP_xxx - */ - -int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, - unsigned long max_nr_gframes, - struct grant_entry **__shared) -{ - *__shared = __va(frames[0] << PAGE_SHIFT); - return 0; -} - -void arch_gnttab_unmap_shared(struct grant_entry *shared, - unsigned long nr_gframes) -{ - /* nothing */ -} - -static void -gnttab_map_grant_ref_pre(struct gnttab_map_grant_ref *uop) -{ - uint32_t flags; - - flags = uop->flags; - - if (flags & GNTMAP_host_map) { - if (flags & GNTMAP_application_map) { - printk(KERN_DEBUG - "GNTMAP_application_map is not supported yet: " - "flags 0x%x\n", flags); - BUG(); - } - if (flags & GNTMAP_contains_pte) { - printk(KERN_DEBUG - "GNTMAP_contains_pte is not supported yet: " - "flags 0x%x\n", flags); - BUG(); - } - } else if (flags & GNTMAP_device_map) { - printk("GNTMAP_device_map is not supported yet 0x%x\n", flags); - BUG(); /* not yet. actually this flag is not used. 
*/ - } else { - BUG(); - } -} - -int -HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count) -{ - if (cmd == GNTTABOP_map_grant_ref) { - unsigned int i; - for (i = 0; i < count; i++) { - gnttab_map_grant_ref_pre( - (struct gnttab_map_grant_ref *)uop + i); - } - } - return xencomm_hypercall_grant_table_op(cmd, uop, count); -} - -EXPORT_SYMBOL(HYPERVISOR_grant_table_op); diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S deleted file mode 100644 index 08847aa..0000000 --- a/arch/ia64/xen/hypercall.S +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Support routines for Xen hypercalls - * - * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com> - * Copyright (C) 2008 Yaozu (Eddie) Dong <eddie.dong@intel.com> - */ - -#include <asm/asmmacro.h> -#include <asm/intrinsics.h> -#include <asm/xen/privop.h> - -#ifdef __INTEL_COMPILER -/* - * Hypercalls without parameter. - */ -#define __HCALL0(name,hcall) \ - GLOBAL_ENTRY(name); \ - break hcall; \ - br.ret.sptk.many rp; \ - END(name) - -/* - * Hypercalls with 1 parameter. - */ -#define __HCALL1(name,hcall) \ - GLOBAL_ENTRY(name); \ - mov r8=r32; \ - break hcall; \ - br.ret.sptk.many rp; \ - END(name) - -/* - * Hypercalls with 2 parameters. - */ -#define __HCALL2(name,hcall) \ - GLOBAL_ENTRY(name); \ - mov r8=r32; \ - mov r9=r33; \ - break hcall; \ - br.ret.sptk.many rp; \ - END(name) - -__HCALL0(xen_get_psr, HYPERPRIVOP_GET_PSR) -__HCALL0(xen_get_ivr, HYPERPRIVOP_GET_IVR) -__HCALL0(xen_get_tpr, HYPERPRIVOP_GET_TPR) -__HCALL0(xen_hyper_ssm_i, HYPERPRIVOP_SSM_I) - -__HCALL1(xen_set_tpr, HYPERPRIVOP_SET_TPR) -__HCALL1(xen_eoi, HYPERPRIVOP_EOI) -__HCALL1(xen_thash, HYPERPRIVOP_THASH) -__HCALL1(xen_set_itm, HYPERPRIVOP_SET_ITM) -__HCALL1(xen_get_rr, HYPERPRIVOP_GET_RR) -__HCALL1(xen_fc, HYPERPRIVOP_FC) -__HCALL1(xen_get_cpuid, HYPERPRIVOP_GET_CPUID) -__HCALL1(xen_get_pmd, HYPERPRIVOP_GET_PMD) - -__HCALL2(xen_ptcga, HYPERPRIVOP_PTC_GA) -__HCALL2(xen_set_rr, HYPERPRIVOP_SET_RR) -__HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR) - -GLOBAL_ENTRY(xen_set_rr0_to_rr4) - mov r8=r32 - mov r9=r33 - mov r10=r34 - mov r11=r35 - mov r14=r36 - XEN_HYPER_SET_RR0_TO_RR4 - br.ret.sptk.many rp - ;; -END(xen_set_rr0_to_rr4) -#endif - -GLOBAL_ENTRY(xen_send_ipi) - mov r14=r32 - mov r15=r33 - mov r2=0x400 - break 0x1000 - ;; - br.ret.sptk.many rp - ;; -END(xen_send_ipi) - -GLOBAL_ENTRY(__hypercall) - mov r2=r37 - break 0x1000 - br.ret.sptk.many b0 - ;; -END(__hypercall) diff --git a/arch/ia64/xen/hypervisor.c b/arch/ia64/xen/hypervisor.c deleted file mode 100644 index fab6252..0000000 --- a/arch/ia64/xen/hypervisor.c +++ /dev/null @@ -1,97 +0,0 @@ -/****************************************************************************** - * arch/ia64/xen/hypervisor.c - * - * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#include <linux/efi.h> -#include <linux/export.h> -#include <asm/xen/hypervisor.h> -#include <asm/xen/privop.h> - -#include "irq_xen.h" - -struct shared_info *HYPERVISOR_shared_info __read_mostly = - (struct shared_info *)XSI_BASE; -EXPORT_SYMBOL(HYPERVISOR_shared_info); - -DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); - -struct start_info *xen_start_info; -EXPORT_SYMBOL(xen_start_info); - -EXPORT_SYMBOL(xen_domain_type); - -EXPORT_SYMBOL(__hypercall); - -/* Stolen from arch/x86/xen/enlighten.c */ -/* - * Flag to determine whether vcpu info placement is available on all - * VCPUs. We assume it is to start with, and then set it to zero on - * the first failure. This is because it can succeed on some VCPUs - * and not others, since it can involve hypervisor memory allocation, - * or because the guest failed to guarantee all the appropriate - * constraints on all VCPUs (ie buffer can't cross a page boundary). - * - * Note that any particular CPU may be using a placed vcpu structure, - * but we can only optimise if the all are. - * - * 0: not available, 1: available - */ - -static void __init xen_vcpu_setup(int cpu) -{ - /* - * WARNING: - * before changing MAX_VIRT_CPUS, - * check that shared_info fits on a page - */ - BUILD_BUG_ON(sizeof(struct shared_info) > PAGE_SIZE); - per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; -} - -void __init xen_setup_vcpu_info_placement(void) -{ - int cpu; - - for_each_possible_cpu(cpu) - xen_vcpu_setup(cpu); -} - -void -xen_cpu_init(void) -{ - xen_smp_intr_init(); -} - -/************************************************************************** - * opt feature - */ -void -xen_ia64_enable_opt_feature(void) -{ - /* Enable region 7 identity map optimizations in Xen */ - struct xen_ia64_opt_feature optf; - - optf.cmd = XEN_IA64_OPTF_IDENT_MAP_REG7; - optf.on = XEN_IA64_OPTF_ON; - optf.pgprot = pgprot_val(PAGE_KERNEL); - optf.key = 0; /* No key on linux. */ - HYPERVISOR_opt_feature(&optf); -} diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c deleted file mode 100644 index efb74da..0000000 --- a/arch/ia64/xen/irq_xen.c +++ /dev/null @@ -1,443 +0,0 @@ -/****************************************************************************** - * arch/ia64/xen/irq_xen.c - * - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#include <linux/cpu.h> - -#include <xen/interface/xen.h> -#include <xen/interface/callback.h> -#include <xen/events.h> - -#include <asm/xen/privop.h> - -#include "irq_xen.h" - -/*************************************************************************** - * pv_irq_ops - * irq operations - */ - -static int -xen_assign_irq_vector(int irq) -{ - struct physdev_irq irq_op; - - irq_op.irq = irq; - if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) - return -ENOSPC; - - return irq_op.vector; -} - -static void -xen_free_irq_vector(int vector) -{ - struct physdev_irq irq_op; - - if (vector < IA64_FIRST_DEVICE_VECTOR || - vector > IA64_LAST_DEVICE_VECTOR) - return; - - irq_op.vector = vector; - if (HYPERVISOR_physdev_op(PHYSDEVOP_free_irq_vector, &irq_op)) - printk(KERN_WARNING "%s: xen_free_irq_vector fail vector=%d\n", - __func__, vector); -} - - -static DEFINE_PER_CPU(int, xen_timer_irq) = -1; -static DEFINE_PER_CPU(int, xen_ipi_irq) = -1; -static DEFINE_PER_CPU(int, xen_resched_irq) = -1; -static DEFINE_PER_CPU(int, xen_cmc_irq) = -1; -static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1; -static DEFINE_PER_CPU(int, xen_cpep_irq) = -1; -#define NAME_SIZE 15 -static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name); -static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name); -static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name); -static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name); -static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name); -static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name); -#undef NAME_SIZE - -struct saved_irq { - unsigned int irq; - struct irqaction *action; -}; -/* 16 should be far optimistic value, since only several percpu irqs - * are registered early. - */ -#define MAX_LATE_IRQ 16 -static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ]; -static unsigned short late_irq_cnt; -static unsigned short saved_irq_cnt; -static int xen_slab_ready; - -#ifdef CONFIG_SMP -#include <linux/sched.h> - -/* Dummy stub. Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ, - * it ends up to issue several memory accesses upon percpu data and - * thus adds unnecessary traffic to other paths. - */ -static irqreturn_t -xen_dummy_handler(int irq, void *dev_id) -{ - return IRQ_HANDLED; -} - -static irqreturn_t -xen_resched_handler(int irq, void *dev_id) -{ - scheduler_ipi(); - return IRQ_HANDLED; -} - -static struct irqaction xen_ipi_irqaction = { - .handler = handle_IPI, - .flags = IRQF_DISABLED, - .name = "IPI" -}; - -static struct irqaction xen_resched_irqaction = { - .handler = xen_resched_handler, - .flags = IRQF_DISABLED, - .name = "resched" -}; - -static struct irqaction xen_tlb_irqaction = { - .handler = xen_dummy_handler, - .flags = IRQF_DISABLED, - .name = "tlb_flush" -}; -#endif - -/* - * This is xen version percpu irq registration, which needs bind - * to xen specific evtchn sub-system. One trick here is that xen - * evtchn binding interface depends on kmalloc because related - * port needs to be freed at device/cpu down. So we cache the - * registration on BSP before slab is ready and then deal them - * at later point. For rest instances happening after slab ready, - * we hook them to xen evtchn immediately. - * - * FIXME: MCA is not supported by far, and thus "nomca" boot param is - * required. 
- */ -static void -__xen_register_percpu_irq(unsigned int cpu, unsigned int vec, - struct irqaction *action, int save) -{ - int irq = 0; - - if (xen_slab_ready) { - switch (vec) { - case IA64_TIMER_VECTOR: - snprintf(per_cpu(xen_timer_name, cpu), - sizeof(per_cpu(xen_timer_name, cpu)), - "%s%d", action->name, cpu); - irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu, - action->handler, action->flags, - per_cpu(xen_timer_name, cpu), action->dev_id); - per_cpu(xen_timer_irq, cpu) = irq; - break; - case IA64_IPI_RESCHEDULE: - snprintf(per_cpu(xen_resched_name, cpu), - sizeof(per_cpu(xen_resched_name, cpu)), - "%s%d", action->name, cpu); - irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu, - action->handler, action->flags, - per_cpu(xen_resched_name, cpu), action->dev_id); - per_cpu(xen_resched_irq, cpu) = irq; - break; - case IA64_IPI_VECTOR: - snprintf(per_cpu(xen_ipi_name, cpu), - sizeof(per_cpu(xen_ipi_name, cpu)), - "%s%d", action->name, cpu); - irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu, - action->handler, action->flags, - per_cpu(xen_ipi_name, cpu), action->dev_id); - per_cpu(xen_ipi_irq, cpu) = irq; - break; - case IA64_CMC_VECTOR: - snprintf(per_cpu(xen_cmc_name, cpu), - sizeof(per_cpu(xen_cmc_name, cpu)), - "%s%d", action->name, cpu); - irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu, - action->handler, - action->flags, - per_cpu(xen_cmc_name, cpu), - action->dev_id); - per_cpu(xen_cmc_irq, cpu) = irq; - break; - case IA64_CMCP_VECTOR: - snprintf(per_cpu(xen_cmcp_name, cpu), - sizeof(per_cpu(xen_cmcp_name, cpu)), - "%s%d", action->name, cpu); - irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu, - action->handler, - action->flags, - per_cpu(xen_cmcp_name, cpu), - action->dev_id); - per_cpu(xen_cmcp_irq, cpu) = irq; - break; - case IA64_CPEP_VECTOR: - snprintf(per_cpu(xen_cpep_name, cpu), - sizeof(per_cpu(xen_cpep_name, cpu)), - "%s%d", action->name, cpu); - irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu, - action->handler, - action->flags, - per_cpu(xen_cpep_name, cpu), - action->dev_id); - per_cpu(xen_cpep_irq, cpu) = irq; - break; - case IA64_CPE_VECTOR: - case IA64_MCA_RENDEZ_VECTOR: - case IA64_PERFMON_VECTOR: - case IA64_MCA_WAKEUP_VECTOR: - case IA64_SPURIOUS_INT_VECTOR: - /* No need to complain, these aren't supported. */ - break; - default: - printk(KERN_WARNING "Percpu irq %d is unsupported " - "by xen!\n", vec); - break; - } - BUG_ON(irq < 0); - - if (irq > 0) { - /* - * Mark percpu. Without this, migrate_irqs() will - * mark the interrupt for migrations and trigger it - * on cpu hotplug. - */ - irq_set_status_flags(irq, IRQ_PER_CPU); - } - } - - /* For BSP, we cache registered percpu irqs, and then re-walk - * them when initializing APs - */ - if (!cpu && save) { - BUG_ON(saved_irq_cnt == MAX_LATE_IRQ); - saved_percpu_irqs[saved_irq_cnt].irq = vec; - saved_percpu_irqs[saved_irq_cnt].action = action; - saved_irq_cnt++; - if (!xen_slab_ready) - late_irq_cnt++; - } -} - -static void -xen_register_percpu_irq(ia64_vector vec, struct irqaction *action) -{ - __xen_register_percpu_irq(smp_processor_id(), vec, action, 1); -} - -static void -xen_bind_early_percpu_irq(void) -{ - int i; - - xen_slab_ready = 1; - /* There's no race when accessing this cached array, since only - * BSP will face with such step shortly - */ - for (i = 0; i < late_irq_cnt; i++) - __xen_register_percpu_irq(smp_processor_id(), - saved_percpu_irqs[i].irq, - saved_percpu_irqs[i].action, 0); -} - -/* FIXME: There's no obvious point to check whether slab is ready. 
So - * a hack is used here by utilizing a late time hook. - */ - -#ifdef CONFIG_HOTPLUG_CPU -static int unbind_evtchn_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu) -{ - unsigned int cpu = (unsigned long)hcpu; - - if (action == CPU_DEAD) { - /* Unregister evtchn. */ - if (per_cpu(xen_cpep_irq, cpu) >= 0) { - unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu), - NULL); - per_cpu(xen_cpep_irq, cpu) = -1; - } - if (per_cpu(xen_cmcp_irq, cpu) >= 0) { - unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu), - NULL); - per_cpu(xen_cmcp_irq, cpu) = -1; - } - if (per_cpu(xen_cmc_irq, cpu) >= 0) { - unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL); - per_cpu(xen_cmc_irq, cpu) = -1; - } - if (per_cpu(xen_ipi_irq, cpu) >= 0) { - unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL); - per_cpu(xen_ipi_irq, cpu) = -1; - } - if (per_cpu(xen_resched_irq, cpu) >= 0) { - unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), - NULL); - per_cpu(xen_resched_irq, cpu) = -1; - } - if (per_cpu(xen_timer_irq, cpu) >= 0) { - unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu), - NULL); - per_cpu(xen_timer_irq, cpu) = -1; - } - } - return NOTIFY_OK; -} - -static struct notifier_block unbind_evtchn_notifier = { - .notifier_call = unbind_evtchn_callback, - .priority = 0 -}; -#endif - -void xen_smp_intr_init_early(unsigned int cpu) -{ -#ifdef CONFIG_SMP - unsigned int i; - - for (i = 0; i < saved_irq_cnt; i++) - __xen_register_percpu_irq(cpu, saved_percpu_irqs[i].irq, - saved_percpu_irqs[i].action, 0); -#endif -} - -void xen_smp_intr_init(void) -{ -#ifdef CONFIG_SMP - unsigned int cpu = smp_processor_id(); - struct callback_register event = { - .type = CALLBACKTYPE_event, - .address = { .ip = (unsigned long)&xen_event_callback }, - }; - - if (cpu == 0) { - /* Initialization was already done for boot cpu. */ -#ifdef CONFIG_HOTPLUG_CPU - /* Register the notifier only once. */ - register_cpu_notifier(&unbind_evtchn_notifier); -#endif - return; - } - - /* This should be piggyback when setup vcpu guest context */ - BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event)); -#endif /* CONFIG_SMP */ -} - -void __init -xen_irq_init(void) -{ - struct callback_register event = { - .type = CALLBACKTYPE_event, - .address = { .ip = (unsigned long)&xen_event_callback }, - }; - - xen_init_IRQ(); - BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event)); - late_time_init = xen_bind_early_percpu_irq; -} - -void -xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect) -{ -#ifdef CONFIG_SMP - /* TODO: we need to call vcpu_up here */ - if (unlikely(vector == ap_wakeup_vector)) { - /* XXX - * This should be in __cpu_up(cpu) in ia64 smpboot.c - * like x86. But don't want to modify it, - * keep it untouched. 
- */ - xen_smp_intr_init_early(cpu); - - xen_send_ipi(cpu, vector); - /* vcpu_prepare_and_up(cpu); */ - return; - } -#endif - - switch (vector) { - case IA64_IPI_VECTOR: - xen_send_IPI_one(cpu, XEN_IPI_VECTOR); - break; - case IA64_IPI_RESCHEDULE: - xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); - break; - case IA64_CMCP_VECTOR: - xen_send_IPI_one(cpu, XEN_CMCP_VECTOR); - break; - case IA64_CPEP_VECTOR: - xen_send_IPI_one(cpu, XEN_CPEP_VECTOR); - break; - case IA64_TIMER_VECTOR: { - /* this is used only once by check_sal_cache_flush() - at boot time */ - static int used = 0; - if (!used) { - xen_send_ipi(cpu, IA64_TIMER_VECTOR); - used = 1; - break; - } - /* fallthrough */ - } - default: - printk(KERN_WARNING "Unsupported IPI type 0x%x\n", - vector); - notify_remote_via_irq(0); /* defaults to 0 irq */ - break; - } -} - -static void __init -xen_register_ipi(void) -{ -#ifdef CONFIG_SMP - register_percpu_irq(IA64_IPI_VECTOR, &xen_ipi_irqaction); - register_percpu_irq(IA64_IPI_RESCHEDULE, &xen_resched_irqaction); - register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &xen_tlb_irqaction); -#endif -} - -static void -xen_resend_irq(unsigned int vector) -{ - (void)resend_irq_on_evtchn(vector); -} - -const struct pv_irq_ops xen_irq_ops __initconst = { - .register_ipi = xen_register_ipi, - - .assign_irq_vector = xen_assign_irq_vector, - .free_irq_vector = xen_free_irq_vector, - .register_percpu_irq = xen_register_percpu_irq, - - .resend_irq = xen_resend_irq, -}; diff --git a/arch/ia64/xen/irq_xen.h b/arch/ia64/xen/irq_xen.h deleted file mode 100644 index 1778517..0000000 --- a/arch/ia64/xen/irq_xen.h +++ /dev/null @@ -1,34 +0,0 @@ -/****************************************************************************** - * arch/ia64/xen/irq_xen.h - * - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#ifndef IRQ_XEN_H -#define IRQ_XEN_H - -extern void (*late_time_init)(void); -extern char xen_event_callback; -void __init xen_init_IRQ(void); - -extern const struct pv_irq_ops xen_irq_ops __initconst; -extern void xen_smp_intr_init(void); -extern void xen_send_ipi(int cpu, int vec); - -#endif /* IRQ_XEN_H */ diff --git a/arch/ia64/xen/machvec.c b/arch/ia64/xen/machvec.c deleted file mode 100644 index 4ad588a..0000000 --- a/arch/ia64/xen/machvec.c +++ /dev/null @@ -1,4 +0,0 @@ -#define MACHVEC_PLATFORM_NAME xen -#define MACHVEC_PLATFORM_HEADER <asm/machvec_xen.h> -#include <asm/machvec_init.h> - diff --git a/arch/ia64/xen/suspend.c b/arch/ia64/xen/suspend.c deleted file mode 100644 index 419c862..0000000 --- a/arch/ia64/xen/suspend.c +++ /dev/null @@ -1,59 +0,0 @@ -/****************************************************************************** - * arch/ia64/xen/suspend.c - * - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * suspend/resume - */ - -#include <xen/xen-ops.h> -#include <asm/xen/hypervisor.h> -#include "time.h" - -void -xen_mm_pin_all(void) -{ - /* nothing */ -} - -void -xen_mm_unpin_all(void) -{ - /* nothing */ -} - -void -xen_arch_pre_suspend() -{ - /* nothing */ -} - -void -xen_arch_post_suspend(int suspend_cancelled) -{ - if (suspend_cancelled) - return; - - xen_ia64_enable_opt_feature(); - /* add more if necessary */ -} - -void xen_arch_resume(void) -{ - xen_timer_resume_on_aps(); -} diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c deleted file mode 100644 index 1f8244a..0000000 --- a/arch/ia64/xen/time.c +++ /dev/null @@ -1,257 +0,0 @@ -/****************************************************************************** - * arch/ia64/xen/time.c - * - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#include <linux/delay.h> -#include <linux/kernel_stat.h> -#include <linux/posix-timers.h> -#include <linux/irq.h> -#include <linux/clocksource.h> - -#include <asm/timex.h> - -#include <asm/xen/hypervisor.h> - -#include <xen/interface/vcpu.h> - -#include "../kernel/fsyscall_gtod_data.h" - -static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate); -static DEFINE_PER_CPU(unsigned long, xen_stolen_time); -static DEFINE_PER_CPU(unsigned long, xen_blocked_time); - -/* taken from i386/kernel/time-xen.c */ -static void xen_init_missing_ticks_accounting(int cpu) -{ - struct vcpu_register_runstate_memory_area area; - struct vcpu_runstate_info *runstate = &per_cpu(xen_runstate, cpu); - int rc; - - memset(runstate, 0, sizeof(*runstate)); - - area.addr.v = runstate; - rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, - &area); - WARN_ON(rc && rc != -ENOSYS); - - per_cpu(xen_blocked_time, cpu) = runstate->time[RUNSTATE_blocked]; - per_cpu(xen_stolen_time, cpu) = runstate->time[RUNSTATE_runnable] - + runstate->time[RUNSTATE_offline]; -} - -/* - * Runstate accounting - */ -/* stolen from arch/x86/xen/time.c */ -static void get_runstate_snapshot(struct vcpu_runstate_info *res) -{ - u64 state_time; - struct vcpu_runstate_info *state; - - BUG_ON(preemptible()); - - state = &__get_cpu_var(xen_runstate); - - /* - * The runstate info is always updated by the hypervisor on - * the current CPU, so there's no need to use anything - * stronger than a compiler barrier when fetching it. - */ - do { - state_time = state->state_entry_time; - rmb(); - *res = *state; - rmb(); - } while (state->state_entry_time != state_time); -} - -#define NS_PER_TICK (1000000000LL/HZ) - -static unsigned long -consider_steal_time(unsigned long new_itm) -{ - unsigned long stolen, blocked; - unsigned long delta_itm = 0, stolentick = 0; - int cpu = smp_processor_id(); - struct vcpu_runstate_info runstate; - struct task_struct *p = current; - - get_runstate_snapshot(&runstate); - - /* - * Check for vcpu migration effect - * In this case, itc value is reversed. - * This causes huge stolen value. - * This function just checks and reject this effect. 
- */ - if (!time_after_eq(runstate.time[RUNSTATE_blocked], - per_cpu(xen_blocked_time, cpu))) - blocked = 0; - - if (!time_after_eq(runstate.time[RUNSTATE_runnable] + - runstate.time[RUNSTATE_offline], - per_cpu(xen_stolen_time, cpu))) - stolen = 0; - - if (!time_after(delta_itm + new_itm, ia64_get_itc())) - stolentick = ia64_get_itc() - new_itm; - - do_div(stolentick, NS_PER_TICK); - stolentick++; - - do_div(stolen, NS_PER_TICK); - - if (stolen > stolentick) - stolen = stolentick; - - stolentick -= stolen; - do_div(blocked, NS_PER_TICK); - - if (blocked > stolentick) - blocked = stolentick; - - if (stolen > 0 || blocked > 0) { - account_steal_ticks(stolen); - account_idle_ticks(blocked); - run_local_timers(); - - rcu_check_callbacks(cpu, user_mode(get_irq_regs())); - - scheduler_tick(); - run_posix_cpu_timers(p); - delta_itm += local_cpu_data->itm_delta * (stolen + blocked); - - if (cpu == time_keeper_id) - xtime_update(stolen + blocked); - - local_cpu_data->itm_next = delta_itm + new_itm; - - per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen; - per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked; - } - return delta_itm; -} - -static int xen_do_steal_accounting(unsigned long *new_itm) -{ - unsigned long delta_itm; - delta_itm = consider_steal_time(*new_itm); - *new_itm += delta_itm; - if (time_after(*new_itm, ia64_get_itc()) && delta_itm) - return 1; - - return 0; -} - -static void xen_itc_jitter_data_reset(void) -{ - u64 lcycle, ret; - - do { - lcycle = itc_jitter_data.itc_lastcycle; - ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, 0); - } while (unlikely(ret != lcycle)); -} - -/* based on xen_sched_clock() in arch/x86/xen/time.c. */ -/* - * This relies on HAVE_UNSTABLE_SCHED_CLOCK. If it can't be defined, - * something similar logic should be implemented here. - */ -/* - * Xen sched_clock implementation. Returns the number of unstolen - * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED - * states. - */ -static unsigned long long xen_sched_clock(void) -{ - struct vcpu_runstate_info runstate; - - unsigned long long now; - unsigned long long offset; - unsigned long long ret; - - /* - * Ideally sched_clock should be called on a per-cpu basis - * anyway, so preempt should already be disabled, but that's - * not current practice at the moment. - */ - preempt_disable(); - - /* - * both ia64_native_sched_clock() and xen's runstate are - * based on mAR.ITC. So difference of them makes sense. - */ - now = ia64_native_sched_clock(); - - get_runstate_snapshot(&runstate); - - WARN_ON(runstate.state != RUNSTATE_running); - - offset = 0; - if (now > runstate.state_entry_time) - offset = now - runstate.state_entry_time; - ret = runstate.time[RUNSTATE_blocked] + - runstate.time[RUNSTATE_running] + - offset; - - preempt_enable(); - - return ret; -} - -struct pv_time_ops xen_time_ops __initdata = { - .init_missing_ticks_accounting = xen_init_missing_ticks_accounting, - .do_steal_accounting = xen_do_steal_accounting, - .clocksource_resume = xen_itc_jitter_data_reset, - .sched_clock = xen_sched_clock, -}; - -/* Called after suspend, to resume time. */ -static void xen_local_tick_resume(void) -{ - /* Just trigger a tick. 
*/ - ia64_cpu_local_tick(); - touch_softlockup_watchdog(); -} - -void -xen_timer_resume(void) -{ - unsigned int cpu; - - xen_local_tick_resume(); - - for_each_online_cpu(cpu) - xen_init_missing_ticks_accounting(cpu); -} - -static void ia64_cpu_local_tick_fn(void *unused) -{ - xen_local_tick_resume(); - xen_init_missing_ticks_accounting(smp_processor_id()); -} - -void -xen_timer_resume_on_aps(void) -{ - smp_call_function(&ia64_cpu_local_tick_fn, NULL, 1); -} diff --git a/arch/ia64/xen/time.h b/arch/ia64/xen/time.h deleted file mode 100644 index f98d7e1..0000000 --- a/arch/ia64/xen/time.h +++ /dev/null @@ -1,24 +0,0 @@ -/****************************************************************************** - * arch/ia64/xen/time.h - * - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -extern struct pv_time_ops xen_time_ops __initdata; -void xen_timer_resume_on_aps(void); diff --git a/arch/ia64/xen/xcom_hcall.c b/arch/ia64/xen/xcom_hcall.c deleted file mode 100644 index ccaf743..0000000 --- a/arch/ia64/xen/xcom_hcall.c +++ /dev/null @@ -1,441 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - * - * Tristan Gingold <tristan.gingold@bull.net> - * - * Copyright (c) 2007 - * Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * consolidate mini and inline version. - */ - -#include <linux/module.h> -#include <xen/interface/xen.h> -#include <xen/interface/memory.h> -#include <xen/interface/grant_table.h> -#include <xen/interface/callback.h> -#include <xen/interface/vcpu.h> -#include <asm/xen/hypervisor.h> -#include <asm/xen/xencomm.h> - -/* Xencomm notes: - * This file defines hypercalls to be used by xencomm. 
The hypercalls simply - * create inlines or mini descriptors for pointers and then call the raw arch - * hypercall xencomm_arch_hypercall_XXX - * - * If the arch wants to directly use these hypercalls, simply define macros - * in asm/xen/hypercall.h, eg: - * #define HYPERVISOR_sched_op xencomm_hypercall_sched_op - * - * The arch may also define HYPERVISOR_xxx as a function and do more operations - * before/after doing the hypercall. - * - * Note: because only inline or mini descriptors are created these functions - * must only be called with in kernel memory parameters. - */ - -int -xencomm_hypercall_console_io(int cmd, int count, char *str) -{ - /* xen early printk uses console io hypercall before - * xencomm initialization. In that case, we just ignore it. - */ - if (!xencomm_is_initialized()) - return 0; - - return xencomm_arch_hypercall_console_io - (cmd, count, xencomm_map_no_alloc(str, count)); -} -EXPORT_SYMBOL_GPL(xencomm_hypercall_console_io); - -int -xencomm_hypercall_event_channel_op(int cmd, void *op) -{ - struct xencomm_handle *desc; - desc = xencomm_map_no_alloc(op, sizeof(struct evtchn_op)); - if (desc == NULL) - return -EINVAL; - - return xencomm_arch_hypercall_event_channel_op(cmd, desc); -} -EXPORT_SYMBOL_GPL(xencomm_hypercall_event_channel_op); - -int -xencomm_hypercall_xen_version(int cmd, void *arg) -{ - struct xencomm_handle *desc; - unsigned int argsize; - - switch (cmd) { - case XENVER_version: - /* do not actually pass an argument */ - return xencomm_arch_hypercall_xen_version(cmd, 0); - case XENVER_extraversion: - argsize = sizeof(struct xen_extraversion); - break; - case XENVER_compile_info: - argsize = sizeof(struct xen_compile_info); - break; - case XENVER_capabilities: - argsize = sizeof(struct xen_capabilities_info); - break; - case XENVER_changeset: - argsize = sizeof(struct xen_changeset_info); - break; - case XENVER_platform_parameters: - argsize = sizeof(struct xen_platform_parameters); - break; - case XENVER_get_features: - argsize = (arg == NULL) ? 
0 : sizeof(struct xen_feature_info); - break; - - default: - printk(KERN_DEBUG - "%s: unknown version op %d\n", __func__, cmd); - return -ENOSYS; - } - - desc = xencomm_map_no_alloc(arg, argsize); - if (desc == NULL) - return -EINVAL; - - return xencomm_arch_hypercall_xen_version(cmd, desc); -} -EXPORT_SYMBOL_GPL(xencomm_hypercall_xen_version); - -int -xencomm_hypercall_physdev_op(int cmd, void *op) -{ - unsigned int argsize; - - switch (cmd) { - case PHYSDEVOP_apic_read: - case PHYSDEVOP_apic_write: - argsize = sizeof(struct physdev_apic); - break; - case PHYSDEVOP_alloc_irq_vector: - case PHYSDEVOP_free_irq_vector: - argsize = sizeof(struct physdev_irq); - break; - case PHYSDEVOP_irq_status_query: - argsize = sizeof(struct physdev_irq_status_query); - break; - - default: - printk(KERN_DEBUG - "%s: unknown physdev op %d\n", __func__, cmd); - return -ENOSYS; - } - - return xencomm_arch_hypercall_physdev_op - (cmd, xencomm_map_no_alloc(op, argsize)); -} - -static int -xencommize_grant_table_op(struct xencomm_mini **xc_area, - unsigned int cmd, void *op, unsigned int count, - struct xencomm_handle **desc) -{ - struct xencomm_handle *desc1; - unsigned int argsize; - - switch (cmd) { - case GNTTABOP_map_grant_ref: - argsize = sizeof(struct gnttab_map_grant_ref); - break; - case GNTTABOP_unmap_grant_ref: - argsize = sizeof(struct gnttab_unmap_grant_ref); - break; - case GNTTABOP_setup_table: - { - struct gnttab_setup_table *setup = op; - - argsize = sizeof(*setup); - - if (count != 1) - return -EINVAL; - desc1 = __xencomm_map_no_alloc - (xen_guest_handle(setup->frame_list), - setup->nr_frames * - sizeof(*xen_guest_handle(setup->frame_list)), - *xc_area); - if (desc1 == NULL) - return -EINVAL; - (*xc_area)++; - set_xen_guest_handle(setup->frame_list, (void *)desc1); - break; - } - case GNTTABOP_dump_table: - argsize = sizeof(struct gnttab_dump_table); - break; - case GNTTABOP_transfer: - argsize = sizeof(struct gnttab_transfer); - break; - case GNTTABOP_copy: - argsize = sizeof(struct gnttab_copy); - break; - case GNTTABOP_query_size: - argsize = sizeof(struct gnttab_query_size); - break; - default: - printk(KERN_DEBUG "%s: unknown hypercall grant table op %d\n", - __func__, cmd); - BUG(); - } - - *desc = __xencomm_map_no_alloc(op, count * argsize, *xc_area); - if (*desc == NULL) - return -EINVAL; - (*xc_area)++; - - return 0; -} - -int -xencomm_hypercall_grant_table_op(unsigned int cmd, void *op, - unsigned int count) -{ - int rc; - struct xencomm_handle *desc; - XENCOMM_MINI_ALIGNED(xc_area, 2); - - rc = xencommize_grant_table_op(&xc_area, cmd, op, count, &desc); - if (rc) - return rc; - - return xencomm_arch_hypercall_grant_table_op(cmd, desc, count); -} -EXPORT_SYMBOL_GPL(xencomm_hypercall_grant_table_op); - -int -xencomm_hypercall_sched_op(int cmd, void *arg) -{ - struct xencomm_handle *desc; - unsigned int argsize; - - switch (cmd) { - case SCHEDOP_yield: - case SCHEDOP_block: - argsize = 0; - break; - case SCHEDOP_shutdown: - argsize = sizeof(struct sched_shutdown); - break; - case SCHEDOP_poll: - { - struct sched_poll *poll = arg; - struct xencomm_handle *ports; - - argsize = sizeof(struct sched_poll); - ports = xencomm_map_no_alloc(xen_guest_handle(poll->ports), - sizeof(*xen_guest_handle(poll->ports))); - - set_xen_guest_handle(poll->ports, (void *)ports); - break; - } - default: - printk(KERN_DEBUG "%s: unknown sched op %d\n", __func__, cmd); - return -ENOSYS; - } - - desc = xencomm_map_no_alloc(arg, argsize); - if (desc == NULL) - return -EINVAL; - - return 
xencomm_arch_hypercall_sched_op(cmd, desc); -} -EXPORT_SYMBOL_GPL(xencomm_hypercall_sched_op); - -int -xencomm_hypercall_multicall(void *call_list, int nr_calls) -{ - int rc; - int i; - struct multicall_entry *mce; - struct xencomm_handle *desc; - XENCOMM_MINI_ALIGNED(xc_area, nr_calls * 2); - - for (i = 0; i < nr_calls; i++) { - mce = (struct multicall_entry *)call_list + i; - - switch (mce->op) { - case __HYPERVISOR_update_va_mapping: - case __HYPERVISOR_mmu_update: - /* No-op on ia64. */ - break; - case __HYPERVISOR_grant_table_op: - rc = xencommize_grant_table_op - (&xc_area, - mce->args[0], (void *)mce->args[1], - mce->args[2], &desc); - if (rc) - return rc; - mce->args[1] = (unsigned long)desc; - break; - case __HYPERVISOR_memory_op: - default: - printk(KERN_DEBUG - "%s: unhandled multicall op entry op %lu\n", - __func__, mce->op); - return -ENOSYS; - } - } - - desc = xencomm_map_no_alloc(call_list, - nr_calls * sizeof(struct multicall_entry)); - if (desc == NULL) - return -EINVAL; - - return xencomm_arch_hypercall_multicall(desc, nr_calls); -} -EXPORT_SYMBOL_GPL(xencomm_hypercall_multicall); - -int -xencomm_hypercall_callback_op(int cmd, void *arg) -{ - unsigned int argsize; - switch (cmd) { - case CALLBACKOP_register: - argsize = sizeof(struct callback_register); - break; - case CALLBACKOP_unregister: - argsize = sizeof(struct callback_unregister); - break; - default: - printk(KERN_DEBUG - "%s: unknown callback op %d\n", __func__, cmd); - return -ENOSYS; - } - - return xencomm_arch_hypercall_callback_op - (cmd, xencomm_map_no_alloc(arg, argsize)); -} - -static int -xencommize_memory_reservation(struct xencomm_mini *xc_area, - struct xen_memory_reservation *mop) -{ - struct xencomm_handle *desc; - - desc = __xencomm_map_no_alloc(xen_guest_handle(mop->extent_start), - mop->nr_extents * - sizeof(*xen_guest_handle(mop->extent_start)), - xc_area); - if (desc == NULL) - return -EINVAL; - - set_xen_guest_handle(mop->extent_start, (void *)desc); - return 0; -} - -int -xencomm_hypercall_memory_op(unsigned int cmd, void *arg) -{ - GUEST_HANDLE(xen_pfn_t) extent_start_va[2] = { {NULL}, {NULL} }; - struct xen_memory_reservation *xmr = NULL; - int rc; - struct xencomm_handle *desc; - unsigned int argsize; - XENCOMM_MINI_ALIGNED(xc_area, 2); - - switch (cmd) { - case XENMEM_increase_reservation: - case XENMEM_decrease_reservation: - case XENMEM_populate_physmap: - xmr = (struct xen_memory_reservation *)arg; - set_xen_guest_handle(extent_start_va[0], - xen_guest_handle(xmr->extent_start)); - - argsize = sizeof(*xmr); - rc = xencommize_memory_reservation(xc_area, xmr); - if (rc) - return rc; - xc_area++; - break; - - case XENMEM_maximum_ram_page: - argsize = 0; - break; - - case XENMEM_add_to_physmap: - argsize = sizeof(struct xen_add_to_physmap); - break; - - default: - printk(KERN_DEBUG "%s: unknown memory op %d\n", __func__, cmd); - return -ENOSYS; - } - - desc = xencomm_map_no_alloc(arg, argsize); - if (desc == NULL) - return -EINVAL; - - rc = xencomm_arch_hypercall_memory_op(cmd, desc); - - switch (cmd) { - case XENMEM_increase_reservation: - case XENMEM_decrease_reservation: - case XENMEM_populate_physmap: - set_xen_guest_handle(xmr->extent_start, - xen_guest_handle(extent_start_va[0])); - break; - } - - return rc; -} -EXPORT_SYMBOL_GPL(xencomm_hypercall_memory_op); - -int -xencomm_hypercall_suspend(unsigned long srec) -{ - struct sched_shutdown arg; - - arg.reason = SHUTDOWN_suspend; - - return xencomm_arch_hypercall_sched_op( - SCHEDOP_shutdown, xencomm_map_no_alloc(&arg, sizeof(arg))); -} 
- -long -xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg) -{ - unsigned int argsize; - switch (cmd) { - case VCPUOP_register_runstate_memory_area: { - struct vcpu_register_runstate_memory_area *area = - (struct vcpu_register_runstate_memory_area *)arg; - argsize = sizeof(*arg); - set_xen_guest_handle(area->addr.h, - (void *)xencomm_map_no_alloc(area->addr.v, - sizeof(area->addr.v))); - break; - } - - default: - printk(KERN_DEBUG "%s: unknown vcpu op %d\n", __func__, cmd); - return -ENOSYS; - } - - return xencomm_arch_hypercall_vcpu_op(cmd, cpu, - xencomm_map_no_alloc(arg, argsize)); -} - -long -xencomm_hypercall_opt_feature(void *arg) -{ - return xencomm_arch_hypercall_opt_feature( - xencomm_map_no_alloc(arg, - sizeof(struct xen_ia64_opt_feature))); -} diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c deleted file mode 100644 index 3e8d350..0000000 --- a/arch/ia64/xen/xen_pv_ops.c +++ /dev/null @@ -1,1141 +0,0 @@ -/****************************************************************************** - * arch/ia64/xen/xen_pv_ops.c - * - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#include <linux/console.h> -#include <linux/irq.h> -#include <linux/kernel.h> -#include <linux/pm.h> -#include <linux/unistd.h> - -#include <asm/xen/hypervisor.h> -#include <asm/xen/xencomm.h> -#include <asm/xen/privop.h> - -#include "irq_xen.h" -#include "time.h" - -/*************************************************************************** - * general info - */ -static struct pv_info xen_info __initdata = { - .kernel_rpl = 2, /* or 1: determin at runtime */ - .paravirt_enabled = 1, - .name = "Xen/ia64", -}; - -#define IA64_RSC_PL_SHIFT 2 -#define IA64_RSC_PL_BIT_SIZE 2 -#define IA64_RSC_PL_MASK \ - (((1UL << IA64_RSC_PL_BIT_SIZE) - 1) << IA64_RSC_PL_SHIFT) - -static void __init -xen_info_init(void) -{ - /* Xenified Linux/ia64 may run on pl = 1 or 2. - * determin at run time. */ - unsigned long rsc = ia64_getreg(_IA64_REG_AR_RSC); - unsigned int rpl = (rsc & IA64_RSC_PL_MASK) >> IA64_RSC_PL_SHIFT; - xen_info.kernel_rpl = rpl; -} - -/*************************************************************************** - * pv_init_ops - * initialization hooks. - */ - -static void -xen_panic_hypercall(struct unw_frame_info *info, void *arg) -{ - current->thread.ksp = (__u64)info->sw - 16; - HYPERVISOR_shutdown(SHUTDOWN_crash); - /* we're never actually going to get here... */ -} - -static int -xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr) -{ - unw_init_running(xen_panic_hypercall, NULL); - /* we're never actually going to get here... 
*/ - return NOTIFY_DONE; -} - -static struct notifier_block xen_panic_block = { - xen_panic_event, NULL, 0 /* try to go last */ -}; - -static void xen_pm_power_off(void) -{ - local_irq_disable(); - HYPERVISOR_shutdown(SHUTDOWN_poweroff); -} - -static void __init -xen_banner(void) -{ - printk(KERN_INFO - "Running on Xen! pl = %d start_info_pfn=0x%lx nr_pages=%ld " - "flags=0x%x\n", - xen_info.kernel_rpl, - HYPERVISOR_shared_info->arch.start_info_pfn, - xen_start_info->nr_pages, xen_start_info->flags); -} - -static int __init -xen_reserve_memory(struct rsvd_region *region) -{ - region->start = (unsigned long)__va( - (HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT)); - region->end = region->start + PAGE_SIZE; - return 1; -} - -static void __init -xen_arch_setup_early(void) -{ - struct shared_info *s; - BUG_ON(!xen_pv_domain()); - - s = HYPERVISOR_shared_info; - xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT); - - /* Must be done before any hypercall. */ - xencomm_initialize(); - - xen_setup_features(); - /* Register a call for panic conditions. */ - atomic_notifier_chain_register(&panic_notifier_list, - &xen_panic_block); - pm_power_off = xen_pm_power_off; - - xen_ia64_enable_opt_feature(); -} - -static void __init -xen_arch_setup_console(char **cmdline_p) -{ - add_preferred_console("xenboot", 0, NULL); - add_preferred_console("tty", 0, NULL); - /* use hvc_xen */ - add_preferred_console("hvc", 0, NULL); - -#if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE) - conswitchp = NULL; -#endif -} - -static int __init -xen_arch_setup_nomca(void) -{ - return 1; -} - -static void __init -xen_post_smp_prepare_boot_cpu(void) -{ - xen_setup_vcpu_info_placement(); -} - -#ifdef ASM_SUPPORTED -static unsigned long __init_or_module -xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type); -#endif -static void __init -xen_patch_branch(unsigned long tag, unsigned long type); - -static const struct pv_init_ops xen_init_ops __initconst = { - .banner = xen_banner, - - .reserve_memory = xen_reserve_memory, - - .arch_setup_early = xen_arch_setup_early, - .arch_setup_console = xen_arch_setup_console, - .arch_setup_nomca = xen_arch_setup_nomca, - - .post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu, -#ifdef ASM_SUPPORTED - .patch_bundle = xen_patch_bundle, -#endif - .patch_branch = xen_patch_branch, -}; - -/*************************************************************************** - * pv_fsys_data - * addresses for fsys - */ - -extern unsigned long xen_fsyscall_table[NR_syscalls]; -extern char xen_fsys_bubble_down[]; -struct pv_fsys_data xen_fsys_data __initdata = { - .fsyscall_table = (unsigned long *)xen_fsyscall_table, - .fsys_bubble_down = (void *)xen_fsys_bubble_down, -}; - -/*************************************************************************** - * pv_patchdata - * patchdata addresses - */ - -#define DECLARE(name) \ - extern unsigned long __xen_start_gate_##name##_patchlist[]; \ - extern unsigned long __xen_end_gate_##name##_patchlist[] - -DECLARE(fsyscall); -DECLARE(brl_fsys_bubble_down); -DECLARE(vtop); -DECLARE(mckinley_e9); - -extern unsigned long __xen_start_gate_section[]; - -#define ASSIGN(name) \ - .start_##name##_patchlist = \ - (unsigned long)__xen_start_gate_##name##_patchlist, \ - .end_##name##_patchlist = \ - (unsigned long)__xen_end_gate_##name##_patchlist - -static struct pv_patchdata xen_patchdata __initdata = { - ASSIGN(fsyscall), - ASSIGN(brl_fsys_bubble_down), - ASSIGN(vtop), - ASSIGN(mckinley_e9), - - .gate_section = 
(void*)__xen_start_gate_section, -}; - -/*************************************************************************** - * pv_cpu_ops - * intrinsics hooks. - */ - -#ifndef ASM_SUPPORTED -static void -xen_set_itm_with_offset(unsigned long val) -{ - /* ia64_cpu_local_tick() calls this with interrupt enabled. */ - /* WARN_ON(!irqs_disabled()); */ - xen_set_itm(val - XEN_MAPPEDREGS->itc_offset); -} - -static unsigned long -xen_get_itm_with_offset(void) -{ - /* unused at this moment */ - printk(KERN_DEBUG "%s is called.\n", __func__); - - WARN_ON(!irqs_disabled()); - return ia64_native_getreg(_IA64_REG_CR_ITM) + - XEN_MAPPEDREGS->itc_offset; -} - -/* ia64_set_itc() is only called by - * cpu_init() with ia64_set_itc(0) and ia64_sync_itc(). - * So XEN_MAPPEDRESG->itc_offset cal be considered as almost constant. - */ -static void -xen_set_itc(unsigned long val) -{ - unsigned long mitc; - - WARN_ON(!irqs_disabled()); - mitc = ia64_native_getreg(_IA64_REG_AR_ITC); - XEN_MAPPEDREGS->itc_offset = val - mitc; - XEN_MAPPEDREGS->itc_last = val; -} - -static unsigned long -xen_get_itc(void) -{ - unsigned long res; - unsigned long itc_offset; - unsigned long itc_last; - unsigned long ret_itc_last; - - itc_offset = XEN_MAPPEDREGS->itc_offset; - do { - itc_last = XEN_MAPPEDREGS->itc_last; - res = ia64_native_getreg(_IA64_REG_AR_ITC); - res += itc_offset; - if (itc_last >= res) - res = itc_last + 1; - ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last, - itc_last, res); - } while (unlikely(ret_itc_last != itc_last)); - return res; - -#if 0 - /* ia64_itc_udelay() calls ia64_get_itc() with interrupt enabled. - Should it be paravirtualized instead? */ - WARN_ON(!irqs_disabled()); - itc_offset = XEN_MAPPEDREGS->itc_offset; - itc_last = XEN_MAPPEDREGS->itc_last; - res = ia64_native_getreg(_IA64_REG_AR_ITC); - res += itc_offset; - if (itc_last >= res) - res = itc_last + 1; - XEN_MAPPEDREGS->itc_last = res; - return res; -#endif -} - -static void xen_setreg(int regnum, unsigned long val) -{ - switch (regnum) { - case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7: - xen_set_kr(regnum - _IA64_REG_AR_KR0, val); - break; - case _IA64_REG_AR_ITC: - xen_set_itc(val); - break; - case _IA64_REG_CR_TPR: - xen_set_tpr(val); - break; - case _IA64_REG_CR_ITM: - xen_set_itm_with_offset(val); - break; - case _IA64_REG_CR_EOI: - xen_eoi(val); - break; - default: - ia64_native_setreg_func(regnum, val); - break; - } -} - -static unsigned long xen_getreg(int regnum) -{ - unsigned long res; - - switch (regnum) { - case _IA64_REG_PSR: - res = xen_get_psr(); - break; - case _IA64_REG_AR_ITC: - res = xen_get_itc(); - break; - case _IA64_REG_CR_ITM: - res = xen_get_itm_with_offset(); - break; - case _IA64_REG_CR_IVR: - res = xen_get_ivr(); - break; - case _IA64_REG_CR_TPR: - res = xen_get_tpr(); - break; - default: - res = ia64_native_getreg_func(regnum); - break; - } - return res; -} - -/* turning on interrupts is a bit more complicated.. write to the - * memory-mapped virtual psr.i bit first (to avoid race condition), - * then if any interrupts were pending, we have to execute a hyperprivop - * to ensure the pending interrupt gets delivered; else we're done! 
*/ -static void -xen_ssm_i(void) -{ - int old = xen_get_virtual_psr_i(); - xen_set_virtual_psr_i(1); - barrier(); - if (!old && xen_get_virtual_pend()) - xen_hyper_ssm_i(); -} - -/* turning off interrupts can be paravirtualized simply by writing - * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool) */ -static void -xen_rsm_i(void) -{ - xen_set_virtual_psr_i(0); - barrier(); -} - -static unsigned long -xen_get_psr_i(void) -{ - return xen_get_virtual_psr_i() ? IA64_PSR_I : 0; -} - -static void -xen_intrin_local_irq_restore(unsigned long mask) -{ - if (mask & IA64_PSR_I) - xen_ssm_i(); - else - xen_rsm_i(); -} -#else -#define __DEFINE_FUNC(name, code) \ - extern const char xen_ ## name ## _direct_start[]; \ - extern const char xen_ ## name ## _direct_end[]; \ - asm (".align 32\n" \ - ".proc xen_" #name "\n" \ - "xen_" #name ":\n" \ - "xen_" #name "_direct_start:\n" \ - code \ - "xen_" #name "_direct_end:\n" \ - "br.cond.sptk.many b6\n" \ - ".endp xen_" #name "\n") - -#define DEFINE_VOID_FUNC0(name, code) \ - extern void \ - xen_ ## name (void); \ - __DEFINE_FUNC(name, code) - -#define DEFINE_VOID_FUNC1(name, code) \ - extern void \ - xen_ ## name (unsigned long arg); \ - __DEFINE_FUNC(name, code) - -#define DEFINE_VOID_FUNC1_VOID(name, code) \ - extern void \ - xen_ ## name (void *arg); \ - __DEFINE_FUNC(name, code) - -#define DEFINE_VOID_FUNC2(name, code) \ - extern void \ - xen_ ## name (unsigned long arg0, \ - unsigned long arg1); \ - __DEFINE_FUNC(name, code) - -#define DEFINE_FUNC0(name, code) \ - extern unsigned long \ - xen_ ## name (void); \ - __DEFINE_FUNC(name, code) - -#define DEFINE_FUNC1(name, type, code) \ - extern unsigned long \ - xen_ ## name (type arg); \ - __DEFINE_FUNC(name, code) - -#define XEN_PSR_I_ADDR_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS) - -/* - * static void xen_set_itm_with_offset(unsigned long val) - * xen_set_itm(val - XEN_MAPPEDREGS->itc_offset); - */ -/* 2 bundles */ -DEFINE_VOID_FUNC1(set_itm_with_offset, - "mov r2 = " __stringify(XSI_BASE) " + " - __stringify(XSI_ITC_OFFSET_OFS) "\n" - ";;\n" - "ld8 r3 = [r2]\n" - ";;\n" - "sub r8 = r8, r3\n" - "break " __stringify(HYPERPRIVOP_SET_ITM) "\n"); - -/* - * static unsigned long xen_get_itm_with_offset(void) - * return ia64_native_getreg(_IA64_REG_CR_ITM) + XEN_MAPPEDREGS->itc_offset; - */ -/* 2 bundles */ -DEFINE_FUNC0(get_itm_with_offset, - "mov r2 = " __stringify(XSI_BASE) " + " - __stringify(XSI_ITC_OFFSET_OFS) "\n" - ";;\n" - "ld8 r3 = [r2]\n" - "mov r8 = cr.itm\n" - ";;\n" - "add r8 = r8, r2\n"); - -/* - * static void xen_set_itc(unsigned long val) - * unsigned long mitc; - * - * WARN_ON(!irqs_disabled()); - * mitc = ia64_native_getreg(_IA64_REG_AR_ITC); - * XEN_MAPPEDREGS->itc_offset = val - mitc; - * XEN_MAPPEDREGS->itc_last = val; - */ -/* 2 bundles */ -DEFINE_VOID_FUNC1(set_itc, - "mov r2 = " __stringify(XSI_BASE) " + " - __stringify(XSI_ITC_LAST_OFS) "\n" - "mov r3 = ar.itc\n" - ";;\n" - "sub r3 = r8, r3\n" - "st8 [r2] = r8, " - __stringify(XSI_ITC_LAST_OFS) " - " - __stringify(XSI_ITC_OFFSET_OFS) "\n" - ";;\n" - "st8 [r2] = r3\n"); - -/* - * static unsigned long xen_get_itc(void) - * unsigned long res; - * unsigned long itc_offset; - * unsigned long itc_last; - * unsigned long ret_itc_last; - * - * itc_offset = XEN_MAPPEDREGS->itc_offset; - * do { - * itc_last = XEN_MAPPEDREGS->itc_last; - * res = ia64_native_getreg(_IA64_REG_AR_ITC); - * res += itc_offset; - * if (itc_last >= res) - * res = itc_last + 1; - * ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last, - * itc_last, res); - * } 
while (unlikely(ret_itc_last != itc_last)); - * return res; - */ -/* 5 bundles */ -DEFINE_FUNC0(get_itc, - "mov r2 = " __stringify(XSI_BASE) " + " - __stringify(XSI_ITC_OFFSET_OFS) "\n" - ";;\n" - "ld8 r9 = [r2], " __stringify(XSI_ITC_LAST_OFS) " - " - __stringify(XSI_ITC_OFFSET_OFS) "\n" - /* r9 = itc_offset */ - /* r2 = XSI_ITC_OFFSET */ - "888:\n" - "mov r8 = ar.itc\n" /* res = ar.itc */ - ";;\n" - "ld8 r3 = [r2]\n" /* r3 = itc_last */ - "add r8 = r8, r9\n" /* res = ar.itc + itc_offset */ - ";;\n" - "cmp.gtu p6, p0 = r3, r8\n" - ";;\n" - "(p6) add r8 = 1, r3\n" /* if (itc_last > res) itc_last + 1 */ - ";;\n" - "mov ar.ccv = r8\n" - ";;\n" - "cmpxchg8.acq r10 = [r2], r8, ar.ccv\n" - ";;\n" - "cmp.ne p6, p0 = r10, r3\n" - "(p6) hint @pause\n" - "(p6) br.cond.spnt 888b\n"); - -DEFINE_VOID_FUNC1_VOID(fc, - "break " __stringify(HYPERPRIVOP_FC) "\n"); - -/* - * psr_i_addr_addr = XEN_PSR_I_ADDR_ADDR - * masked_addr = *psr_i_addr_addr - * pending_intr_addr = masked_addr - 1 - * if (val & IA64_PSR_I) { - * masked = *masked_addr - * *masked_addr = 0:xen_set_virtual_psr_i(1) - * compiler barrier - * if (masked) { - * uint8_t pending = *pending_intr_addr; - * if (pending) - * XEN_HYPER_SSM_I - * } - * } else { - * *masked_addr = 1:xen_set_virtual_psr_i(0) - * } - */ -/* 6 bundles */ -DEFINE_VOID_FUNC1(intrin_local_irq_restore, - /* r8 = input value: 0 or IA64_PSR_I - * p6 = (flags & IA64_PSR_I) - * = if clause - * p7 = !(flags & IA64_PSR_I) - * = else clause - */ - "cmp.ne p6, p7 = r8, r0\n" - "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" - ";;\n" - /* r9 = XEN_PSR_I_ADDR */ - "ld8 r9 = [r9]\n" - ";;\n" - - /* r10 = masked previous value */ - "(p6) ld1.acq r10 = [r9]\n" - ";;\n" - - /* p8 = !masked interrupt masked previously? */ - "(p6) cmp.ne.unc p8, p0 = r10, r0\n" - - /* p7 = else clause */ - "(p7) mov r11 = 1\n" - ";;\n" - /* masked = 1 */ - "(p7) st1.rel [r9] = r11\n" - - /* p6 = if clause */ - /* masked = 0 - * r9 = masked_addr - 1 - * = pending_intr_addr - */ - "(p8) st1.rel [r9] = r0, -1\n" - ";;\n" - /* r8 = pending_intr */ - "(p8) ld1.acq r11 = [r9]\n" - ";;\n" - /* p9 = interrupt pending? */ - "(p8) cmp.ne.unc p9, p10 = r11, r0\n" - ";;\n" - "(p10) mf\n" - /* issue hypercall to trigger interrupt */ - "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n"); - -DEFINE_VOID_FUNC2(ptcga, - "break " __stringify(HYPERPRIVOP_PTC_GA) "\n"); -DEFINE_VOID_FUNC2(set_rr, - "break " __stringify(HYPERPRIVOP_SET_RR) "\n"); - -/* - * tmp = XEN_MAPPEDREGS->interrupt_mask_addr = XEN_PSR_I_ADDR_ADDR; - * tmp = *tmp - * tmp = *tmp; - * psr_i = tmp? 
0: IA64_PSR_I; - */ -/* 4 bundles */ -DEFINE_FUNC0(get_psr_i, - "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" - ";;\n" - "ld8 r9 = [r9]\n" /* r9 = XEN_PSR_I_ADDR */ - "mov r8 = 0\n" /* psr_i = 0 */ - ";;\n" - "ld1.acq r9 = [r9]\n" /* r9 = XEN_PSR_I */ - ";;\n" - "cmp.eq.unc p6, p0 = r9, r0\n" /* p6 = (XEN_PSR_I != 0) */ - ";;\n" - "(p6) mov r8 = " __stringify(1 << IA64_PSR_I_BIT) "\n"); - -DEFINE_FUNC1(thash, unsigned long, - "break " __stringify(HYPERPRIVOP_THASH) "\n"); -DEFINE_FUNC1(get_cpuid, int, - "break " __stringify(HYPERPRIVOP_GET_CPUID) "\n"); -DEFINE_FUNC1(get_pmd, int, - "break " __stringify(HYPERPRIVOP_GET_PMD) "\n"); -DEFINE_FUNC1(get_rr, unsigned long, - "break " __stringify(HYPERPRIVOP_GET_RR) "\n"); - -/* - * void xen_privop_ssm_i(void) - * - * int masked = !xen_get_virtual_psr_i(); - * // masked = *(*XEN_MAPPEDREGS->interrupt_mask_addr) - * xen_set_virtual_psr_i(1) - * // *(*XEN_MAPPEDREGS->interrupt_mask_addr) = 0 - * // compiler barrier - * if (masked) { - * uint8_t* pend_int_addr = - * (uint8_t*)(*XEN_MAPPEDREGS->interrupt_mask_addr) - 1; - * uint8_t pending = *pend_int_addr; - * if (pending) - * XEN_HYPER_SSM_I - * } - */ -/* 4 bundles */ -DEFINE_VOID_FUNC0(ssm_i, - "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" - ";;\n" - "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I_ADDR */ - ";;\n" - "ld1.acq r9 = [r8]\n" /* r9 = XEN_PSR_I */ - ";;\n" - "st1.rel [r8] = r0, -1\n" /* psr_i = 0. enable interrupt - * r8 = XEN_PSR_I_ADDR - 1 - * = pend_int_addr - */ - "cmp.eq.unc p0, p6 = r9, r0\n"/* p6 = !XEN_PSR_I - * previously interrupt - * masked? - */ - ";;\n" - "(p6) ld1.acq r8 = [r8]\n" /* r8 = xen_pend_int */ - ";;\n" - "(p6) cmp.eq.unc p6, p7 = r8, r0\n" /*interrupt pending?*/ - ";;\n" - /* issue hypercall to get interrupt */ - "(p7) break " __stringify(HYPERPRIVOP_SSM_I) "\n" - ";;\n"); - -/* - * psr_i_addr_addr = XEN_MAPPEDREGS->interrupt_mask_addr - * = XEN_PSR_I_ADDR_ADDR; - * psr_i_addr = *psr_i_addr_addr; - * *psr_i_addr = 1; - */ -/* 2 bundles */ -DEFINE_VOID_FUNC0(rsm_i, - "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" - /* r8 = XEN_PSR_I_ADDR */ - "mov r9 = 1\n" - ";;\n" - "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I */ - ";;\n" - "st1.rel [r8] = r9\n"); /* XEN_PSR_I = 1 */ - -extern void -xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1, - unsigned long val2, unsigned long val3, - unsigned long val4); -__DEFINE_FUNC(set_rr0_to_rr4, - "break " __stringify(HYPERPRIVOP_SET_RR0_TO_RR4) "\n"); - - -extern unsigned long xen_getreg(int regnum); -#define __DEFINE_GET_REG(id, privop) \ - "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ - ";;\n" \ - "cmp.eq p6, p0 = r2, r8\n" \ - ";;\n" \ - "(p6) break " __stringify(HYPERPRIVOP_GET_ ## privop) "\n" \ - "(p6) br.cond.sptk.many b6\n" \ - ";;\n" - -__DEFINE_FUNC(getreg, - __DEFINE_GET_REG(PSR, PSR) - - /* get_itc */ - "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n" - ";;\n" - "cmp.eq p6, p0 = r2, r8\n" - ";;\n" - "(p6) br.cond.spnt xen_get_itc\n" - ";;\n" - - /* get itm */ - "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n" - ";;\n" - "cmp.eq p6, p0 = r2, r8\n" - ";;\n" - "(p6) br.cond.spnt xen_get_itm_with_offset\n" - ";;\n" - - __DEFINE_GET_REG(CR_IVR, IVR) - __DEFINE_GET_REG(CR_TPR, TPR) - - /* fall back */ - "movl r2 = ia64_native_getreg_func\n" - ";;\n" - "mov b7 = r2\n" - ";;\n" - "br.cond.sptk.many b7\n"); - -extern void xen_setreg(int regnum, unsigned long val); -#define __DEFINE_SET_REG(id, privop) \ - "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ - ";;\n" \ - "cmp.eq p6, p0 = r2, r9\n" \ - ";;\n" \ - "(p6) break " 
__stringify(HYPERPRIVOP_ ## privop) "\n" \ - "(p6) br.cond.sptk.many b6\n" \ - ";;\n" - -__DEFINE_FUNC(setreg, - /* kr0 .. kr 7*/ - /* - * if (_IA64_REG_AR_KR0 <= regnum && - * regnum <= _IA64_REG_AR_KR7) { - * register __index asm ("r8") = regnum - _IA64_REG_AR_KR0 - * register __val asm ("r9") = val - * "break HYPERPRIVOP_SET_KR" - * } - */ - "mov r17 = r9\n" - "mov r2 = " __stringify(_IA64_REG_AR_KR0) "\n" - ";;\n" - "cmp.ge p6, p0 = r9, r2\n" - "sub r17 = r17, r2\n" - ";;\n" - "(p6) cmp.ge.unc p7, p0 = " - __stringify(_IA64_REG_AR_KR7) " - " __stringify(_IA64_REG_AR_KR0) - ", r17\n" - ";;\n" - "(p7) mov r9 = r8\n" - ";;\n" - "(p7) mov r8 = r17\n" - "(p7) break " __stringify(HYPERPRIVOP_SET_KR) "\n" - - /* set itm */ - "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n" - ";;\n" - "cmp.eq p6, p0 = r2, r8\n" - ";;\n" - "(p6) br.cond.spnt xen_set_itm_with_offset\n" - - /* set itc */ - "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n" - ";;\n" - "cmp.eq p6, p0 = r2, r8\n" - ";;\n" - "(p6) br.cond.spnt xen_set_itc\n" - - __DEFINE_SET_REG(CR_TPR, SET_TPR) - __DEFINE_SET_REG(CR_EOI, EOI) - - /* fall back */ - "movl r2 = ia64_native_setreg_func\n" - ";;\n" - "mov b7 = r2\n" - ";;\n" - "br.cond.sptk.many b7\n"); -#endif - -static const struct pv_cpu_ops xen_cpu_ops __initconst = { - .fc = xen_fc, - .thash = xen_thash, - .get_cpuid = xen_get_cpuid, - .get_pmd = xen_get_pmd, - .getreg = xen_getreg, - .setreg = xen_setreg, - .ptcga = xen_ptcga, - .get_rr = xen_get_rr, - .set_rr = xen_set_rr, - .set_rr0_to_rr4 = xen_set_rr0_to_rr4, - .ssm_i = xen_ssm_i, - .rsm_i = xen_rsm_i, - .get_psr_i = xen_get_psr_i, - .intrin_local_irq_restore - = xen_intrin_local_irq_restore, -}; - -/****************************************************************************** - * replacement of hand written assembly codes. - */ - -extern char xen_switch_to; -extern char xen_leave_syscall; -extern char xen_work_processed_syscall; -extern char xen_leave_kernel; - -const struct pv_cpu_asm_switch xen_cpu_asm_switch = { - .switch_to = (unsigned long)&xen_switch_to, - .leave_syscall = (unsigned long)&xen_leave_syscall, - .work_processed_syscall = (unsigned long)&xen_work_processed_syscall, - .leave_kernel = (unsigned long)&xen_leave_kernel, -}; - -/*************************************************************************** - * pv_iosapic_ops - * iosapic read/write hooks. 
- */ -static void -xen_pcat_compat_init(void) -{ - /* nothing */ -} - -static struct irq_chip* -xen_iosapic_get_irq_chip(unsigned long trigger) -{ - return NULL; -} - -static unsigned int -xen_iosapic_read(char __iomem *iosapic, unsigned int reg) -{ - struct physdev_apic apic_op; - int ret; - - apic_op.apic_physbase = (unsigned long)iosapic - - __IA64_UNCACHED_OFFSET; - apic_op.reg = reg; - ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op); - if (ret) - return ret; - return apic_op.value; -} - -static void -xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) -{ - struct physdev_apic apic_op; - - apic_op.apic_physbase = (unsigned long)iosapic - - __IA64_UNCACHED_OFFSET; - apic_op.reg = reg; - apic_op.value = val; - HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op); -} - -static struct pv_iosapic_ops xen_iosapic_ops __initdata = { - .pcat_compat_init = xen_pcat_compat_init, - .__get_irq_chip = xen_iosapic_get_irq_chip, - - .__read = xen_iosapic_read, - .__write = xen_iosapic_write, -}; - -/*************************************************************************** - * pv_ops initialization - */ - -void __init -xen_setup_pv_ops(void) -{ - xen_info_init(); - pv_info = xen_info; - pv_init_ops = xen_init_ops; - pv_fsys_data = xen_fsys_data; - pv_patchdata = xen_patchdata; - pv_cpu_ops = xen_cpu_ops; - pv_iosapic_ops = xen_iosapic_ops; - pv_irq_ops = xen_irq_ops; - pv_time_ops = xen_time_ops; - - paravirt_cpu_asm_init(&xen_cpu_asm_switch); -} - -#ifdef ASM_SUPPORTED -/*************************************************************************** - * binary pacthing - * pv_init_ops.patch_bundle - */ - -#define DEFINE_FUNC_GETREG(name, privop) \ - DEFINE_FUNC0(get_ ## name, \ - "break "__stringify(HYPERPRIVOP_GET_ ## privop) "\n") - -DEFINE_FUNC_GETREG(psr, PSR); -DEFINE_FUNC_GETREG(eflag, EFLAG); -DEFINE_FUNC_GETREG(ivr, IVR); -DEFINE_FUNC_GETREG(tpr, TPR); - -#define DEFINE_FUNC_SET_KR(n) \ - DEFINE_VOID_FUNC0(set_kr ## n, \ - ";;\n" \ - "mov r9 = r8\n" \ - "mov r8 = " #n "\n" \ - "break " __stringify(HYPERPRIVOP_SET_KR) "\n") - -DEFINE_FUNC_SET_KR(0); -DEFINE_FUNC_SET_KR(1); -DEFINE_FUNC_SET_KR(2); -DEFINE_FUNC_SET_KR(3); -DEFINE_FUNC_SET_KR(4); -DEFINE_FUNC_SET_KR(5); -DEFINE_FUNC_SET_KR(6); -DEFINE_FUNC_SET_KR(7); - -#define __DEFINE_FUNC_SETREG(name, privop) \ - DEFINE_VOID_FUNC0(name, \ - "break "__stringify(HYPERPRIVOP_ ## privop) "\n") - -#define DEFINE_FUNC_SETREG(name, privop) \ - __DEFINE_FUNC_SETREG(set_ ## name, SET_ ## privop) - -DEFINE_FUNC_SETREG(eflag, EFLAG); -DEFINE_FUNC_SETREG(tpr, TPR); -__DEFINE_FUNC_SETREG(eoi, EOI); - -extern const char xen_check_events[]; -extern const char __xen_intrin_local_irq_restore_direct_start[]; -extern const char __xen_intrin_local_irq_restore_direct_end[]; -extern const unsigned long __xen_intrin_local_irq_restore_direct_reloc; - -asm ( - ".align 32\n" - ".proc xen_check_events\n" - "xen_check_events:\n" - /* masked = 0 - * r9 = masked_addr - 1 - * = pending_intr_addr - */ - "st1.rel [r9] = r0, -1\n" - ";;\n" - /* r8 = pending_intr */ - "ld1.acq r11 = [r9]\n" - ";;\n" - /* p9 = interrupt pending? 
*/ - "cmp.ne p9, p10 = r11, r0\n" - ";;\n" - "(p10) mf\n" - /* issue hypercall to trigger interrupt */ - "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n" - "br.cond.sptk.many b6\n" - ".endp xen_check_events\n" - "\n" - ".align 32\n" - ".proc __xen_intrin_local_irq_restore_direct\n" - "__xen_intrin_local_irq_restore_direct:\n" - "__xen_intrin_local_irq_restore_direct_start:\n" - "1:\n" - "{\n" - "cmp.ne p6, p7 = r8, r0\n" - "mov r17 = ip\n" /* get ip to calc return address */ - "mov r9 = "__stringify(XEN_PSR_I_ADDR_ADDR) "\n" - ";;\n" - "}\n" - "{\n" - /* r9 = XEN_PSR_I_ADDR */ - "ld8 r9 = [r9]\n" - ";;\n" - /* r10 = masked previous value */ - "(p6) ld1.acq r10 = [r9]\n" - "adds r17 = 1f - 1b, r17\n" /* calculate return address */ - ";;\n" - "}\n" - "{\n" - /* p8 = !masked interrupt masked previously? */ - "(p6) cmp.ne.unc p8, p0 = r10, r0\n" - "\n" - /* p7 = else clause */ - "(p7) mov r11 = 1\n" - ";;\n" - "(p8) mov b6 = r17\n" /* set return address */ - "}\n" - "{\n" - /* masked = 1 */ - "(p7) st1.rel [r9] = r11\n" - "\n" - "[99:]\n" - "(p8) brl.cond.dptk.few xen_check_events\n" - "}\n" - /* pv calling stub is 5 bundles. fill nop to adjust return address */ - "{\n" - "nop 0\n" - "nop 0\n" - "nop 0\n" - "}\n" - "1:\n" - "__xen_intrin_local_irq_restore_direct_end:\n" - ".endp __xen_intrin_local_irq_restore_direct\n" - "\n" - ".align 8\n" - "__xen_intrin_local_irq_restore_direct_reloc:\n" - "data8 99b\n" -); - -static struct paravirt_patch_bundle_elem xen_patch_bundle_elems[] -__initdata_or_module = -{ -#define XEN_PATCH_BUNDLE_ELEM(name, type) \ - { \ - (void*)xen_ ## name ## _direct_start, \ - (void*)xen_ ## name ## _direct_end, \ - PARAVIRT_PATCH_TYPE_ ## type, \ - } - - XEN_PATCH_BUNDLE_ELEM(fc, FC), - XEN_PATCH_BUNDLE_ELEM(thash, THASH), - XEN_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID), - XEN_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD), - XEN_PATCH_BUNDLE_ELEM(ptcga, PTCGA), - XEN_PATCH_BUNDLE_ELEM(get_rr, GET_RR), - XEN_PATCH_BUNDLE_ELEM(set_rr, SET_RR), - XEN_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4), - XEN_PATCH_BUNDLE_ELEM(ssm_i, SSM_I), - XEN_PATCH_BUNDLE_ELEM(rsm_i, RSM_I), - XEN_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I), - { - (void*)__xen_intrin_local_irq_restore_direct_start, - (void*)__xen_intrin_local_irq_restore_direct_end, - PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE, - }, - -#define XEN_PATCH_BUNDLE_ELEM_GETREG(name, reg) \ - { \ - xen_get_ ## name ## _direct_start, \ - xen_get_ ## name ## _direct_end, \ - PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \ - } - - XEN_PATCH_BUNDLE_ELEM_GETREG(psr, PSR), - XEN_PATCH_BUNDLE_ELEM_GETREG(eflag, AR_EFLAG), - - XEN_PATCH_BUNDLE_ELEM_GETREG(ivr, CR_IVR), - XEN_PATCH_BUNDLE_ELEM_GETREG(tpr, CR_TPR), - - XEN_PATCH_BUNDLE_ELEM_GETREG(itc, AR_ITC), - XEN_PATCH_BUNDLE_ELEM_GETREG(itm_with_offset, CR_ITM), - - -#define __XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ - { \ - xen_ ## name ## _direct_start, \ - xen_ ## name ## _direct_end, \ - PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \ - } - -#define XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ - __XEN_PATCH_BUNDLE_ELEM_SETREG(set_ ## name, reg) - - XEN_PATCH_BUNDLE_ELEM_SETREG(kr0, AR_KR0), - XEN_PATCH_BUNDLE_ELEM_SETREG(kr1, AR_KR1), - XEN_PATCH_BUNDLE_ELEM_SETREG(kr2, AR_KR2), - XEN_PATCH_BUNDLE_ELEM_SETREG(kr3, AR_KR3), - XEN_PATCH_BUNDLE_ELEM_SETREG(kr4, AR_KR4), - XEN_PATCH_BUNDLE_ELEM_SETREG(kr5, AR_KR5), - XEN_PATCH_BUNDLE_ELEM_SETREG(kr6, AR_KR6), - XEN_PATCH_BUNDLE_ELEM_SETREG(kr7, AR_KR7), - - XEN_PATCH_BUNDLE_ELEM_SETREG(eflag, AR_EFLAG), - XEN_PATCH_BUNDLE_ELEM_SETREG(tpr, 
CR_TPR), - __XEN_PATCH_BUNDLE_ELEM_SETREG(eoi, CR_EOI), - - XEN_PATCH_BUNDLE_ELEM_SETREG(itc, AR_ITC), - XEN_PATCH_BUNDLE_ELEM_SETREG(itm_with_offset, CR_ITM), -}; - -static unsigned long __init_or_module -xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type) -{ - const unsigned long nelems = sizeof(xen_patch_bundle_elems) / - sizeof(xen_patch_bundle_elems[0]); - unsigned long used; - const struct paravirt_patch_bundle_elem *found; - - used = __paravirt_patch_apply_bundle(sbundle, ebundle, type, - xen_patch_bundle_elems, nelems, - &found); - - if (found == NULL) - /* fallback */ - return ia64_native_patch_bundle(sbundle, ebundle, type); - if (used == 0) - return used; - - /* relocation */ - switch (type) { - case PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE: { - unsigned long reloc = - __xen_intrin_local_irq_restore_direct_reloc; - unsigned long reloc_offset = reloc - (unsigned long) - __xen_intrin_local_irq_restore_direct_start; - unsigned long tag = (unsigned long)sbundle + reloc_offset; - paravirt_patch_reloc_brl(tag, xen_check_events); - break; - } - default: - /* nothing */ - break; - } - return used; -} -#endif /* ASM_SUPPOTED */ - -const struct paravirt_patch_branch_target xen_branch_target[] -__initconst = { -#define PARAVIRT_BR_TARGET(name, type) \ - { \ - &xen_ ## name, \ - PARAVIRT_PATCH_TYPE_BR_ ## type, \ - } - PARAVIRT_BR_TARGET(switch_to, SWITCH_TO), - PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL), - PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL), - PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL), -}; - -static void __init -xen_patch_branch(unsigned long tag, unsigned long type) -{ - __paravirt_patch_apply_branch(tag, type, xen_branch_target, - ARRAY_SIZE(xen_branch_target)); -} diff --git a/arch/ia64/xen/xencomm.c b/arch/ia64/xen/xencomm.c deleted file mode 100644 index 73d903c..0000000 --- a/arch/ia64/xen/xencomm.c +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include <linux/mm.h> -#include <linux/err.h> - -static unsigned long kernel_virtual_offset; -static int is_xencomm_initialized; - -/* for xen early printk. It uses console io hypercall which uses xencomm. - * However early printk may use it before xencomm initialization. - */ -int -xencomm_is_initialized(void) -{ - return is_xencomm_initialized; -} - -void -xencomm_initialize(void) -{ - kernel_virtual_offset = KERNEL_START - ia64_tpa(KERNEL_START); - is_xencomm_initialized = 1; -} - -/* Translate virtual address to physical address. 
*/ -unsigned long -xencomm_vtop(unsigned long vaddr) -{ - struct page *page; - struct vm_area_struct *vma; - - if (vaddr == 0) - return 0UL; - - if (REGION_NUMBER(vaddr) == 5) { - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *ptep; - - /* On ia64, TASK_SIZE refers to current. It is not initialized - during boot. - Furthermore the kernel is relocatable and __pa() doesn't - work on addresses. */ - if (vaddr >= KERNEL_START - && vaddr < (KERNEL_START + KERNEL_TR_PAGE_SIZE)) - return vaddr - kernel_virtual_offset; - - /* In kernel area -- virtually mapped. */ - pgd = pgd_offset_k(vaddr); - if (pgd_none(*pgd) || pgd_bad(*pgd)) - return ~0UL; - - pud = pud_offset(pgd, vaddr); - if (pud_none(*pud) || pud_bad(*pud)) - return ~0UL; - - pmd = pmd_offset(pud, vaddr); - if (pmd_none(*pmd) || pmd_bad(*pmd)) - return ~0UL; - - ptep = pte_offset_kernel(pmd, vaddr); - if (!ptep) - return ~0UL; - - return (pte_val(*ptep) & _PFN_MASK) | (vaddr & ~PAGE_MASK); - } - - if (vaddr > TASK_SIZE) { - /* percpu variables */ - if (REGION_NUMBER(vaddr) == 7 && - REGION_OFFSET(vaddr) >= (1ULL << IA64_MAX_PHYS_BITS)) - ia64_tpa(vaddr); - - /* kernel address */ - return __pa(vaddr); - } - - /* XXX double-check (lack of) locking */ - vma = find_extend_vma(current->mm, vaddr); - if (!vma) - return ~0UL; - - /* We assume the page is modified. */ - page = follow_page(vma, vaddr, FOLL_WRITE | FOLL_TOUCH); - if (IS_ERR_OR_NULL(page)) - return ~0UL; - - return (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK); -} diff --git a/arch/ia64/xen/xenivt.S b/arch/ia64/xen/xenivt.S deleted file mode 100644 index 3e71d50..0000000 --- a/arch/ia64/xen/xenivt.S +++ /dev/null @@ -1,52 +0,0 @@ -/* - * arch/ia64/xen/ivt.S - * - * Copyright (C) 2005 Hewlett-Packard Co - * Dan Magenheimer <dan.magenheimer@hp.com> - * - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> - * VA Linux Systems Japan K.K. - * pv_ops. - */ - -#include <asm/asmmacro.h> -#include <asm/kregs.h> -#include <asm/pgtable.h> - -#include "../kernel/minstate.h" - - .section .text,"ax" -GLOBAL_ENTRY(xen_event_callback) - mov r31=pr // prepare to save predicates - ;; - SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3 - ;; - movl r3=XSI_PSR_IC - mov r14=1 - ;; - st4 [r3]=r14 - ;; - adds r3=8,r2 // set up second base pointer for SAVE_REST - srlz.i // ensure everybody knows psr.ic is back on - ;; - SAVE_REST - ;; -1: - alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group - add out0=16,sp // pass pointer to pt_regs as first arg - ;; - br.call.sptk.many b0=xen_evtchn_do_upcall - ;; - movl r20=XSI_PSR_I_ADDR - ;; - ld8 r20=[r20] - ;; - adds r20=-1,r20 // vcpu_info->evtchn_upcall_pending - ;; - ld1 r20=[r20] - ;; - cmp.ne p6,p0=r20,r0 // if there are pending events, - (p6) br.spnt.few 1b // call evtchn_do_upcall again. 
- br.sptk.many xen_leave_kernel // we know ia64_leave_kernel is - // paravirtualized as xen_leave_kernel -END(xen_event_callback) diff --git a/arch/ia64/xen/xensetup.S b/arch/ia64/xen/xensetup.S deleted file mode 100644 index e29519e..0000000 --- a/arch/ia64/xen/xensetup.S +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Support routines for Xen - * - * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com> - */ - -#include <asm/processor.h> -#include <asm/asmmacro.h> -#include <asm/pgtable.h> -#include <asm/paravirt.h> -#include <asm/xen/privop.h> -#include <linux/elfnote.h> -#include <linux/init.h> -#include <xen/interface/elfnote.h> - - .section .data..read_mostly - .align 8 - .global xen_domain_type -xen_domain_type: - data4 XEN_NATIVE_ASM - .previous - - __INIT -ENTRY(startup_xen) - // Calculate load offset. - // The constant, LOAD_OFFSET, can't be used because the boot - // loader doesn't always load to the LMA specified by the vmlinux.lds. - mov r9=ip // must be the first instruction to make sure - // that r9 = the physical address of startup_xen. - // Usually r9 = startup_xen - LOAD_OFFSET - movl r8=startup_xen - ;; - sub r9=r9,r8 // Usually r9 = -LOAD_OFFSET. - - mov r10=PARAVIRT_HYPERVISOR_TYPE_XEN - movl r11=_start - ;; - add r11=r11,r9 - movl r8=hypervisor_type - ;; - add r8=r8,r9 - mov b0=r11 - ;; - st8 [r8]=r10 - br.cond.sptk.many b0 - ;; -END(startup_xen) - - ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux") - ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6") - ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0") - ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, data8.ua startup_xen - LOAD_OFFSET) - -#define isBP p3 // are we the Bootstrap Processor? - -GLOBAL_ENTRY(xen_setup_hook) - mov r8=XEN_PV_DOMAIN_ASM -(isBP) movl r9=xen_domain_type;; -(isBP) st4 [r9]=r8 - movl r10=xen_ivt;; - - mov cr.iva=r10 - - /* Set xsi base. */ -#define FW_HYPERCALL_SET_SHARED_INFO_VA 0x600 -(isBP) mov r2=FW_HYPERCALL_SET_SHARED_INFO_VA -(isBP) movl r28=XSI_BASE;; -(isBP) break 0x1000;; - - /* setup pv_ops */ -(isBP) mov r4=rp - ;; -(isBP) br.call.sptk.many rp=xen_setup_pv_ops - ;; -(isBP) mov rp=r4 - ;; - - br.ret.sptk.many rp - ;; -END(xen_setup_hook) diff --git a/arch/m32r/include/asm/barrier.h b/arch/m32r/include/asm/barrier.h index 6976621..1a40265 100644 --- a/arch/m32r/include/asm/barrier.h +++ b/arch/m32r/include/asm/barrier.h @@ -11,84 +11,6 @@ #define nop() __asm__ __volatile__ ("nop" : : ) -/* - * Memory barrier. - * - * mb() prevents loads and stores being reordered across this point. - * rmb() prevents loads being reordered across this point. - * wmb() prevents stores being reordered across this point. - */ -#define mb() barrier() -#define rmb() mb() -#define wmb() mb() - -/** - * read_barrier_depends - Flush all pending reads that subsequents reads - * depend on. - * - * No data-dependent reads from memory-like regions are ever reordered - * over this barrier. All reads preceding this primitive are guaranteed - * to access memory (but not necessarily other CPUs' caches) before any - * reads following this primitive that depend on the data return by - * any of the preceding reads. This primitive is much lighter weight than - * rmb() on most CPUs, and is never heavier weight than is - * rmb(). - * - * These ordering constraints are respected by both the local CPU - * and the compiler. - * - * Ordering is not guaranteed by anything other than these primitives, - * not even by data dependencies. See the documentation for - * memory_barrier() for examples and URLs to more information. 
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;	y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends()	do { } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while (0)
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
+#include <asm-generic/barrier.h>
 
 #endif /* _ASM_M32R_BARRIER_H */
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 75f25a8..dbdd223 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -87,6 +87,30 @@ config MMU_SUN3
 	bool
 	depends on MMU && !MMU_MOTOROLA && !MMU_COLDFIRE
 
+config KEXEC
+	bool "kexec system call"
+	depends on M68KCLASSIC
+	help
+	  kexec is a system call that implements the ability to shutdown your
+	  current kernel, and to start another kernel. It is like a reboot
+	  but it is independent of the system firmware. And like a reboot
+	  you can start any kernel with it, not just Linux.
+
+	  The name comes from the similarity to the exec system call.
+
+	  It is an ongoing process to be certain the hardware in a machine
+	  is properly shutdown, so do not be surprised if this code does not
+	  initially work for you. As of this writing the exact hardware
+	  interface is strongly in flux, so no good recommendation can be
+	  made.
+
+config BOOTINFO_PROC
+	bool "Export bootinfo in procfs"
+	depends on KEXEC && M68KCLASSIC
+	help
+	  Say Y to export the bootinfo used to boot the kernel in a
+	  "bootinfo" file in procfs. This is useful with kexec.
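
The BOOTINFO_PROC entry above only makes the boot-time records readable from procfs; what is done with them is left to user space, typically a kexec loader. As a rough sketch, and assuming the node appears as /proc/bootinfo and that each record starts with a big-endian 16-bit tag followed by a big-endian 16-bit record size (mirroring the be16_to_cpu()/be32_to_cpup() conversions the bootinfo parsers in this series use), a minimal reader could look like this:

/*
 * Hypothetical user-space reader for the procfs bootinfo node.
 * The "/proc/bootinfo" path and the record header layout below are
 * assumptions for illustration, not something this patch defines.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct bi_record_hdr {
	uint16_t tag;	/* big-endian record tag */
	uint16_t size;	/* big-endian size of the whole record in bytes */
};

int main(void)
{
	unsigned char buf[8192];
	struct bi_record_hdr hdr;
	size_t len, off = 0;
	FILE *f = fopen("/proc/bootinfo", "rb");	/* assumed path */

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	len = fread(buf, 1, sizeof(buf), f);
	fclose(f);

	while (off + sizeof(hdr) <= len) {
		memcpy(&hdr, buf + off, sizeof(hdr));

		uint16_t tag = be16toh(hdr.tag);	/* records are stored big-endian */
		uint16_t size = be16toh(hdr.size);

		if (tag == 0 || size < sizeof(hdr))	/* tag 0 ends the record list */
			break;
		printf("record tag 0x%04x, %u bytes\n", tag, size);
		off += size;
	}
	return EXIT_SUCCESS;
}

A real kexec tool would go further and hand the (possibly edited) records to the kernel being loaded, which is why the option depends on KEXEC.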
+ menu "Platform setup" source arch/m68k/Kconfig.cpu diff --git a/arch/m68k/amiga/chipram.c b/arch/m68k/amiga/chipram.c index 99449fb..ba03cec 100644 --- a/arch/m68k/amiga/chipram.c +++ b/arch/m68k/amiga/chipram.c @@ -87,7 +87,7 @@ void *amiga_chip_alloc_res(unsigned long size, struct resource *res) atomic_sub(size, &chipavail); pr_debug("amiga_chip_alloc_res: returning %pR\n", res); - return (void *)ZTWO_VADDR(res->start); + return ZTWO_VADDR(res->start); } void amiga_chip_free(void *ptr) diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c index b819390..9625b71 100644 --- a/arch/m68k/amiga/config.c +++ b/arch/m68k/amiga/config.c @@ -28,6 +28,8 @@ #include <linux/keyboard.h> #include <asm/bootinfo.h> +#include <asm/bootinfo-amiga.h> +#include <asm/byteorder.h> #include <asm/setup.h> #include <asm/pgtable.h> #include <asm/amigahw.h> @@ -140,46 +142,46 @@ static struct resource ram_resource[NUM_MEMINFO]; * Parse an Amiga-specific record in the bootinfo */ -int amiga_parse_bootinfo(const struct bi_record *record) +int __init amiga_parse_bootinfo(const struct bi_record *record) { int unknown = 0; - const unsigned long *data = record->data; + const void *data = record->data; - switch (record->tag) { + switch (be16_to_cpu(record->tag)) { case BI_AMIGA_MODEL: - amiga_model = *data; + amiga_model = be32_to_cpup(data); break; case BI_AMIGA_ECLOCK: - amiga_eclock = *data; + amiga_eclock = be32_to_cpup(data); break; case BI_AMIGA_CHIPSET: - amiga_chipset = *data; + amiga_chipset = be32_to_cpup(data); break; case BI_AMIGA_CHIP_SIZE: - amiga_chip_size = *(const int *)data; + amiga_chip_size = be32_to_cpup(data); break; case BI_AMIGA_VBLANK: - amiga_vblank = *(const unsigned char *)data; + amiga_vblank = *(const __u8 *)data; break; case BI_AMIGA_PSFREQ: - amiga_psfreq = *(const unsigned char *)data; + amiga_psfreq = *(const __u8 *)data; break; case BI_AMIGA_AUTOCON: #ifdef CONFIG_ZORRO if (zorro_num_autocon < ZORRO_NUM_AUTO) { - const struct ConfigDev *cd = (struct ConfigDev *)data; - struct zorro_dev *dev = &zorro_autocon[zorro_num_autocon++]; + const struct ConfigDev *cd = data; + struct zorro_dev_init *dev = &zorro_autocon_init[zorro_num_autocon++]; dev->rom = cd->cd_Rom; - dev->slotaddr = cd->cd_SlotAddr; - dev->slotsize = cd->cd_SlotSize; - dev->resource.start = (unsigned long)cd->cd_BoardAddr; - dev->resource.end = dev->resource.start + cd->cd_BoardSize - 1; + dev->slotaddr = be16_to_cpu(cd->cd_SlotAddr); + dev->slotsize = be16_to_cpu(cd->cd_SlotSize); + dev->boardaddr = be32_to_cpu(cd->cd_BoardAddr); + dev->boardsize = be32_to_cpu(cd->cd_BoardSize); } else printk("amiga_parse_bootinfo: too many AutoConfig devices\n"); #endif /* CONFIG_ZORRO */ @@ -358,6 +360,14 @@ static void __init amiga_identify(void) #undef AMIGAHW_ANNOUNCE } + +static unsigned long amiga_random_get_entropy(void) +{ + /* VPOSR/VHPOSR provide at least 17 bits of data changing at 1.79 MHz */ + return *(unsigned long *)&amiga_custom.vposr; +} + + /* * Setup the Amiga configuration info */ @@ -395,6 +405,8 @@ void __init config_amiga(void) mach_heartbeat = amiga_heartbeat; #endif + mach_random_get_entropy = amiga_random_get_entropy; + /* Fill in the clock value (based on the 700 kHz E-Clock) */ amiga_colorclock = 5*amiga_eclock; /* 3.5 MHz */ @@ -608,6 +620,8 @@ static void amiga_mem_console_write(struct console *co, const char *s, static int __init amiga_savekmsg_setup(char *arg) { + bool registered; + if (!MACH_IS_AMIGA || strcmp(arg, "mem")) return 0; @@ -618,14 +632,16 @@ static int __init 
amiga_savekmsg_setup(char *arg) /* Just steal the block, the chipram allocator isn't functional yet */ amiga_chip_size -= SAVEKMSG_MAXMEM; - savekmsg = (void *)ZTWO_VADDR(CHIP_PHYSADDR + amiga_chip_size); + savekmsg = ZTWO_VADDR(CHIP_PHYSADDR + amiga_chip_size); savekmsg->magic1 = SAVEKMSG_MAGIC1; savekmsg->magic2 = SAVEKMSG_MAGIC2; savekmsg->magicptr = ZTWO_PADDR(savekmsg); savekmsg->size = 0; + registered = !!amiga_console_driver.write; amiga_console_driver.write = amiga_mem_console_write; - register_console(&amiga_console_driver); + if (!registered) + register_console(&amiga_console_driver); return 0; } @@ -707,11 +723,16 @@ void amiga_serial_gets(struct console *co, char *s, int len) static int __init amiga_debug_setup(char *arg) { - if (MACH_IS_AMIGA && !strcmp(arg, "ser")) { - /* no initialization required (?) */ - amiga_console_driver.write = amiga_serial_console_write; + bool registered; + + if (!MACH_IS_AMIGA || strcmp(arg, "ser")) + return 0; + + /* no initialization required (?) */ + registered = !!amiga_console_driver.write; + amiga_console_driver.write = amiga_serial_console_write; + if (!registered) register_console(&amiga_console_driver); - } return 0; } diff --git a/arch/m68k/amiga/platform.c b/arch/m68k/amiga/platform.c index dacd9f9..d34029d 100644 --- a/arch/m68k/amiga/platform.c +++ b/arch/m68k/amiga/platform.c @@ -13,6 +13,7 @@ #include <asm/amigahw.h> #include <asm/amigayle.h> +#include <asm/byteorder.h> #ifdef CONFIG_ZORRO @@ -66,10 +67,12 @@ static int __init z_dev_present(zorro_id id) { unsigned int i; - for (i = 0; i < zorro_num_autocon; i++) - if (zorro_autocon[i].rom.er_Manufacturer == ZORRO_MANUF(id) && - zorro_autocon[i].rom.er_Product == ZORRO_PROD(id)) + for (i = 0; i < zorro_num_autocon; i++) { + const struct ExpansionRom *rom = &zorro_autocon_init[i].rom; + if (be16_to_cpu(rom->er_Manufacturer) == ZORRO_MANUF(id) && + rom->er_Product == ZORRO_PROD(id)) return 1; + } return 0; } diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c index 3ea56b9..9268c0f9 100644 --- a/arch/m68k/apollo/config.c +++ b/arch/m68k/apollo/config.c @@ -1,3 +1,4 @@ +#include <linux/init.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> @@ -9,6 +10,8 @@ #include <asm/setup.h> #include <asm/bootinfo.h> +#include <asm/bootinfo-apollo.h> +#include <asm/byteorder.h> #include <asm/pgtable.h> #include <asm/apollohw.h> #include <asm/irq.h> @@ -43,26 +46,25 @@ static const char *apollo_models[] = { [APOLLO_DN4500-APOLLO_DN3000] = "DN4500 (Roadrunner)" }; -int apollo_parse_bootinfo(const struct bi_record *record) { - +int __init apollo_parse_bootinfo(const struct bi_record *record) +{ int unknown = 0; - const unsigned long *data = record->data; + const void *data = record->data; - switch(record->tag) { - case BI_APOLLO_MODEL: - apollo_model=*data; - break; + switch (be16_to_cpu(record->tag)) { + case BI_APOLLO_MODEL: + apollo_model = be32_to_cpup(data); + break; - default: - unknown=1; + default: + unknown=1; } return unknown; } -void dn_setup_model(void) { - - +static void __init dn_setup_model(void) +{ printk("Apollo hardware found: "); printk("[%s]\n", apollo_models[apollo_model - APOLLO_DN3000]); diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c index 20cde4e..3e73a63 100644 --- a/arch/m68k/atari/ataints.c +++ b/arch/m68k/atari/ataints.c @@ -333,6 +333,9 @@ void __init atari_init_IRQ(void) m68k_setup_irq_controller(&atari_mfptimer_chip, handle_simple_irq, IRQ_MFP_TIMER1, 8); + irq_set_status_flags(IRQ_MFP_TIMER1, IRQ_IS_POLLED); + 
irq_set_status_flags(IRQ_MFP_TIMER2, IRQ_IS_POLLED); + /* prepare timer D data for use as poll interrupt */ /* set Timer D data Register - needs to be > 0 */ st_mfp.tim_dt_d = 254; /* < 100 Hz */ diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c index fb2d0bd..01a6216 100644 --- a/arch/m68k/atari/config.c +++ b/arch/m68k/atari/config.c @@ -37,6 +37,8 @@ #include <linux/module.h> #include <asm/bootinfo.h> +#include <asm/bootinfo-atari.h> +#include <asm/byteorder.h> #include <asm/setup.h> #include <asm/atarihw.h> #include <asm/atariints.h> @@ -129,14 +131,14 @@ static int __init scc_test(volatile char *ctla) int __init atari_parse_bootinfo(const struct bi_record *record) { int unknown = 0; - const u_long *data = record->data; + const void *data = record->data; - switch (record->tag) { + switch (be16_to_cpu(record->tag)) { case BI_ATARI_MCH_COOKIE: - atari_mch_cookie = *data; + atari_mch_cookie = be32_to_cpup(data); break; case BI_ATARI_MCH_TYPE: - atari_mch_type = *data; + atari_mch_type = be32_to_cpup(data); break; default: unknown = 1; diff --git a/arch/m68k/atari/debug.c b/arch/m68k/atari/debug.c index a547ba9..03cb5e0 100644 --- a/arch/m68k/atari/debug.c +++ b/arch/m68k/atari/debug.c @@ -287,6 +287,8 @@ static void __init atari_init_midi_port(int cflag) static int __init atari_debug_setup(char *arg) { + bool registered; + if (!MACH_IS_ATARI) return 0; @@ -294,6 +296,7 @@ static int __init atari_debug_setup(char *arg) /* defaults to ser2 for a Falcon and ser1 otherwise */ arg = MACH_IS_FALCON ? "ser2" : "ser1"; + registered = !!atari_console_driver.write; if (!strcmp(arg, "ser1")) { /* ST-MFP Modem1 serial port */ atari_init_mfp_port(B9600|CS8); @@ -317,7 +320,7 @@ static int __init atari_debug_setup(char *arg) sound_ym.wd_data = sound_ym.rd_data_reg_sel | 0x20; /* strobe H */ atari_console_driver.write = atari_par_console_write; } - if (atari_console_driver.write) + if (atari_console_driver.write && !registered) register_console(&atari_console_driver); return 0; diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c index 8943aa4..478623d 100644 --- a/arch/m68k/bvme6000/config.c +++ b/arch/m68k/bvme6000/config.c @@ -28,6 +28,8 @@ #include <linux/bcd.h> #include <asm/bootinfo.h> +#include <asm/bootinfo-vme.h> +#include <asm/byteorder.h> #include <asm/pgtable.h> #include <asm/setup.h> #include <asm/irq.h> @@ -50,9 +52,9 @@ void bvme6000_set_vectors (void); static irq_handler_t tick_handler; -int bvme6000_parse_bootinfo(const struct bi_record *bi) +int __init bvme6000_parse_bootinfo(const struct bi_record *bi) { - if (bi->tag == BI_VME_TYPE) + if (be16_to_cpu(bi->tag) == BI_VME_TYPE) return 0; else return 1; diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 19325e1..559ff3a 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -52,7 +52,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -63,11 +62,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -85,6 +84,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m 
+CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -98,6 +108,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -130,6 +141,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -144,11 +156,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -156,6 +175,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -170,6 +190,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -183,11 +206,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -195,10 +220,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -216,6 +244,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_IDE=y CONFIG_IDE_GD_ATAPI=y CONFIG_BLK_DEV_IDECD=y @@ -262,6 +291,7 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -271,10 +301,10 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_3COM is not set CONFIG_A2065=y CONFIG_ARIADNE=y +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_CIRRUS is not set -# CONFIG_NET_VENDOR_FUJITSU is not set # CONFIG_NET_VENDOR_HP is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set @@ -285,6 
+315,7 @@ CONFIG_ZORRO8390=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -311,7 +342,6 @@ CONFIG_JOYSTICK_AMIGA=m CONFIG_INPUT_MISC=y CONFIG_INPUT_M68K_BEEP=m # CONFIG_SERIO is not set -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set CONFIG_PRINTER=m @@ -345,10 +375,6 @@ CONFIG_HEARTBEAT=y CONFIG_PROC_HARDWARE=y CONFIG_AMIGA_BUILTIN_SERIAL=y CONFIG_SERIAL_CONSOLE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -385,7 +411,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -444,10 +470,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -480,6 +506,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index 14dc6cc..cb1f55d 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -50,7 +50,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -61,11 +60,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -83,6 +82,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -96,6 +106,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -128,6 +139,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -142,11 +154,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m 
+CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -154,6 +173,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -168,6 +188,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -181,11 +204,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -193,10 +218,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -208,6 +236,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_SCSI_TGT=m @@ -244,12 +273,14 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_INTEL is not set @@ -258,6 +289,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -279,7 +311,6 @@ CONFIG_INPUT_EVDEV=m # CONFIG_MOUSE_PS2 is not set CONFIG_MOUSE_SERIAL=m CONFIG_SERIO=m -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set @@ -302,10 +333,6 @@ CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_HEARTBEAT=y CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -342,7 +369,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -401,10 +428,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -437,6 +464,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # 
CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index 6d5370c..e880cfb 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -49,7 +49,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -60,11 +59,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -82,6 +81,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -95,6 +105,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -127,6 +138,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -141,11 +153,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -153,6 +172,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -167,6 +187,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -180,11 +203,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -192,10 +217,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set 
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -211,6 +239,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_IDE=y CONFIG_IDE_GD_ATAPI=y CONFIG_BLK_DEV_IDECD=y @@ -249,10 +278,10 @@ CONFIG_TCM_PSCSI=m CONFIG_NETDEVICES=y CONFIG_DUMMY=m CONFIG_EQUALIZER=m -CONFIG_MII=y CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -260,6 +289,7 @@ CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_ATARILANCE=y +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_INTEL is not set @@ -267,6 +297,7 @@ CONFIG_ATARILANCE=y # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -291,7 +322,6 @@ CONFIG_MOUSE_ATARI=m CONFIG_INPUT_MISC=y CONFIG_INPUT_M68K_BEEP=m # CONFIG_SERIO is not set -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set CONFIG_PRINTER=m @@ -320,10 +350,6 @@ CONFIG_NFBLOCK=y CONFIG_NFCON=y CONFIG_NFETH=y CONFIG_ATARI_DSP56K=m -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -360,7 +386,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -419,10 +445,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -455,6 +481,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index c015ddb..4aa4f45 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -48,7 +48,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -59,11 +58,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -81,6 +80,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -94,6 +104,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m 
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -126,6 +137,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -140,11 +152,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -152,6 +171,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -166,6 +186,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -179,11 +202,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -191,10 +216,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -206,6 +234,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_SCSI_TGT=m @@ -243,12 +272,14 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set CONFIG_BVME6000_NET=y @@ -257,6 +288,7 @@ CONFIG_BVME6000_NET=y # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -294,10 +326,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y 
CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -334,7 +362,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -393,10 +421,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -429,6 +457,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index ec7382d..7cd9d9f 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -50,7 +50,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -61,11 +60,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -83,6 +82,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -96,6 +106,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -128,6 +139,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -142,11 +154,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -154,6 +173,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -168,6 +188,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ 
-181,11 +204,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -193,10 +218,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -208,6 +236,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_SCSI_TGT=m @@ -244,6 +273,7 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -251,6 +281,7 @@ CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_HPLANCE=y +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_INTEL is not set @@ -259,6 +290,7 @@ CONFIG_HPLANCE=y # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -282,7 +314,6 @@ CONFIG_MOUSE_SERIAL=m CONFIG_INPUT_MISC=y CONFIG_HP_SDC_RTC=m CONFIG_SERIO_SERPORT=m -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set @@ -304,10 +335,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -344,7 +371,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -403,10 +430,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -439,6 +466,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index 7d46fbe..31f5bd0 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -49,7 +49,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -60,11 +59,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m 
CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -82,6 +81,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -95,6 +105,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -127,6 +138,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -141,11 +153,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -153,6 +172,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -167,6 +187,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -180,11 +203,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -195,11 +220,13 @@ CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m CONFIG_IPDDP=m CONFIG_IPDDP_ENCAP=y -CONFIG_IPDDP_DECAP=y +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -212,6 +239,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_IDE=y CONFIG_IDE_GD_ATAPI=y CONFIG_BLK_DEV_IDECD=y @@ -261,6 +289,7 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -268,6 +297,7 @@ CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_MACMACE=y +# CONFIG_NET_VENDOR_ARC is 
not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set CONFIG_MAC89x0=y @@ -279,6 +309,7 @@ CONFIG_MAC8390=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -302,7 +333,6 @@ CONFIG_MOUSE_SERIAL=m CONFIG_INPUT_MISC=y CONFIG_INPUT_M68K_BEEP=m CONFIG_SERIO=m -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set CONFIG_SERIAL_PMACZILOG=y @@ -327,10 +357,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -367,7 +393,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -426,10 +452,11 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m +CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -462,6 +489,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index b17a883..4e5adff 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -58,7 +58,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -69,11 +68,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -91,6 +90,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -104,6 +114,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -136,6 +147,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -150,11 +162,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m 
+CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -162,6 +181,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -176,6 +196,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -189,11 +212,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -204,11 +229,13 @@ CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m CONFIG_IPDDP=m CONFIG_IPDDP_ENCAP=y -CONFIG_IPDDP_DECAP=y +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -230,6 +257,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_IDE=y CONFIG_IDE_GD_ATAPI=y CONFIG_BLK_DEV_IDECD=y @@ -290,10 +318,10 @@ CONFIG_MAC_EMUMOUSEBTN=y CONFIG_NETDEVICES=y CONFIG_DUMMY=m CONFIG_EQUALIZER=m -CONFIG_MII=y CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -308,10 +336,10 @@ CONFIG_HPLANCE=y CONFIG_MVME147_NET=y CONFIG_SUN3LANCE=y CONFIG_MACMACE=y +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set CONFIG_MAC89x0=y -# CONFIG_NET_VENDOR_FUJITSU is not set # CONFIG_NET_VENDOR_HP is not set CONFIG_BVME6000_NET=y CONFIG_MVME16x_NET=y @@ -325,6 +353,7 @@ CONFIG_APNE=y CONFIG_ZORRO8390=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PLIP=m CONFIG_PPP=m @@ -357,7 +386,6 @@ CONFIG_INPUT_MISC=y CONFIG_INPUT_M68K_BEEP=m CONFIG_HP_SDC_RTC=m CONFIG_SERIO_Q40KBD=y -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set CONFIG_SERIAL_PMACZILOG=y @@ -405,10 +433,6 @@ CONFIG_NFETH=y CONFIG_ATARI_DSP56K=m CONFIG_AMIGA_BUILTIN_SERIAL=y CONFIG_SERIAL_CONSOLE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -445,7 +469,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -504,10 +528,11 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m 
+CONFIG_TEST_STRING_HELPERS=m +CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -540,6 +565,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index 5586c65..02cdbac 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -47,7 +47,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -58,11 +57,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -80,6 +79,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -93,6 +103,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -125,6 +136,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -139,11 +151,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -151,6 +170,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -165,6 +185,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -178,11 +201,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m 
+CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -190,10 +215,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -205,6 +233,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_SCSI_TGT=m @@ -242,6 +271,7 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -249,6 +279,7 @@ CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_MVME147_NET=y +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_INTEL is not set @@ -257,6 +288,7 @@ CONFIG_MVME147_NET=y # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -294,10 +326,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -334,7 +362,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -393,10 +421,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -429,6 +457,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index e5e8262..05a990a 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -48,7 +48,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -59,11 +58,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -81,6 +80,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -94,6 +104,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -126,6 +137,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -140,11 +152,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -152,6 +171,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -166,6 +186,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -179,11 +202,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -191,10 +216,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -206,6 +234,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_SCSI_TGT=m @@ -243,12 +272,14 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set CONFIG_MVME16x_NET=y @@ -257,6 +288,7 @@ CONFIG_MVME16x_NET=y # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -294,10 +326,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# 
CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -334,7 +362,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -393,10 +421,11 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m +CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -429,6 +458,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index be1496e..568e2a9 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -48,7 +48,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -59,11 +58,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -81,6 +80,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -94,6 +104,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -126,6 +137,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -140,11 +152,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -152,6 +171,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -166,6 +186,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m 
+CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -179,11 +202,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -191,10 +216,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -209,6 +237,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_IDE=y CONFIG_IDE_GD_ATAPI=y CONFIG_BLK_DEV_IDECD=y @@ -249,6 +278,7 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -257,10 +287,10 @@ CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m # CONFIG_NET_VENDOR_3COM is not set # CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_CIRRUS is not set -# CONFIG_NET_VENDOR_FUJITSU is not set # CONFIG_NET_VENDOR_HP is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set @@ -269,6 +299,7 @@ CONFIG_NE2000=m # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PLIP=m CONFIG_PPP=m @@ -293,7 +324,6 @@ CONFIG_MOUSE_SERIAL=m CONFIG_INPUT_MISC=y CONFIG_INPUT_M68K_BEEP=m CONFIG_SERIO_Q40KBD=y -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set CONFIG_PRINTER=m @@ -318,10 +348,6 @@ CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_HEARTBEAT=y CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -358,7 +384,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -417,10 +443,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -453,6 +479,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 54674d6..60b0aea 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -45,7 +45,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m 
CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -56,11 +55,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -78,6 +77,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -91,6 +101,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -123,6 +134,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -137,11 +149,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -149,6 +168,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -163,6 +183,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -176,11 +199,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -188,10 +213,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -203,6 +231,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_SCSI_TGT=m @@ -240,6 +269,7 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m 
+CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -247,6 +277,7 @@ CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_SUN3LANCE=y +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set CONFIG_SUN3_82586=y # CONFIG_NET_VENDOR_MARVELL is not set @@ -255,6 +286,7 @@ CONFIG_SUN3_82586=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set # CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -276,7 +308,6 @@ CONFIG_INPUT_EVDEV=m CONFIG_KEYBOARD_SUNKBD=y # CONFIG_MOUSE_PS2 is not set CONFIG_MOUSE_SERIAL=m -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set @@ -296,10 +327,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -336,7 +363,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -395,10 +422,10 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -431,6 +458,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index 832d953..21bda33 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -45,7 +45,6 @@ CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -56,11 +55,11 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m @@ -78,6 +77,17 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -91,6 +101,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -123,6 +134,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m 
CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -137,11 +149,18 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -149,6 +168,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -163,6 +183,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -176,11 +199,13 @@ CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_IP_DCCP=m # CONFIG_IP_DCCP_CCID3 is not set CONFIG_SCTP_COOKIE_HMAC_SHA1=y @@ -188,10 +213,13 @@ CONFIG_RDS=m CONFIG_RDS_TCP=m CONFIG_L2TP=m CONFIG_ATALK=m +CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FW_LOADER_USER_HELPER is not set @@ -203,6 +231,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m +CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_SCSI_TGT=m @@ -240,6 +269,7 @@ CONFIG_EQUALIZER=m CONFIG_NET_TEAM=m CONFIG_NET_TEAM_MODE_BROADCAST=m CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_VXLAN=m @@ -247,6 +277,7 @@ CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_SUN3LANCE=y +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_INTEL is not set @@ -255,6 +286,7 @@ CONFIG_SUN3LANCE=y # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -276,7 +308,6 @@ CONFIG_INPUT_EVDEV=m CONFIG_KEYBOARD_SUNKBD=y # CONFIG_MOUSE_PS2 is not set CONFIG_MOUSE_SERIAL=m -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set @@ -296,10 +327,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_PROC_HARDWARE=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_JFS_FS=m @@ -336,7 +363,7 @@ CONFIG_QNX6FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m @@ -395,10 +422,10 @@ 
CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m CONFIG_MAGIC_SYSRQ=y CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_STRING_HELPERS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m @@ -431,6 +458,8 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m diff --git a/arch/m68k/emu/natfeat.c b/arch/m68k/emu/natfeat.c index 121a666..71b78ec 100644 --- a/arch/m68k/emu/natfeat.c +++ b/arch/m68k/emu/natfeat.c @@ -9,6 +9,7 @@ * the GNU General Public License (GPL), incorporated herein by reference. */ +#include <linux/init.h> #include <linux/types.h> #include <linux/console.h> #include <linux/string.h> @@ -70,7 +71,7 @@ static void nf_poweroff(void) nf_call(id); } -void nf_init(void) +void __init nf_init(void) { unsigned long id, version; char buf[256]; diff --git a/arch/m68k/hp300/config.c b/arch/m68k/hp300/config.c index b7609f7..2e5a787 100644 --- a/arch/m68k/hp300/config.c +++ b/arch/m68k/hp300/config.c @@ -14,6 +14,8 @@ #include <linux/console.h> #include <asm/bootinfo.h> +#include <asm/bootinfo-hp300.h> +#include <asm/byteorder.h> #include <asm/machdep.h> #include <asm/blinken.h> #include <asm/io.h> /* readb() and writeb() */ @@ -70,15 +72,15 @@ extern int hp300_setup_serial_console(void) __init; int __init hp300_parse_bootinfo(const struct bi_record *record) { int unknown = 0; - const unsigned long *data = record->data; + const void *data = record->data; - switch (record->tag) { + switch (be16_to_cpu(record->tag)) { case BI_HP300_MODEL: - hp300_model = *data; + hp300_model = be32_to_cpup(data); break; case BI_HP300_UART_SCODE: - hp300_uart_scode = *data; + hp300_uart_scode = be32_to_cpup(data); break; case BI_HP300_UART_ADDR: diff --git a/arch/m68k/include/asm/amigahw.h b/arch/m68k/include/asm/amigahw.h index 7a19b56..5ad5681 100644 --- a/arch/m68k/include/asm/amigahw.h +++ b/arch/m68k/include/asm/amigahw.h @@ -18,26 +18,7 @@ #include <linux/ioport.h> - /* - * Different Amiga models - */ - -#define AMI_UNKNOWN (0) -#define AMI_500 (1) -#define AMI_500PLUS (2) -#define AMI_600 (3) -#define AMI_1000 (4) -#define AMI_1200 (5) -#define AMI_2000 (6) -#define AMI_2500 (7) -#define AMI_3000 (8) -#define AMI_3000T (9) -#define AMI_3000PLUS (10) -#define AMI_4000 (11) -#define AMI_4000T (12) -#define AMI_CDTV (13) -#define AMI_CD32 (14) -#define AMI_DRACO (15) +#include <asm/bootinfo-amiga.h> /* @@ -46,11 +27,6 @@ extern unsigned long amiga_chipset; -#define CS_STONEAGE (0) -#define CS_OCS (1) -#define CS_ECS (2) -#define CS_AGA (3) - /* * Miscellaneous @@ -266,7 +242,7 @@ struct CIA { #define zTwoBase (0x80000000) #define ZTWO_PADDR(x) (((unsigned long)(x))-zTwoBase) -#define ZTWO_VADDR(x) (((unsigned long)(x))+zTwoBase) +#define ZTWO_VADDR(x) ((void __iomem *)(((unsigned long)(x))+zTwoBase)) #define CUSTOM_PHYSADDR (0xdff000) #define amiga_custom ((*(volatile struct CUSTOM *)(zTwoBase+CUSTOM_PHYSADDR))) diff --git a/arch/m68k/include/asm/apollohw.h b/arch/m68k/include/asm/apollohw.h index 6c19e0c..87fc899 100644 --- a/arch/m68k/include/asm/apollohw.h +++ b/arch/m68k/include/asm/apollohw.h @@ -5,18 +5,11 @@ #include <linux/types.h> -/* - apollo models -*/ +#include <asm/bootinfo-apollo.h> + extern u_long apollo_model; -#define APOLLO_UNKNOWN (0) -#define APOLLO_DN3000 (1) -#define APOLLO_DN3010 (2) -#define APOLLO_DN3500 (3) -#define 
APOLLO_DN4000 (4) -#define APOLLO_DN4500 (5) /* see scn2681 data sheet for more info. diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h index d887050..972c8f3 100644 --- a/arch/m68k/include/asm/atarihw.h +++ b/arch/m68k/include/asm/atarihw.h @@ -21,7 +21,7 @@ #define _LINUX_ATARIHW_H_ #include <linux/types.h> -#include <asm/bootinfo.h> +#include <asm/bootinfo-atari.h> #include <asm/raw_io.h> extern u_long atari_mch_cookie; diff --git a/arch/m68k/include/asm/barrier.h b/arch/m68k/include/asm/barrier.h index 445ce22..15c5f77 100644 --- a/arch/m68k/include/asm/barrier.h +++ b/arch/m68k/include/asm/barrier.h @@ -1,20 +1,8 @@ #ifndef _M68K_BARRIER_H #define _M68K_BARRIER_H -/* - * Force strict CPU ordering. - * Not really required on m68k... - */ #define nop() do { asm volatile ("nop"); barrier(); } while (0) -#define mb() barrier() -#define rmb() barrier() -#define wmb() barrier() -#define read_barrier_depends() ((void)0) -#define set_mb(var, value) ({ (var) = (value); wmb(); }) -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() ((void)0) +#include <asm-generic/barrier.h> #endif /* _M68K_BARRIER_H */ diff --git a/arch/m68k/include/asm/bootinfo.h b/arch/m68k/include/asm/bootinfo.h index 67e7a78..8e21326 100644 --- a/arch/m68k/include/asm/bootinfo.h +++ b/arch/m68k/include/asm/bootinfo.h @@ -6,373 +6,23 @@ ** This file is subject to the terms and conditions of the GNU General Public ** License. See the file COPYING in the main directory of this archive ** for more details. -** -** Created 09/29/92 by Greg Harp -** -** 5/2/94 Roman Hodek: -** Added bi_atari part of the machine dependent union bi_un; for now it -** contains just a model field to distinguish between TT and Falcon. -** 26/7/96 Roman Zippel: -** Renamed to setup.h; added some useful macros to allow gcc some -** optimizations if possible. -** 5/10/96 Geert Uytterhoeven: -** Redesign of the boot information structure; renamed to bootinfo.h again -** 27/11/96 Geert Uytterhoeven: -** Backwards compatibility with bootinfo interface version 1.0 */ #ifndef _M68K_BOOTINFO_H #define _M68K_BOOTINFO_H +#include <uapi/asm/bootinfo.h> - /* - * Bootinfo definitions - * - * This is an easily parsable and extendable structure containing all - * information to be passed from the bootstrap to the kernel. - * - * This way I hope to keep all future changes back/forewards compatible. - * Thus, keep your fingers crossed... - * - * This structure is copied right after the kernel bss by the bootstrap - * routine. 
- */ #ifndef __ASSEMBLY__ -struct bi_record { - unsigned short tag; /* tag ID */ - unsigned short size; /* size of record (in bytes) */ - unsigned long data[0]; /* data */ -}; - -#endif /* __ASSEMBLY__ */ - - - /* - * Tag Definitions - * - * Machine independent tags start counting from 0x0000 - * Machine dependent tags start counting from 0x8000 - */ - -#define BI_LAST 0x0000 /* last record (sentinel) */ -#define BI_MACHTYPE 0x0001 /* machine type (u_long) */ -#define BI_CPUTYPE 0x0002 /* cpu type (u_long) */ -#define BI_FPUTYPE 0x0003 /* fpu type (u_long) */ -#define BI_MMUTYPE 0x0004 /* mmu type (u_long) */ -#define BI_MEMCHUNK 0x0005 /* memory chunk address and size */ - /* (struct mem_info) */ -#define BI_RAMDISK 0x0006 /* ramdisk address and size */ - /* (struct mem_info) */ -#define BI_COMMAND_LINE 0x0007 /* kernel command line parameters */ - /* (string) */ - - /* - * Amiga-specific tags - */ - -#define BI_AMIGA_MODEL 0x8000 /* model (u_long) */ -#define BI_AMIGA_AUTOCON 0x8001 /* AutoConfig device */ - /* (struct ConfigDev) */ -#define BI_AMIGA_CHIP_SIZE 0x8002 /* size of Chip RAM (u_long) */ -#define BI_AMIGA_VBLANK 0x8003 /* VBLANK frequency (u_char) */ -#define BI_AMIGA_PSFREQ 0x8004 /* power supply frequency (u_char) */ -#define BI_AMIGA_ECLOCK 0x8005 /* EClock frequency (u_long) */ -#define BI_AMIGA_CHIPSET 0x8006 /* native chipset present (u_long) */ -#define BI_AMIGA_SERPER 0x8007 /* serial port period (u_short) */ - - /* - * Atari-specific tags - */ - -#define BI_ATARI_MCH_COOKIE 0x8000 /* _MCH cookie from TOS (u_long) */ -#define BI_ATARI_MCH_TYPE 0x8001 /* special machine type (u_long) */ - /* (values are ATARI_MACH_* defines */ - -/* mch_cookie values (upper word) */ -#define ATARI_MCH_ST 0 -#define ATARI_MCH_STE 1 -#define ATARI_MCH_TT 2 -#define ATARI_MCH_FALCON 3 - -/* mch_type values */ -#define ATARI_MACH_NORMAL 0 /* no special machine type */ -#define ATARI_MACH_MEDUSA 1 /* Medusa 040 */ -#define ATARI_MACH_HADES 2 /* Hades 040 or 060 */ -#define ATARI_MACH_AB40 3 /* Afterburner040 on Falcon */ - - /* - * VME-specific tags - */ - -#define BI_VME_TYPE 0x8000 /* VME sub-architecture (u_long) */ -#define BI_VME_BRDINFO 0x8001 /* VME board information (struct) */ - -/* BI_VME_TYPE codes */ -#define VME_TYPE_TP34V 0x0034 /* Tadpole TP34V */ -#define VME_TYPE_MVME147 0x0147 /* Motorola MVME147 */ -#define VME_TYPE_MVME162 0x0162 /* Motorola MVME162 */ -#define VME_TYPE_MVME166 0x0166 /* Motorola MVME166 */ -#define VME_TYPE_MVME167 0x0167 /* Motorola MVME167 */ -#define VME_TYPE_MVME172 0x0172 /* Motorola MVME172 */ -#define VME_TYPE_MVME177 0x0177 /* Motorola MVME177 */ -#define VME_TYPE_BVME4000 0x4000 /* BVM Ltd. BVME4000 */ -#define VME_TYPE_BVME6000 0x6000 /* BVM Ltd. BVME6000 */ - -/* BI_VME_BRDINFO is a 32 byte struct as returned by the Bug code on - * Motorola VME boards. Contains board number, Bug version, board - * configuration options, etc. See include/asm/mvme16xhw.h for details. 
- */ - - - /* - * Macintosh-specific tags (all u_long) - */ - -#define BI_MAC_MODEL 0x8000 /* Mac Gestalt ID (model type) */ -#define BI_MAC_VADDR 0x8001 /* Mac video base address */ -#define BI_MAC_VDEPTH 0x8002 /* Mac video depth */ -#define BI_MAC_VROW 0x8003 /* Mac video rowbytes */ -#define BI_MAC_VDIM 0x8004 /* Mac video dimensions */ -#define BI_MAC_VLOGICAL 0x8005 /* Mac video logical base */ -#define BI_MAC_SCCBASE 0x8006 /* Mac SCC base address */ -#define BI_MAC_BTIME 0x8007 /* Mac boot time */ -#define BI_MAC_GMTBIAS 0x8008 /* Mac GMT timezone offset */ -#define BI_MAC_MEMSIZE 0x8009 /* Mac RAM size (sanity check) */ -#define BI_MAC_CPUID 0x800a /* Mac CPU type (sanity check) */ -#define BI_MAC_ROMBASE 0x800b /* Mac system ROM base address */ - - /* - * Macintosh hardware profile data - unused, see macintosh.h for - * reasonable type values - */ - -#define BI_MAC_VIA1BASE 0x8010 /* Mac VIA1 base address (always present) */ -#define BI_MAC_VIA2BASE 0x8011 /* Mac VIA2 base address (type varies) */ -#define BI_MAC_VIA2TYPE 0x8012 /* Mac VIA2 type (VIA, RBV, OSS) */ -#define BI_MAC_ADBTYPE 0x8013 /* Mac ADB interface type */ -#define BI_MAC_ASCBASE 0x8014 /* Mac Apple Sound Chip base address */ -#define BI_MAC_SCSI5380 0x8015 /* Mac NCR 5380 SCSI (base address, multi) */ -#define BI_MAC_SCSIDMA 0x8016 /* Mac SCSI DMA (base address) */ -#define BI_MAC_SCSI5396 0x8017 /* Mac NCR 53C96 SCSI (base address, multi) */ -#define BI_MAC_IDETYPE 0x8018 /* Mac IDE interface type */ -#define BI_MAC_IDEBASE 0x8019 /* Mac IDE interface base address */ -#define BI_MAC_NUBUS 0x801a /* Mac Nubus type (none, regular, pseudo) */ -#define BI_MAC_SLOTMASK 0x801b /* Mac Nubus slots present */ -#define BI_MAC_SCCTYPE 0x801c /* Mac SCC serial type (normal, IOP) */ -#define BI_MAC_ETHTYPE 0x801d /* Mac builtin ethernet type (Sonic, MACE */ -#define BI_MAC_ETHBASE 0x801e /* Mac builtin ethernet base address */ -#define BI_MAC_PMU 0x801f /* Mac power management / poweroff hardware */ -#define BI_MAC_IOP_SWIM 0x8020 /* Mac SWIM floppy IOP */ -#define BI_MAC_IOP_ADB 0x8021 /* Mac ADB IOP */ - - /* - * Mac: compatibility with old booter data format (temporarily) - * Fields unused with the new bootinfo can be deleted now; instead of - * adding new fields the struct might be splitted into a hardware address - * part and a hardware type part - */ - -#ifndef __ASSEMBLY__ - -struct mac_booter_data -{ - unsigned long videoaddr; - unsigned long videorow; - unsigned long videodepth; - unsigned long dimensions; - unsigned long args; - unsigned long boottime; - unsigned long gmtbias; - unsigned long bootver; - unsigned long videological; - unsigned long sccbase; - unsigned long id; - unsigned long memsize; - unsigned long serialmf; - unsigned long serialhsk; - unsigned long serialgpi; - unsigned long printmf; - unsigned long printhsk; - unsigned long printgpi; - unsigned long cpuid; - unsigned long rombase; - unsigned long adbdelay; - unsigned long timedbra; -}; - -extern struct mac_booter_data - mac_bi_data; - +#ifdef CONFIG_BOOTINFO_PROC +extern void save_bootinfo(const struct bi_record *bi); +#else +static inline void save_bootinfo(const struct bi_record *bi) {} #endif - /* - * Apollo-specific tags - */ - -#define BI_APOLLO_MODEL 0x8000 /* model (u_long) */ - - /* - * HP300-specific tags - */ - -#define BI_HP300_MODEL 0x8000 /* model (u_long) */ -#define BI_HP300_UART_SCODE 0x8001 /* UART select code (u_long) */ -#define BI_HP300_UART_ADDR 0x8002 /* phys. 
addr of UART (u_long) */ - - /* - * Stuff for bootinfo interface versioning - * - * At the start of kernel code, a 'struct bootversion' is located. - * bootstrap checks for a matching version of the interface before booting - * a kernel, to avoid user confusion if kernel and bootstrap don't work - * together :-) - * - * If incompatible changes are made to the bootinfo interface, the major - * number below should be stepped (and the minor reset to 0) for the - * appropriate machine. If a change is backward-compatible, the minor - * should be stepped. "Backwards-compatible" means that booting will work, - * but certain features may not. - */ - -#define BOOTINFOV_MAGIC 0x4249561A /* 'BIV^Z' */ -#define MK_BI_VERSION(major,minor) (((major)<<16)+(minor)) -#define BI_VERSION_MAJOR(v) (((v) >> 16) & 0xffff) -#define BI_VERSION_MINOR(v) ((v) & 0xffff) - -#ifndef __ASSEMBLY__ - -struct bootversion { - unsigned short branch; - unsigned long magic; - struct { - unsigned long machtype; - unsigned long version; - } machversions[0]; -}; - #endif /* __ASSEMBLY__ */ -#define AMIGA_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) -#define ATARI_BOOTI_VERSION MK_BI_VERSION( 2, 1 ) -#define MAC_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) -#define MVME147_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) -#define MVME16x_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) -#define BVME6000_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) -#define Q40_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) -#define HP300_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) - -#ifdef BOOTINFO_COMPAT_1_0 - - /* - * Backwards compatibility with bootinfo interface version 1.0 - */ - -#define COMPAT_AMIGA_BOOTI_VERSION MK_BI_VERSION( 1, 0 ) -#define COMPAT_ATARI_BOOTI_VERSION MK_BI_VERSION( 1, 0 ) -#define COMPAT_MAC_BOOTI_VERSION MK_BI_VERSION( 1, 0 ) - -#include <linux/zorro.h> - -#define COMPAT_NUM_AUTO 16 - -struct compat_bi_Amiga { - int model; - int num_autocon; - struct ConfigDev autocon[COMPAT_NUM_AUTO]; - unsigned long chip_size; - unsigned char vblank; - unsigned char psfreq; - unsigned long eclock; - unsigned long chipset; - unsigned long hw_present; -}; - -struct compat_bi_Atari { - unsigned long hw_present; - unsigned long mch_cookie; -}; - -#ifndef __ASSEMBLY__ - -struct compat_bi_Macintosh -{ - unsigned long videoaddr; - unsigned long videorow; - unsigned long videodepth; - unsigned long dimensions; - unsigned long args; - unsigned long boottime; - unsigned long gmtbias; - unsigned long bootver; - unsigned long videological; - unsigned long sccbase; - unsigned long id; - unsigned long memsize; - unsigned long serialmf; - unsigned long serialhsk; - unsigned long serialgpi; - unsigned long printmf; - unsigned long printhsk; - unsigned long printgpi; - unsigned long cpuid; - unsigned long rombase; - unsigned long adbdelay; - unsigned long timedbra; -}; - -#endif - -struct compat_mem_info { - unsigned long addr; - unsigned long size; -}; - -#define COMPAT_NUM_MEMINFO 4 - -#define COMPAT_CPUB_68020 0 -#define COMPAT_CPUB_68030 1 -#define COMPAT_CPUB_68040 2 -#define COMPAT_CPUB_68060 3 -#define COMPAT_FPUB_68881 5 -#define COMPAT_FPUB_68882 6 -#define COMPAT_FPUB_68040 7 -#define COMPAT_FPUB_68060 8 - -#define COMPAT_CPU_68020 (1<<COMPAT_CPUB_68020) -#define COMPAT_CPU_68030 (1<<COMPAT_CPUB_68030) -#define COMPAT_CPU_68040 (1<<COMPAT_CPUB_68040) -#define COMPAT_CPU_68060 (1<<COMPAT_CPUB_68060) -#define COMPAT_CPU_MASK (31) -#define COMPAT_FPU_68881 (1<<COMPAT_FPUB_68881) -#define COMPAT_FPU_68882 (1<<COMPAT_FPUB_68882) -#define COMPAT_FPU_68040 (1<<COMPAT_FPUB_68040) -#define COMPAT_FPU_68060 
(1<<COMPAT_FPUB_68060) -#define COMPAT_FPU_MASK (0xfe0) - -#define COMPAT_CL_SIZE (256) - -struct compat_bootinfo { - unsigned long machtype; - unsigned long cputype; - struct compat_mem_info memory[COMPAT_NUM_MEMINFO]; - int num_memory; - unsigned long ramdisk_size; - unsigned long ramdisk_addr; - char command_line[COMPAT_CL_SIZE]; - union { - struct compat_bi_Amiga bi_ami; - struct compat_bi_Atari bi_ata; - struct compat_bi_Macintosh bi_mac; - } bi_un; -}; - -#define bi_amiga bi_un.bi_ami -#define bi_atari bi_un.bi_ata -#define bi_mac bi_un.bi_mac - -#endif /* BOOTINFO_COMPAT_1_0 */ - #endif /* _M68K_BOOTINFO_H */ diff --git a/arch/m68k/include/asm/hp300hw.h b/arch/m68k/include/asm/hp300hw.h index d998ea6..64f5271 100644 --- a/arch/m68k/include/asm/hp300hw.h +++ b/arch/m68k/include/asm/hp300hw.h @@ -1,25 +1,9 @@ #ifndef _M68K_HP300HW_H #define _M68K_HP300HW_H -extern unsigned long hp300_model; +#include <asm/bootinfo-hp300.h> -/* This information was taken from NetBSD */ -#define HP_320 (0) /* 16MHz 68020+HP MMU+16K external cache */ -#define HP_330 (1) /* 16MHz 68020+68851 MMU */ -#define HP_340 (2) /* 16MHz 68030 */ -#define HP_345 (3) /* 50MHz 68030+32K external cache */ -#define HP_350 (4) /* 25MHz 68020+HP MMU+32K external cache */ -#define HP_360 (5) /* 25MHz 68030 */ -#define HP_370 (6) /* 33MHz 68030+64K external cache */ -#define HP_375 (7) /* 50MHz 68030+32K external cache */ -#define HP_380 (8) /* 25MHz 68040 */ -#define HP_385 (9) /* 33MHz 68040 */ -#define HP_400 (10) /* 50MHz 68030+32K external cache */ -#define HP_425T (11) /* 25MHz 68040 - model 425t */ -#define HP_425S (12) /* 25MHz 68040 - model 425s */ -#define HP_425E (13) /* 25MHz 68040 - model 425e */ -#define HP_433T (14) /* 33MHz 68040 - model 433t */ -#define HP_433S (15) /* 33MHz 68040 - model 433s */ +extern unsigned long hp300_model; #endif /* _M68K_HP300HW_H */ diff --git a/arch/m68k/include/asm/kexec.h b/arch/m68k/include/asm/kexec.h new file mode 100644 index 0000000..3df97ab --- /dev/null +++ b/arch/m68k/include/asm/kexec.h @@ -0,0 +1,29 @@ +#ifndef _ASM_M68K_KEXEC_H +#define _ASM_M68K_KEXEC_H + +#ifdef CONFIG_KEXEC + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) +/* Maximum address we can use for the control code buffer */ +#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) + +#define KEXEC_CONTROL_PAGE_SIZE 4096 + +#define KEXEC_ARCH KEXEC_ARCH_68K + +#ifndef __ASSEMBLY__ + +static inline void crash_setup_regs(struct pt_regs *newregs, + struct pt_regs *oldregs) +{ + /* Dummy implementation for now */ +} + +#endif /* __ASSEMBLY__ */ + +#endif /* CONFIG_KEXEC */ + +#endif /* _ASM_M68K_KEXEC_H */ diff --git a/arch/m68k/include/asm/mac_via.h b/arch/m68k/include/asm/mac_via.h index aeeedf8..fe3fc9a 100644 --- a/arch/m68k/include/asm/mac_via.h +++ b/arch/m68k/include/asm/mac_via.h @@ -254,6 +254,8 @@ extern volatile __u8 *via1,*via2; extern int rbv_present,via_alt_mapping; +struct irq_desc; + extern void via_register_interrupts(void); extern void via_irq_enable(int); extern void via_irq_disable(int); diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h index 682a1a2..d323b2c 100644 --- a/arch/m68k/include/asm/macintosh.h +++ b/arch/m68k/include/asm/macintosh.h @@ -4,6 +4,9 @@ #include <linux/seq_file.h> #include <linux/interrupt.h> +#include <asm/bootinfo-mac.h> + + /* * Apple Macintoshisms */ @@ -74,65 +77,29 @@ struct mac_model 
#define MAC_FLOPPY_SWIM_IOP 3 #define MAC_FLOPPY_AV 4 -/* - * Gestalt numbers - */ +extern struct mac_model *macintosh_config; -#define MAC_MODEL_II 6 -#define MAC_MODEL_IIX 7 -#define MAC_MODEL_IICX 8 -#define MAC_MODEL_SE30 9 -#define MAC_MODEL_IICI 11 -#define MAC_MODEL_IIFX 13 /* And well numbered it is too */ -#define MAC_MODEL_IISI 18 -#define MAC_MODEL_LC 19 -#define MAC_MODEL_Q900 20 -#define MAC_MODEL_PB170 21 -#define MAC_MODEL_Q700 22 -#define MAC_MODEL_CLII 23 /* aka: P200 */ -#define MAC_MODEL_PB140 25 -#define MAC_MODEL_Q950 26 /* aka: WGS95 */ -#define MAC_MODEL_LCIII 27 /* aka: P450 */ -#define MAC_MODEL_PB210 29 -#define MAC_MODEL_C650 30 -#define MAC_MODEL_PB230 32 -#define MAC_MODEL_PB180 33 -#define MAC_MODEL_PB160 34 -#define MAC_MODEL_Q800 35 /* aka: WGS80 */ -#define MAC_MODEL_Q650 36 -#define MAC_MODEL_LCII 37 /* aka: P400/405/410/430 */ -#define MAC_MODEL_PB250 38 -#define MAC_MODEL_IIVI 44 -#define MAC_MODEL_P600 45 /* aka: P600CD */ -#define MAC_MODEL_IIVX 48 -#define MAC_MODEL_CCL 49 /* aka: P250 */ -#define MAC_MODEL_PB165C 50 -#define MAC_MODEL_C610 52 /* aka: WGS60 */ -#define MAC_MODEL_Q610 53 -#define MAC_MODEL_PB145 54 /* aka: PB145B */ -#define MAC_MODEL_P520 56 /* aka: LC520 */ -#define MAC_MODEL_C660 60 -#define MAC_MODEL_P460 62 /* aka: LCIII+, P466/P467 */ -#define MAC_MODEL_PB180C 71 -#define MAC_MODEL_PB520 72 /* aka: PB520C, PB540, PB540C, PB550C */ -#define MAC_MODEL_PB270C 77 -#define MAC_MODEL_Q840 78 -#define MAC_MODEL_P550 80 /* aka: LC550, P560 */ -#define MAC_MODEL_CCLII 83 /* aka: P275 */ -#define MAC_MODEL_PB165 84 -#define MAC_MODEL_PB190 85 /* aka: PB190CS */ -#define MAC_MODEL_TV 88 -#define MAC_MODEL_P475 89 /* aka: LC475, P476 */ -#define MAC_MODEL_P475F 90 /* aka: P475 w/ FPU (no LC040) */ -#define MAC_MODEL_P575 92 /* aka: LC575, P577/P578 */ -#define MAC_MODEL_Q605 94 -#define MAC_MODEL_Q605_ACC 95 /* Q605 accelerated to 33 MHz */ -#define MAC_MODEL_Q630 98 /* aka: LC630, P630/631/635/636/637/638/640 */ -#define MAC_MODEL_P588 99 /* aka: LC580, P580 */ -#define MAC_MODEL_PB280 102 -#define MAC_MODEL_PB280C 103 -#define MAC_MODEL_PB150 115 -extern struct mac_model *macintosh_config; + /* + * Internal representation of the Mac hardware, filled in from bootinfo + */ + +struct mac_booter_data +{ + unsigned long videoaddr; + unsigned long videorow; + unsigned long videodepth; + unsigned long dimensions; + unsigned long boottime; + unsigned long gmtbias; + unsigned long videological; + unsigned long sccbase; + unsigned long id; + unsigned long memsize; + unsigned long cpuid; + unsigned long rombase; +}; + +extern struct mac_booter_data mac_bi_data; #endif diff --git a/arch/m68k/include/asm/mc146818rtc.h b/arch/m68k/include/asm/mc146818rtc.h index 9f70a01..05b43bf 100644 --- a/arch/m68k/include/asm/mc146818rtc.h +++ b/arch/m68k/include/asm/mc146818rtc.h @@ -10,16 +10,16 @@ #include <asm/atarihw.h> -#define RTC_PORT(x) (TT_RTC_BAS + 2*(x)) +#define ATARI_RTC_PORT(x) (TT_RTC_BAS + 2*(x)) #define RTC_ALWAYS_BCD 0 #define CMOS_READ(addr) ({ \ -atari_outb_p((addr),RTC_PORT(0)); \ -atari_inb_p(RTC_PORT(1)); \ +atari_outb_p((addr), ATARI_RTC_PORT(0)); \ +atari_inb_p(ATARI_RTC_PORT(1)); \ }) #define CMOS_WRITE(val, addr) ({ \ -atari_outb_p((addr),RTC_PORT(0)); \ -atari_outb_p((val),RTC_PORT(1)); \ +atari_outb_p((addr), ATARI_RTC_PORT(0)); \ +atari_outb_p((val), ATARI_RTC_PORT(1)); \ }) #endif /* CONFIG_ATARI */ diff --git a/arch/m68k/include/asm/mvme16xhw.h b/arch/m68k/include/asm/mvme16xhw.h index 6117f56..1eb89de 100644 --- 
a/arch/m68k/include/asm/mvme16xhw.h +++ b/arch/m68k/include/asm/mvme16xhw.h @@ -3,23 +3,6 @@ #include <asm/irq.h> -/* Board ID data structure - pointer to this retrieved from Bug by head.S */ - -/* Note, bytes 12 and 13 are board no in BCD (0162,0166,0167,0177,etc) */ - -extern long mvme_bdid_ptr; - -typedef struct { - char bdid[4]; - u_char rev, mth, day, yr; - u_short size, reserved; - u_short brdno; - char brdsuffix[2]; - u_long options; - u_short clun, dlun, ctype, dnum; - u_long option2; -} t_bdid, *p_bdid; - typedef struct { u_char ack_icr, diff --git a/arch/m68k/include/asm/setup.h b/arch/m68k/include/asm/setup.h index 65e78a2d..8f2023f 100644 --- a/arch/m68k/include/asm/setup.h +++ b/arch/m68k/include/asm/setup.h @@ -22,6 +22,7 @@ #ifndef _M68K_SETUP_H #define _M68K_SETUP_H +#include <uapi/asm/bootinfo.h> #include <uapi/asm/setup.h> @@ -297,14 +298,14 @@ extern int m68k_is040or060; #define NUM_MEMINFO 4 #ifndef __ASSEMBLY__ -struct mem_info { +struct m68k_mem_info { unsigned long addr; /* physical address of memory chunk */ unsigned long size; /* length of memory chunk (in bytes) */ }; extern int m68k_num_memory; /* # of memory blocks found (and used) */ extern int m68k_realnum_memory; /* real # of memory blocks found */ -extern struct mem_info m68k_memory[NUM_MEMINFO];/* memory description */ +extern struct m68k_mem_info m68k_memory[NUM_MEMINFO];/* memory description */ #endif #endif /* _M68K_SETUP_H */ diff --git a/arch/m68k/include/asm/timex.h b/arch/m68k/include/asm/timex.h index 6759dad..efc1f48 100644 --- a/arch/m68k/include/asm/timex.h +++ b/arch/m68k/include/asm/timex.h @@ -28,4 +28,14 @@ static inline cycles_t get_cycles(void) return 0; } +extern unsigned long (*mach_random_get_entropy)(void); + +static inline unsigned long random_get_entropy(void) +{ + if (mach_random_get_entropy) + return mach_random_get_entropy(); + return 0; +} +#define random_get_entropy random_get_entropy + #endif diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild index 1fef45a..6a2d257 100644 --- a/arch/m68k/include/uapi/asm/Kbuild +++ b/arch/m68k/include/uapi/asm/Kbuild @@ -11,6 +11,14 @@ generic-y += termbits.h generic-y += termios.h header-y += a.out.h +header-y += bootinfo.h +header-y += bootinfo-amiga.h +header-y += bootinfo-apollo.h +header-y += bootinfo-atari.h +header-y += bootinfo-hp300.h +header-y += bootinfo-mac.h +header-y += bootinfo-q40.h +header-y += bootinfo-vme.h header-y += byteorder.h header-y += cachectl.h header-y += fcntl.h diff --git a/arch/m68k/include/uapi/asm/bootinfo-amiga.h b/arch/m68k/include/uapi/asm/bootinfo-amiga.h new file mode 100644 index 0000000..daad3c5 --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-amiga.h @@ -0,0 +1,63 @@ +/* +** asm/bootinfo-amiga.h -- Amiga-specific boot information definitions +*/ + +#ifndef _UAPI_ASM_M68K_BOOTINFO_AMIGA_H +#define _UAPI_ASM_M68K_BOOTINFO_AMIGA_H + + + /* + * Amiga-specific tags + */ + +#define BI_AMIGA_MODEL 0x8000 /* model (__be32) */ +#define BI_AMIGA_AUTOCON 0x8001 /* AutoConfig device */ + /* (AmigaOS struct ConfigDev) */ +#define BI_AMIGA_CHIP_SIZE 0x8002 /* size of Chip RAM (__be32) */ +#define BI_AMIGA_VBLANK 0x8003 /* VBLANK frequency (__u8) */ +#define BI_AMIGA_PSFREQ 0x8004 /* power supply frequency (__u8) */ +#define BI_AMIGA_ECLOCK 0x8005 /* EClock frequency (__be32) */ +#define BI_AMIGA_CHIPSET 0x8006 /* native chipset present (__be32) */ +#define BI_AMIGA_SERPER 0x8007 /* serial port period (__be16) */ + + + /* + * Amiga models (BI_AMIGA_MODEL) + */ + +#define 
AMI_UNKNOWN 0 +#define AMI_500 1 +#define AMI_500PLUS 2 +#define AMI_600 3 +#define AMI_1000 4 +#define AMI_1200 5 +#define AMI_2000 6 +#define AMI_2500 7 +#define AMI_3000 8 +#define AMI_3000T 9 +#define AMI_3000PLUS 10 +#define AMI_4000 11 +#define AMI_4000T 12 +#define AMI_CDTV 13 +#define AMI_CD32 14 +#define AMI_DRACO 15 + + + /* + * Amiga chipsets (BI_AMIGA_CHIPSET) + */ + +#define CS_STONEAGE 0 +#define CS_OCS 1 +#define CS_ECS 2 +#define CS_AGA 3 + + + /* + * Latest Amiga bootinfo version + */ + +#define AMIGA_BOOTI_VERSION MK_BI_VERSION(2, 0) + + +#endif /* _UAPI_ASM_M68K_BOOTINFO_AMIGA_H */ diff --git a/arch/m68k/include/uapi/asm/bootinfo-apollo.h b/arch/m68k/include/uapi/asm/bootinfo-apollo.h new file mode 100644 index 0000000..a93e0af --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-apollo.h @@ -0,0 +1,28 @@ +/* +** asm/bootinfo-apollo.h -- Apollo-specific boot information definitions +*/ + +#ifndef _UAPI_ASM_M68K_BOOTINFO_APOLLO_H +#define _UAPI_ASM_M68K_BOOTINFO_APOLLO_H + + + /* + * Apollo-specific tags + */ + +#define BI_APOLLO_MODEL 0x8000 /* model (__be32) */ + + + /* + * Apollo models (BI_APOLLO_MODEL) + */ + +#define APOLLO_UNKNOWN 0 +#define APOLLO_DN3000 1 +#define APOLLO_DN3010 2 +#define APOLLO_DN3500 3 +#define APOLLO_DN4000 4 +#define APOLLO_DN4500 5 + + +#endif /* _UAPI_ASM_M68K_BOOTINFO_APOLLO_H */ diff --git a/arch/m68k/include/uapi/asm/bootinfo-atari.h b/arch/m68k/include/uapi/asm/bootinfo-atari.h new file mode 100644 index 0000000..a817854 --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-atari.h @@ -0,0 +1,44 @@ +/* +** asm/bootinfo-atari.h -- Atari-specific boot information definitions +*/ + +#ifndef _UAPI_ASM_M68K_BOOTINFO_ATARI_H +#define _UAPI_ASM_M68K_BOOTINFO_ATARI_H + + + /* + * Atari-specific tags + */ + +#define BI_ATARI_MCH_COOKIE 0x8000 /* _MCH cookie from TOS (__be32) */ +#define BI_ATARI_MCH_TYPE 0x8001 /* special machine type (__be32) */ + + + /* + * mch_cookie values (upper word of BI_ATARI_MCH_COOKIE) + */ + +#define ATARI_MCH_ST 0 +#define ATARI_MCH_STE 1 +#define ATARI_MCH_TT 2 +#define ATARI_MCH_FALCON 3 + + + /* + * Atari machine types (BI_ATARI_MCH_TYPE) + */ + +#define ATARI_MACH_NORMAL 0 /* no special machine type */ +#define ATARI_MACH_MEDUSA 1 /* Medusa 040 */ +#define ATARI_MACH_HADES 2 /* Hades 040 or 060 */ +#define ATARI_MACH_AB40 3 /* Afterburner040 on Falcon */ + + + /* + * Latest Atari bootinfo version + */ + +#define ATARI_BOOTI_VERSION MK_BI_VERSION(2, 1) + + +#endif /* _UAPI_ASM_M68K_BOOTINFO_ATARI_H */ diff --git a/arch/m68k/include/uapi/asm/bootinfo-hp300.h b/arch/m68k/include/uapi/asm/bootinfo-hp300.h new file mode 100644 index 0000000..c90cb71 --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-hp300.h @@ -0,0 +1,50 @@ +/* +** asm/bootinfo-hp300.h -- HP9000/300-specific boot information definitions +*/ + +#ifndef _UAPI_ASM_M68K_BOOTINFO_HP300_H +#define _UAPI_ASM_M68K_BOOTINFO_HP300_H + + + /* + * HP9000/300-specific tags + */ + +#define BI_HP300_MODEL 0x8000 /* model (__be32) */ +#define BI_HP300_UART_SCODE 0x8001 /* UART select code (__be32) */ +#define BI_HP300_UART_ADDR 0x8002 /* phys. 
addr of UART (__be32) */ + + + /* + * HP9000/300 and /400 models (BI_HP300_MODEL) + * + * This information was taken from NetBSD + */ + +#define HP_320 0 /* 16MHz 68020+HP MMU+16K external cache */ +#define HP_330 1 /* 16MHz 68020+68851 MMU */ +#define HP_340 2 /* 16MHz 68030 */ +#define HP_345 3 /* 50MHz 68030+32K external cache */ +#define HP_350 4 /* 25MHz 68020+HP MMU+32K external cache */ +#define HP_360 5 /* 25MHz 68030 */ +#define HP_370 6 /* 33MHz 68030+64K external cache */ +#define HP_375 7 /* 50MHz 68030+32K external cache */ +#define HP_380 8 /* 25MHz 68040 */ +#define HP_385 9 /* 33MHz 68040 */ + +#define HP_400 10 /* 50MHz 68030+32K external cache */ +#define HP_425T 11 /* 25MHz 68040 - model 425t */ +#define HP_425S 12 /* 25MHz 68040 - model 425s */ +#define HP_425E 13 /* 25MHz 68040 - model 425e */ +#define HP_433T 14 /* 33MHz 68040 - model 433t */ +#define HP_433S 15 /* 33MHz 68040 - model 433s */ + + + /* + * Latest HP9000/300 bootinfo version + */ + +#define HP300_BOOTI_VERSION MK_BI_VERSION(2, 0) + + +#endif /* _UAPI_ASM_M68K_BOOTINFO_HP300_H */ diff --git a/arch/m68k/include/uapi/asm/bootinfo-mac.h b/arch/m68k/include/uapi/asm/bootinfo-mac.h new file mode 100644 index 0000000..b44ff73 --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-mac.h @@ -0,0 +1,119 @@ +/* +** asm/bootinfo-mac.h -- Macintosh-specific boot information definitions +*/ + +#ifndef _UAPI_ASM_M68K_BOOTINFO_MAC_H +#define _UAPI_ASM_M68K_BOOTINFO_MAC_H + + + /* + * Macintosh-specific tags (all __be32) + */ + +#define BI_MAC_MODEL 0x8000 /* Mac Gestalt ID (model type) */ +#define BI_MAC_VADDR 0x8001 /* Mac video base address */ +#define BI_MAC_VDEPTH 0x8002 /* Mac video depth */ +#define BI_MAC_VROW 0x8003 /* Mac video rowbytes */ +#define BI_MAC_VDIM 0x8004 /* Mac video dimensions */ +#define BI_MAC_VLOGICAL 0x8005 /* Mac video logical base */ +#define BI_MAC_SCCBASE 0x8006 /* Mac SCC base address */ +#define BI_MAC_BTIME 0x8007 /* Mac boot time */ +#define BI_MAC_GMTBIAS 0x8008 /* Mac GMT timezone offset */ +#define BI_MAC_MEMSIZE 0x8009 /* Mac RAM size (sanity check) */ +#define BI_MAC_CPUID 0x800a /* Mac CPU type (sanity check) */ +#define BI_MAC_ROMBASE 0x800b /* Mac system ROM base address */ + + + /* + * Macintosh hardware profile data - unused, see macintosh.h for + * reasonable type values + */ + +#define BI_MAC_VIA1BASE 0x8010 /* Mac VIA1 base address (always present) */ +#define BI_MAC_VIA2BASE 0x8011 /* Mac VIA2 base address (type varies) */ +#define BI_MAC_VIA2TYPE 0x8012 /* Mac VIA2 type (VIA, RBV, OSS) */ +#define BI_MAC_ADBTYPE 0x8013 /* Mac ADB interface type */ +#define BI_MAC_ASCBASE 0x8014 /* Mac Apple Sound Chip base address */ +#define BI_MAC_SCSI5380 0x8015 /* Mac NCR 5380 SCSI (base address, multi) */ +#define BI_MAC_SCSIDMA 0x8016 /* Mac SCSI DMA (base address) */ +#define BI_MAC_SCSI5396 0x8017 /* Mac NCR 53C96 SCSI (base address, multi) */ +#define BI_MAC_IDETYPE 0x8018 /* Mac IDE interface type */ +#define BI_MAC_IDEBASE 0x8019 /* Mac IDE interface base address */ +#define BI_MAC_NUBUS 0x801a /* Mac Nubus type (none, regular, pseudo) */ +#define BI_MAC_SLOTMASK 0x801b /* Mac Nubus slots present */ +#define BI_MAC_SCCTYPE 0x801c /* Mac SCC serial type (normal, IOP) */ +#define BI_MAC_ETHTYPE 0x801d /* Mac builtin ethernet type (Sonic, MACE */ +#define BI_MAC_ETHBASE 0x801e /* Mac builtin ethernet base address */ +#define BI_MAC_PMU 0x801f /* Mac power management / poweroff hardware */ +#define BI_MAC_IOP_SWIM 0x8020 /* Mac SWIM floppy IOP */ +#define BI_MAC_IOP_ADB 0x8021 
/* Mac ADB IOP */ + + + /* + * Macintosh Gestalt numbers (BI_MAC_MODEL) + */ + +#define MAC_MODEL_II 6 +#define MAC_MODEL_IIX 7 +#define MAC_MODEL_IICX 8 +#define MAC_MODEL_SE30 9 +#define MAC_MODEL_IICI 11 +#define MAC_MODEL_IIFX 13 /* And well numbered it is too */ +#define MAC_MODEL_IISI 18 +#define MAC_MODEL_LC 19 +#define MAC_MODEL_Q900 20 +#define MAC_MODEL_PB170 21 +#define MAC_MODEL_Q700 22 +#define MAC_MODEL_CLII 23 /* aka: P200 */ +#define MAC_MODEL_PB140 25 +#define MAC_MODEL_Q950 26 /* aka: WGS95 */ +#define MAC_MODEL_LCIII 27 /* aka: P450 */ +#define MAC_MODEL_PB210 29 +#define MAC_MODEL_C650 30 +#define MAC_MODEL_PB230 32 +#define MAC_MODEL_PB180 33 +#define MAC_MODEL_PB160 34 +#define MAC_MODEL_Q800 35 /* aka: WGS80 */ +#define MAC_MODEL_Q650 36 +#define MAC_MODEL_LCII 37 /* aka: P400/405/410/430 */ +#define MAC_MODEL_PB250 38 +#define MAC_MODEL_IIVI 44 +#define MAC_MODEL_P600 45 /* aka: P600CD */ +#define MAC_MODEL_IIVX 48 +#define MAC_MODEL_CCL 49 /* aka: P250 */ +#define MAC_MODEL_PB165C 50 +#define MAC_MODEL_C610 52 /* aka: WGS60 */ +#define MAC_MODEL_Q610 53 +#define MAC_MODEL_PB145 54 /* aka: PB145B */ +#define MAC_MODEL_P520 56 /* aka: LC520 */ +#define MAC_MODEL_C660 60 +#define MAC_MODEL_P460 62 /* aka: LCIII+, P466/P467 */ +#define MAC_MODEL_PB180C 71 +#define MAC_MODEL_PB520 72 /* aka: PB520C, PB540, PB540C, PB550C */ +#define MAC_MODEL_PB270C 77 +#define MAC_MODEL_Q840 78 +#define MAC_MODEL_P550 80 /* aka: LC550, P560 */ +#define MAC_MODEL_CCLII 83 /* aka: P275 */ +#define MAC_MODEL_PB165 84 +#define MAC_MODEL_PB190 85 /* aka: PB190CS */ +#define MAC_MODEL_TV 88 +#define MAC_MODEL_P475 89 /* aka: LC475, P476 */ +#define MAC_MODEL_P475F 90 /* aka: P475 w/ FPU (no LC040) */ +#define MAC_MODEL_P575 92 /* aka: LC575, P577/P578 */ +#define MAC_MODEL_Q605 94 +#define MAC_MODEL_Q605_ACC 95 /* Q605 accelerated to 33 MHz */ +#define MAC_MODEL_Q630 98 /* aka: LC630, P630/631/635/636/637/638/640 */ +#define MAC_MODEL_P588 99 /* aka: LC580, P580 */ +#define MAC_MODEL_PB280 102 +#define MAC_MODEL_PB280C 103 +#define MAC_MODEL_PB150 115 + + + /* + * Latest Macintosh bootinfo version + */ + +#define MAC_BOOTI_VERSION MK_BI_VERSION(2, 0) + + +#endif /* _UAPI_ASM_M68K_BOOTINFO_MAC_H */ diff --git a/arch/m68k/include/uapi/asm/bootinfo-q40.h b/arch/m68k/include/uapi/asm/bootinfo-q40.h new file mode 100644 index 0000000..c79fea7 --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-q40.h @@ -0,0 +1,16 @@ +/* +** asm/bootinfo-q40.h -- Q40-specific boot information definitions +*/ + +#ifndef _UAPI_ASM_M68K_BOOTINFO_Q40_H +#define _UAPI_ASM_M68K_BOOTINFO_Q40_H + + + /* + * Latest Q40 bootinfo version + */ + +#define Q40_BOOTI_VERSION MK_BI_VERSION(2, 0) + + +#endif /* _UAPI_ASM_M68K_BOOTINFO_Q40_H */ diff --git a/arch/m68k/include/uapi/asm/bootinfo-vme.h b/arch/m68k/include/uapi/asm/bootinfo-vme.h new file mode 100644 index 0000000..a135eb4 --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-vme.h @@ -0,0 +1,70 @@ +/* +** asm/bootinfo-vme.h -- VME-specific boot information definitions +*/ + +#ifndef _UAPI_ASM_M68K_BOOTINFO_VME_H +#define _UAPI_ASM_M68K_BOOTINFO_VME_H + + +#include <linux/types.h> + + + /* + * VME-specific tags + */ + +#define BI_VME_TYPE 0x8000 /* VME sub-architecture (__be32) */ +#define BI_VME_BRDINFO 0x8001 /* VME board information (struct) */ + + + /* + * VME models (BI_VME_TYPE) + */ + +#define VME_TYPE_TP34V 0x0034 /* Tadpole TP34V */ +#define VME_TYPE_MVME147 0x0147 /* Motorola MVME147 */ +#define VME_TYPE_MVME162 0x0162 /* Motorola MVME162 */ +#define 
VME_TYPE_MVME166 0x0166 /* Motorola MVME166 */ +#define VME_TYPE_MVME167 0x0167 /* Motorola MVME167 */ +#define VME_TYPE_MVME172 0x0172 /* Motorola MVME172 */ +#define VME_TYPE_MVME177 0x0177 /* Motorola MVME177 */ +#define VME_TYPE_BVME4000 0x4000 /* BVM Ltd. BVME4000 */ +#define VME_TYPE_BVME6000 0x6000 /* BVM Ltd. BVME6000 */ + + +#ifndef __ASSEMBLY__ + +/* + * Board ID data structure - pointer to this retrieved from Bug by head.S + * + * BI_VME_BRDINFO is a 32 byte struct as returned by the Bug code on + * Motorola VME boards. Contains board number, Bug version, board + * configuration options, etc. + * + * Note, bytes 12 and 13 are board no in BCD (0162,0166,0167,0177,etc) + */ + +typedef struct { + char bdid[4]; + __u8 rev, mth, day, yr; + __be16 size, reserved; + __be16 brdno; + char brdsuffix[2]; + __be32 options; + __be16 clun, dlun, ctype, dnum; + __be32 option2; +} t_bdid, *p_bdid; + +#endif /* __ASSEMBLY__ */ + + + /* + * Latest VME bootinfo versions + */ + +#define MVME147_BOOTI_VERSION MK_BI_VERSION(2, 0) +#define MVME16x_BOOTI_VERSION MK_BI_VERSION(2, 0) +#define BVME6000_BOOTI_VERSION MK_BI_VERSION(2, 0) + + +#endif /* _UAPI_ASM_M68K_BOOTINFO_VME_H */ diff --git a/arch/m68k/include/uapi/asm/bootinfo.h b/arch/m68k/include/uapi/asm/bootinfo.h new file mode 100644 index 0000000..cdeb26a0 --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo.h @@ -0,0 +1,174 @@ +/* + * asm/bootinfo.h -- Definition of the Linux/m68k boot information structure + * + * Copyright 1992 by Greg Harp + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + */ + +#ifndef _UAPI_ASM_M68K_BOOTINFO_H +#define _UAPI_ASM_M68K_BOOTINFO_H + + +#include <linux/types.h> + + +#ifndef __ASSEMBLY__ + + /* + * Bootinfo definitions + * + * This is an easily parsable and extendable structure containing all + * information to be passed from the bootstrap to the kernel. + * + * This way I hope to keep all future changes back/forewards compatible. + * Thus, keep your fingers crossed... + * + * This structure is copied right after the kernel by the bootstrap + * routine. 
+ */ + +struct bi_record { + __be16 tag; /* tag ID */ + __be16 size; /* size of record (in bytes) */ + __be32 data[0]; /* data */ +}; + + +struct mem_info { + __be32 addr; /* physical address of memory chunk */ + __be32 size; /* length of memory chunk (in bytes) */ +}; + +#endif /* __ASSEMBLY__ */ + + + /* + * Tag Definitions + * + * Machine independent tags start counting from 0x0000 + * Machine dependent tags start counting from 0x8000 + */ + +#define BI_LAST 0x0000 /* last record (sentinel) */ +#define BI_MACHTYPE 0x0001 /* machine type (__be32) */ +#define BI_CPUTYPE 0x0002 /* cpu type (__be32) */ +#define BI_FPUTYPE 0x0003 /* fpu type (__be32) */ +#define BI_MMUTYPE 0x0004 /* mmu type (__be32) */ +#define BI_MEMCHUNK 0x0005 /* memory chunk address and size */ + /* (struct mem_info) */ +#define BI_RAMDISK 0x0006 /* ramdisk address and size */ + /* (struct mem_info) */ +#define BI_COMMAND_LINE 0x0007 /* kernel command line parameters */ + /* (string) */ + + + /* + * Linux/m68k Architectures (BI_MACHTYPE) + */ + +#define MACH_AMIGA 1 +#define MACH_ATARI 2 +#define MACH_MAC 3 +#define MACH_APOLLO 4 +#define MACH_SUN3 5 +#define MACH_MVME147 6 +#define MACH_MVME16x 7 +#define MACH_BVME6000 8 +#define MACH_HP300 9 +#define MACH_Q40 10 +#define MACH_SUN3X 11 +#define MACH_M54XX 12 + + + /* + * CPU, FPU and MMU types (BI_CPUTYPE, BI_FPUTYPE, BI_MMUTYPE) + * + * Note: we may rely on the following equalities: + * + * CPU_68020 == MMU_68851 + * CPU_68030 == MMU_68030 + * CPU_68040 == FPU_68040 == MMU_68040 + * CPU_68060 == FPU_68060 == MMU_68060 + */ + +#define CPUB_68020 0 +#define CPUB_68030 1 +#define CPUB_68040 2 +#define CPUB_68060 3 +#define CPUB_COLDFIRE 4 + +#define CPU_68020 (1 << CPUB_68020) +#define CPU_68030 (1 << CPUB_68030) +#define CPU_68040 (1 << CPUB_68040) +#define CPU_68060 (1 << CPUB_68060) +#define CPU_COLDFIRE (1 << CPUB_COLDFIRE) + +#define FPUB_68881 0 +#define FPUB_68882 1 +#define FPUB_68040 2 /* Internal FPU */ +#define FPUB_68060 3 /* Internal FPU */ +#define FPUB_SUNFPA 4 /* Sun-3 FPA */ +#define FPUB_COLDFIRE 5 /* ColdFire FPU */ + +#define FPU_68881 (1 << FPUB_68881) +#define FPU_68882 (1 << FPUB_68882) +#define FPU_68040 (1 << FPUB_68040) +#define FPU_68060 (1 << FPUB_68060) +#define FPU_SUNFPA (1 << FPUB_SUNFPA) +#define FPU_COLDFIRE (1 << FPUB_COLDFIRE) + +#define MMUB_68851 0 +#define MMUB_68030 1 /* Internal MMU */ +#define MMUB_68040 2 /* Internal MMU */ +#define MMUB_68060 3 /* Internal MMU */ +#define MMUB_APOLLO 4 /* Custom Apollo */ +#define MMUB_SUN3 5 /* Custom Sun-3 */ +#define MMUB_COLDFIRE 6 /* Internal MMU */ + +#define MMU_68851 (1 << MMUB_68851) +#define MMU_68030 (1 << MMUB_68030) +#define MMU_68040 (1 << MMUB_68040) +#define MMU_68060 (1 << MMUB_68060) +#define MMU_SUN3 (1 << MMUB_SUN3) +#define MMU_APOLLO (1 << MMUB_APOLLO) +#define MMU_COLDFIRE (1 << MMUB_COLDFIRE) + + + /* + * Stuff for bootinfo interface versioning + * + * At the start of kernel code, a 'struct bootversion' is located. + * bootstrap checks for a matching version of the interface before booting + * a kernel, to avoid user confusion if kernel and bootstrap don't work + * together :-) + * + * If incompatible changes are made to the bootinfo interface, the major + * number below should be stepped (and the minor reset to 0) for the + * appropriate machine. If a change is backward-compatible, the minor + * should be stepped. "Backwards-compatible" means that booting will work, + * but certain features may not. 
+ */ + +#define BOOTINFOV_MAGIC 0x4249561A /* 'BIV^Z' */ +#define MK_BI_VERSION(major, minor) (((major) << 16) + (minor)) +#define BI_VERSION_MAJOR(v) (((v) >> 16) & 0xffff) +#define BI_VERSION_MINOR(v) ((v) & 0xffff) + +#ifndef __ASSEMBLY__ + +struct bootversion { + __be16 branch; + __be32 magic; + struct { + __be32 machtype; + __be32 version; + } machversions[0]; +} __packed; + +#endif /* __ASSEMBLY__ */ + + +#endif /* _UAPI_ASM_M68K_BOOTINFO_H */ diff --git a/arch/m68k/include/uapi/asm/setup.h b/arch/m68k/include/uapi/asm/setup.h index 85579bf..6a6dc63 100644 --- a/arch/m68k/include/uapi/asm/setup.h +++ b/arch/m68k/include/uapi/asm/setup.h @@ -6,98 +6,11 @@ ** This file is subject to the terms and conditions of the GNU General Public ** License. See the file COPYING in the main directory of this archive ** for more details. -** -** Created 09/29/92 by Greg Harp -** -** 5/2/94 Roman Hodek: -** Added bi_atari part of the machine dependent union bi_un; for now it -** contains just a model field to distinguish between TT and Falcon. -** 26/7/96 Roman Zippel: -** Renamed to setup.h; added some useful macros to allow gcc some -** optimizations if possible. -** 5/10/96 Geert Uytterhoeven: -** Redesign of the boot information structure; moved boot information -** structure to bootinfo.h */ #ifndef _UAPI_M68K_SETUP_H #define _UAPI_M68K_SETUP_H - - - /* - * Linux/m68k Architectures - */ - -#define MACH_AMIGA 1 -#define MACH_ATARI 2 -#define MACH_MAC 3 -#define MACH_APOLLO 4 -#define MACH_SUN3 5 -#define MACH_MVME147 6 -#define MACH_MVME16x 7 -#define MACH_BVME6000 8 -#define MACH_HP300 9 -#define MACH_Q40 10 -#define MACH_SUN3X 11 -#define MACH_M54XX 12 - #define COMMAND_LINE_SIZE 256 - - - /* - * CPU, FPU and MMU types - * - * Note: we may rely on the following equalities: - * - * CPU_68020 == MMU_68851 - * CPU_68030 == MMU_68030 - * CPU_68040 == FPU_68040 == MMU_68040 - * CPU_68060 == FPU_68060 == MMU_68060 - */ - -#define CPUB_68020 0 -#define CPUB_68030 1 -#define CPUB_68040 2 -#define CPUB_68060 3 -#define CPUB_COLDFIRE 4 - -#define CPU_68020 (1<<CPUB_68020) -#define CPU_68030 (1<<CPUB_68030) -#define CPU_68040 (1<<CPUB_68040) -#define CPU_68060 (1<<CPUB_68060) -#define CPU_COLDFIRE (1<<CPUB_COLDFIRE) - -#define FPUB_68881 0 -#define FPUB_68882 1 -#define FPUB_68040 2 /* Internal FPU */ -#define FPUB_68060 3 /* Internal FPU */ -#define FPUB_SUNFPA 4 /* Sun-3 FPA */ -#define FPUB_COLDFIRE 5 /* ColdFire FPU */ - -#define FPU_68881 (1<<FPUB_68881) -#define FPU_68882 (1<<FPUB_68882) -#define FPU_68040 (1<<FPUB_68040) -#define FPU_68060 (1<<FPUB_68060) -#define FPU_SUNFPA (1<<FPUB_SUNFPA) -#define FPU_COLDFIRE (1<<FPUB_COLDFIRE) - -#define MMUB_68851 0 -#define MMUB_68030 1 /* Internal MMU */ -#define MMUB_68040 2 /* Internal MMU */ -#define MMUB_68060 3 /* Internal MMU */ -#define MMUB_APOLLO 4 /* Custom Apollo */ -#define MMUB_SUN3 5 /* Custom Sun-3 */ -#define MMUB_COLDFIRE 6 /* Internal MMU */ - -#define MMU_68851 (1<<MMUB_68851) -#define MMU_68030 (1<<MMUB_68030) -#define MMU_68040 (1<<MMUB_68040) -#define MMU_68060 (1<<MMUB_68060) -#define MMU_SUN3 (1<<MMUB_SUN3) -#define MMU_APOLLO (1<<MMUB_APOLLO) -#define MMU_COLDFIRE (1<<MMUB_COLDFIRE) - - #endif /* _UAPI_M68K_SETUP_H */ diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile index 655347d..2d5d9be 100644 --- a/arch/m68k/kernel/Makefile +++ b/arch/m68k/kernel/Makefile @@ -22,3 +22,6 @@ obj-$(CONFIG_PCI) += pcibios.o obj-$(CONFIG_HAS_DMA) += dma.o +obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o 
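As an aside to the new <uapi/asm/bootinfo.h> definitions above (and the /proc/bootinfo support added below), here is a minimal sketch of how a bootstrap could lay out the bi_record chain that the kernel parses from right after the kernel image: each record carries a big-endian tag and a size that covers the 4-byte header plus its payload, and a bare BI_LAST tag terminates the chain. This is illustrative only and not part of the patch; the helper names (put_be16, put_be32, emit_record, build_bootinfo) are hypothetical.

#include <stdint.h>

static uint8_t *put_be16(uint8_t *p, uint16_t v)
{
	*p++ = v >> 8;
	*p++ = v & 0xff;
	return p;
}

static uint8_t *put_be32(uint8_t *p, uint32_t v)
{
	p = put_be16(p, v >> 16);
	return put_be16(p, v & 0xffff);
}

/* One bi_record: __be16 tag, __be16 size (header + data), __be32 data[] */
static uint8_t *emit_record(uint8_t *p, uint16_t tag,
			    const uint32_t *data, unsigned int n)
{
	unsigned int i;

	p = put_be16(p, tag);
	p = put_be16(p, 4 + 4 * n);		/* 4-byte header + payload */
	for (i = 0; i < n; i++)
		p = put_be32(p, data[i]);
	return p;
}

/* Chain placed right after the kernel image; only the BI_LAST tag is examined */
static void build_bootinfo(uint8_t *p, uint32_t mem_addr, uint32_t mem_size)
{
	uint32_t machtype = 3;				/* MACH_MAC */
	uint32_t chunk[2] = { mem_addr, mem_size };	/* struct mem_info */

	p = emit_record(p, 0x0001, &machtype, 1);	/* BI_MACHTYPE */
	p = emit_record(p, 0x0005, chunk, 2);		/* BI_MEMCHUNK */
	put_be16(p, 0x0000);				/* BI_LAST terminator */
}

The byte layout written here is exactly what save_bootinfo() and m68k_parse_bootinfo() in the hunks below walk with be16_to_cpu()/be32_to_cpu(); the tag and machine values are the ones defined in bootinfo.h above.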
+obj-$(CONFIG_BOOTINFO_PROC) += bootinfo_proc.o + diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c index 8b7b228..3a38634 100644 --- a/arch/m68k/kernel/asm-offsets.c +++ b/arch/m68k/kernel/asm-offsets.c @@ -98,6 +98,9 @@ int main(void) DEFINE(CIABBASE, &ciab); DEFINE(C_PRA, offsetof(struct CIA, pra)); DEFINE(ZTWOBASE, zTwoBase); + + /* enum m68k_fixup_type */ + DEFINE(M68K_FIXUP_MEMOFFSET, m68k_fixup_memoffset); #endif return 0; diff --git a/arch/m68k/kernel/bootinfo_proc.c b/arch/m68k/kernel/bootinfo_proc.c new file mode 100644 index 0000000..7ee853e --- /dev/null +++ b/arch/m68k/kernel/bootinfo_proc.c @@ -0,0 +1,80 @@ +/* + * Based on arch/arm/kernel/atags_proc.c + */ + +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/printk.h> +#include <linux/proc_fs.h> +#include <linux/slab.h> +#include <linux/string.h> + +#include <asm/bootinfo.h> +#include <asm/byteorder.h> + + +static char bootinfo_tmp[1536] __initdata; + +static void *bootinfo_copy; +static size_t bootinfo_size; + +static ssize_t bootinfo_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + return simple_read_from_buffer(buf, count, ppos, bootinfo_copy, + bootinfo_size); +} + +static const struct file_operations bootinfo_fops = { + .read = bootinfo_read, + .llseek = default_llseek, +}; + +void __init save_bootinfo(const struct bi_record *bi) +{ + const void *start = bi; + size_t size = sizeof(bi->tag); + + while (be16_to_cpu(bi->tag) != BI_LAST) { + uint16_t n = be16_to_cpu(bi->size); + size += n; + bi = (struct bi_record *)((unsigned long)bi + n); + } + + if (size > sizeof(bootinfo_tmp)) { + pr_err("Cannot save %zu bytes of bootinfo\n", size); + return; + } + + pr_info("Saving %zu bytes of bootinfo\n", size); + memcpy(bootinfo_tmp, start, size); + bootinfo_size = size; +} + +static int __init init_bootinfo_procfs(void) +{ + /* + * This cannot go into save_bootinfo() because kmalloc and proc don't + * work yet when it is called. 
+ */ + struct proc_dir_entry *pde; + + if (!bootinfo_size) + return -EINVAL; + + bootinfo_copy = kmalloc(bootinfo_size, GFP_KERNEL); + if (!bootinfo_copy) + return -ENOMEM; + + memcpy(bootinfo_copy, bootinfo_tmp, bootinfo_size); + + pde = proc_create_data("bootinfo", 0400, NULL, &bootinfo_fops, NULL); + if (!pde) { + kfree(bootinfo_copy); + return -ENOMEM; + } + + return 0; +} + +arch_initcall(init_bootinfo_procfs); diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S index ac85f16..4c99bab 100644 --- a/arch/m68k/kernel/head.S +++ b/arch/m68k/kernel/head.S @@ -23,7 +23,7 @@ ** 98/04/25 Phil Blundell: added HP300 support ** 1998/08/30 David Kilzer: Added support for font_desc structures ** for linux-2.1.115 -** 9/02/11 Richard Zidlicky: added Q40 support (initial vesion 99/01/01) +** 1999/02/11 Richard Zidlicky: added Q40 support (initial version 99/01/01) ** 2004/05/13 Kars de Jong: Finalised HP300 support ** ** This file is subject to the terms and conditions of the GNU General Public @@ -257,6 +257,12 @@ #include <linux/linkage.h> #include <linux/init.h> #include <asm/bootinfo.h> +#include <asm/bootinfo-amiga.h> +#include <asm/bootinfo-atari.h> +#include <asm/bootinfo-hp300.h> +#include <asm/bootinfo-mac.h> +#include <asm/bootinfo-q40.h> +#include <asm/bootinfo-vme.h> #include <asm/setup.h> #include <asm/entry.h> #include <asm/pgtable.h> @@ -1532,7 +1538,7 @@ L(cache_done): /* * Find a tag record in the bootinfo structure - * The bootinfo structure is located right after the kernel bss + * The bootinfo structure is located right after the kernel * Returns: d0: size (-1 if not found) * a0: data pointer (end-of-records if not found) */ @@ -2909,7 +2915,9 @@ func_start serial_init,%d0/%d1/%a0/%a1 #if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B) movel %pc@(L(mac_sccbase)),%a0 - /* Reset SCC device */ + /* Reset SCC register pointer */ + moveb %a0@(mac_scc_cha_a_ctrl_offset),%d0 + /* Reset SCC device: write register pointer then register value */ moveb #9,%a0@(mac_scc_cha_a_ctrl_offset) moveb #0xc0,%a0@(mac_scc_cha_a_ctrl_offset) /* Wait for 5 PCLK cycles, which is about 68 CPU cycles */ @@ -3896,8 +3904,6 @@ BVME_SCC_DATA_A = 0xffb0000f #endif #if defined(CONFIG_MAC) -L(mac_booter_data): - .long 0 L(mac_videobase): .long 0 L(mac_videodepth): diff --git a/arch/m68k/kernel/machine_kexec.c b/arch/m68k/kernel/machine_kexec.c new file mode 100644 index 0000000..d4affc9 --- /dev/null +++ b/arch/m68k/kernel/machine_kexec.c @@ -0,0 +1,58 @@ +/* + * machine_kexec.c - handle transition of Linux booting another kernel + */ +#include <linux/compiler.h> +#include <linux/kexec.h> +#include <linux/mm.h> +#include <linux/delay.h> + +#include <asm/cacheflush.h> +#include <asm/page.h> +#include <asm/setup.h> + +extern const unsigned char relocate_new_kernel[]; +extern const size_t relocate_new_kernel_size; + +int machine_kexec_prepare(struct kimage *kimage) +{ + return 0; +} + +void machine_kexec_cleanup(struct kimage *kimage) +{ +} + +void machine_shutdown(void) +{ +} + +void machine_crash_shutdown(struct pt_regs *regs) +{ +} + +typedef void (*relocate_kernel_t)(unsigned long ptr, + unsigned long start, + unsigned long cpu_mmu_flags) __noreturn; + +void machine_kexec(struct kimage *image) +{ + void *reboot_code_buffer; + unsigned long cpu_mmu_flags; + + reboot_code_buffer = page_address(image->control_code_page); + + memcpy(reboot_code_buffer, relocate_new_kernel, + relocate_new_kernel_size); + + /* + * we do not want to be bothered. 
+ */ + local_irq_disable(); + + pr_info("Will call new kernel at 0x%08lx. Bye...\n", image->start); + __flush_cache_all(); + cpu_mmu_flags = m68k_cputype | m68k_mmutype << 8; + ((relocate_kernel_t) reboot_code_buffer)(image->head & PAGE_MASK, + image->start, + cpu_mmu_flags); +} diff --git a/arch/m68k/kernel/relocate_kernel.S b/arch/m68k/kernel/relocate_kernel.S new file mode 100644 index 0000000..3e09a89 --- /dev/null +++ b/arch/m68k/kernel/relocate_kernel.S @@ -0,0 +1,159 @@ +#include <linux/linkage.h> + +#include <asm/asm-offsets.h> +#include <asm/page.h> +#include <asm/setup.h> + + +#define MMU_BASE 8 /* MMU flags base in cpu_mmu_flags */ + +.text + +ENTRY(relocate_new_kernel) + movel %sp@(4),%a0 /* a0 = ptr */ + movel %sp@(8),%a1 /* a1 = start */ + movel %sp@(12),%d1 /* d1 = cpu_mmu_flags */ + movew #PAGE_MASK,%d2 /* d2 = PAGE_MASK */ + + /* Disable MMU */ + + btst #MMU_BASE + MMUB_68851,%d1 + jeq 3f + +1: /* 68851 or 68030 */ + + lea %pc@(.Lcopy),%a4 +2: addl #0x00000000,%a4 /* virt_to_phys() */ + + .section ".m68k_fixup","aw" + .long M68K_FIXUP_MEMOFFSET, 2b+2 + .previous + + .chip 68030 + pmove %tc,%d0 /* Disable MMU */ + bclr #7,%d0 + pmove %d0,%tc + jmp %a4@ /* Jump to physical .Lcopy */ + .chip 68k + +3: + btst #MMU_BASE + MMUB_68030,%d1 + jne 1b + + btst #MMU_BASE + MMUB_68040,%d1 + jeq 6f + +4: /* 68040 or 68060 */ + + lea %pc@(.Lcont040),%a4 +5: addl #0x00000000,%a4 /* virt_to_phys() */ + + .section ".m68k_fixup","aw" + .long M68K_FIXUP_MEMOFFSET, 5b+2 + .previous + + movel %a4,%d0 + andl #0xff000000,%d0 + orw #0xe020,%d0 /* Map 16 MiB, enable, cacheable */ + .chip 68040 + movec %d0,%itt0 + movec %d0,%dtt0 + .chip 68k + jmp %a4@ /* Jump to physical .Lcont040 */ + +.Lcont040: + moveq #0,%d0 + .chip 68040 + movec %d0,%tc /* Disable MMU */ + movec %d0,%itt0 + movec %d0,%itt1 + movec %d0,%dtt0 + movec %d0,%dtt1 + .chip 68k + jra .Lcopy + +6: + btst #MMU_BASE + MMUB_68060,%d1 + jne 4b + +.Lcopy: + movel %a0@+,%d0 /* d0 = entry = *ptr */ + jeq .Lflush + + btst #2,%d0 /* entry & IND_DONE? */ + jne .Lflush + + btst #1,%d0 /* entry & IND_INDIRECTION? */ + jeq 1f + andw %d2,%d0 + movel %d0,%a0 /* ptr = entry & PAGE_MASK */ + jra .Lcopy + +1: + btst #0,%d0 /* entry & IND_DESTINATION? */ + jeq 2f + andw %d2,%d0 + movel %d0,%a2 /* a2 = dst = entry & PAGE_MASK */ + jra .Lcopy + +2: + btst #3,%d0 /* entry & IND_SOURCE? 
*/ + jeq .Lcopy + + andw %d2,%d0 + movel %d0,%a3 /* a3 = src = entry & PAGE_MASK */ + movew #PAGE_SIZE/32 - 1,%d0 /* d0 = PAGE_SIZE/32 - 1 */ +3: + movel %a3@+,%a2@+ /* *dst++ = *src++ */ + movel %a3@+,%a2@+ /* *dst++ = *src++ */ + movel %a3@+,%a2@+ /* *dst++ = *src++ */ + movel %a3@+,%a2@+ /* *dst++ = *src++ */ + movel %a3@+,%a2@+ /* *dst++ = *src++ */ + movel %a3@+,%a2@+ /* *dst++ = *src++ */ + movel %a3@+,%a2@+ /* *dst++ = *src++ */ + movel %a3@+,%a2@+ /* *dst++ = *src++ */ + dbf %d0, 3b + jra .Lcopy + +.Lflush: + /* Flush all caches */ + + btst #CPUB_68020,%d1 + jeq 2f + +1: /* 68020 or 68030 */ + .chip 68030 + movec %cacr,%d0 + orw #0x808,%d0 + movec %d0,%cacr + .chip 68k + jra .Lreincarnate + +2: + btst #CPUB_68030,%d1 + jne 1b + + btst #CPUB_68040,%d1 + jeq 4f + +3: /* 68040 or 68060 */ + .chip 68040 + nop + cpusha %bc + nop + cinva %bc + nop + .chip 68k + jra .Lreincarnate + +4: + btst #CPUB_68060,%d1 + jne 3b + +.Lreincarnate: + jmp %a1@ + +relocate_new_kernel_end: + +ENTRY(relocate_new_kernel_size) + .long relocate_new_kernel_end - relocate_new_kernel diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c index e67e531..5b8ec4d 100644 --- a/arch/m68k/kernel/setup_mm.c +++ b/arch/m68k/kernel/setup_mm.c @@ -26,6 +26,7 @@ #include <linux/initrd.h> #include <asm/bootinfo.h> +#include <asm/byteorder.h> #include <asm/sections.h> #include <asm/setup.h> #include <asm/fpu.h> @@ -71,12 +72,12 @@ EXPORT_SYMBOL(m68k_num_memory); int m68k_realnum_memory; EXPORT_SYMBOL(m68k_realnum_memory); unsigned long m68k_memoffset; -struct mem_info m68k_memory[NUM_MEMINFO]; +struct m68k_mem_info m68k_memory[NUM_MEMINFO]; EXPORT_SYMBOL(m68k_memory); -struct mem_info m68k_ramdisk; +static struct m68k_mem_info m68k_ramdisk __initdata; -static char m68k_command_line[CL_SIZE]; +static char m68k_command_line[CL_SIZE] __initdata; void (*mach_sched_init) (irq_handler_t handler) __initdata = NULL; /* machine dependent irq functions */ @@ -143,11 +144,16 @@ extern void paging_init(void); static void __init m68k_parse_bootinfo(const struct bi_record *record) { - while (record->tag != BI_LAST) { + uint16_t tag; + + save_bootinfo(record); + + while ((tag = be16_to_cpu(record->tag)) != BI_LAST) { int unknown = 0; - const unsigned long *data = record->data; + const void *data = record->data; + uint16_t size = be16_to_cpu(record->size); - switch (record->tag) { + switch (tag) { case BI_MACHTYPE: case BI_CPUTYPE: case BI_FPUTYPE: @@ -157,20 +163,27 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record) case BI_MEMCHUNK: if (m68k_num_memory < NUM_MEMINFO) { - m68k_memory[m68k_num_memory].addr = data[0]; - m68k_memory[m68k_num_memory].size = data[1]; + const struct mem_info *m = data; + m68k_memory[m68k_num_memory].addr = + be32_to_cpu(m->addr); + m68k_memory[m68k_num_memory].size = + be32_to_cpu(m->size); m68k_num_memory++; } else - printk("m68k_parse_bootinfo: too many memory chunks\n"); + pr_warn("%s: too many memory chunks\n", + __func__); break; case BI_RAMDISK: - m68k_ramdisk.addr = data[0]; - m68k_ramdisk.size = data[1]; + { + const struct mem_info *m = data; + m68k_ramdisk.addr = be32_to_cpu(m->addr); + m68k_ramdisk.size = be32_to_cpu(m->size); + } break; case BI_COMMAND_LINE: - strlcpy(m68k_command_line, (const char *)data, + strlcpy(m68k_command_line, data, sizeof(m68k_command_line)); break; @@ -197,17 +210,16 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record) unknown = 1; } if (unknown) - printk("m68k_parse_bootinfo: unknown tag 0x%04x ignored\n", - 
record->tag); - record = (struct bi_record *)((unsigned long)record + - record->size); + pr_warn("%s: unknown tag 0x%04x ignored\n", __func__, + tag); + record = (struct bi_record *)((unsigned long)record + size); } m68k_realnum_memory = m68k_num_memory; #ifdef CONFIG_SINGLE_MEMORY_CHUNK if (m68k_num_memory > 1) { - printk("Ignoring last %i chunks of physical memory\n", - (m68k_num_memory - 1)); + pr_warn("%s: ignoring last %i chunks of physical memory\n", + __func__, (m68k_num_memory - 1)); m68k_num_memory = 1; } #endif @@ -219,7 +231,7 @@ void __init setup_arch(char **cmdline_p) int i; #endif - /* The bootinfo is located right after the kernel bss */ + /* The bootinfo is located right after the kernel */ if (!CPU_IS_COLDFIRE) m68k_parse_bootinfo((const struct bi_record *)_end); @@ -247,7 +259,7 @@ void __init setup_arch(char **cmdline_p) asm (".chip 68060; movec %%pcr,%0; .chip 68k" : "=d" (pcr)); if (((pcr >> 8) & 0xff) <= 5) { - printk("Enabling workaround for errata I14\n"); + pr_warn("Enabling workaround for errata I14\n"); asm (".chip 68060; movec %0,%%pcr; .chip 68k" : : "d" (pcr | 0x20)); } @@ -336,12 +348,12 @@ void __init setup_arch(char **cmdline_p) panic("No configuration setup"); } + paging_init(); + #ifdef CONFIG_NATFEAT nf_init(); #endif - paging_init(); - #ifndef CONFIG_SUN3 for (i = 1; i < m68k_num_memory; i++) free_bootmem_node(NODE_DATA(i), m68k_memory[i].addr, @@ -353,7 +365,7 @@ void __init setup_arch(char **cmdline_p) BOOTMEM_DEFAULT); initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr); initrd_end = initrd_start + m68k_ramdisk.size; - printk("initrd: %08lx - %08lx\n", initrd_start, initrd_end); + pr_info("initrd: %08lx - %08lx\n", initrd_start, initrd_end); } #endif @@ -538,9 +550,9 @@ void check_bugs(void) { #ifndef CONFIG_M68KFPU_EMU if (m68k_fputype == 0) { - printk(KERN_EMERG "*** YOU DO NOT HAVE A FLOATING POINT UNIT, " + pr_emerg("*** YOU DO NOT HAVE A FLOATING POINT UNIT, " "WHICH IS REQUIRED BY LINUX/M68K ***\n"); - printk(KERN_EMERG "Upgrade your hardware or join the FPU " + pr_emerg("Upgrade your hardware or join the FPU " "emulation project\n"); panic("no FPU"); } diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c index 7eb9792..958f1ad 100644 --- a/arch/m68k/kernel/time.c +++ b/arch/m68k/kernel/time.c @@ -28,6 +28,10 @@ #include <linux/timex.h> #include <linux/profile.h> + +unsigned long (*mach_random_get_entropy)(void); + + /* * timer_interrupt() needs to keep up the real-time clock, * as well as call the "xtime_update()" routine every clocktick diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c index 88fcd8c..6c9ca24 100644 --- a/arch/m68k/kernel/traps.c +++ b/arch/m68k/kernel/traps.c @@ -133,9 +133,7 @@ static inline void access_error060 (struct frame *fp) { unsigned long fslw = fp->un.fmt4.pc; /* is really FSLW for access error */ -#ifdef DEBUG - printk("fslw=%#lx, fa=%#lx\n", fslw, fp->un.fmt4.effaddr); -#endif + pr_debug("fslw=%#lx, fa=%#lx\n", fslw, fp->un.fmt4.effaddr); if (fslw & MMU060_BPE) { /* branch prediction error -> clear branch cache */ @@ -162,9 +160,7 @@ static inline void access_error060 (struct frame *fp) } if (fslw & MMU060_W) errorcode |= 2; -#ifdef DEBUG - printk("errorcode = %d\n", errorcode ); -#endif + pr_debug("errorcode = %ld\n", errorcode); do_page_fault(&fp->ptregs, addr, errorcode); } else if (fslw & (MMU060_SEE)){ /* Software Emulation Error. 
@@ -173,8 +169,9 @@ static inline void access_error060 (struct frame *fp) send_fault_sig(&fp->ptregs); } else if (!(fslw & (MMU060_RE|MMU060_WE)) || send_fault_sig(&fp->ptregs) > 0) { - printk("pc=%#lx, fa=%#lx\n", fp->ptregs.pc, fp->un.fmt4.effaddr); - printk( "68060 access error, fslw=%lx\n", fslw ); + pr_err("pc=%#lx, fa=%#lx\n", fp->ptregs.pc, + fp->un.fmt4.effaddr); + pr_err("68060 access error, fslw=%lx\n", fslw); trap_c( fp ); } } @@ -225,9 +222,7 @@ static inline int do_040writeback1(unsigned short wbs, unsigned long wba, set_fs(old_fs); -#ifdef DEBUG - printk("do_040writeback1, res=%d\n",res); -#endif + pr_debug("do_040writeback1, res=%d\n", res); return res; } @@ -249,7 +244,7 @@ static inline void do_040writebacks(struct frame *fp) int res = 0; #if 0 if (fp->un.fmt7.wb1s & WBV_040) - printk("access_error040: cannot handle 1st writeback. oops.\n"); + pr_err("access_error040: cannot handle 1st writeback. oops.\n"); #endif if ((fp->un.fmt7.wb2s & WBV_040) && @@ -302,14 +297,12 @@ static inline void access_error040(struct frame *fp) unsigned short ssw = fp->un.fmt7.ssw; unsigned long mmusr; -#ifdef DEBUG - printk("ssw=%#x, fa=%#lx\n", ssw, fp->un.fmt7.faddr); - printk("wb1s=%#x, wb2s=%#x, wb3s=%#x\n", fp->un.fmt7.wb1s, + pr_debug("ssw=%#x, fa=%#lx\n", ssw, fp->un.fmt7.faddr); + pr_debug("wb1s=%#x, wb2s=%#x, wb3s=%#x\n", fp->un.fmt7.wb1s, fp->un.fmt7.wb2s, fp->un.fmt7.wb3s); - printk ("wb2a=%lx, wb3a=%lx, wb2d=%lx, wb3d=%lx\n", + pr_debug("wb2a=%lx, wb3a=%lx, wb2d=%lx, wb3d=%lx\n", fp->un.fmt7.wb2a, fp->un.fmt7.wb3a, fp->un.fmt7.wb2d, fp->un.fmt7.wb3d); -#endif if (ssw & ATC_040) { unsigned long addr = fp->un.fmt7.faddr; @@ -324,9 +317,7 @@ static inline void access_error040(struct frame *fp) /* MMU error, get the MMUSR info for this access */ mmusr = probe040(!(ssw & RW_040), addr, ssw); -#ifdef DEBUG - printk("mmusr = %lx\n", mmusr); -#endif + pr_debug("mmusr = %lx\n", mmusr); errorcode = 1; if (!(mmusr & MMU_R_040)) { /* clear the invalid atc entry */ @@ -340,14 +331,10 @@ static inline void access_error040(struct frame *fp) errorcode |= 2; if (do_page_fault(&fp->ptregs, addr, errorcode)) { -#ifdef DEBUG - printk("do_page_fault() !=0\n"); -#endif + pr_debug("do_page_fault() !=0\n"); if (user_mode(&fp->ptregs)){ /* delay writebacks after signal delivery */ -#ifdef DEBUG - printk(".. was usermode - return\n"); -#endif + pr_debug(".. was usermode - return\n"); return; } /* disable writeback into user space from kernel @@ -355,9 +342,7 @@ static inline void access_error040(struct frame *fp) * the writeback won't do good) */ disable_wb: -#ifdef DEBUG - printk(".. disabling wb2\n"); -#endif + pr_debug(".. disabling wb2\n"); if (fp->un.fmt7.wb2a == fp->un.fmt7.faddr) fp->un.fmt7.wb2s &= ~WBV_040; if (fp->un.fmt7.wb3a == fp->un.fmt7.faddr) @@ -371,7 +356,7 @@ disable_wb: current->thread.signo = SIGBUS; current->thread.faddr = fp->un.fmt7.faddr; if (send_fault_sig(&fp->ptregs) >= 0) - printk("68040 bus error (ssw=%x, faddr=%lx)\n", ssw, + pr_err("68040 bus error (ssw=%x, faddr=%lx)\n", ssw, fp->un.fmt7.faddr); goto disable_wb; } @@ -394,19 +379,17 @@ static inline void bus_error030 (struct frame *fp) unsigned short ssw = fp->un.fmtb.ssw; extern unsigned long _sun3_map_test_start, _sun3_map_test_end; -#ifdef DEBUG if (ssw & (FC | FB)) - printk ("Instruction fault at %#010lx\n", + pr_debug("Instruction fault at %#010lx\n", ssw & FC ? fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2 : fp->ptregs.format == 0xa ? 
fp->ptregs.pc + 4 : fp->un.fmtb.baddr); if (ssw & DF) - printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n", + pr_debug("Data %s fault at %#010lx in %s (pc=%#lx)\n", ssw & RW ? "read" : "write", fp->un.fmtb.daddr, space_names[ssw & DFC], fp->ptregs.pc); -#endif /* * Check if this page should be demand-mapped. This needs to go before @@ -429,7 +412,7 @@ static inline void bus_error030 (struct frame *fp) return; /* instruction fault or kernel data fault! */ if (ssw & (FC | FB)) - printk ("Instruction fault at %#010lx\n", + pr_err("Instruction fault at %#010lx\n", fp->ptregs.pc); if (ssw & DF) { /* was this fault incurred testing bus mappings? */ @@ -439,12 +422,12 @@ static inline void bus_error030 (struct frame *fp) return; } - printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n", + pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n", ssw & RW ? "read" : "write", fp->un.fmtb.daddr, space_names[ssw & DFC], fp->ptregs.pc); } - printk ("BAD KERNEL BUSERR\n"); + pr_err("BAD KERNEL BUSERR\n"); die_if_kernel("Oops", &fp->ptregs,0); force_sig(SIGKILL, current); @@ -473,12 +456,11 @@ static inline void bus_error030 (struct frame *fp) else if (buserr_type & SUN3_BUSERR_INVALID) errorcode = 0x00; else { -#ifdef DEBUG - printk ("*** unexpected busfault type=%#04x\n", buserr_type); - printk ("invalid %s access at %#lx from pc %#lx\n", - !(ssw & RW) ? "write" : "read", addr, - fp->ptregs.pc); -#endif + pr_debug("*** unexpected busfault type=%#04x\n", + buserr_type); + pr_debug("invalid %s access at %#lx from pc %#lx\n", + !(ssw & RW) ? "write" : "read", addr, + fp->ptregs.pc); die_if_kernel ("Oops", &fp->ptregs, buserr_type); force_sig (SIGBUS, current); return; @@ -509,9 +491,7 @@ static inline void bus_error030 (struct frame *fp) if (!mmu_emu_handle_fault(addr, 1, 0)) do_page_fault (&fp->ptregs, addr, 0); } else { -#ifdef DEBUG - printk ("protection fault on insn access (segv).\n"); -#endif + pr_debug("protection fault on insn access (segv).\n"); force_sig (SIGSEGV, current); } } @@ -525,22 +505,22 @@ static inline void bus_error030 (struct frame *fp) unsigned short ssw = fp->un.fmtb.ssw; #ifdef DEBUG unsigned long desc; +#endif - printk ("pid = %x ", current->pid); - printk ("SSW=%#06x ", ssw); + pr_debug("pid = %x ", current->pid); + pr_debug("SSW=%#06x ", ssw); if (ssw & (FC | FB)) - printk ("Instruction fault at %#010lx\n", + pr_debug("Instruction fault at %#010lx\n", ssw & FC ? fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2 : fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr); if (ssw & DF) - printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n", + pr_debug("Data %s fault at %#010lx in %s (pc=%#lx)\n", ssw & RW ? "read" : "write", fp->un.fmtb.daddr, space_names[ssw & DFC], fp->ptregs.pc); -#endif /* ++andreas: If a data fault and an instruction fault happen at the same time map in both pages. 
*/ @@ -554,27 +534,23 @@ static inline void bus_error030 (struct frame *fp) "pmove %%psr,%1" : "=a&" (desc), "=m" (temp) : "a" (addr), "d" (ssw)); + pr_debug("mmusr is %#x for addr %#lx in task %p\n", + temp, addr, current); + pr_debug("descriptor address is 0x%p, contents %#lx\n", + __va(desc), *(unsigned long *)__va(desc)); #else asm volatile ("ptestr %2,%1@,#7\n\t" "pmove %%psr,%0" : "=m" (temp) : "a" (addr), "d" (ssw)); #endif mmusr = temp; - -#ifdef DEBUG - printk("mmusr is %#x for addr %#lx in task %p\n", - mmusr, addr, current); - printk("descriptor address is %#lx, contents %#lx\n", - __va(desc), *(unsigned long *)__va(desc)); -#endif - errorcode = (mmusr & MMU_I) ? 0 : 1; if (!(ssw & RW) || (ssw & RM)) errorcode |= 2; if (mmusr & (MMU_I | MMU_WP)) { if (ssw & 4) { - printk("Data %s fault at %#010lx in %s (pc=%#lx)\n", + pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n", ssw & RW ? "read" : "write", fp->un.fmtb.daddr, space_names[ssw & DFC], fp->ptregs.pc); @@ -587,9 +563,10 @@ static inline void bus_error030 (struct frame *fp) } else if (!(mmusr & MMU_I)) { /* probably a 020 cas fault */ if (!(ssw & RM) && send_fault_sig(&fp->ptregs) > 0) - printk("unexpected bus error (%#x,%#x)\n", ssw, mmusr); + pr_err("unexpected bus error (%#x,%#x)\n", ssw, + mmusr); } else if (mmusr & (MMU_B|MMU_L|MMU_S)) { - printk("invalid %s access at %#lx from pc %#lx\n", + pr_err("invalid %s access at %#lx from pc %#lx\n", !(ssw & RW) ? "write" : "read", addr, fp->ptregs.pc); die_if_kernel("Oops",&fp->ptregs,mmusr); @@ -600,7 +577,7 @@ static inline void bus_error030 (struct frame *fp) static volatile long tlong; #endif - printk("weird %s access at %#lx from pc %#lx (ssw is %#x)\n", + pr_err("weird %s access at %#lx from pc %#lx (ssw is %#x)\n", !(ssw & RW) ? "write" : "read", addr, fp->ptregs.pc, ssw); asm volatile ("ptestr #1,%1@,#0\n\t" @@ -609,18 +586,16 @@ static inline void bus_error030 (struct frame *fp) : "a" (addr)); mmusr = temp; - printk ("level 0 mmusr is %#x\n", mmusr); + pr_err("level 0 mmusr is %#x\n", mmusr); #if 0 asm volatile ("pmove %%tt0,%0" : "=m" (tlong)); - printk("tt0 is %#lx, ", tlong); + pr_debug("tt0 is %#lx, ", tlong); asm volatile ("pmove %%tt1,%0" : "=m" (tlong)); - printk("tt1 is %#lx\n", tlong); -#endif -#ifdef DEBUG - printk("Unknown SIGSEGV - 1\n"); + pr_debug("tt1 is %#lx\n", tlong); #endif + pr_debug("Unknown SIGSEGV - 1\n"); die_if_kernel("Oops",&fp->ptregs,mmusr); force_sig(SIGSEGV, current); return; @@ -641,10 +616,9 @@ static inline void bus_error030 (struct frame *fp) return; if (fp->ptregs.sr & PS_S) { - printk("Instruction fault at %#010lx\n", - fp->ptregs.pc); + pr_err("Instruction fault at %#010lx\n", fp->ptregs.pc); buserr: - printk ("BAD KERNEL BUSERR\n"); + pr_err("BAD KERNEL BUSERR\n"); die_if_kernel("Oops",&fp->ptregs,0); force_sig(SIGKILL, current); return; @@ -668,28 +642,22 @@ static inline void bus_error030 (struct frame *fp) "pmove %%psr,%1" : "=a&" (desc), "=m" (temp) : "a" (addr)); + pr_debug("mmusr is %#x for addr %#lx in task %p\n", + temp, addr, current); + pr_debug("descriptor address is 0x%p, contents %#lx\n", + __va(desc), *(unsigned long *)__va(desc)); #else asm volatile ("ptestr #1,%1@,#7\n\t" "pmove %%psr,%0" : "=m" (temp) : "a" (addr)); #endif mmusr = temp; - -#ifdef DEBUG - printk ("mmusr is %#x for addr %#lx in task %p\n", - mmusr, addr, current); - printk ("descriptor address is %#lx, contents %#lx\n", - __va(desc), *(unsigned long *)__va(desc)); -#endif - if (mmusr & MMU_I) do_page_fault (&fp->ptregs, addr, 0); else if (mmusr & 
(MMU_B|MMU_L|MMU_S)) { - printk ("invalid insn access at %#lx from pc %#lx\n", + pr_err("invalid insn access at %#lx from pc %#lx\n", addr, fp->ptregs.pc); -#ifdef DEBUG - printk("Unknown SIGSEGV - 2\n"); -#endif + pr_debug("Unknown SIGSEGV - 2\n"); die_if_kernel("Oops",&fp->ptregs,mmusr); force_sig(SIGSEGV, current); return; @@ -791,9 +759,7 @@ asmlinkage void buserr_c(struct frame *fp) if (user_mode(&fp->ptregs)) current->thread.esp0 = (unsigned long) fp; -#ifdef DEBUG - printk ("*** Bus Error *** Format is %x\n", fp->ptregs.format); -#endif + pr_debug("*** Bus Error *** Format is %x\n", fp->ptregs.format); #if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU) if (CPU_IS_COLDFIRE) { @@ -836,9 +802,7 @@ asmlinkage void buserr_c(struct frame *fp) #endif default: die_if_kernel("bad frame format",&fp->ptregs,0); -#ifdef DEBUG - printk("Unknown SIGSEGV - 4\n"); -#endif + pr_debug("Unknown SIGSEGV - 4\n"); force_sig(SIGSEGV, current); } } @@ -852,7 +816,7 @@ void show_trace(unsigned long *stack) unsigned long addr; int i; - printk("Call Trace:"); + pr_info("Call Trace:"); addr = (unsigned long)stack + THREAD_SIZE - 1; endstack = (unsigned long *)(addr & -THREAD_SIZE); i = 0; @@ -869,13 +833,13 @@ void show_trace(unsigned long *stack) if (__kernel_text_address(addr)) { #ifndef CONFIG_KALLSYMS if (i % 5 == 0) - printk("\n "); + pr_cont("\n "); #endif - printk(" [<%08lx>] %pS\n", addr, (void *)addr); + pr_cont(" [<%08lx>] %pS\n", addr, (void *)addr); i++; } } - printk("\n"); + pr_cont("\n"); } void show_registers(struct pt_regs *regs) @@ -887,81 +851,87 @@ void show_registers(struct pt_regs *regs) int i; print_modules(); - printk("PC: [<%08lx>] %pS\n", regs->pc, (void *)regs->pc); - printk("SR: %04x SP: %p a2: %08lx\n", regs->sr, regs, regs->a2); - printk("d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n", + pr_info("PC: [<%08lx>] %pS\n", regs->pc, (void *)regs->pc); + pr_info("SR: %04x SP: %p a2: %08lx\n", regs->sr, regs, regs->a2); + pr_info("d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n", regs->d0, regs->d1, regs->d2, regs->d3); - printk("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n", + pr_info("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n", regs->d4, regs->d5, regs->a0, regs->a1); - printk("Process %s (pid: %d, task=%p)\n", + pr_info("Process %s (pid: %d, task=%p)\n", current->comm, task_pid_nr(current), current); addr = (unsigned long)&fp->un; - printk("Frame format=%X ", regs->format); + pr_info("Frame format=%X ", regs->format); switch (regs->format) { case 0x2: - printk("instr addr=%08lx\n", fp->un.fmt2.iaddr); + pr_cont("instr addr=%08lx\n", fp->un.fmt2.iaddr); addr += sizeof(fp->un.fmt2); break; case 0x3: - printk("eff addr=%08lx\n", fp->un.fmt3.effaddr); + pr_cont("eff addr=%08lx\n", fp->un.fmt3.effaddr); addr += sizeof(fp->un.fmt3); break; case 0x4: - printk((CPU_IS_060 ? 
"fault addr=%08lx fslw=%08lx\n" - : "eff addr=%08lx pc=%08lx\n"), - fp->un.fmt4.effaddr, fp->un.fmt4.pc); + if (CPU_IS_060) + pr_cont("fault addr=%08lx fslw=%08lx\n", + fp->un.fmt4.effaddr, fp->un.fmt4.pc); + else + pr_cont("eff addr=%08lx pc=%08lx\n", + fp->un.fmt4.effaddr, fp->un.fmt4.pc); addr += sizeof(fp->un.fmt4); break; case 0x7: - printk("eff addr=%08lx ssw=%04x faddr=%08lx\n", + pr_cont("eff addr=%08lx ssw=%04x faddr=%08lx\n", fp->un.fmt7.effaddr, fp->un.fmt7.ssw, fp->un.fmt7.faddr); - printk("wb 1 stat/addr/data: %04x %08lx %08lx\n", + pr_info("wb 1 stat/addr/data: %04x %08lx %08lx\n", fp->un.fmt7.wb1s, fp->un.fmt7.wb1a, fp->un.fmt7.wb1dpd0); - printk("wb 2 stat/addr/data: %04x %08lx %08lx\n", + pr_info("wb 2 stat/addr/data: %04x %08lx %08lx\n", fp->un.fmt7.wb2s, fp->un.fmt7.wb2a, fp->un.fmt7.wb2d); - printk("wb 3 stat/addr/data: %04x %08lx %08lx\n", + pr_info("wb 3 stat/addr/data: %04x %08lx %08lx\n", fp->un.fmt7.wb3s, fp->un.fmt7.wb3a, fp->un.fmt7.wb3d); - printk("push data: %08lx %08lx %08lx %08lx\n", + pr_info("push data: %08lx %08lx %08lx %08lx\n", fp->un.fmt7.wb1dpd0, fp->un.fmt7.pd1, fp->un.fmt7.pd2, fp->un.fmt7.pd3); addr += sizeof(fp->un.fmt7); break; case 0x9: - printk("instr addr=%08lx\n", fp->un.fmt9.iaddr); + pr_cont("instr addr=%08lx\n", fp->un.fmt9.iaddr); addr += sizeof(fp->un.fmt9); break; case 0xa: - printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n", + pr_cont("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n", fp->un.fmta.ssw, fp->un.fmta.isc, fp->un.fmta.isb, fp->un.fmta.daddr, fp->un.fmta.dobuf); addr += sizeof(fp->un.fmta); break; case 0xb: - printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n", + pr_cont("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n", fp->un.fmtb.ssw, fp->un.fmtb.isc, fp->un.fmtb.isb, fp->un.fmtb.daddr, fp->un.fmtb.dobuf); - printk("baddr=%08lx dibuf=%08lx ver=%x\n", + pr_info("baddr=%08lx dibuf=%08lx ver=%x\n", fp->un.fmtb.baddr, fp->un.fmtb.dibuf, fp->un.fmtb.ver); addr += sizeof(fp->un.fmtb); break; default: - printk("\n"); + pr_cont("\n"); } show_stack(NULL, (unsigned long *)addr); - printk("Code:"); + pr_info("Code:"); set_fs(KERNEL_DS); cp = (u16 *)regs->pc; for (i = -8; i < 16; i++) { if (get_user(c, cp + i) && i >= 0) { - printk(" Bad PC value."); + pr_cont(" Bad PC value."); break; } - printk(i ? 
" %04x" : " <%04x>", c); + if (i) + pr_cont(" %04x", c); + else + pr_cont(" <%04x>", c); } set_fs(old_fs); - printk ("\n"); + pr_cont("\n"); } void show_stack(struct task_struct *task, unsigned long *stack) @@ -978,16 +948,16 @@ void show_stack(struct task_struct *task, unsigned long *stack) } endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE); - printk("Stack from %08lx:", (unsigned long)stack); + pr_info("Stack from %08lx:", (unsigned long)stack); p = stack; for (i = 0; i < kstack_depth_to_print; i++) { if (p + 1 > endstack) break; if (i % 8 == 0) - printk("\n "); - printk(" %08lx", *p++); + pr_cont("\n "); + pr_cont(" %08lx", *p++); } - printk("\n"); + pr_cont("\n"); show_trace(stack); } @@ -1005,32 +975,32 @@ void bad_super_trap (struct frame *fp) console_verbose(); if (vector < ARRAY_SIZE(vec_names)) - printk ("*** %s *** FORMAT=%X\n", + pr_err("*** %s *** FORMAT=%X\n", vec_names[vector], fp->ptregs.format); else - printk ("*** Exception %d *** FORMAT=%X\n", + pr_err("*** Exception %d *** FORMAT=%X\n", vector, fp->ptregs.format); if (vector == VEC_ADDRERR && CPU_IS_020_OR_030) { unsigned short ssw = fp->un.fmtb.ssw; - printk ("SSW=%#06x ", ssw); + pr_err("SSW=%#06x ", ssw); if (ssw & RC) - printk ("Pipe stage C instruction fault at %#010lx\n", + pr_err("Pipe stage C instruction fault at %#010lx\n", (fp->ptregs.format) == 0xA ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2); if (ssw & RB) - printk ("Pipe stage B instruction fault at %#010lx\n", + pr_err("Pipe stage B instruction fault at %#010lx\n", (fp->ptregs.format) == 0xA ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr); if (ssw & DF) - printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n", + pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n", ssw & RW ? "read" : "write", fp->un.fmtb.daddr, space_names[ssw & DFC], fp->ptregs.pc); } - printk ("Current process id is %d\n", task_pid_nr(current)); + pr_err("Current process id is %d\n", task_pid_nr(current)); die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0); } @@ -1162,7 +1132,7 @@ void die_if_kernel (char *str, struct pt_regs *fp, int nr) return; console_verbose(); - printk("%s: %08x\n",str,nr); + pr_crit("%s: %08x\n", str, nr); show_registers(fp); add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); do_exit(SIGSEGV); diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c index afb95d5..982c3fe 100644 --- a/arch/m68k/mac/config.c +++ b/arch/m68k/mac/config.c @@ -26,9 +26,10 @@ #include <linux/adb.h> #include <linux/cuda.h> -#define BOOTINFO_COMPAT_1_0 #include <asm/setup.h> #include <asm/bootinfo.h> +#include <asm/bootinfo-mac.h> +#include <asm/byteorder.h> #include <asm/io.h> #include <asm/irq.h> @@ -107,45 +108,46 @@ static void __init mac_sched_init(irq_handler_t vector) int __init mac_parse_bootinfo(const struct bi_record *record) { int unknown = 0; - const u_long *data = record->data; + const void *data = record->data; - switch (record->tag) { + switch (be16_to_cpu(record->tag)) { case BI_MAC_MODEL: - mac_bi_data.id = *data; + mac_bi_data.id = be32_to_cpup(data); break; case BI_MAC_VADDR: - mac_bi_data.videoaddr = *data; + mac_bi_data.videoaddr = be32_to_cpup(data); break; case BI_MAC_VDEPTH: - mac_bi_data.videodepth = *data; + mac_bi_data.videodepth = be32_to_cpup(data); break; case BI_MAC_VROW: - mac_bi_data.videorow = *data; + mac_bi_data.videorow = be32_to_cpup(data); break; case BI_MAC_VDIM: - mac_bi_data.dimensions = *data; + mac_bi_data.dimensions = be32_to_cpup(data); break; case BI_MAC_VLOGICAL: - mac_bi_data.videological = VIDEOMEMBASE + (*data & 
~VIDEOMEMMASK); - mac_orig_videoaddr = *data; + mac_orig_videoaddr = be32_to_cpup(data); + mac_bi_data.videological = + VIDEOMEMBASE + (mac_orig_videoaddr & ~VIDEOMEMMASK); break; case BI_MAC_SCCBASE: - mac_bi_data.sccbase = *data; + mac_bi_data.sccbase = be32_to_cpup(data); break; case BI_MAC_BTIME: - mac_bi_data.boottime = *data; + mac_bi_data.boottime = be32_to_cpup(data); break; case BI_MAC_GMTBIAS: - mac_bi_data.gmtbias = *data; + mac_bi_data.gmtbias = be32_to_cpup(data); break; case BI_MAC_MEMSIZE: - mac_bi_data.memsize = *data; + mac_bi_data.memsize = be32_to_cpup(data); break; case BI_MAC_CPUID: - mac_bi_data.cpuid = *data; + mac_bi_data.cpuid = be32_to_cpup(data); break; case BI_MAC_ROMBASE: - mac_bi_data.rombase = *data; + mac_bi_data.rombase = be32_to_cpup(data); break; default: unknown = 1; diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c index 7d8d461..4d2adfb 100644 --- a/arch/m68k/mac/iop.c +++ b/arch/m68k/mac/iop.c @@ -111,16 +111,15 @@ #include <linux/init.h> #include <linux/interrupt.h> -#include <asm/bootinfo.h> #include <asm/macintosh.h> #include <asm/macints.h> #include <asm/mac_iop.h> /*#define DEBUG_IOP*/ -/* Set to non-zero if the IOPs are present. Set by iop_init() */ +/* Non-zero if the IOPs are present */ -int iop_scc_present,iop_ism_present; +int iop_scc_present, iop_ism_present; /* structure for tracking channel listeners */ diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c index 5e08555..707b61a 100644 --- a/arch/m68k/mac/misc.c +++ b/arch/m68k/mac/misc.c @@ -25,8 +25,6 @@ #include <asm/mac_via.h> #include <asm/mac_oss.h> -#define BOOTINFO_COMPAT_1_0 -#include <asm/bootinfo.h> #include <asm/machdep.h> /* Offset between Unix time (1970-based) and Mac time (1904-based) */ diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c index 6c4c882..5403712 100644 --- a/arch/m68k/mac/oss.c +++ b/arch/m68k/mac/oss.c @@ -21,7 +21,6 @@ #include <linux/init.h> #include <linux/irq.h> -#include <asm/bootinfo.h> #include <asm/macintosh.h> #include <asm/macints.h> #include <asm/mac_via.h> diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c index 6f026fc..835fa04 100644 --- a/arch/m68k/mac/psc.c +++ b/arch/m68k/mac/psc.c @@ -21,7 +21,6 @@ #include <linux/irq.h> #include <asm/traps.h> -#include <asm/bootinfo.h> #include <asm/macintosh.h> #include <asm/macints.h> #include <asm/mac_psc.h> @@ -54,7 +53,7 @@ static void psc_debug_dump(void) * expanded to cover what I think are the other 7 channels. 
*/ -static void psc_dma_die_die_die(void) +static __init void psc_dma_die_die_die(void) { int i; diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c index 5d1458b..e198dec 100644 --- a/arch/m68k/mac/via.c +++ b/arch/m68k/mac/via.c @@ -30,7 +30,6 @@ #include <linux/module.h> #include <linux/irq.h> -#include <asm/bootinfo.h> #include <asm/macintosh.h> #include <asm/macints.h> #include <asm/mac_via.h> diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index eb1d61f..2bd7487 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -25,9 +25,8 @@ int send_fault_sig(struct pt_regs *regs) siginfo.si_signo = current->thread.signo; siginfo.si_code = current->thread.code; siginfo.si_addr = (void *)current->thread.faddr; -#ifdef DEBUG - printk("send_fault_sig: %p,%d,%d\n", siginfo.si_addr, siginfo.si_signo, siginfo.si_code); -#endif + pr_debug("send_fault_sig: %p,%d,%d\n", siginfo.si_addr, + siginfo.si_signo, siginfo.si_code); if (user_mode(regs)) { force_sig_info(siginfo.si_signo, @@ -45,10 +44,10 @@ int send_fault_sig(struct pt_regs *regs) * terminate things with extreme prejudice. */ if ((unsigned long)siginfo.si_addr < PAGE_SIZE) - printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference"); + pr_alert("Unable to handle kernel NULL pointer dereference"); else - printk(KERN_ALERT "Unable to handle kernel access"); - printk(" at virtual address %p\n", siginfo.si_addr); + pr_alert("Unable to handle kernel access"); + pr_cont(" at virtual address %p\n", siginfo.si_addr); die_if_kernel("Oops", regs, 0 /*error_code*/); do_exit(SIGKILL); } @@ -75,11 +74,8 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, int fault; unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; -#ifdef DEBUG - printk ("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n", - regs->sr, regs->pc, address, error_code, - current->mm->pgd); -#endif + pr_debug("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n", + regs->sr, regs->pc, address, error_code, mm ? mm->pgd : NULL); /* * If we're in an interrupt or have no user @@ -118,9 +114,7 @@ retry: * we can handle it.. 
*/ good_area: -#ifdef DEBUG - printk("do_page_fault: good_area\n"); -#endif + pr_debug("do_page_fault: good_area\n"); switch (error_code & 3) { default: /* 3: write, present */ /* fall through */ @@ -143,9 +137,7 @@ good_area: */ fault = handle_mm_fault(mm, vma, address, flags); -#ifdef DEBUG - printk("handle_mm_fault returns %d\n",fault); -#endif + pr_debug("handle_mm_fault returns %d\n", fault); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) return 0; diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index 6b4baa6..acaff6a 100644 --- a/arch/m68k/mm/init.c +++ b/arch/m68k/mm/init.c @@ -59,7 +59,7 @@ EXPORT_SYMBOL(pg_data_table); void __init m68k_setup_node(int node) { #ifndef CONFIG_SINGLE_MEMORY_CHUNK - struct mem_info *info = m68k_memory + node; + struct m68k_mem_info *info = m68k_memory + node; int i, end; i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift(); diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c index 568cfad..6e4955b 100644 --- a/arch/m68k/mm/kmap.c +++ b/arch/m68k/mm/kmap.c @@ -27,9 +27,9 @@ /* * For 040/060 we can use the virtual memory area like other architectures, - * but for 020/030 we want to use early termination page descriptor and we + * but for 020/030 we want to use early termination page descriptors and we * can't mix this with normal page descriptors, so we have to copy that code - * (mm/vmalloc.c) and return appriorate aligned addresses. + * (mm/vmalloc.c) and return appropriately aligned addresses. */ #ifdef CPU_M68040_OR_M68060_ONLY @@ -224,7 +224,7 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla EXPORT_SYMBOL(__ioremap); /* - * Unmap a ioremap()ed region again + * Unmap an ioremap()ed region again */ void iounmap(void __iomem *addr) { @@ -241,8 +241,8 @@ EXPORT_SYMBOL(iounmap); /* * __iounmap unmaps nearly everything, so be careful - * it doesn't free currently pointer/page tables anymore but it - * wans't used anyway and might be added later. + * Currently it doesn't free pointer/page tables anymore but this + * wasn't used anyway and might be added later. 
*/ void __iounmap(void *addr, unsigned long size) { diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index 251c543..7d40244 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c @@ -233,7 +233,7 @@ void __init paging_init(void) printk("Fix your bootloader or use a memfile to make use of this area!\n"); m68k_num_memory--; memmove(m68k_memory + i, m68k_memory + i + 1, - (m68k_num_memory - i) * sizeof(struct mem_info)); + (m68k_num_memory - i) * sizeof(struct m68k_mem_info)); continue; } addr = m68k_memory[i].addr + m68k_memory[i].size; diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c index 1c62628..1bb3ce6 100644 --- a/arch/m68k/mvme147/config.c +++ b/arch/m68k/mvme147/config.c @@ -26,6 +26,8 @@ #include <linux/interrupt.h> #include <asm/bootinfo.h> +#include <asm/bootinfo-vme.h> +#include <asm/byteorder.h> #include <asm/pgtable.h> #include <asm/setup.h> #include <asm/irq.h> @@ -51,9 +53,10 @@ static int bcd2int (unsigned char b); irq_handler_t tick_handler; -int mvme147_parse_bootinfo(const struct bi_record *bi) +int __init mvme147_parse_bootinfo(const struct bi_record *bi) { - if (bi->tag == BI_VME_TYPE || bi->tag == BI_VME_BRDINFO) + uint16_t tag = be16_to_cpu(bi->tag); + if (tag == BI_VME_TYPE || tag == BI_VME_BRDINFO) return 0; else return 1; diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c index 080a342..eab7d34 100644 --- a/arch/m68k/mvme16x/config.c +++ b/arch/m68k/mvme16x/config.c @@ -29,6 +29,8 @@ #include <linux/module.h> #include <asm/bootinfo.h> +#include <asm/bootinfo-vme.h> +#include <asm/byteorder.h> #include <asm/pgtable.h> #include <asm/setup.h> #include <asm/irq.h> @@ -60,9 +62,10 @@ unsigned short mvme16x_config; EXPORT_SYMBOL(mvme16x_config); -int mvme16x_parse_bootinfo(const struct bi_record *bi) +int __init mvme16x_parse_bootinfo(const struct bi_record *bi) { - if (bi->tag == BI_VME_TYPE || bi->tag == BI_VME_BRDINFO) + uint16_t tag = be16_to_cpu(bi->tag); + if (tag == BI_VME_TYPE || tag == BI_VME_BRDINFO) return 0; else return 1; @@ -87,15 +90,15 @@ static void mvme16x_get_model(char *model) suf[3] = '\0'; suf[0] = suf[1] ? '-' : '\0'; - sprintf(model, "Motorola MVME%x%s", p->brdno, suf); + sprintf(model, "Motorola MVME%x%s", be16_to_cpu(p->brdno), suf); } static void mvme16x_get_hardware_list(struct seq_file *m) { - p_bdid p = &mvme_bdid; + uint16_t brdno = be16_to_cpu(mvme_bdid.brdno); - if (p->brdno == 0x0162 || p->brdno == 0x0172) + if (brdno == 0x0162 || brdno == 0x0172) { unsigned char rev = *(unsigned char *)MVME162_VERSION_REG; @@ -285,6 +288,7 @@ void __init config_mvme16x(void) { p_bdid p = &mvme_bdid; char id[40]; + uint16_t brdno = be16_to_cpu(p->brdno); mach_max_dma_address = 0xffffffff; mach_sched_init = mvme16x_sched_init; @@ -306,18 +310,18 @@ void __init config_mvme16x(void) } /* Board type is only set by newer versions of vmelilo/tftplilo */ if (vme_brdtype == 0) - vme_brdtype = p->brdno; + vme_brdtype = brdno; mvme16x_get_model(id); printk ("\nBRD_ID: %s BUG %x.%x %02x/%02x/%02x\n", id, p->rev>>4, p->rev&0xf, p->yr, p->mth, p->day); - if (p->brdno == 0x0162 || p->brdno == 0x172) + if (brdno == 0x0162 || brdno == 0x172) { unsigned char rev = *(unsigned char *)MVME162_VERSION_REG; mvme16x_config = rev | MVME16x_CONFIG_GOT_SCCA; - printk ("MVME%x Hardware status:\n", p->brdno); + printk ("MVME%x Hardware status:\n", brdno); printk (" CPU Type 68%s040\n", rev & MVME16x_CONFIG_GOT_FPU ? 
"" : "LC"); printk (" CPU clock %dMHz\n", @@ -347,12 +351,12 @@ void __init config_mvme16x(void) static irqreturn_t mvme16x_abort_int (int irq, void *dev_id) { - p_bdid p = &mvme_bdid; unsigned long *new = (unsigned long *)vectors; unsigned long *old = (unsigned long *)0xffe00000; volatile unsigned char uc, *ucp; + uint16_t brdno = be16_to_cpu(mvme_bdid.brdno); - if (p->brdno == 0x0162 || p->brdno == 0x172) + if (brdno == 0x0162 || brdno == 0x172) { ucp = (volatile unsigned char *)0xfff42043; uc = *ucp | 8; @@ -366,7 +370,7 @@ static irqreturn_t mvme16x_abort_int (int irq, void *dev_id) *(new+9) = *(old+9); /* Trace */ *(new+47) = *(old+47); /* Trap #15 */ - if (p->brdno == 0x0162 || p->brdno == 0x172) + if (brdno == 0x0162 || brdno == 0x172) *(new+0x5e) = *(old+0x5e); /* ABORT switch */ else *(new+0x6e) = *(old+0x6e); /* ABORT switch */ @@ -381,7 +385,7 @@ static irqreturn_t mvme16x_timer_int (int irq, void *dev_id) void mvme16x_sched_init (irq_handler_t timer_routine) { - p_bdid p = &mvme_bdid; + uint16_t brdno = be16_to_cpu(mvme_bdid.brdno); int irq; tick_handler = timer_routine; @@ -394,7 +398,7 @@ void mvme16x_sched_init (irq_handler_t timer_routine) "timer", mvme16x_timer_int)) panic ("Couldn't register timer int"); - if (p->brdno == 0x0162 || p->brdno == 0x172) + if (brdno == 0x0162 || brdno == 0x172) irq = MVME162_IRQ_ABORT; else irq = MVME167_IRQ_ABORT; diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c index 078bb74..e90fe90 100644 --- a/arch/m68k/q40/config.c +++ b/arch/m68k/q40/config.c @@ -154,7 +154,7 @@ static unsigned int serports[] = 0x3f8,0x2f8,0x3e8,0x2e8,0 }; -static void q40_disable_irqs(void) +static void __init q40_disable_irqs(void) { unsigned i, j; @@ -198,7 +198,7 @@ void __init config_q40(void) } -int q40_parse_bootinfo(const struct bi_record *rec) +int __init q40_parse_bootinfo(const struct bi_record *rec) { return 1; } diff --git a/arch/m68k/sun3/dvma.c b/arch/m68k/sun3/dvma.c index d522eaa..d95506e 100644 --- a/arch/m68k/sun3/dvma.c +++ b/arch/m68k/sun3/dvma.c @@ -7,6 +7,7 @@ * */ +#include <linux/init.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/bootmem.h> @@ -62,10 +63,7 @@ int dvma_map_iommu(unsigned long kaddr, unsigned long baddr, } -void sun3_dvma_init(void) +void __init sun3_dvma_init(void) { - memset(ptelist, 0, sizeof(ptelist)); - - } diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c index 8edc510..3f258e2 100644 --- a/arch/m68k/sun3/mmu_emu.c +++ b/arch/m68k/sun3/mmu_emu.c @@ -6,6 +6,7 @@ ** Started 1/16/98 @ 2:22 am */ +#include <linux/init.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/kernel.h> @@ -122,7 +123,7 @@ void print_pte_vaddr (unsigned long vaddr) /* * Initialise the MMU emulator. */ -void mmu_emu_init(unsigned long bootmem_end) +void __init mmu_emu_init(unsigned long bootmem_end) { unsigned long seg, num; int i,j; diff --git a/arch/m68k/sun3/sun3dvma.c b/arch/m68k/sun3/sun3dvma.c index cab5448..b37521a 100644 --- a/arch/m68k/sun3/sun3dvma.c +++ b/arch/m68k/sun3/sun3dvma.c @@ -6,6 +6,8 @@ * Contains common routines for sun3/sun3x DVMA management. 
*/ +#include <linux/bootmem.h> +#include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/gfp.h> @@ -30,7 +32,7 @@ static inline void dvma_unmap_iommu(unsigned long a, int b) extern void sun3_dvma_init(void); #endif -static unsigned long iommu_use[IOMMU_TOTAL_ENTRIES]; +static unsigned long *iommu_use; #define dvma_index(baddr) ((baddr - DVMA_START) >> DVMA_PAGE_SHIFT) @@ -245,7 +247,7 @@ static inline int free_baddr(unsigned long baddr) } -void dvma_init(void) +void __init dvma_init(void) { struct hole *hole; @@ -265,7 +267,7 @@ void dvma_init(void) list_add(&(hole->list), &hole_list); - memset(iommu_use, 0, sizeof(iommu_use)); + iommu_use = alloc_bootmem(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long)); dvma_unmap_iommu(DVMA_START, DVMA_SIZE); diff --git a/arch/m68k/sun3x/prom.c b/arch/m68k/sun3x/prom.c index a7b7e81..0898c3f 100644 --- a/arch/m68k/sun3x/prom.c +++ b/arch/m68k/sun3x/prom.c @@ -10,7 +10,6 @@ #include <asm/page.h> #include <asm/pgtable.h> -#include <asm/bootinfo.h> #include <asm/setup.h> #include <asm/traps.h> #include <asm/sun3xprom.h> diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h index c90bfc6..5d6b4b4 100644 --- a/arch/metag/include/asm/barrier.h +++ b/arch/metag/include/asm/barrier.h @@ -82,4 +82,19 @@ static inline void fence(void) #define smp_read_barrier_depends() do { } while (0) #define set_mb(var, value) do { var = value; smp_mb(); } while (0) +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ___p1; \ +}) + #endif /* _ASM_METAG_BARRIER_H */ diff --git a/arch/metag/include/asm/smp.h b/arch/metag/include/asm/smp.h index e0373f8..1d7e770 100644 --- a/arch/metag/include/asm/smp.h +++ b/arch/metag/include/asm/smp.h @@ -7,13 +7,11 @@ enum ipi_msg_type { IPI_CALL_FUNC, - IPI_CALL_FUNC_SINGLE, IPI_RESCHEDULE, }; extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); -#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask asmlinkage void secondary_start_kernel(void); diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c index db589ad..c700d62 100644 --- a/arch/metag/kernel/dma.c +++ b/arch/metag/kernel/dma.c @@ -399,11 +399,6 @@ static int __init dma_alloc_init(void) pgd = pgd_offset(&init_mm, CONSISTENT_START); pud = pud_alloc(&init_mm, pgd, CONSISTENT_START); pmd = pmd_alloc(&init_mm, pud, CONSISTENT_START); - if (!pmd) { - pr_err("%s: no pmd tables\n", __func__); - ret = -ENOMEM; - break; - } WARN_ON(!pmd_none(*pmd)); pte = pte_alloc_kernel(pmd, CONSISTENT_START); diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c index 7c01131..f006d22 100644 --- a/arch/metag/kernel/smp.c +++ b/arch/metag/kernel/smp.c @@ -68,7 +68,7 @@ static DECLARE_COMPLETION(cpu_running); /* * "thread" is assumed to be a valid Meta hardware thread ID. 
*/ -int boot_secondary(unsigned int thread, struct task_struct *idle) +static int boot_secondary(unsigned int thread, struct task_struct *idle) { u32 val; @@ -491,7 +491,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask) void arch_send_call_function_single_ipi(int cpu) { - send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); + send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); } void show_ipi_list(struct seq_file *p) @@ -517,11 +517,10 @@ static DEFINE_SPINLOCK(stop_lock); * * Bit 0 - Inter-processor function call */ -static int do_IPI(struct pt_regs *regs) +static int do_IPI(void) { unsigned int cpu = smp_processor_id(); struct ipi_data *ipi = &per_cpu(ipi_data, cpu); - struct pt_regs *old_regs = set_irq_regs(regs); unsigned long msgs, nextmsg; int handled = 0; @@ -546,10 +545,6 @@ static int do_IPI(struct pt_regs *regs) generic_smp_call_function_interrupt(); break; - case IPI_CALL_FUNC_SINGLE: - generic_smp_call_function_single_interrupt(); - break; - default: pr_crit("CPU%u: Unknown IPI message 0x%lx\n", cpu, nextmsg); @@ -557,8 +552,6 @@ static int do_IPI(struct pt_regs *regs) } } - set_irq_regs(old_regs); - return handled; } @@ -624,7 +617,7 @@ static void kick_raise_softirq(cpumask_t callmap, unsigned int irq) static TBIRES ipi_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI, int *handled) { - *handled = do_IPI((struct pt_regs *)State.Sig.pCtx); + *handled = do_IPI(); return State; } diff --git a/arch/metag/kernel/topology.c b/arch/metag/kernel/topology.c index bec3dec..4ba59570 100644 --- a/arch/metag/kernel/topology.c +++ b/arch/metag/kernel/topology.c @@ -19,6 +19,7 @@ DEFINE_PER_CPU(struct cpuinfo_metag, cpu_data); cpumask_t cpu_core_map[NR_CPUS]; +EXPORT_SYMBOL(cpu_core_map); static cpumask_t cpu_coregroup_map(unsigned int cpu) { diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index ce0bbf8..a824265 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild @@ -1,4 +1,5 @@ +generic-y += barrier.h generic-y += clkdev.h generic-y += exec.h generic-y += trace_clock.h diff --git a/arch/microblaze/include/asm/barrier.h b/arch/microblaze/include/asm/barrier.h deleted file mode 100644 index df5be3e8..0000000 --- a/arch/microblaze/include/asm/barrier.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (C) 2006 Atmark Techno, Inc. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ - -#ifndef _ASM_MICROBLAZE_BARRIER_H -#define _ASM_MICROBLAZE_BARRIER_H - -#define nop() asm volatile ("nop") - -#define smp_read_barrier_depends() do {} while (0) -#define read_barrier_depends() do {} while (0) - -#define mb() barrier() -#define rmb() mb() -#define wmb() mb() -#define set_mb(var, value) do { var = value; mb(); } while (0) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) - -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() - -#endif /* _ASM_MICROBLAZE_BARRIER_H */ diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 650de39..c93d92b 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -47,6 +47,7 @@ config MIPS select MODULES_USE_ELF_RELA if MODULES && 64BIT select CLONE_BACKWARDS select HAVE_DEBUG_STACKOVERFLOW + select HAVE_CC_STACKPROTECTOR menu "Machine selection" @@ -2322,19 +2323,6 @@ config SECCOMP If unsure, say Y. Only embedded should say N here. 
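Editorial note: the MIPS-local CC_STACKPROTECTOR prompt removed just below (together with the -fstack-protector line dropped from arch/mips/Makefile) is superseded by the HAVE_CC_STACKPROTECTOR select added to arch/mips/Kconfig above, so the compiler flag is now applied by the common kbuild machinery instead of per-arch Makefile logic. As a rough illustration of what the canary guards against (not code from this patch; the function name and sizes are invented):

/* With -fstack-protector, GCC places a canary between "buf" and the
 * saved return address and verifies it before returning; overrunning
 * "buf" corrupts the canary first, so __stack_chk_fail() is called
 * instead of silently returning through a clobbered return address
 * (the kernel's __stack_chk_fail() panics).
 */
#include <string.h>

void copy_name(const char *src)
{
        char buf[16];           /* 8+ byte char array: instrumented at the default threshold */

        strcpy(buf, src);       /* unbounded copy: a long src overflows buf */
}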
-config CC_STACKPROTECTOR - bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" - help - This option turns on the -fstack-protector GCC feature. This - feature puts, at the beginning of functions, a canary value on - the stack just before the return address, and validates - the value just before actually returning. Stack based buffer - overflows (that need to overwrite this return address) now also - overwrite the canary, which gets detected and the attack is then - neutralized via a kernel panic. - - This feature requires gcc version 4.2 or above. - config USE_OF bool select OF diff --git a/arch/mips/Makefile b/arch/mips/Makefile index de300b9..efe50787 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -232,10 +232,6 @@ bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \ LDFLAGS += -m $(ld-emul) -ifdef CONFIG_CC_STACKPROTECTOR - KBUILD_CFLAGS += -fstack-protector -endif - ifdef CONFIG_MIPS CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \ egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \ diff --git a/arch/mips/ar7/setup.c b/arch/mips/ar7/setup.c index 9a357ff..820b7a3 100644 --- a/arch/mips/ar7/setup.c +++ b/arch/mips/ar7/setup.c @@ -92,7 +92,6 @@ void __init plat_mem_setup(void) _machine_restart = ar7_machine_restart; _machine_halt = ar7_machine_halt; pm_power_off = ar7_machine_power_off; - panic_timeout = 3; io_base = (unsigned long)ioremap(AR7_REGS_BASE, 0x10000); if (!io_base) diff --git a/arch/mips/emma/markeins/setup.c b/arch/mips/emma/markeins/setup.c index d710058..9100122 100644 --- a/arch/mips/emma/markeins/setup.c +++ b/arch/mips/emma/markeins/setup.c @@ -111,9 +111,6 @@ void __init plat_mem_setup(void) iomem_resource.start = EMMA2RH_IO_BASE; iomem_resource.end = EMMA2RH_ROM_BASE - 1; - /* Reboot on panic */ - panic_timeout = 180; - markeins_sio_setup(); } diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h index f26d8e1..e1aa4e4 100644 --- a/arch/mips/include/asm/barrier.h +++ b/arch/mips/include/asm/barrier.h @@ -180,4 +180,19 @@ #define nudge_writes() mb() #endif +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ___p1; \ +}) + #endif /* __ASM_BARRIER_H */ diff --git a/arch/mips/include/asm/cacheops.h b/arch/mips/include/asm/cacheops.h index c75025f..06b9bc7 100644 --- a/arch/mips/include/asm/cacheops.h +++ b/arch/mips/include/asm/cacheops.h @@ -83,6 +83,6 @@ /* * Loongson2-specific cacheops */ -#define Hit_Invalidate_I_Loongson23 0x00 +#define Hit_Invalidate_I_Loongson2 0x00 #endif /* __ASM_CACHEOPS_H */ diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index 34d1a19..c84cadd 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h @@ -165,7 +165,7 @@ static inline void flush_icache_line(unsigned long addr) __iflush_prologue switch (boot_cpu_type()) { case CPU_LOONGSON2: - cache_op(Hit_Invalidate_I_Loongson23, addr); + cache_op(Hit_Invalidate_I_Loongson2, addr); break; default: @@ -219,7 +219,7 @@ static inline void protected_flush_icache_line(unsigned long addr) { switch (boot_cpu_type()) { case CPU_LOONGSON2: - protected_cache_op(Hit_Invalidate_I_Loongson23, addr); + protected_cache_op(Hit_Invalidate_I_Loongson2, addr); break; default: @@ -357,8 +357,8 @@ static inline void invalidate_tcache_page(unsigned long addr) "i" (op)); /* 
build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */ -#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \ -static inline void blast_##pfx##cache##lsize(void) \ +#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \ +static inline void extra##blast_##pfx##cache##lsize(void) \ { \ unsigned long start = INDEX_BASE; \ unsigned long end = start + current_cpu_data.desc.waysize; \ @@ -376,7 +376,7 @@ static inline void blast_##pfx##cache##lsize(void) \ __##pfx##flush_epilogue \ } \ \ -static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \ +static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \ { \ unsigned long start = page; \ unsigned long end = page + PAGE_SIZE; \ @@ -391,7 +391,7 @@ static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \ __##pfx##flush_epilogue \ } \ \ -static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \ +static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \ { \ unsigned long indexmask = current_cpu_data.desc.waysize - 1; \ unsigned long start = INDEX_BASE + (page & indexmask); \ @@ -410,23 +410,24 @@ static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) __##pfx##flush_epilogue \ } -__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16) -__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16) -__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16) -__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32) -__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32) -__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32) -__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64) -__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64) -__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64) -__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128) - -__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16) -__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32) -__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16) -__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32) -__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64) -__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128) +__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, ) +__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, ) +__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, ) +__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, ) +__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, ) +__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_) +__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, ) +__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, ) +__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, ) +__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, ) +__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, ) + +__BUILD_BLAST_CACHE(inv_d, dcache, 
Index_Writeback_Inv_D, Hit_Invalidate_D, 16, ) +__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, ) +__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, ) +__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, ) +__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, ) +__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, ) /* build blast_xxx_range, protected_blast_xxx_range */ #define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \ @@ -452,8 +453,8 @@ static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, ) __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, ) __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, ) -__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson23, \ - protected_, loongson23_) +__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \ + protected_, loongson2_) __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , ) __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , ) /* blast_inv_dcache_range */ diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 62ffd20..49e572d 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -237,6 +237,8 @@ static void r4k_blast_icache_page_setup(void) r4k_blast_icache_page = (void *)cache_noop; else if (ic_lsize == 16) r4k_blast_icache_page = blast_icache16_page; + else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2) + r4k_blast_icache_page = loongson2_blast_icache32_page; else if (ic_lsize == 32) r4k_blast_icache_page = blast_icache32_page; else if (ic_lsize == 64) @@ -261,6 +263,9 @@ static void r4k_blast_icache_page_indexed_setup(void) else if (TX49XX_ICACHE_INDEX_INV_WAR) r4k_blast_icache_page_indexed = tx49_blast_icache32_page_indexed; + else if (current_cpu_type() == CPU_LOONGSON2) + r4k_blast_icache_page_indexed = + loongson2_blast_icache32_page_indexed; else r4k_blast_icache_page_indexed = blast_icache32_page_indexed; @@ -284,6 +289,8 @@ static void r4k_blast_icache_setup(void) r4k_blast_icache = blast_r4600_v1_icache32; else if (TX49XX_ICACHE_INDEX_INV_WAR) r4k_blast_icache = tx49_blast_icache32; + else if (current_cpu_type() == CPU_LOONGSON2) + r4k_blast_icache = loongson2_blast_icache32; else r4k_blast_icache = blast_icache32; } else if (ic_lsize == 64) @@ -580,11 +587,11 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo else { switch (boot_cpu_type()) { case CPU_LOONGSON2: - protected_blast_icache_range(start, end); + protected_loongson2_blast_icache_range(start, end); break; default: - protected_loongson23_blast_icache_range(start, end); + protected_blast_icache_range(start, end); break; } } diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c index 6d981bb..54e75c7 100644 --- a/arch/mips/netlogic/xlp/setup.c +++ b/arch/mips/netlogic/xlp/setup.c @@ -92,7 +92,6 @@ static void __init xlp_init_mem_from_bars(void) void __init plat_mem_setup(void) { - panic_timeout = 5; _machine_restart = (void (*)(char *))nlm_linux_exit; _machine_halt = nlm_linux_exit; pm_power_off = nlm_linux_exit; diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c index 214d123..921be5f 100644 --- a/arch/mips/netlogic/xlr/setup.c +++ b/arch/mips/netlogic/xlr/setup.c @@ -92,7 +92,6 @@ static void nlm_linux_exit(void) void __init 
plat_mem_setup(void) { - panic_timeout = 5; _machine_restart = (void (*)(char *))nlm_linux_exit; _machine_halt = nlm_linux_exit; pm_power_off = nlm_linux_exit; diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c index 41707a2..3462c83 100644 --- a/arch/mips/sibyte/swarm/setup.c +++ b/arch/mips/sibyte/swarm/setup.c @@ -134,8 +134,6 @@ void __init plat_mem_setup(void) #error invalid SiByte board configuration #endif - panic_timeout = 5; /* For debug. */ - board_be_handler = swarm_be_handler; if (xicor_probe()) diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild index 74742dc..032143e 100644 --- a/arch/mn10300/include/asm/Kbuild +++ b/arch/mn10300/include/asm/Kbuild @@ -1,4 +1,5 @@ +generic-y += barrier.h generic-y += clkdev.h generic-y += exec.h generic-y += trace_clock.h diff --git a/arch/mn10300/include/asm/barrier.h b/arch/mn10300/include/asm/barrier.h deleted file mode 100644 index 2bd97a5..0000000 --- a/arch/mn10300/include/asm/barrier.h +++ /dev/null @@ -1,37 +0,0 @@ -/* MN10300 memory barrier definitions - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - */ -#ifndef _ASM_BARRIER_H -#define _ASM_BARRIER_H - -#define nop() asm volatile ("nop") - -#define mb() asm volatile ("": : :"memory") -#define rmb() mb() -#define wmb() asm volatile ("": : :"memory") - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define set_mb(var, value) do { xchg(&var, value); } while (0) -#else /* CONFIG_SMP */ -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define set_mb(var, value) do { var = value; mb(); } while (0) -#endif /* CONFIG_SMP */ - -#define set_wmb(var, value) do { var = value; wmb(); } while (0) - -#define read_barrier_depends() do {} while (0) -#define smp_read_barrier_depends() do {} while (0) - -#endif /* _ASM_BARRIER_H */ diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig index ec1b014..acacd34 100644 --- a/arch/parisc/configs/c3000_defconfig +++ b/arch/parisc/configs/c3000_defconfig @@ -50,7 +50,7 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_IDE=y CONFIG_BLK_DEV_IDECD=y CONFIG_BLK_DEV_NS87415=y -CONFIG_BLK_DEV_SIIMAGE=m +CONFIG_PATA_SIL680=m CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig index e1c8d20..8249ac9 100644 --- a/arch/parisc/configs/c8000_defconfig +++ b/arch/parisc/configs/c8000_defconfig @@ -20,7 +20,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_BLK_DEV_INTEGRITY=y CONFIG_PA8X00=y -CONFIG_MLONGCALLS=y CONFIG_64BIT=y CONFIG_SMP=y CONFIG_PREEMPT=y @@ -81,8 +80,6 @@ CONFIG_IDE=y CONFIG_BLK_DEV_IDECD=y CONFIG_BLK_DEV_PLATFORM=y CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_SIIMAGE=y -CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=m @@ -94,6 +91,8 @@ CONFIG_SCSI_FC_ATTRS=y CONFIG_SCSI_SAS_LIBSAS=m CONFIG_ISCSI_TCP=m CONFIG_ISCSI_BOOT_SYSFS=m +CONFIG_ATA=y +CONFIG_PATA_SIL680=y CONFIG_FUSION=y CONFIG_FUSION_SPI=y CONFIG_FUSION_SAS=y @@ -114,9 +113,8 @@ CONFIG_INPUT_FF_MEMLESS=m # CONFIG_KEYBOARD_ATKBD is not set # CONFIG_KEYBOARD_HIL_OLD is not set # CONFIG_KEYBOARD_HIL is 
not set -CONFIG_MOUSE_PS2=m +# CONFIG_MOUSE_PS2 is not set CONFIG_INPUT_MISC=y -CONFIG_INPUT_CM109=m CONFIG_SERIO_SERPORT=m CONFIG_SERIO_PARKBD=m CONFIG_SERIO_GSCPS2=m @@ -167,34 +165,6 @@ CONFIG_SND_VERBOSE_PRINTK=y CONFIG_SND_AD1889=m # CONFIG_SND_USB is not set # CONFIG_SND_GSC is not set -CONFIG_HID_A4TECH=m -CONFIG_HID_APPLE=m -CONFIG_HID_BELKIN=m -CONFIG_HID_CHERRY=m -CONFIG_HID_CHICONY=m -CONFIG_HID_CYPRESS=m -CONFIG_HID_DRAGONRISE=m -CONFIG_HID_EZKEY=m -CONFIG_HID_KYE=m -CONFIG_HID_GYRATION=m -CONFIG_HID_TWINHAN=m -CONFIG_HID_KENSINGTON=m -CONFIG_HID_LOGITECH=m -CONFIG_HID_LOGITECH_DJ=m -CONFIG_HID_MICROSOFT=m -CONFIG_HID_MONTEREY=m -CONFIG_HID_NTRIG=m -CONFIG_HID_ORTEK=m -CONFIG_HID_PANTHERLORD=m -CONFIG_HID_PETALYNX=m -CONFIG_HID_SAMSUNG=m -CONFIG_HID_SUNPLUS=m -CONFIG_HID_GREENASIA=m -CONFIG_HID_SMARTJOYPLUS=m -CONFIG_HID_TOPSEED=m -CONFIG_HID_THRUSTMASTER=m -CONFIG_HID_ZEROPLUS=m -CONFIG_USB_HID=m CONFIG_USB=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_STORAGE=y diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig index 5874ceb..28c1b5d 100644 --- a/arch/parisc/configs/generic-64bit_defconfig +++ b/arch/parisc/configs/generic-64bit_defconfig @@ -24,7 +24,6 @@ CONFIG_MODVERSIONS=y CONFIG_BLK_DEV_INTEGRITY=y # CONFIG_IOSCHED_DEADLINE is not set CONFIG_PA8X00=y -CONFIG_MLONGCALLS=y CONFIG_64BIT=y CONFIG_SMP=y # CONFIG_COMPACTION is not set @@ -68,7 +67,6 @@ CONFIG_IDE_GD=m CONFIG_IDE_GD_ATAPI=y CONFIG_BLK_DEV_IDECD=m CONFIG_BLK_DEV_NS87415=y -CONFIG_BLK_DEV_SIIMAGE=y # CONFIG_SCSI_PROC_FS is not set CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=y @@ -82,6 +80,7 @@ CONFIG_SCSI_ZALON=y CONFIG_SCSI_QLA_ISCSI=m CONFIG_SCSI_DH=y CONFIG_ATA=y +CONFIG_PATA_SIL680=y CONFIG_ATA_GENERIC=y CONFIG_MD=y CONFIG_MD_LINEAR=m @@ -162,7 +161,7 @@ CONFIG_SLIP_MODE_SLIP6=y CONFIG_INPUT_EVDEV=y # CONFIG_KEYBOARD_HIL_OLD is not set # CONFIG_KEYBOARD_HIL is not set -# CONFIG_INPUT_MOUSE is not set +# CONFIG_MOUSE_PS2 is not set CONFIG_INPUT_MISC=y CONFIG_SERIO_SERPORT=m # CONFIG_HP_SDC is not set @@ -216,32 +215,7 @@ CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set -CONFIG_HID=m CONFIG_HIDRAW=y -CONFIG_HID_DRAGONRISE=m -CONFIG_DRAGONRISE_FF=y -CONFIG_HID_KYE=m -CONFIG_HID_GYRATION=m -CONFIG_HID_TWINHAN=m -CONFIG_LOGITECH_FF=y -CONFIG_LOGIRUMBLEPAD2_FF=y -CONFIG_HID_NTRIG=m -CONFIG_HID_PANTHERLORD=m -CONFIG_PANTHERLORD_FF=y -CONFIG_HID_PETALYNX=m -CONFIG_HID_SAMSUNG=m -CONFIG_HID_SONY=m -CONFIG_HID_SUNPLUS=m -CONFIG_HID_GREENASIA=m -CONFIG_GREENASIA_FF=y -CONFIG_HID_SMARTJOYPLUS=m -CONFIG_SMARTJOYPLUS_FF=y -CONFIG_HID_TOPSEED=m -CONFIG_HID_THRUSTMASTER=m -CONFIG_THRUSTMASTER_FF=y -CONFIG_HID_ZEROPLUS=m -CONFIG_ZEROPLUS_FF=y -CONFIG_USB_HID=m CONFIG_HID_PID=y CONFIG_USB_HIDDEV=y CONFIG_USB=y @@ -251,13 +225,8 @@ CONFIG_USB_DYNAMIC_MINORS=y CONFIG_USB_MON=m CONFIG_USB_WUSB_CBAF=m CONFIG_USB_XHCI_HCD=m -CONFIG_USB_EHCI_HCD=m -CONFIG_USB_OHCI_HCD=m -CONFIG_USB_R8A66597_HCD=m -CONFIG_USB_ACM=m -CONFIG_USB_PRINTER=m -CONFIG_USB_WDM=m -CONFIG_USB_TMC=m +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_OHCI_HCD=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_TRIGGERS=y diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild index a603b9e..34b0be4 100644 --- a/arch/parisc/include/asm/Kbuild +++ b/arch/parisc/include/asm/Kbuild @@ -1,4 +1,5 @@ +generic-y += barrier.h generic-y += word-at-a-time.h auxvec.h user.h cputime.h emergency-restart.h \ segment.h topology.h vga.h device.h percpu.h hw_irq.h mutex.h 
\ div64.h irq_regs.h kdebug.h kvm_para.h local64.h local.h param.h \ diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h deleted file mode 100644 index e77d834..0000000 --- a/arch/parisc/include/asm/barrier.h +++ /dev/null @@ -1,35 +0,0 @@ -#ifndef __PARISC_BARRIER_H -#define __PARISC_BARRIER_H - -/* -** This is simply the barrier() macro from linux/kernel.h but when serial.c -** uses tqueue.h uses smp_mb() defined using barrier(), linux/kernel.h -** hasn't yet been included yet so it fails, thus repeating the macro here. -** -** PA-RISC architecture allows for weakly ordered memory accesses although -** none of the processors use it. There is a strong ordered bit that is -** set in the O-bit of the page directory entry. Operating systems that -** can not tolerate out of order accesses should set this bit when mapping -** pages. The O-bit of the PSW should also be set to 1 (I don't believe any -** of the processor implemented the PSW O-bit). The PCX-W ERS states that -** the TLB O-bit is not implemented so the page directory does not need to -** have the O-bit set when mapping pages (section 3.1). This section also -** states that the PSW Y, Z, G, and O bits are not implemented. -** So it looks like nothing needs to be done for parisc-linux (yet). -** (thanks to chada for the above comment -ggg) -** -** The __asm__ op below simple prevents gcc/ld from reordering -** instructions across the mb() "call". -*/ -#define mb() __asm__ __volatile__("":::"memory") /* barrier() */ -#define rmb() mb() -#define wmb() mb() -#define smp_mb() mb() -#define smp_rmb() mb() -#define smp_wmb() mb() -#define smp_read_barrier_depends() do { } while(0) -#define read_barrier_depends() do { } while(0) - -#define set_mb(var, value) do { var = value; mb(); } while (0) - -#endif /* __PARISC_BARRIER_H */ diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index f0e2784..2f9b751 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h @@ -125,42 +125,38 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma void mark_rodata_ro(void); #endif -#ifdef CONFIG_PA8X00 -/* Only pa8800, pa8900 needs this */ - #include <asm/kmap_types.h> #define ARCH_HAS_KMAP -void kunmap_parisc(void *addr); - static inline void *kmap(struct page *page) { might_sleep(); + flush_dcache_page(page); return page_address(page); } static inline void kunmap(struct page *page) { - kunmap_parisc(page_address(page)); + flush_kernel_dcache_page_addr(page_address(page)); } static inline void *kmap_atomic(struct page *page) { pagefault_disable(); + flush_dcache_page(page); return page_address(page); } static inline void __kunmap_atomic(void *addr) { - kunmap_parisc(addr); + flush_kernel_dcache_page_addr(addr); pagefault_enable(); } #define kmap_atomic_prot(page, prot) kmap_atomic(page) #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) #define kmap_atomic_to_page(ptr) virt_to_page(ptr) -#endif #endif /* _PARISC_CACHEFLUSH_H */ diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h index b7adb2a..c53fc63 100644 --- a/arch/parisc/include/asm/page.h +++ b/arch/parisc/include/asm/page.h @@ -28,9 +28,8 @@ struct page; void clear_page_asm(void *page); void copy_page_asm(void *to, void *from); -void clear_user_page(void *vto, unsigned long vaddr, struct page *pg); -void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, - struct page *pg); +#define clear_user_page(vto, vaddr, page) 
clear_page_asm(vto) +#define copy_user_page(vto, vfrom, vaddr, page) copy_page_asm(vto, vfrom) /* #define CONFIG_PARISC_TMPALIAS */ diff --git a/arch/parisc/include/asm/serial.h b/arch/parisc/include/asm/serial.h index d7e3cc6..77e9b67 100644 --- a/arch/parisc/include/asm/serial.h +++ b/arch/parisc/include/asm/serial.h @@ -6,5 +6,3 @@ * This is used for 16550-compatible UARTs */ #define BASE_BAUD ( 1843200 / 16 ) - -#define SERIAL_PORT_DFNS diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index f33113a..70b3674 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -75,6 +75,6 @@ #define SO_BUSY_POLL 0x4027 -#define SO_MAX_PACING_RATE 0x4048 +#define SO_MAX_PACING_RATE 0x4028 #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index c035673..a725455 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -388,41 +388,6 @@ void flush_kernel_dcache_page_addr(void *addr) } EXPORT_SYMBOL(flush_kernel_dcache_page_addr); -void clear_user_page(void *vto, unsigned long vaddr, struct page *page) -{ - clear_page_asm(vto); - if (!parisc_requires_coherency()) - flush_kernel_dcache_page_asm(vto); -} -EXPORT_SYMBOL(clear_user_page); - -void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, - struct page *pg) -{ - /* Copy using kernel mapping. No coherency is needed - (all in kmap/kunmap) on machines that don't support - non-equivalent aliasing. However, the `from' page - needs to be flushed before it can be accessed through - the kernel mapping. */ - preempt_disable(); - flush_dcache_page_asm(__pa(vfrom), vaddr); - preempt_enable(); - copy_page_asm(vto, vfrom); - if (!parisc_requires_coherency()) - flush_kernel_dcache_page_asm(vto); -} -EXPORT_SYMBOL(copy_user_page); - -#ifdef CONFIG_PA8X00 - -void kunmap_parisc(void *addr) -{ - if (parisc_requires_coherency()) - flush_kernel_dcache_page_addr(addr); -} -EXPORT_SYMBOL(kunmap_parisc); -#endif - void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) { unsigned long flags; diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c index 06cb399..608716f 100644 --- a/arch/parisc/kernel/hardware.c +++ b/arch/parisc/kernel/hardware.c @@ -36,6 +36,9 @@ * HP PARISC Hardware Database * Access to this database is only possible during bootup * so don't reference this table after starting the init process + * + * NOTE: Product names which are listed here and ends with a '?' + * are guessed. If you know the correct name, please let us know. */ static struct hp_hardware hp_hardware_list[] = { @@ -222,7 +225,7 @@ static struct hp_hardware hp_hardware_list[] = { {HPHW_NPROC,0x5DD,0x4,0x81,"Duet W2"}, {HPHW_NPROC,0x5DE,0x4,0x81,"Piccolo W+"}, {HPHW_NPROC,0x5DF,0x4,0x81,"Cantata W2"}, - {HPHW_NPROC,0x5DF,0x0,0x00,"Marcato W+? (rp5470)"}, + {HPHW_NPROC,0x5DF,0x0,0x00,"Marcato W+ (rp5470)?"}, {HPHW_NPROC,0x5E0,0x4,0x91,"Cantata DC- W2"}, {HPHW_NPROC,0x5E1,0x4,0x91,"Crescendo DC- W2"}, {HPHW_NPROC,0x5E2,0x4,0x91,"Crescendo 650 W2"}, @@ -276,9 +279,11 @@ static struct hp_hardware hp_hardware_list[] = { {HPHW_NPROC,0x888,0x4,0x91,"Storm Peak Fast DC-"}, {HPHW_NPROC,0x889,0x4,0x91,"Storm Peak Fast"}, {HPHW_NPROC,0x88A,0x4,0x91,"Crestone Peak Slow"}, + {HPHW_NPROC,0x88B,0x4,0x91,"Crestone Peak Fast?"}, {HPHW_NPROC,0x88C,0x4,0x91,"Orca Mako+"}, {HPHW_NPROC,0x88D,0x4,0x91,"Rainier/Medel Mako+ Slow"}, {HPHW_NPROC,0x88E,0x4,0x91,"Rainier/Medel Mako+ Fast"}, + {HPHW_NPROC,0x892,0x4,0x91,"Mt. 
Hamilton Slow Mako+?"}, {HPHW_NPROC,0x894,0x4,0x91,"Mt. Hamilton Fast Mako+"}, {HPHW_NPROC,0x895,0x4,0x91,"Storm Peak Slow Mako+"}, {HPHW_NPROC,0x896,0x4,0x91,"Storm Peak Fast Mako+"}, diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S index d2d5825..d4dc588 100644 --- a/arch/parisc/kernel/head.S +++ b/arch/parisc/kernel/head.S @@ -41,9 +41,7 @@ END(boot_args) .import fault_vector_11,code /* IVA parisc 1.1 32 bit */ .import $global$ /* forward declaration */ #endif /*!CONFIG_64BIT*/ - .export _stext,data /* Kernel want it this way! */ -_stext: -ENTRY(stext) +ENTRY(parisc_kernel_start) .proc .callinfo @@ -347,7 +345,7 @@ smp_slave_stext: .procend #endif /* CONFIG_SMP */ -ENDPROC(stext) +ENDPROC(parisc_kernel_start) #ifndef CONFIG_64BIT .section .data..read_mostly diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index 5dfd248..0d3a9d4 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -61,8 +61,15 @@ static int get_offset(struct address_space *mapping) return (unsigned long) mapping >> 8; } -static unsigned long get_shared_area(struct address_space *mapping, - unsigned long addr, unsigned long len, unsigned long pgoff) +static unsigned long shared_align_offset(struct file *filp, unsigned long pgoff) +{ + struct address_space *mapping = filp ? filp->f_mapping : NULL; + + return (get_offset(mapping) + pgoff) << PAGE_SHIFT; +} + +static unsigned long get_shared_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff) { struct vm_unmapped_area_info info; @@ -71,7 +78,7 @@ static unsigned long get_shared_area(struct address_space *mapping, info.low_limit = PAGE_ALIGN(addr); info.high_limit = TASK_SIZE; info.align_mask = PAGE_MASK & (SHMLBA - 1); - info.align_offset = (get_offset(mapping) + pgoff) << PAGE_SHIFT; + info.align_offset = shared_align_offset(filp, pgoff); return vm_unmapped_area(&info); } @@ -82,20 +89,18 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, return -ENOMEM; if (flags & MAP_FIXED) { if ((flags & MAP_SHARED) && - (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) + (addr - shared_align_offset(filp, pgoff)) & (SHMLBA - 1)) return -EINVAL; return addr; } if (!addr) addr = TASK_UNMAPPED_BASE; - if (filp) { - addr = get_shared_area(filp->f_mapping, addr, len, pgoff); - } else if(flags & MAP_SHARED) { - addr = get_shared_area(NULL, addr, len, pgoff); - } else { + if (filp || (flags & MAP_SHARED)) + addr = get_shared_area(filp, addr, len, pgoff); + else addr = get_unshared_area(addr, len); - } + return addr; } diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c index 76ed62e..ddd988b 100644 --- a/arch/parisc/kernel/unwind.c +++ b/arch/parisc/kernel/unwind.c @@ -168,7 +168,7 @@ void unwind_table_remove(struct unwind_table *table) } /* Called from setup_arch to import the kernel unwind info */ -int unwind_init(void) +int __init unwind_init(void) { long start, stop; register unsigned long gp __asm__ ("r27"); @@ -233,7 +233,6 @@ static void unwind_frame_regs(struct unwind_frame_info *info) e = find_unwind_entry(info->ip); if (e == NULL) { unsigned long sp; - extern char _stext[], _etext[]; dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip); @@ -281,8 +280,7 @@ static void unwind_frame_regs(struct unwind_frame_info *info) break; info->prev_ip = tmp; sp = info->prev_sp; - } while (info->prev_ip < (unsigned long)_stext || - info->prev_ip > (unsigned long)_etext); + } while (!kernel_text_address(info->prev_ip)); 
info->rp = 0; @@ -435,9 +433,8 @@ unsigned long return_address(unsigned int level) do { if (unwind_once(&info) < 0 || info.ip == 0) return 0; - if (!__kernel_text_address(info.ip)) { + if (!kernel_text_address(info.ip)) return 0; - } } while (info.ip && level--); return info.ip; diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index 4bb095a..0dacc5c 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -6,24 +6,19 @@ * Copyright (C) 2000 Michael Ang <mang with subcarrier.org> * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org> * Copyright (C) 2003 James Bottomley <jejb with parisc-linux.org> - * Copyright (C) 2006 Helge Deller <deller@gmx.de> - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Copyright (C) 2006-2013 Helge Deller <deller@gmx.de> + */ + +/* + * Put page table entries (swapper_pg_dir) as the first thing in .bss. This + * will ensure that it has .bss alignment (PAGE_SIZE). */ +#define BSS_FIRST_SECTIONS *(.data..vm0.pmd) \ + *(.data..vm0.pgd) \ + *(.data..vm0.pte) + #include <asm-generic/vmlinux.lds.h> + /* needed for the processor specific cache alignment size */ #include <asm/cache.h> #include <asm/page.h> @@ -39,7 +34,7 @@ OUTPUT_FORMAT("elf64-hppa-linux") OUTPUT_ARCH(hppa:hppa2.0w) #endif -ENTRY(_stext) +ENTRY(parisc_kernel_start) #ifndef CONFIG_64BIT jiffies = jiffies_64 + 4; #else @@ -49,11 +44,29 @@ SECTIONS { . = KERNEL_BINARY_TEXT_START; + __init_begin = .; + HEAD_TEXT_SECTION + INIT_TEXT_SECTION(8) + + . = ALIGN(PAGE_SIZE); + INIT_DATA_SECTION(PAGE_SIZE) + /* we have to discard exit text and such at runtime, not link time */ + .exit.text : + { + EXIT_TEXT + } + .exit.data : + { + EXIT_DATA + } + PERCPU_SECTION(8) + . = ALIGN(PAGE_SIZE); + __init_end = .; + /* freed after init ends here */ + _text = .; /* Text and read-only data */ - .head ALIGN(16) : { - HEAD_TEXT - } = 0 - .text ALIGN(16) : { + _stext = .; + .text ALIGN(PAGE_SIZE) : { TEXT_TEXT SCHED_TEXT LOCK_TEXT @@ -68,21 +81,28 @@ SECTIONS *(.lock.text) /* out-of-line lock text */ *(.gnu.warning) } - /* End of text section */ + . = ALIGN(PAGE_SIZE); _etext = .; + /* End of text section */ /* Start of data section */ _sdata = .; - RODATA + RO_DATA_SECTION(8) - /* writeable */ - /* Make sure this is page aligned so - * that we can properly leave these - * as writable - */ - . = ALIGN(PAGE_SIZE); - data_start = .; +#ifdef CONFIG_64BIT + . = ALIGN(16); + /* Linkage tables */ + .opd : { + *(.opd) + } PROVIDE (__gp = .); + .plt : { + *(.plt) + } + .dlt : { + *(.dlt) + } +#endif /* unwind info */ .PARISC.unwind : { @@ -91,7 +111,15 @@ SECTIONS __stop___unwind = .; } - EXCEPTION_TABLE(16) + /* writeable */ + /* Make sure this is page aligned so + * that we can properly leave these + * as writable + */ + . 
= ALIGN(PAGE_SIZE); + data_start = .; + + EXCEPTION_TABLE(8) NOTES /* Data */ @@ -107,54 +135,8 @@ SECTIONS _edata = .; /* BSS */ - __bss_start = .; - /* page table entries need to be PAGE_SIZE aligned */ - . = ALIGN(PAGE_SIZE); - .data..vmpages : { - *(.data..vm0.pmd) - *(.data..vm0.pgd) - *(.data..vm0.pte) - } - .bss : { - *(.bss) - *(COMMON) - } - __bss_stop = .; - -#ifdef CONFIG_64BIT - . = ALIGN(16); - /* Linkage tables */ - .opd : { - *(.opd) - } PROVIDE (__gp = .); - .plt : { - *(.plt) - } - .dlt : { - *(.dlt) - } -#endif + BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 8) - /* reserve space for interrupt stack by aligning __init* to 16k */ - . = ALIGN(16384); - __init_begin = .; - INIT_TEXT_SECTION(16384) - . = ALIGN(PAGE_SIZE); - INIT_DATA_SECTION(16) - /* we have to discard exit text and such at runtime, not link time */ - .exit.text : - { - EXIT_TEXT - } - .exit.data : - { - EXIT_DATA - } - - PERCPU_SECTION(L1_CACHE_BYTES) - . = ALIGN(PAGE_SIZE); - __init_end = .; - /* freed after init ends here */ _end = . ; STABS_DEBUG diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index b0f96c0..96f8168 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -32,6 +32,7 @@ #include <asm/sections.h> extern int data_start; +extern void parisc_kernel_start(void); /* Kernel entry point in head.S */ #if PT_NLEVELS == 3 /* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout @@ -324,8 +325,9 @@ static void __init setup_bootmem(void) reserve_bootmem_node(NODE_DATA(0), 0UL, (unsigned long)(PAGE0->mem_free + PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT); - reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)_text), - (unsigned long)(_end - _text), BOOTMEM_DEFAULT); + reserve_bootmem_node(NODE_DATA(0), __pa(KERNEL_BINARY_TEXT_START), + (unsigned long)(_end - KERNEL_BINARY_TEXT_START), + BOOTMEM_DEFAULT); reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT), ((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT), BOOTMEM_DEFAULT); @@ -378,6 +380,17 @@ static void __init setup_bootmem(void) request_resource(&sysram_resources[0], &pdcdata_resource); } +static int __init parisc_text_address(unsigned long vaddr) +{ + static unsigned long head_ptr __initdata; + + if (!head_ptr) + head_ptr = PAGE_MASK & (unsigned long) + dereference_function_descriptor(&parisc_kernel_start); + + return core_kernel_text(vaddr) || vaddr == head_ptr; +} + static void __init map_pages(unsigned long start_vaddr, unsigned long start_paddr, unsigned long size, pgprot_t pgprot, int force) @@ -466,7 +479,7 @@ static void __init map_pages(unsigned long start_vaddr, */ if (force) pte = __mk_pte(address, pgprot); - else if (core_kernel_text(vaddr) && + else if (parisc_text_address(vaddr) && address != fv_addr) pte = __mk_pte(address, PAGE_KERNEL_EXEC); else diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index b44b52c..b2be8e8 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -147,6 +147,10 @@ config EARLY_PRINTK bool default y +config PANIC_TIMEOUT + int + default 180 + config COMPAT bool default y if PPC64 diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 8a24636..0f4344e 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -75,8 +75,10 @@ LDEMULATION := lppc GNUTARGET := powerpcle MULTIPLEWORD := -mno-multiple else +ifeq ($(call cc-option-yn,-mbig-endian),y) override CC += -mbig-endian override AS += -mbig-endian +endif override LD += -EB LDEMULATION := ppc GNUTARGET := powerpc @@ -128,7 +130,12 @@ CFLAGS-$(CONFIG_POWER5_CPU) += $(call 
cc-option,-mcpu=power5) CFLAGS-$(CONFIG_POWER6_CPU) += $(call cc-option,-mcpu=power6) CFLAGS-$(CONFIG_POWER7_CPU) += $(call cc-option,-mcpu=power7) +# Altivec option not allowed with e500mc64 in GCC. +ifeq ($(CONFIG_ALTIVEC),y) +E5500_CPU := -mcpu=powerpc64 +else E5500_CPU := $(call cc-option,-mcpu=e500mc64,-mcpu=powerpc64) +endif CFLAGS-$(CONFIG_E5500_CPU) += $(E5500_CPU) CFLAGS-$(CONFIG_E6500_CPU) += $(call cc-option,-mcpu=e6500,$(E5500_CPU)) diff --git a/arch/powerpc/boot/dts/mpc5121.dtsi b/arch/powerpc/boot/dts/mpc5121.dtsi index bd14c00..2d7cb04 100644 --- a/arch/powerpc/boot/dts/mpc5121.dtsi +++ b/arch/powerpc/boot/dts/mpc5121.dtsi @@ -77,7 +77,6 @@ compatible = "fsl,mpc5121-immr"; #address-cells = <1>; #size-cells = <1>; - #interrupt-cells = <2>; ranges = <0x0 0x80000000 0x400000>; reg = <0x80000000 0x400000>; bus-frequency = <66000000>; /* 66 MHz ips bus */ diff --git a/arch/powerpc/boot/dts/mpc5125twr.dts b/arch/powerpc/boot/dts/mpc5125twr.dts index 4177b62..a618dfc 100644 --- a/arch/powerpc/boot/dts/mpc5125twr.dts +++ b/arch/powerpc/boot/dts/mpc5125twr.dts @@ -58,7 +58,6 @@ compatible = "fsl,mpc5121-immr"; #address-cells = <1>; #size-cells = <1>; - #interrupt-cells = <2>; ranges = <0x0 0x80000000 0x400000>; reg = <0x80000000 0x400000>; bus-frequency = <66000000>; // 66 MHz ips bus @@ -189,6 +188,10 @@ reg = <0xA000 0x1000>; }; + // disable USB1 port + // TODO: + // correct pinmux config and fix USB3320 ulpi dependency + // before re-enabling it usb@3000 { compatible = "fsl,mpc5121-usb2-dr"; reg = <0x3000 0x400>; @@ -197,6 +200,7 @@ interrupts = <43 0x8>; dr_mode = "host"; phy_type = "ulpi"; + status = "disabled"; }; // 5125 PSCs are not 52xx or 5121 PSC compatible diff --git a/arch/powerpc/boot/dts/xcalibur1501.dts b/arch/powerpc/boot/dts/xcalibur1501.dts index cc00f4d..c409cba 100644 --- a/arch/powerpc/boot/dts/xcalibur1501.dts +++ b/arch/powerpc/boot/dts/xcalibur1501.dts @@ -637,14 +637,14 @@ tlu@2f000 { compatible = "fsl,mpc8572-tlu", "fsl_tlu"; reg = <0x2f000 0x1000>; - interupts = <61 2 >; + interrupts = <61 2>; interrupt-parent = <&mpic>; }; tlu@15000 { compatible = "fsl,mpc8572-tlu", "fsl_tlu"; reg = <0x15000 0x1000>; - interupts = <75 2>; + interrupts = <75 2>; interrupt-parent = <&mpic>; }; }; diff --git a/arch/powerpc/boot/dts/xpedite5301.dts b/arch/powerpc/boot/dts/xpedite5301.dts index 53c1c6a..04cb410 100644 --- a/arch/powerpc/boot/dts/xpedite5301.dts +++ b/arch/powerpc/boot/dts/xpedite5301.dts @@ -547,14 +547,14 @@ tlu@2f000 { compatible = "fsl,mpc8572-tlu", "fsl_tlu"; reg = <0x2f000 0x1000>; - interupts = <61 2 >; + interrupts = <61 2>; interrupt-parent = <&mpic>; }; tlu@15000 { compatible = "fsl,mpc8572-tlu", "fsl_tlu"; reg = <0x15000 0x1000>; - interupts = <75 2>; + interrupts = <75 2>; interrupt-parent = <&mpic>; }; }; diff --git a/arch/powerpc/boot/dts/xpedite5330.dts b/arch/powerpc/boot/dts/xpedite5330.dts index 2152259..73f8620 100644 --- a/arch/powerpc/boot/dts/xpedite5330.dts +++ b/arch/powerpc/boot/dts/xpedite5330.dts @@ -583,14 +583,14 @@ tlu@2f000 { compatible = "fsl,mpc8572-tlu", "fsl_tlu"; reg = <0x2f000 0x1000>; - interupts = <61 2 >; + interrupts = <61 2>; interrupt-parent = <&mpic>; }; tlu@15000 { compatible = "fsl,mpc8572-tlu", "fsl_tlu"; reg = <0x15000 0x1000>; - interupts = <75 2>; + interrupts = <75 2>; interrupt-parent = <&mpic>; }; }; diff --git a/arch/powerpc/boot/dts/xpedite5370.dts b/arch/powerpc/boot/dts/xpedite5370.dts index 11dbda1..cd0ea2b 100644 --- a/arch/powerpc/boot/dts/xpedite5370.dts +++ b/arch/powerpc/boot/dts/xpedite5370.dts @@ 
-545,14 +545,14 @@ tlu@2f000 { compatible = "fsl,mpc8572-tlu", "fsl_tlu"; reg = <0x2f000 0x1000>; - interupts = <61 2 >; + interrupts = <61 2>; interrupt-parent = <&mpic>; }; tlu@15000 { compatible = "fsl,mpc8572-tlu", "fsl_tlu"; reg = <0x15000 0x1000>; - interupts = <75 2>; + interrupts = <75 2>; interrupt-parent = <&mpic>; }; }; diff --git a/arch/powerpc/boot/util.S b/arch/powerpc/boot/util.S index 5143228..6636b1d 100644 --- a/arch/powerpc/boot/util.S +++ b/arch/powerpc/boot/util.S @@ -71,18 +71,32 @@ udelay: add r4,r4,r5 addi r4,r4,-1 divw r4,r4,r5 /* BUS ticks */ +#ifdef CONFIG_8xx +1: mftbu r5 + mftb r6 + mftbu r7 +#else 1: mfspr r5, SPRN_TBRU mfspr r6, SPRN_TBRL mfspr r7, SPRN_TBRU +#endif cmpw 0,r5,r7 bne 1b /* Get [synced] base time */ addc r9,r6,r4 /* Compute end time */ addze r8,r5 +#ifdef CONFIG_8xx +2: mftbu r5 +#else 2: mfspr r5, SPRN_TBRU +#endif cmpw 0,r5,r8 blt 2b bgt 3f +#ifdef CONFIG_8xx + mftb r6 +#else mfspr r6, SPRN_TBRL +#endif cmpw 0,r6,r9 blt 2b 3: blr diff --git a/arch/powerpc/configs/52xx/cm5200_defconfig b/arch/powerpc/configs/52xx/cm5200_defconfig index 69b57da..0b88c7b 100644 --- a/arch/powerpc/configs/52xx/cm5200_defconfig +++ b/arch/powerpc/configs/52xx/cm5200_defconfig @@ -12,7 +12,6 @@ CONFIG_EXPERT=y CONFIG_PPC_MPC52xx=y CONFIG_PPC_MPC5200_SIMPLE=y # CONFIG_PPC_PMAC is not set -CONFIG_PPC_BESTCOMM=y CONFIG_SPARSE_IRQ=y CONFIG_PM=y # CONFIG_PCI is not set @@ -71,6 +70,8 @@ CONFIG_USB_DEVICEFS=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_PPC_OF_BE=y CONFIG_USB_STORAGE=y +CONFIG_DMADEVICES=y +CONFIG_PPC_BESTCOMM=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set diff --git a/arch/powerpc/configs/52xx/lite5200b_defconfig b/arch/powerpc/configs/52xx/lite5200b_defconfig index f3638ae..104a332 100644 --- a/arch/powerpc/configs/52xx/lite5200b_defconfig +++ b/arch/powerpc/configs/52xx/lite5200b_defconfig @@ -15,7 +15,6 @@ CONFIG_PPC_MPC52xx=y CONFIG_PPC_MPC5200_SIMPLE=y CONFIG_PPC_LITE5200=y # CONFIG_PPC_PMAC is not set -CONFIG_PPC_BESTCOMM=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_SPARSE_IRQ=y @@ -59,6 +58,8 @@ CONFIG_I2C_CHARDEV=y CONFIG_I2C_MPC=y # CONFIG_HWMON is not set CONFIG_VIDEO_OUTPUT_CONTROL=m +CONFIG_DMADEVICES=y +CONFIG_PPC_BESTCOMM=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig index 0c7de96..0d13ad7 100644 --- a/arch/powerpc/configs/52xx/motionpro_defconfig +++ b/arch/powerpc/configs/52xx/motionpro_defconfig @@ -12,7 +12,6 @@ CONFIG_EXPERT=y CONFIG_PPC_MPC52xx=y CONFIG_PPC_MPC5200_SIMPLE=y # CONFIG_PPC_PMAC is not set -CONFIG_PPC_BESTCOMM=y CONFIG_SPARSE_IRQ=y CONFIG_PM=y # CONFIG_PCI is not set @@ -84,6 +83,8 @@ CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_TIMER=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_DS1307=y +CONFIG_DMADEVICES=y +CONFIG_PPC_BESTCOMM=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig index 22e7195..430aa18 100644 --- a/arch/powerpc/configs/52xx/pcm030_defconfig +++ b/arch/powerpc/configs/52xx/pcm030_defconfig @@ -21,7 +21,6 @@ CONFIG_MODULE_UNLOAD=y CONFIG_PPC_MPC52xx=y CONFIG_PPC_MPC5200_SIMPLE=y # CONFIG_PPC_PMAC is not set -CONFIG_PPC_BESTCOMM=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_HZ_100=y @@ -87,6 +86,8 @@ CONFIG_USB_OHCI_HCD_PPC_OF_BE=y CONFIG_USB_STORAGE=m CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_PCF8563=m 
+CONFIG_DMADEVICES=y +CONFIG_PPC_BESTCOMM=y CONFIG_EXT2_FS=m CONFIG_EXT3_FS=m # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig index 716a37b..7af4c5b 100644 --- a/arch/powerpc/configs/52xx/tqm5200_defconfig +++ b/arch/powerpc/configs/52xx/tqm5200_defconfig @@ -17,7 +17,6 @@ CONFIG_PPC_MPC52xx=y CONFIG_PPC_MPC5200_SIMPLE=y CONFIG_PPC_MPC5200_BUGFIX=y # CONFIG_PPC_PMAC is not set -CONFIG_PPC_BESTCOMM=y CONFIG_PM=y # CONFIG_PCI is not set CONFIG_NET=y @@ -86,6 +85,8 @@ CONFIG_USB_STORAGE=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_DS1307=y CONFIG_RTC_DRV_DS1374=y +CONFIG_DMADEVICES=y +CONFIG_PPC_BESTCOMM=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig index 6640a35..8b682d1c 100644 --- a/arch/powerpc/configs/mpc5200_defconfig +++ b/arch/powerpc/configs/mpc5200_defconfig @@ -15,7 +15,6 @@ CONFIG_PPC_MEDIA5200=y CONFIG_PPC_MPC5200_BUGFIX=y CONFIG_PPC_MPC5200_LPBFIFO=m # CONFIG_PPC_PMAC is not set -CONFIG_PPC_BESTCOMM=y CONFIG_SIMPLE_GPIO=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y @@ -125,6 +124,8 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_DS1307=y CONFIG_RTC_DRV_DS1374=y CONFIG_RTC_DRV_PCF8563=m +CONFIG_DMADEVICES=y +CONFIG_PPC_BESTCOMM=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig index bd8a6f7..cec044a 100644 --- a/arch/powerpc/configs/pasemi_defconfig +++ b/arch/powerpc/configs/pasemi_defconfig @@ -2,7 +2,6 @@ CONFIG_PPC64=y CONFIG_ALTIVEC=y CONFIG_SMP=y CONFIG_NR_CPUS=2 -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y @@ -45,8 +44,9 @@ CONFIG_INET_AH=y CONFIG_INET_ESP=y # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y CONFIG_MTD=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_SLRAM=y CONFIG_MTD_PHRAM=y @@ -88,7 +88,6 @@ CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y CONFIG_NETDEVICES=y CONFIG_DUMMY=y -CONFIG_MII=y CONFIG_TIGON3=y CONFIG_E1000=y CONFIG_PASEMI_MAC=y @@ -174,8 +173,8 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y CONFIG_CRC_CCITT=y CONFIG_PRINTK_TIME=y -CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_FS=y +CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y # CONFIG_SCHED_DEBUG is not set diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index ae78225..f89da80 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -45,11 +45,15 @@ # define SMPWMB eieio #endif +#define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") + #define smp_mb() mb() -#define smp_rmb() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") +#define smp_rmb() __lwsync() #define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") #define smp_read_barrier_depends() read_barrier_depends() #else +#define __lwsync() barrier() + #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() @@ -65,4 +69,19 @@ #define data_barrier(x) \ asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory"); +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + __lwsync(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + 
__lwsync(); \ + ___p1; \ +}) + #endif /* _ASM_POWERPC_BARRIER_H */ diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 894662a..243ce69 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -284,7 +284,7 @@ do_kvm_##n: \ subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \ beq- 1f; \ ld r1,PACAKSAVE(r13); /* kernel stack to use */ \ -1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \ +1: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \ blt+ cr1,3f; /* abort if it is */ \ li r1,(n); /* will be reloaded later */ \ sth r1,PACA_TRAP_SAVE(r13); \ diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 4a594b7..bc23b1b 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -192,6 +192,10 @@ extern void kvmppc_load_up_vsx(void); extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst); extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst); extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd); +extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, + struct kvm_vcpu *vcpu); +extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, + struct kvmppc_book3s_shadow_vcpu *svcpu); static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) { diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index 0bd9348..192917d 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -79,6 +79,7 @@ struct kvmppc_host_state { ulong vmhandler; ulong scratch0; ulong scratch1; + ulong scratch2; u8 in_guest; u8 restore_hid5; u8 napping; @@ -106,6 +107,7 @@ struct kvmppc_host_state { }; struct kvmppc_book3s_shadow_vcpu { + bool in_use; ulong gpr[14]; u32 cr; u32 xer; diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index 033c06b..7bdcf34 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -720,13 +720,13 @@ int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe, int64_t opal_pci_poll(uint64_t phb_id); int64_t opal_return_cpu(void); -int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, uint64_t *val); +int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val); int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val); int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type, uint32_t addr, uint32_t data, uint32_t sz); int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type, - uint32_t addr, uint32_t *data, uint32_t sz); + uint32_t addr, __be32 *data, uint32_t sz); int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result); int64_t opal_manage_flash(uint8_t op); int64_t opal_update_flash(uint64_t blk_list); diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h index 27b2386..842846c 100644 --- a/arch/powerpc/include/asm/pgalloc-32.h +++ b/arch/powerpc/include/asm/pgalloc-32.h @@ -84,10 +84,8 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb, static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, unsigned long address) { - struct page *page = page_address(table); - tlb_flush_pgtable(tlb, address); - pgtable_page_dtor(page); - pgtable_free_tlb(tlb, page, 0); + pgtable_page_dtor(table); + 
pgtable_free_tlb(tlb, page_address(table), 0); } #endif /* _ASM_POWERPC_PGALLOC_32_H */ diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h index 16cb92d..4b0be20 100644 --- a/arch/powerpc/include/asm/pgalloc-64.h +++ b/arch/powerpc/include/asm/pgalloc-64.h @@ -16,6 +16,7 @@ struct vmemmap_backing { unsigned long phys; unsigned long virt_addr; }; +extern struct vmemmap_backing *vmemmap_list; /* * Functions that deal with pagetables that could be at any level of @@ -147,11 +148,9 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb, static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, unsigned long address) { - struct page *page = page_address(table); - tlb_flush_pgtable(tlb, address); - pgtable_page_dtor(page); - pgtable_free_tlb(tlb, page, 0); + pgtable_page_dtor(table); + pgtable_free_tlb(tlb, page_address(table), 0); } #else /* if CONFIG_PPC_64K_PAGES */ diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 3c1acc3..f595b98 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -366,6 +366,8 @@ BEGIN_FTR_SECTION_NESTED(96); \ cmpwi dest,0; \ beq- 90b; \ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) +#elif defined(CONFIG_8xx) +#define MFTB(dest) mftb dest #else #define MFTB(dest) mfspr dest, SPRN_TBRL #endif diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 5c45787..fa8388e 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -1174,12 +1174,19 @@ #else /* __powerpc64__ */ +#if defined(CONFIG_8xx) +#define mftbl() ({unsigned long rval; \ + asm volatile("mftbl %0" : "=r" (rval)); rval;}) +#define mftbu() ({unsigned long rval; \ + asm volatile("mftbu %0" : "=r" (rval)); rval;}) +#else #define mftbl() ({unsigned long rval; \ asm volatile("mfspr %0, %1" : "=r" (rval) : \ "i" (SPRN_TBRL)); rval;}) #define mftbu() ({unsigned long rval; \ asm volatile("mfspr %0, %1" : "=r" (rval) : \ "i" (SPRN_TBRU)); rval;}) +#endif #endif /* !__powerpc64__ */ #define mttbl(v) asm volatile("mttbl %0":: "r"(v)) diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 703a841..11ba86e 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -26,6 +26,7 @@ extern void reloc_got2(unsigned long); void check_for_initrd(void); void do_init_bootmem(void); void setup_panic(void); +#define ARCH_PANIC_TIMEOUT 180 #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 5f54a74..f6e78d6 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -28,6 +28,8 @@ #include <asm/synch.h> #include <asm/ppc-opcode.h> +#define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. 
*/ + #define arch_spin_is_locked(x) ((x)->slock != 0) #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 9ee1261..aace905 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h @@ -35,7 +35,7 @@ extern void giveup_vsx(struct task_struct *); extern void enable_kernel_spe(void); extern void giveup_spe(struct task_struct *); extern void load_up_spe(struct task_struct *); -extern void switch_booke_debug_regs(struct thread_struct *new_thread); +extern void switch_booke_debug_regs(struct debug_reg *new_debug); #ifndef CONFIG_SMP extern void discard_lazy_cpu_state(void); diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h index 18908ca..2cf846e 100644 --- a/arch/powerpc/include/asm/timex.h +++ b/arch/powerpc/include/asm/timex.h @@ -29,7 +29,11 @@ static inline cycles_t get_cycles(void) ret = 0; __asm__ __volatile__( +#ifdef CONFIG_8xx + "97: mftb %0\n" +#else "97: mfspr %0, %2\n" +#endif "99:\n" ".section __ftr_fixup,\"a\"\n" ".align 2\n" @@ -41,7 +45,11 @@ static inline cycles_t get_cycles(void) " .long 0\n" " .long 0\n" ".previous" +#ifdef CONFIG_8xx + : "=r" (ret) : "i" (CPU_FTR_601)); +#else : "=r" (ret) : "i" (CPU_FTR_601), "i" (SPRN_TBRL)); +#endif return ret; #endif } diff --git a/arch/powerpc/include/asm/unaligned.h b/arch/powerpc/include/asm/unaligned.h index 5f1b1e3..8296381 100644 --- a/arch/powerpc/include/asm/unaligned.h +++ b/arch/powerpc/include/asm/unaligned.h @@ -4,13 +4,18 @@ #ifdef __KERNEL__ /* - * The PowerPC can do unaligned accesses itself in big endian mode. + * The PowerPC can do unaligned accesses itself based on its endian mode. */ #include <linux/unaligned/access_ok.h> #include <linux/unaligned/generic.h> +#ifdef __LITTLE_ENDIAN__ +#define get_unaligned __get_unaligned_le +#define put_unaligned __put_unaligned_le +#else #define get_unaligned __get_unaligned_be #define put_unaligned __put_unaligned_be +#endif #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_UNALIGNED_H */ diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h index 75c6ecd..7422a99 100644 --- a/arch/powerpc/include/asm/uprobes.h +++ b/arch/powerpc/include/asm/uprobes.h @@ -36,9 +36,8 @@ typedef ppc_opcode_t uprobe_opcode_t; struct arch_uprobe { union { - u8 insn[MAX_UINSN_BYTES]; - u8 ixol[MAX_UINSN_BYTES]; - u32 ainsn; + u32 insn; + u32 ixol; }; }; diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 2ea5cc0..d3de010 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -576,6 +576,7 @@ int main(void) HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler); HSTATE_FIELD(HSTATE_SCRATCH0, scratch0); HSTATE_FIELD(HSTATE_SCRATCH1, scratch1); + HSTATE_FIELD(HSTATE_SCRATCH2, scratch2); HSTATE_FIELD(HSTATE_IN_GUEST, in_guest); HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5); HSTATE_FIELD(HSTATE_NAPPING, napping); diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 779a78c..11c1d06 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c @@ -124,15 +124,15 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, void crash_free_reserved_phys_range(unsigned long begin, unsigned long end) { unsigned long addr; - const u32 *basep, *sizep; + const __be32 *basep, *sizep; unsigned int rtas_start = 0, rtas_end = 0; basep = of_get_property(rtas.dev, "linux,rtas-base", NULL); sizep = of_get_property(rtas.dev, "rtas-size", NULL); if (basep && 
sizep) { - rtas_start = *basep; - rtas_end = *basep + *sizep; + rtas_start = be32_to_cpup(basep); + rtas_end = rtas_start + be32_to_cpup(sizep); } for (addr = begin; addr < end; addr += PAGE_SIZE) { diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 2ae41ab..4f0946d 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -80,6 +80,7 @@ END_FTR_SECTION(0, 1) * of the function that the cpu should jump to to continue * initialization. */ + .balign 8 .globl __secondary_hold_spinloop __secondary_hold_spinloop: .llong 0x0 @@ -470,6 +471,7 @@ _STATIC(__after_prom_start) mtctr r8 bctr +.balign 8 p_end: .llong _end - _stext 4: /* Now copy the rest of the kernel up to _end */ diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index e1ec57e..75d4f73 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -18,6 +18,7 @@ #include <linux/ftrace.h> #include <asm/machdep.h> +#include <asm/pgalloc.h> #include <asm/prom.h> #include <asm/sections.h> @@ -75,6 +76,17 @@ void arch_crash_save_vmcoreinfo(void) #ifndef CONFIG_NEED_MULTIPLE_NODES VMCOREINFO_SYMBOL(contig_page_data); #endif +#if defined(CONFIG_PPC64) && defined(CONFIG_SPARSEMEM_VMEMMAP) + VMCOREINFO_SYMBOL(vmemmap_list); + VMCOREINFO_SYMBOL(mmu_vmemmap_psize); + VMCOREINFO_SYMBOL(mmu_psize_defs); + VMCOREINFO_STRUCT_SIZE(vmemmap_backing); + VMCOREINFO_OFFSET(vmemmap_backing, list); + VMCOREINFO_OFFSET(vmemmap_backing, phys); + VMCOREINFO_OFFSET(vmemmap_backing, virt_addr); + VMCOREINFO_STRUCT_SIZE(mmu_psize_def); + VMCOREINFO_OFFSET(mmu_psize_def, shift); +#endif } /* @@ -136,7 +148,7 @@ void __init reserve_crashkernel(void) * a small SLB (128MB) since the crash kernel needs to place * itself and some stacks to be in the first segment. 
*/ - crashk_res.start = min(0x80000000ULL, (ppc64_rma_size / 2)); + crashk_res.start = min(0x8000000ULL, (ppc64_rma_size / 2)); #else crashk_res.start = KDUMP_KERNELBASE; #endif diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index e59caf8..64bf8db 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -246,8 +246,8 @@ _GLOBAL(__bswapdi2) or r3,r7,r9 blr -#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) +#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX _GLOBAL(rmci_on) sync isync @@ -277,6 +277,9 @@ _GLOBAL(rmci_off) isync sync blr +#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */ + +#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) /* * Do an IO access in real mode diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index fd82c28..28b898e 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c @@ -210,7 +210,7 @@ static void __init nvram_print_partitions(char * label) printk(KERN_WARNING "--------%s---------\n", label); printk(KERN_WARNING "indx\t\tsig\tchks\tlen\tname\n"); list_for_each_entry(tmp_part, &nvram_partitions, partition) { - printk(KERN_WARNING "%4d \t%02x\t%02x\t%d\t%12s\n", + printk(KERN_WARNING "%4d \t%02x\t%02x\t%d\t%12.12s\n", tmp_part->index, tmp_part->header.signature, tmp_part->header.checksum, tmp_part->header.length, tmp_part->header.name); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 3386d8a..4a96556 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -339,7 +339,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread) #endif } -static void prime_debug_regs(struct thread_struct *thread) +static void prime_debug_regs(struct debug_reg *debug) { /* * We could have inherited MSR_DE from userspace, since @@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread) */ mtmsr(mfmsr() & ~MSR_DE); - mtspr(SPRN_IAC1, thread->debug.iac1); - mtspr(SPRN_IAC2, thread->debug.iac2); + mtspr(SPRN_IAC1, debug->iac1); + mtspr(SPRN_IAC2, debug->iac2); #if CONFIG_PPC_ADV_DEBUG_IACS > 2 - mtspr(SPRN_IAC3, thread->debug.iac3); - mtspr(SPRN_IAC4, thread->debug.iac4); + mtspr(SPRN_IAC3, debug->iac3); + mtspr(SPRN_IAC4, debug->iac4); #endif - mtspr(SPRN_DAC1, thread->debug.dac1); - mtspr(SPRN_DAC2, thread->debug.dac2); + mtspr(SPRN_DAC1, debug->dac1); + mtspr(SPRN_DAC2, debug->dac2); #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 - mtspr(SPRN_DVC1, thread->debug.dvc1); - mtspr(SPRN_DVC2, thread->debug.dvc2); + mtspr(SPRN_DVC1, debug->dvc1); + mtspr(SPRN_DVC2, debug->dvc2); #endif - mtspr(SPRN_DBCR0, thread->debug.dbcr0); - mtspr(SPRN_DBCR1, thread->debug.dbcr1); + mtspr(SPRN_DBCR0, debug->dbcr0); + mtspr(SPRN_DBCR1, debug->dbcr1); #ifdef CONFIG_BOOKE - mtspr(SPRN_DBCR2, thread->debug.dbcr2); + mtspr(SPRN_DBCR2, debug->dbcr2); #endif } /* @@ -371,11 +371,11 @@ static void prime_debug_regs(struct thread_struct *thread) * debug registers, set the debug registers from the values * stored in the new thread. 
*/ -void switch_booke_debug_regs(struct thread_struct *new_thread) +void switch_booke_debug_regs(struct debug_reg *new_debug) { if ((current->thread.debug.dbcr0 & DBCR0_IDM) - || (new_thread->debug.dbcr0 & DBCR0_IDM)) - prime_debug_regs(new_thread); + || (new_debug->dbcr0 & DBCR0_IDM)) + prime_debug_regs(new_debug); } EXPORT_SYMBOL_GPL(switch_booke_debug_regs); #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ @@ -683,7 +683,7 @@ struct task_struct *__switch_to(struct task_struct *prev, #endif /* CONFIG_SMP */ #ifdef CONFIG_PPC_ADV_DEBUG_REGS - switch_booke_debug_regs(&new->thread); + switch_booke_debug_regs(&new->thread.debug); #else /* * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index cb64a6e..078145a 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -1986,19 +1986,23 @@ static void __init prom_init_stdout(void) /* Get the full OF pathname of the stdout device */ memset(path, 0, 256); call_prom("instance-to-path", 3, 1, prom.stdout, path, 255); - stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout); - val = cpu_to_be32(stdout_node); - prom_setprop(prom.chosen, "/chosen", "linux,stdout-package", - &val, sizeof(val)); prom_printf("OF stdout device is: %s\n", of_stdout_device); prom_setprop(prom.chosen, "/chosen", "linux,stdout-path", path, strlen(path) + 1); - /* If it's a display, note it */ - memset(type, 0, sizeof(type)); - prom_getprop(stdout_node, "device_type", type, sizeof(type)); - if (strcmp(type, "display") == 0) - prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0); + /* instance-to-package fails on PA-Semi */ + stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout); + if (stdout_node != PROM_ERROR) { + val = cpu_to_be32(stdout_node); + prom_setprop(prom.chosen, "/chosen", "linux,stdout-package", + &val, sizeof(val)); + + /* If it's a display, note it */ + memset(type, 0, sizeof(type)); + prom_getprop(stdout_node, "device_type", type, sizeof(type)); + if (strcmp(type, "display") == 0) + prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0); + } } static int __init prom_find_machine_type(void) diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 75fb404..2e3d2bf 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -1555,7 +1555,7 @@ long arch_ptrace(struct task_struct *child, long request, flush_fp_to_thread(child); if (fpidx < (PT_FPSCR - PT_FPR0)) - memcpy(&tmp, &child->thread.fp_state.fpr, + memcpy(&tmp, &child->thread.TS_FPR(fpidx), sizeof(long)); else tmp = child->thread.fp_state.fpscr; @@ -1588,7 +1588,7 @@ long arch_ptrace(struct task_struct *child, long request, flush_fp_to_thread(child); if (fpidx < (PT_FPSCR - PT_FPR0)) - memcpy(&child->thread.fp_state.fpr, &data, + memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long)); else child->thread.fp_state.fpscr = data; diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index febc804..bc76cc6 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -479,7 +479,7 @@ void __init smp_setup_cpu_maps(void) if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) && (dn = of_find_node_by_path("/rtas"))) { int num_addr_cell, num_size_cell, maxcpus; - const unsigned int *ireg; + const __be32 *ireg; num_addr_cell = of_n_addr_cells(dn); num_size_cell = of_n_size_cells(dn); @@ -489,7 +489,7 @@ void __init smp_setup_cpu_maps(void) if 
(!ireg) goto out; - maxcpus = ireg[num_addr_cell + num_size_cell]; + maxcpus = be32_to_cpup(ireg + num_addr_cell + num_size_cell); /* Double maxcpus for processors which have SMT capability */ if (cpu_has_feature(CPU_FTR_SMT)) diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index b903dc5..2b0da27 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -296,9 +296,6 @@ void __init setup_arch(char **cmdline_p) if (cpu_has_feature(CPU_FTR_UNIFIED_ID_CACHE)) ucache_bsize = icache_bsize = dcache_bsize; - /* reboot on panic */ - panic_timeout = 180; - if (ppc_md.panic) setup_panic(); diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 4085aaa..856dd4e99 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -588,9 +588,6 @@ void __init setup_arch(char **cmdline_p) dcache_bsize = ppc64_caches.dline_size; icache_bsize = ppc64_caches.iline_size; - /* reboot on panic */ - panic_timeout = 180; - if (ppc_md.panic) setup_panic(); diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 1844298..68027bf 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -445,6 +445,12 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, #endif /* CONFIG_ALTIVEC */ if (copy_fpr_to_user(&frame->mc_fregs, current)) return 1; + + /* + * Clear the MSR VSX bit to indicate there is no valid state attached + * to this context, except in the specific case below where we set it. + */ + msr &= ~MSR_VSX; #ifdef CONFIG_VSX /* * Copy VSR 0-31 upper half from thread_struct to local @@ -457,15 +463,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, if (copy_vsx_to_user(&frame->mc_vsregs, current)) return 1; msr |= MSR_VSX; - } else if (!ctx_has_vsx_region) - /* - * With a small context structure we can't hold the VSX - * registers, hence clear the MSR value to indicate the state - * was not saved. - */ - msr &= ~MSR_VSX; - - + } #endif /* CONFIG_VSX */ #ifdef CONFIG_SPE /* save spe registers */ diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index e66f67b..4299104 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -122,6 +122,12 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, flush_fp_to_thread(current); /* copy fpr regs and fpscr */ err |= copy_fpr_to_user(&sc->fp_regs, current); + + /* + * Clear the MSR VSX bit to indicate there is no valid state attached + * to this context, except in the specific case below where we set it. 
+ */ + msr &= ~MSR_VSX; #ifdef CONFIG_VSX /* * Copy VSX low doubleword to local buffer for formatting, diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index a3b64f3..c1cf4a1 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -580,7 +580,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) int cpu_to_core_id(int cpu) { struct device_node *np; - const int *reg; + const __be32 *reg; int id = -1; np = of_get_cpu_node(cpu, NULL); @@ -591,7 +591,7 @@ int cpu_to_core_id(int cpu) if (!reg) goto out; - id = *reg; + id = be32_to_cpup(reg); out: of_node_put(np); return id; diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c index 59f419b..003b209 100644 --- a/arch/powerpc/kernel/uprobes.c +++ b/arch/powerpc/kernel/uprobes.c @@ -186,7 +186,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) * emulate_step() returns 1 if the insn was successfully emulated. * For all other cases, we need to single-step in hardware. */ - ret = emulate_step(regs, auprobe->ainsn); + ret = emulate_step(regs, auprobe->insn); if (ret > 0) return true; diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S index 6b1f2a6..6b2b696 100644 --- a/arch/powerpc/kernel/vdso32/gettimeofday.S +++ b/arch/powerpc/kernel/vdso32/gettimeofday.S @@ -232,9 +232,15 @@ __do_get_tspec: lwz r6,(CFG_TB_ORIG_STAMP+4)(r9) /* Get a stable TB value */ +#ifdef CONFIG_8xx +2: mftbu r3 + mftbl r4 + mftbu r0 +#else 2: mfspr r3, SPRN_TBRU mfspr r4, SPRN_TBRL mfspr r0, SPRN_TBRU +#endif cmplw cr0,r3,r0 bne- 2b diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index f3ff587..c5d1484 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -469,11 +469,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, slb_v = vcpu->kvm->arch.vrma_slb_v; } + preempt_disable(); /* Find the HPTE in the hash table */ index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v, HPTE_V_VALID | HPTE_V_ABSENT); - if (index < 0) + if (index < 0) { + preempt_enable(); return -ENOENT; + } hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4)); v = hptep[0] & ~HPTE_V_HVLOCK; gr = kvm->arch.revmap[index].guest_rpte; @@ -481,6 +484,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, /* Unlock the HPTE */ asm volatile("lwsync" : : : "memory"); hptep[0] = v; + preempt_enable(); gpte->eaddr = eaddr; gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff); @@ -665,6 +669,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, return -EFAULT; } else { page = pages[0]; + pfn = page_to_pfn(page); if (PageHuge(page)) { page = compound_head(page); pte_size <<= compound_order(page); @@ -689,7 +694,6 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, } rcu_read_unlock_sched(); } - pfn = page_to_pfn(page); } ret = -EFAULT; @@ -707,8 +711,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M; } - /* Set the HPTE to point to pfn */ - r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT); + /* + * Set the HPTE to point to pfn. + * Since the pfn is at PAGE_SIZE granularity, make sure we + * don't mask out lower-order bits if psize < PAGE_SIZE. 
+ */ + if (psize < PAGE_SIZE) + psize = PAGE_SIZE; + r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1)); if (hpte_is_writable(r) && !write_ok) r = hpte_make_readonly(r); ret = RESUME_GUEST; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 072287f..b51d5db 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -131,8 +131,9 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) { struct kvmppc_vcore *vc = vcpu->arch.vcore; + unsigned long flags; - spin_lock(&vcpu->arch.tbacct_lock); + spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE && vc->preempt_tb != TB_NIL) { vc->stolen_tb += mftb() - vc->preempt_tb; @@ -143,19 +144,20 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt; vcpu->arch.busy_preempt = TB_NIL; } - spin_unlock(&vcpu->arch.tbacct_lock); + spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); } static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu) { struct kvmppc_vcore *vc = vcpu->arch.vcore; + unsigned long flags; - spin_lock(&vcpu->arch.tbacct_lock); + spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) vc->preempt_tb = mftb(); if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) vcpu->arch.busy_preempt = mftb(); - spin_unlock(&vcpu->arch.tbacct_lock); + spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); } static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) @@ -486,11 +488,11 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now) */ if (vc->vcore_state != VCORE_INACTIVE && vc->runner->arch.run_task != current) { - spin_lock(&vc->runner->arch.tbacct_lock); + spin_lock_irq(&vc->runner->arch.tbacct_lock); p = vc->stolen_tb; if (vc->preempt_tb != TB_NIL) p += now - vc->preempt_tb; - spin_unlock(&vc->runner->arch.tbacct_lock); + spin_unlock_irq(&vc->runner->arch.tbacct_lock); } else { p = vc->stolen_tb; } @@ -512,10 +514,10 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, core_stolen = vcore_stolen_time(vc, now); stolen = core_stolen - vcpu->arch.stolen_logged; vcpu->arch.stolen_logged = core_stolen; - spin_lock(&vcpu->arch.tbacct_lock); + spin_lock_irq(&vcpu->arch.tbacct_lock); stolen += vcpu->arch.busy_stolen; vcpu->arch.busy_stolen = 0; - spin_unlock(&vcpu->arch.tbacct_lock); + spin_unlock_irq(&vcpu->arch.tbacct_lock); if (!dt || !vpa) return; memset(dt, 0, sizeof(struct dtl_entry)); @@ -589,7 +591,9 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) if (list_empty(&vcpu->kvm->arch.rtas_tokens)) return RESUME_HOST; + idx = srcu_read_lock(&vcpu->kvm->srcu); rc = kvmppc_rtas_hcall(vcpu); + srcu_read_unlock(&vcpu->kvm->srcu, idx); if (rc == -ENOENT) return RESUME_HOST; @@ -1115,13 +1119,13 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) return; - spin_lock(&vcpu->arch.tbacct_lock); + spin_lock_irq(&vcpu->arch.tbacct_lock); now = mftb(); vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - vcpu->arch.stolen_logged; vcpu->arch.busy_preempt = now; vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; - spin_unlock(&vcpu->arch.tbacct_lock); + spin_unlock_irq(&vcpu->arch.tbacct_lock); --vc->n_runnable; list_del(&vcpu->arch.run_list); } diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 
9c51544..8689e2e 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -225,6 +225,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, is_io = pa & (HPTE_R_I | HPTE_R_W); pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK); pa &= PAGE_MASK; + pa |= gpa & ~PAGE_MASK; } else { /* Translate to host virtual address */ hva = __gfn_to_hva_memslot(memslot, gfn); @@ -238,13 +239,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, ptel = hpte_make_readonly(ptel); is_io = hpte_cache_bits(pte_val(pte)); pa = pte_pfn(pte) << PAGE_SHIFT; + pa |= hva & (pte_size - 1); + pa |= gpa & ~PAGE_MASK; } } if (pte_size < psize) return H_PARAMETER; - if (pa && pte_size > psize) - pa |= gpa & (pte_size - 1); ptel &= ~(HPTE_R_PP0 - psize); ptel |= pa; @@ -749,6 +750,10 @@ static int slb_base_page_shift[4] = { 20, /* 1M, unsupported */ }; +/* When called from virtmode, this func should be protected by + * preempt_disable(), otherwise, the holding of HPTE_V_HVLOCK + * can trigger deadlock issue. + */ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, unsigned long valid) { diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index bc8de75..be4fa04a 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -153,7 +153,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 13: b machine_check_fwnmi - /* * We come in here when wakened from nap mode on a secondary hw thread. * Relocation is off and most register values are lost. @@ -224,6 +223,11 @@ kvm_start_guest: /* Clear our vcpu pointer so we don't come back in early */ li r0, 0 std r0, HSTATE_KVM_VCPU(r13) + /* + * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing + * the nap_count, because once the increment to nap_count is + * visible we could be given another vcpu. + */ lwsync /* Clear any pending IPI - we're an offline thread */ ld r5, HSTATE_XICS_PHYS(r13) @@ -241,7 +245,6 @@ kvm_start_guest: /* increment the nap count and then go to nap mode */ ld r4, HSTATE_KVM_VCORE(r13) addi r4, r4, VCORE_NAP_COUNT - lwsync /* make previous updates visible */ 51: lwarx r3, 0, r4 addi r3, r3, 1 stwcx. r3, 0, r4 @@ -751,15 +754,14 @@ kvmppc_interrupt_hv: * guest CR, R12 saved in shadow VCPU SCRATCH1/0 * guest R13 saved in SPRN_SCRATCH0 */ - /* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */ - std r9, HSTATE_HOST_R2(r13) + std r9, HSTATE_SCRATCH2(r13) lbz r9, HSTATE_IN_GUEST(r13) cmpwi r9, KVM_GUEST_MODE_HOST_HV beq kvmppc_bad_host_intr #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE cmpwi r9, KVM_GUEST_MODE_GUEST - ld r9, HSTATE_HOST_R2(r13) + ld r9, HSTATE_SCRATCH2(r13) beq kvmppc_interrupt_pr #endif /* We're now back in the host but in guest MMU context */ @@ -779,7 +781,7 @@ kvmppc_interrupt_hv: std r6, VCPU_GPR(R6)(r9) std r7, VCPU_GPR(R7)(r9) std r8, VCPU_GPR(R8)(r9) - ld r0, HSTATE_HOST_R2(r13) + ld r0, HSTATE_SCRATCH2(r13) std r0, VCPU_GPR(R9)(r9) std r10, VCPU_GPR(R10)(r9) std r11, VCPU_GPR(R11)(r9) @@ -990,14 +992,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) */ /* Increment the threads-exiting-guest count in the 0xff00 bits of vcore->entry_exit_count */ - lwsync ld r5,HSTATE_KVM_VCORE(r13) addi r6,r5,VCORE_ENTRY_EXIT 41: lwarx r3,0,r6 addi r0,r3,0x100 stwcx. r0,0,r6 bne 41b - lwsync + isync /* order stwcx. vs. 
reading napping_threads */ /* * At this point we have an interrupt that we have to pass @@ -1030,6 +1031,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) sld r0,r0,r4 andc. r3,r3,r0 /* no sense IPI'ing ourselves */ beq 43f + /* Order entry/exit update vs. IPIs */ + sync mulli r4,r4,PACA_SIZE /* get paca for thread 0 */ subf r6,r4,r13 42: andi. r0,r3,1 @@ -1638,10 +1641,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) bge kvm_cede_exit stwcx. r4,0,r6 bne 31b + /* order napping_threads update vs testing entry_exit_count */ + isync li r0,1 stb r0,HSTATE_NAPPING(r13) - /* order napping_threads update vs testing entry_exit_count */ - lwsync mr r4,r3 lwz r7,VCORE_ENTRY_EXIT(r5) cmpwi r7,0x100 diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S index f4dd041..f779450 100644 --- a/arch/powerpc/kvm/book3s_interrupts.S +++ b/arch/powerpc/kvm/book3s_interrupts.S @@ -129,29 +129,32 @@ kvm_start_lightweight: * R12 = exit handler id * R13 = PACA * SVCPU.* = guest * + * MSR.EE = 1 * */ + PPC_LL r3, GPR4(r1) /* vcpu pointer */ + + /* + * kvmppc_copy_from_svcpu can clobber volatile registers, save + * the exit handler id to the vcpu and restore it from there later. + */ + stw r12, VCPU_TRAP(r3) + /* Transfer reg values from shadow vcpu back to vcpu struct */ /* On 64-bit, interrupts are still off at this point */ - PPC_LL r3, GPR4(r1) /* vcpu pointer */ + GET_SHADOW_VCPU(r4) bl FUNC(kvmppc_copy_from_svcpu) nop #ifdef CONFIG_PPC_BOOK3S_64 - /* Re-enable interrupts */ - ld r3, HSTATE_HOST_MSR(r13) - ori r3, r3, MSR_EE - MTMSR_EERI(r3) - /* * Reload kernel SPRG3 value. * No need to save guest value as usermode can't modify SPRG3. */ ld r3, PACA_SPRG3(r13) mtspr SPRN_SPRG3, r3 - #endif /* CONFIG_PPC_BOOK3S_64 */ /* R7 = vcpu */ @@ -177,7 +180,7 @@ kvm_start_lightweight: PPC_STL r31, VCPU_GPR(R31)(r7) /* Pass the exit number as 3rd argument to kvmppc_handle_exit */ - mr r5, r12 + lwz r5, VCPU_TRAP(r7) /* Restore r3 (kvm_run) and r4 (vcpu) */ REST_2GPRS(3, r1) diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index fe14ca3..5b9e906 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -66,6 +66,7 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; + svcpu->in_use = 0; svcpu_put(svcpu); #endif vcpu->cpu = smp_processor_id(); @@ -78,6 +79,9 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) { #ifdef CONFIG_PPC_BOOK3S_64 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); + if (svcpu->in_use) { + kvmppc_copy_from_svcpu(vcpu, svcpu); + } memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; svcpu_put(svcpu); @@ -110,12 +114,26 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, svcpu->ctr = vcpu->arch.ctr; svcpu->lr = vcpu->arch.lr; svcpu->pc = vcpu->arch.pc; + svcpu->in_use = true; } /* Copy data touched by real-mode code from shadow vcpu back to vcpu */ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, struct kvmppc_book3s_shadow_vcpu *svcpu) { + /* + * vcpu_put would just call us again because in_use hasn't + * been updated yet. + */ + preempt_disable(); + + /* + * Maybe we were already preempted and synced the svcpu from + * our preempt notifiers. Don't bother touching this svcpu then. 
+ */ + if (!svcpu->in_use) + goto out; + vcpu->arch.gpr[0] = svcpu->gpr[0]; vcpu->arch.gpr[1] = svcpu->gpr[1]; vcpu->arch.gpr[2] = svcpu->gpr[2]; @@ -139,6 +157,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, vcpu->arch.fault_dar = svcpu->fault_dar; vcpu->arch.fault_dsisr = svcpu->fault_dsisr; vcpu->arch.last_inst = svcpu->last_inst; + svcpu->in_use = false; + +out: + preempt_enable(); } static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S index a38c4c9..c3c5231 100644 --- a/arch/powerpc/kvm/book3s_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_rmhandlers.S @@ -153,15 +153,11 @@ _GLOBAL(kvmppc_entry_trampoline) li r6, MSR_IR | MSR_DR andc r6, r5, r6 /* Clear DR and IR in MSR value */ -#ifdef CONFIG_PPC_BOOK3S_32 /* * Set EE in HOST_MSR so that it's enabled when we get into our - * C exit handler function. On 64-bit we delay enabling - * interrupts until we have finished transferring stuff - * to or from the PACA. + * C exit handler function. */ ori r5, r5, MSR_EE -#endif mtsrr0 r7 mtsrr1 r6 RFI diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 53e65a2..0591e05 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -681,7 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) { int ret, s; - struct thread_struct thread; + struct debug_reg debug; #ifdef CONFIG_PPC_FPU struct thread_fp_state fp; int fpexc_mode; @@ -723,9 +723,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) #endif /* Switch to guest debug context */ - thread.debug = vcpu->arch.shadow_dbg_reg; - switch_booke_debug_regs(&thread); - thread.debug = current->thread.debug; + debug = vcpu->arch.shadow_dbg_reg; + switch_booke_debug_regs(&debug); + debug = current->thread.debug; current->thread.debug = vcpu->arch.shadow_dbg_reg; kvmppc_fix_ee_before_entry(); @@ -736,8 +736,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) We also get here with interrupts enabled. */ /* Switch back to user space debug context */ - switch_booke_debug_regs(&thread); - current->thread.debug = thread.debug; + switch_booke_debug_regs(&debug); + current->thread.debug = debug; #ifdef CONFIG_PPC_FPU kvmppc_save_guest_fp(vcpu); diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S index d73a590..596a285 100644 --- a/arch/powerpc/lib/copyuser_64.S +++ b/arch/powerpc/lib/copyuser_64.S @@ -9,6 +9,14 @@ #include <asm/processor.h> #include <asm/ppc_asm.h> +#ifdef __BIG_ENDIAN__ +#define sLd sld /* Shift towards low-numbered address. */ +#define sHd srd /* Shift towards high-numbered address. */ +#else +#define sLd srd /* Shift towards low-numbered address. */ +#define sHd sld /* Shift towards high-numbered address. 
*/ +#endif + .align 7 _GLOBAL(__copy_tofrom_user) BEGIN_FTR_SECTION @@ -118,10 +126,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) 24: ld r9,0(r4) /* 3+2n loads, 2+2n stores */ 25: ld r0,8(r4) - sld r6,r9,r10 + sLd r6,r9,r10 26: ldu r9,16(r4) - srd r7,r0,r11 - sld r8,r0,r10 + sHd r7,r0,r11 + sLd r8,r0,r10 or r7,r7,r6 blt cr6,79f 27: ld r0,8(r4) @@ -129,35 +137,35 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) 28: ld r0,0(r4) /* 4+2n loads, 3+2n stores */ 29: ldu r9,8(r4) - sld r8,r0,r10 + sLd r8,r0,r10 addi r3,r3,-8 blt cr6,5f 30: ld r0,8(r4) - srd r12,r9,r11 - sld r6,r9,r10 + sHd r12,r9,r11 + sLd r6,r9,r10 31: ldu r9,16(r4) or r12,r8,r12 - srd r7,r0,r11 - sld r8,r0,r10 + sHd r7,r0,r11 + sLd r8,r0,r10 addi r3,r3,16 beq cr6,78f 1: or r7,r7,r6 32: ld r0,8(r4) 76: std r12,8(r3) -2: srd r12,r9,r11 - sld r6,r9,r10 +2: sHd r12,r9,r11 + sLd r6,r9,r10 33: ldu r9,16(r4) or r12,r8,r12 77: stdu r7,16(r3) - srd r7,r0,r11 - sld r8,r0,r10 + sHd r7,r0,r11 + sLd r8,r0,r10 bdnz 1b 78: std r12,8(r3) or r7,r7,r6 79: std r7,16(r3) -5: srd r12,r9,r11 +5: sHd r12,r9,r11 or r12,r8,r12 80: std r12,24(r3) bne 6f @@ -165,23 +173,38 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) blr 6: cmpwi cr1,r5,8 addi r3,r3,32 - sld r9,r9,r10 + sLd r9,r9,r10 ble cr1,7f 34: ld r0,8(r4) - srd r7,r0,r11 + sHd r7,r0,r11 or r9,r7,r9 7: bf cr7*4+1,1f +#ifdef __BIG_ENDIAN__ rotldi r9,r9,32 +#endif 94: stw r9,0(r3) +#ifdef __LITTLE_ENDIAN__ + rotrdi r9,r9,32 +#endif addi r3,r3,4 1: bf cr7*4+2,2f +#ifdef __BIG_ENDIAN__ rotldi r9,r9,16 +#endif 95: sth r9,0(r3) +#ifdef __LITTLE_ENDIAN__ + rotrdi r9,r9,16 +#endif addi r3,r3,2 2: bf cr7*4+3,3f +#ifdef __BIG_ENDIAN__ rotldi r9,r9,8 +#endif 96: stb r9,0(r3) +#ifdef __LITTLE_ENDIAN__ + rotrdi r9,r9,8 +#endif 3: li r3,0 blr diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c index 3bc7006..74551b5 100644 --- a/arch/powerpc/mm/hugetlbpage-book3e.c +++ b/arch/powerpc/mm/hugetlbpage-book3e.c @@ -117,6 +117,5 @@ void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) struct hstate *hstate = hstate_file(vma->vm_file); unsigned long tsize = huge_page_shift(hstate) - 10; - __flush_tlb_page(vma ? 
vma->vm_mm : NULL, vmaddr, tsize, 0); - + __flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0); } diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c index 41cd68d..358d743 100644 --- a/arch/powerpc/mm/tlb_nohash.c +++ b/arch/powerpc/mm/tlb_nohash.c @@ -305,7 +305,7 @@ void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { #ifdef CONFIG_HUGETLB_PAGE - if (is_vm_hugetlb_page(vma)) + if (vma && is_vm_hugetlb_page(vma)) flush_hugetlb_page(vma, vmaddr); #endif diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index ac3c2a1..555034f 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -223,10 +223,11 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, } PPC_DIVWU(r_A, r_A, r_X); break; - case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */ + case BPF_S_ALU_DIV_K: /* A /= K */ + if (K == 1) + break; PPC_LI32(r_scratch1, K); - /* Top 32 bits of 64bit result -> A */ - PPC_MULHWU(r_A, r_A, r_scratch1); + PPC_DIVWU(r_A, r_A, r_scratch1); break; case BPF_S_ALU_AND_X: ctx->seen |= SEEN_XREG; diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 132f872..bca2465 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -404,13 +404,27 @@ config PPC_DOORBELL endmenu -config CPU_LITTLE_ENDIAN - bool "Build little endian kernel" - default n +choice + prompt "Endianness selection" + default CPU_BIG_ENDIAN help This option selects whether a big endian or little endian kernel will be built. +config CPU_BIG_ENDIAN + bool "Build big endian kernel" + help + Build a big endian kernel. + + If unsure, select this option. + +config CPU_LITTLE_ENDIAN + bool "Build little endian kernel" + help + Build a little endian kernel. + Note that if cross compiling a little endian kernel, CROSS_COMPILE must point to a toolchain capable of targeting little endian powerpc. 
+ +endchoice diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c index 02245ce..d7ddcee 100644 --- a/arch/powerpc/platforms/powernv/eeh-ioda.c +++ b/arch/powerpc/platforms/powernv/eeh-ioda.c @@ -36,7 +36,6 @@ #include "powernv.h" #include "pci.h" -static char *hub_diag = NULL; static int ioda_eeh_nb_init = 0; static int ioda_eeh_event(struct notifier_block *nb, @@ -140,15 +139,6 @@ static int ioda_eeh_post_init(struct pci_controller *hose) ioda_eeh_nb_init = 1; } - /* We needn't HUB diag-data on PHB3 */ - if (phb->type == PNV_PHB_IODA1 && !hub_diag) { - hub_diag = (char *)__get_free_page(GFP_KERNEL | __GFP_ZERO); - if (!hub_diag) { - pr_err("%s: Out of memory !\n", __func__); - return -ENOMEM; - } - } - #ifdef CONFIG_DEBUG_FS if (phb->dbgfs) { debugfs_create_file("err_injct_outbound", 0600, @@ -633,11 +623,10 @@ static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data) static void ioda_eeh_hub_diag(struct pci_controller *hose) { struct pnv_phb *phb = hose->private_data; - struct OpalIoP7IOCErrorData *data; + struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag; long rc; - data = (struct OpalIoP7IOCErrorData *)ioda_eeh_hub_diag; - rc = opal_pci_get_hub_diag_data(phb->hub_id, data, PAGE_SIZE); + rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data)); if (rc != OPAL_SUCCESS) { pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n", __func__, phb->hub_id, rc); @@ -820,14 +809,15 @@ static void ioda_eeh_phb_diag(struct pci_controller *hose) struct OpalIoPhbErrorCommon *common; long rc; - common = (struct OpalIoPhbErrorCommon *)phb->diag.blob; - rc = opal_pci_get_phb_diag_data2(phb->opal_id, common, PAGE_SIZE); + rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob, + PNV_PCI_DIAG_BUF_SIZE); if (rc != OPAL_SUCCESS) { pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n", __func__, hose->global_number, rc); return; } + common = (struct OpalIoPhbErrorCommon *)phb->diag.blob; switch (common->ioType) { case OPAL_PHB_ERROR_DATA_TYPE_P7IOC: ioda_eeh_p7ioc_phb_diag(hose, common); diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c index e7e59e4..79d83ca 100644 --- a/arch/powerpc/platforms/powernv/opal-lpc.c +++ b/arch/powerpc/platforms/powernv/opal-lpc.c @@ -24,25 +24,25 @@ static int opal_lpc_chip_id = -1; static u8 opal_lpc_inb(unsigned long port) { int64_t rc; - uint32_t data; + __be32 data; if (opal_lpc_chip_id < 0 || port > 0xffff) return 0xff; rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1); - return rc ? 0xff : data; + return rc ? 0xff : be32_to_cpu(data); } static __le16 __opal_lpc_inw(unsigned long port) { int64_t rc; - uint32_t data; + __be32 data; if (opal_lpc_chip_id < 0 || port > 0xfffe) return 0xffff; if (port & 1) return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1); rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2); - return rc ? 0xffff : data; + return rc ? 0xffff : be32_to_cpu(data); } static u16 opal_lpc_inw(unsigned long port) { @@ -52,7 +52,7 @@ static u16 opal_lpc_inw(unsigned long port) static __le32 __opal_lpc_inl(unsigned long port) { int64_t rc; - uint32_t data; + __be32 data; if (opal_lpc_chip_id < 0 || port > 0xfffc) return 0xffffffff; @@ -62,7 +62,7 @@ static __le32 __opal_lpc_inl(unsigned long port) (__le32)opal_lpc_inb(port + 2) << 8 | opal_lpc_inb(port + 3); rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4); - return rc ? 0xffffffff : data; + return rc ? 
0xffffffff : be32_to_cpu(data); } static u32 opal_lpc_inl(unsigned long port) diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c index 4d99a8f..4fbf276 100644 --- a/arch/powerpc/platforms/powernv/opal-xscom.c +++ b/arch/powerpc/platforms/powernv/opal-xscom.c @@ -96,9 +96,11 @@ static int opal_scom_read(scom_map_t map, u64 reg, u64 *value) { struct opal_scom_map *m = map; int64_t rc; + __be64 v; reg = opal_scom_unmangle(reg); - rc = opal_xscom_read(m->chip, m->addr + reg, (uint64_t *)__pa(value)); + rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v)); + *value = be64_to_cpu(v); return opal_xscom_err_xlate(rc); } diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 084cdfa..2c6d173 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -720,6 +720,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE; } iommu_init_table(tbl, phb->hose->node); + iommu_register_group(tbl, pci_domain_nr(pe->pbus), pe->pe_number); if (pe->pdev) set_iommu_table_base(&pe->pdev->dev, tbl); diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 911c24e..1ed8d5f 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -172,11 +172,13 @@ struct pnv_phb { } ioda; }; - /* PHB status structure */ + /* PHB and hub status structure */ union { unsigned char blob[PNV_PCI_DIAG_BUF_SIZE]; struct OpalIoP7IOCPhbErrorData p7ioc; + struct OpalIoP7IOCErrorData hub_diag; } diag; + }; extern struct pci_ops pnv_pci_ops; diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c index e738007..c9fecf0 100644 --- a/arch/powerpc/platforms/pseries/lparcfg.c +++ b/arch/powerpc/platforms/pseries/lparcfg.c @@ -157,7 +157,7 @@ static void parse_ppp_data(struct seq_file *m) { struct hvcall_ppp_data ppp_data; struct device_node *root; - const int *perf_level; + const __be32 *perf_level; int rc; rc = h_get_ppp(&ppp_data); @@ -201,7 +201,7 @@ static void parse_ppp_data(struct seq_file *m) perf_level = of_get_property(root, "ibm,partition-performance-parameters-level", NULL); - if (perf_level && (*perf_level >= 1)) { + if (perf_level && (be32_to_cpup(perf_level) >= 1)) { seq_printf(m, "physical_procs_allocated_to_virtualization=%d\n", ppp_data.phys_platform_procs); @@ -435,7 +435,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) int partition_potential_processors; int partition_active_processors; struct device_node *rtas_node; - const int *lrdrp = NULL; + const __be32 *lrdrp = NULL; rtas_node = of_find_node_by_path("/rtas"); if (rtas_node) @@ -444,7 +444,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) if (lrdrp == NULL) { partition_potential_processors = vdso_data->processorCount; } else { - partition_potential_processors = *(lrdrp + 4); + partition_potential_processors = be32_to_cpup(lrdrp + 4); } of_node_put(rtas_node); @@ -654,7 +654,7 @@ static int lparcfg_data(struct seq_file *m, void *v) const char *model = ""; const char *system_id = ""; const char *tmp; - const unsigned int *lp_index_ptr; + const __be32 *lp_index_ptr; unsigned int lp_index = 0; seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS); @@ -670,7 +670,7 @@ static int lparcfg_data(struct seq_file *m, void *v) lp_index_ptr = of_get_property(rootdn, "ibm,partition-no", NULL); if (lp_index_ptr) - lp_index 
= *lp_index_ptr; + lp_index = be32_to_cpup(lp_index_ptr); of_node_put(rootdn); } seq_printf(m, "serial_number=%s\n", system_id); diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index 6d2f0ab..0c882e8 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c @@ -130,7 +130,8 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name) { struct device_node *dn; struct pci_dn *pdn; - const u32 *req_msi; + const __be32 *p; + u32 req_msi; pdn = pci_get_pdn(pdev); if (!pdn) @@ -138,19 +139,20 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name) dn = pdn->node; - req_msi = of_get_property(dn, prop_name, NULL); - if (!req_msi) { + p = of_get_property(dn, prop_name, NULL); + if (!p) { pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name); return -ENOENT; } - if (*req_msi < nvec) { + req_msi = be32_to_cpup(p); + if (req_msi < nvec) { pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec); - if (*req_msi == 0) /* Be paranoid */ + if (req_msi == 0) /* Be paranoid */ return -ENOSPC; - return *req_msi; + return req_msi; } return 0; @@ -171,7 +173,7 @@ static int check_req_msix(struct pci_dev *pdev, int nvec) static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) { struct device_node *dn; - const u32 *p; + const __be32 *p; dn = of_node_get(pci_device_to_OF_node(dev)); while (dn) { @@ -179,7 +181,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) if (p) { pr_debug("rtas_msi: found prop on dn %s\n", dn->full_name); - *total = *p; + *total = be32_to_cpup(p); return dn; } @@ -232,13 +234,13 @@ struct msi_counts { static void *count_non_bridge_devices(struct device_node *dn, void *data) { struct msi_counts *counts = data; - const u32 *p; + const __be32 *p; u32 class; pr_debug("rtas_msi: counting %s\n", dn->full_name); p = of_get_property(dn, "class-code", NULL); - class = p ? *p : 0; + class = p ? 
be32_to_cpup(p) : 0; if ((class >> 8) != PCI_CLASS_BRIDGE_PCI) counts->num_devices++; @@ -249,7 +251,7 @@ static void *count_non_bridge_devices(struct device_node *dn, void *data) static void *count_spare_msis(struct device_node *dn, void *data) { struct msi_counts *counts = data; - const u32 *p; + const __be32 *p; int req; if (dn == counts->requestor) @@ -260,11 +262,11 @@ static void *count_spare_msis(struct device_node *dn, void *data) req = 0; p = of_get_property(dn, "ibm,req#msi", NULL); if (p) - req = *p; + req = be32_to_cpup(p); p = of_get_property(dn, "ibm,req#msi-x", NULL); if (p) - req = max(req, (int)*p); + req = max(req, (int)be32_to_cpup(p)); } if (req < counts->quota) diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index 7bfaf58..d7096f2 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c @@ -43,8 +43,8 @@ static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */ static DEFINE_SPINLOCK(nvram_lock); struct err_log_info { - int error_type; - unsigned int seq_num; + __be32 error_type; + __be32 seq_num; }; struct nvram_os_partition { @@ -79,9 +79,9 @@ static const char *pseries_nvram_os_partitions[] = { }; struct oops_log_info { - u16 version; - u16 report_length; - u64 timestamp; + __be16 version; + __be16 report_length; + __be64 timestamp; } __attribute__((packed)); static void oops_to_nvram(struct kmsg_dumper *dumper, @@ -291,8 +291,8 @@ int nvram_write_os_partition(struct nvram_os_partition *part, char * buff, length = part->size; } - info.error_type = err_type; - info.seq_num = error_log_cnt; + info.error_type = cpu_to_be32(err_type); + info.seq_num = cpu_to_be32(error_log_cnt); tmp_index = part->index; @@ -364,8 +364,8 @@ int nvram_read_partition(struct nvram_os_partition *part, char *buff, } if (part->os_partition) { - *error_log_cnt = info.seq_num; - *err_type = info.error_type; + *error_log_cnt = be32_to_cpu(info.seq_num); + *err_type = be32_to_cpu(info.error_type); } return 0; @@ -529,9 +529,9 @@ static int zip_oops(size_t text_len) pr_err("nvram: logging uncompressed oops/panic report\n"); return -1; } - oops_hdr->version = OOPS_HDR_VERSION; - oops_hdr->report_length = (u16) zipped_len; - oops_hdr->timestamp = get_seconds(); + oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); + oops_hdr->report_length = cpu_to_be16(zipped_len); + oops_hdr->timestamp = cpu_to_be64(get_seconds()); return 0; } @@ -574,9 +574,9 @@ static int nvram_pstore_write(enum pstore_type_id type, clobbering_unread_rtas_event()) return -1; - oops_hdr->version = OOPS_HDR_VERSION; - oops_hdr->report_length = (u16) size; - oops_hdr->timestamp = get_seconds(); + oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); + oops_hdr->report_length = cpu_to_be16(size); + oops_hdr->timestamp = cpu_to_be64(get_seconds()); if (compressed) err_type = ERR_TYPE_KERNEL_PANIC_GZ; @@ -670,16 +670,16 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type, size_t length, hdr_size; oops_hdr = (struct oops_log_info *)buff; - if (oops_hdr->version < OOPS_HDR_VERSION) { + if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) { /* Old format oops header had 2-byte record size */ hdr_size = sizeof(u16); - length = oops_hdr->version; + length = be16_to_cpu(oops_hdr->version); time->tv_sec = 0; time->tv_nsec = 0; } else { hdr_size = sizeof(*oops_hdr); - length = oops_hdr->report_length; - time->tv_sec = oops_hdr->timestamp; + length = be16_to_cpu(oops_hdr->report_length); + time->tv_sec = 
be64_to_cpu(oops_hdr->timestamp); time->tv_nsec = 0; } *buf = kmalloc(length, GFP_KERNEL); @@ -889,13 +889,13 @@ static void oops_to_nvram(struct kmsg_dumper *dumper, kmsg_dump_get_buffer(dumper, false, oops_data, oops_data_sz, &text_len); err_type = ERR_TYPE_KERNEL_PANIC; - oops_hdr->version = OOPS_HDR_VERSION; - oops_hdr->report_length = (u16) text_len; - oops_hdr->timestamp = get_seconds(); + oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); + oops_hdr->report_length = cpu_to_be16(text_len); + oops_hdr->timestamp = cpu_to_be64(get_seconds()); } (void) nvram_write_os_partition(&oops_log_partition, oops_buf, - (int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type, + (int) (sizeof(*oops_hdr) + text_len), err_type, ++oops_count); spin_unlock_irqrestore(&lock, flags); diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index 5f93856..70670a2 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c @@ -113,7 +113,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) { struct device_node *dn, *pdn; struct pci_bus *bus; - const uint32_t *pcie_link_speed_stats; + const __be32 *pcie_link_speed_stats; bus = bridge->bus; @@ -122,7 +122,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) return 0; for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) { - pcie_link_speed_stats = (const uint32_t *) of_get_property(pdn, + pcie_link_speed_stats = of_get_property(pdn, "ibm,pcie-link-speed-stats", NULL); if (pcie_link_speed_stats) break; @@ -135,7 +135,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) return 0; } - switch (pcie_link_speed_stats[0]) { + switch (be32_to_cpup(pcie_link_speed_stats)) { case 0x01: bus->max_bus_speed = PCIE_SPEED_2_5GT; break; @@ -147,7 +147,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) break; } - switch (pcie_link_speed_stats[1]) { + switch (be32_to_cpup(pcie_link_speed_stats)) { case 0x01: bus->cur_bus_speed = PCIE_SPEED_2_5GT; break; diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index c1f1908..6f76ae4 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -470,7 +470,7 @@ static long pseries_little_endian_exceptions(void) static void __init pSeries_setup_arch(void) { - panic_timeout = 10; + set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); /* Discover PIC type and setup ppc_md accordingly */ pseries_discover_pic(); diff --git a/arch/powerpc/sysdev/ppc4xx_ocm.c b/arch/powerpc/sysdev/ppc4xx_ocm.c index b7c4345..85d9e37 100644 --- a/arch/powerpc/sysdev/ppc4xx_ocm.c +++ b/arch/powerpc/sysdev/ppc4xx_ocm.c @@ -339,7 +339,7 @@ void *ppc4xx_ocm_alloc(phys_addr_t *phys, int size, int align, if (IS_ERR_VALUE(offset)) continue; - ocm_blk = kzalloc(sizeof(struct ocm_block *), GFP_KERNEL); + ocm_blk = kzalloc(sizeof(struct ocm_block), GFP_KERNEL); if (!ocm_blk) { printk(KERN_ERR "PPC4XX OCM: could not allocate ocm block"); rh_free(ocm_reg->rh, offset); diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 314fced..e9f3125 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -101,7 +101,7 @@ config S390 select GENERIC_CPU_DEVICES if !SMP select GENERIC_FIND_FIRST_BIT select GENERIC_SMP_IDLE_THREAD - select GENERIC_TIME_VSYSCALL_OLD + select GENERIC_TIME_VSYSCALL select HAVE_ALIGNED_STRUCT_PAGE if SLUB select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 select HAVE_ARCH_SECCOMP_FILTER @@ -135,7 +135,6 @@ config S390 select 
HAVE_SYSCALL_TRACEPOINTS select HAVE_UID16 if 32BIT select HAVE_VIRT_CPU_ACCOUNTING - select INIT_ALL_POSSIBLE select KTIME_SCALAR if 32BIT select MODULES_USE_ELF_RELA select OLD_SIGACTION @@ -347,14 +346,14 @@ config SMP Even if you don't know what to do here, say Y. config NR_CPUS - int "Maximum number of CPUs (2-64)" - range 2 64 + int "Maximum number of CPUs (2-256)" + range 2 256 depends on SMP default "32" if !64BIT default "64" if 64BIT help This allows you to specify the maximum number of CPUs which this - kernel will support. The maximum supported value is 64 and the + kernel will support. The maximum supported value is 256 and the minimum value which makes sense is 2. This is purely to save memory - each supported CPU adds diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index 46cae13..b3feabd 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -35,7 +35,6 @@ static u8 *ctrblk; static char keylen_flag; struct s390_aes_ctx { - u8 iv[AES_BLOCK_SIZE]; u8 key[AES_MAX_KEY_SIZE]; long enc; long dec; @@ -56,8 +55,7 @@ struct pcc_param { struct s390_xts_ctx { u8 key[32]; - u8 xts_param[16]; - struct pcc_param pcc; + u8 pcc_key[32]; long enc; long dec; int key_len; @@ -441,30 +439,36 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, return aes_set_key(tfm, in_key, key_len); } -static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param, +static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, struct blkcipher_walk *walk) { + struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); int ret = blkcipher_walk_virt(desc, walk); unsigned int nbytes = walk->nbytes; + struct { + u8 iv[AES_BLOCK_SIZE]; + u8 key[AES_MAX_KEY_SIZE]; + } param; if (!nbytes) goto out; - memcpy(param, walk->iv, AES_BLOCK_SIZE); + memcpy(param.iv, walk->iv, AES_BLOCK_SIZE); + memcpy(param.key, sctx->key, sctx->key_len); do { /* only use complete blocks */ unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1); u8 *out = walk->dst.virt.addr; u8 *in = walk->src.virt.addr; - ret = crypt_s390_kmc(func, param, out, in, n); + ret = crypt_s390_kmc(func, &param, out, in, n); if (ret < 0 || ret != n) return -EIO; nbytes &= AES_BLOCK_SIZE - 1; ret = blkcipher_walk_done(desc, walk, nbytes); } while ((nbytes = walk->nbytes)); - memcpy(walk->iv, param, AES_BLOCK_SIZE); + memcpy(walk->iv, param.iv, AES_BLOCK_SIZE); out: return ret; @@ -481,7 +485,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc, return fallback_blk_enc(desc, dst, src, nbytes); blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk); + return cbc_aes_crypt(desc, sctx->enc, &walk); } static int cbc_aes_decrypt(struct blkcipher_desc *desc, @@ -495,7 +499,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc, return fallback_blk_dec(desc, dst, src, nbytes); blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk); + return cbc_aes_crypt(desc, sctx->dec, &walk); } static struct crypto_alg cbc_aes_alg = { @@ -586,7 +590,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, xts_ctx->enc = KM_XTS_128_ENCRYPT; xts_ctx->dec = KM_XTS_128_DECRYPT; memcpy(xts_ctx->key + 16, in_key, 16); - memcpy(xts_ctx->pcc.key + 16, in_key + 16, 16); + memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16); break; case 48: xts_ctx->enc = 0; @@ -597,7 +601,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, xts_ctx->enc = KM_XTS_256_ENCRYPT; xts_ctx->dec =
KM_XTS_256_DECRYPT; memcpy(xts_ctx->key, in_key, 32); - memcpy(xts_ctx->pcc.key, in_key + 32, 32); + memcpy(xts_ctx->pcc_key, in_key + 32, 32); break; default: *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; @@ -616,29 +620,33 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func, unsigned int nbytes = walk->nbytes; unsigned int n; u8 *in, *out; - void *param; + struct pcc_param pcc_param; + struct { + u8 key[32]; + u8 init[16]; + } xts_param; if (!nbytes) goto out; - memset(xts_ctx->pcc.block, 0, sizeof(xts_ctx->pcc.block)); - memset(xts_ctx->pcc.bit, 0, sizeof(xts_ctx->pcc.bit)); - memset(xts_ctx->pcc.xts, 0, sizeof(xts_ctx->pcc.xts)); - memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak)); - param = xts_ctx->pcc.key + offset; - ret = crypt_s390_pcc(func, param); + memset(pcc_param.block, 0, sizeof(pcc_param.block)); + memset(pcc_param.bit, 0, sizeof(pcc_param.bit)); + memset(pcc_param.xts, 0, sizeof(pcc_param.xts)); + memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak)); + memcpy(pcc_param.key, xts_ctx->pcc_key, 32); + ret = crypt_s390_pcc(func, &pcc_param.key[offset]); if (ret < 0) return -EIO; - memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16); - param = xts_ctx->key + offset; + memcpy(xts_param.key, xts_ctx->key, 32); + memcpy(xts_param.init, pcc_param.xts, 16); do { /* only use complete blocks */ n = nbytes & ~(AES_BLOCK_SIZE - 1); out = walk->dst.virt.addr; in = walk->src.virt.addr; - ret = crypt_s390_km(func, param, out, in, n); + ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n); if (ret < 0 || ret != n) return -EIO; diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h index 16760ee..578680f 100644 --- a/arch/s390/include/asm/barrier.h +++ b/arch/s390/include/asm/barrier.h @@ -32,4 +32,19 @@ #define set_mb(var, value) do { var = value; mb(); } while (0) +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ___p1; \ +}) + #endif /* __ASM_BARRIER_H */ diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index 4bf9da0..5d7e8cf 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -38,7 +38,8 @@ #define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \ PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \ - PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | PSW32_ASC_HOME) + PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | \ + PSW32_ASC_PRIMARY) #define COMPAT_USER_HZ 100 #define COMPAT_UTS_MACHINE "s390\0\0\0\0" diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h index c879fad..cb700d5 100644 --- a/arch/s390/include/asm/cpu_mf.h +++ b/arch/s390/include/asm/cpu_mf.h @@ -56,6 +56,96 @@ struct cpumf_ctr_info { u32 reserved2[12]; } __packed; +/* QUERY SAMPLING INFORMATION block */ +struct hws_qsi_info_block { /* Bit(s) */ + unsigned int b0_13:14; /* 0-13: zeros */ + unsigned int as:1; /* 14: basic-sampling authorization */ + unsigned int ad:1; /* 15: diag-sampling authorization */ + unsigned int b16_21:6; /* 16-21: zeros */ + unsigned int es:1; /* 22: basic-sampling enable control */ + unsigned int ed:1; /* 23: diag-sampling enable control */ + unsigned int b24_29:6; /* 24-29: zeros */ + unsigned int cs:1; /* 30: basic-sampling activation control */ + unsigned int cd:1; /* 31: diag-sampling activation control */ + unsigned int bsdes:16; /* 4-5: size 
of basic sampling entry */ + unsigned int dsdes:16; /* 6-7: size of diagnostic sampling entry */ + unsigned long min_sampl_rate; /* 8-15: minimum sampling interval */ + unsigned long max_sampl_rate; /* 16-23: maximum sampling interval*/ + unsigned long tear; /* 24-31: TEAR contents */ + unsigned long dear; /* 32-39: DEAR contents */ + unsigned int rsvrd0; /* 40-43: reserved */ + unsigned int cpu_speed; /* 44-47: CPU speed */ + unsigned long long rsvrd1; /* 48-55: reserved */ + unsigned long long rsvrd2; /* 56-63: reserved */ +} __packed; + +/* SET SAMPLING CONTROLS request block */ +struct hws_lsctl_request_block { + unsigned int s:1; /* 0: maximum buffer indicator */ + unsigned int h:1; /* 1: part. level reserved for VM use*/ + unsigned long long b2_53:52;/* 2-53: zeros */ + unsigned int es:1; /* 54: basic-sampling enable control */ + unsigned int ed:1; /* 55: diag-sampling enable control */ + unsigned int b56_61:6; /* 56-61: - zeros */ + unsigned int cs:1; /* 62: basic-sampling activation control */ + unsigned int cd:1; /* 63: diag-sampling activation control */ + unsigned long interval; /* 8-15: sampling interval */ + unsigned long tear; /* 16-23: TEAR contents */ + unsigned long dear; /* 24-31: DEAR contents */ + /* 32-63: */ + unsigned long rsvrd1; /* reserved */ + unsigned long rsvrd2; /* reserved */ + unsigned long rsvrd3; /* reserved */ + unsigned long rsvrd4; /* reserved */ +} __packed; + +struct hws_basic_entry { + unsigned int def:16; /* 0-15 Data Entry Format */ + unsigned int R:4; /* 16-19 reserved */ + unsigned int U:4; /* 20-23 Number of unique instruct. */ + unsigned int z:2; /* zeros */ + unsigned int T:1; /* 26 PSW DAT mode */ + unsigned int W:1; /* 27 PSW wait state */ + unsigned int P:1; /* 28 PSW Problem state */ + unsigned int AS:2; /* 29-30 PSW address-space control */ + unsigned int I:1; /* 31 entry valid or invalid */ + unsigned int:16; + unsigned int prim_asn:16; /* primary ASN */ + unsigned long long ia; /* Instruction Address */ + unsigned long long gpp; /* Guest Program Parameter */ + unsigned long long hpp; /* Host Program Parameter */ +} __packed; + +struct hws_diag_entry { + unsigned int def:16; /* 0-15 Data Entry Format */ + unsigned int R:14; /* 16-19 and 20-30 reserved */ + unsigned int I:1; /* 31 entry valid or invalid */ + u8 data[]; /* Machine-dependent sample data */ +} __packed; + +struct hws_combined_entry { + struct hws_basic_entry basic; /* Basic-sampling data entry */ + struct hws_diag_entry diag; /* Diagnostic-sampling data entry */ +} __packed; + +struct hws_trailer_entry { + union { + struct { + unsigned int f:1; /* 0 - Block Full Indicator */ + unsigned int a:1; /* 1 - Alert request control */ + unsigned int t:1; /* 2 - Timestamp format */ + unsigned long long:61; /* 3 - 63: Reserved */ + }; + unsigned long long flags; /* 0 - 63: All indicators */ + }; + unsigned long long overflow; /* 64 - sample Overflow count */ + unsigned char timestamp[16]; /* 16 - 31 timestamp */ + unsigned long long reserved1; /* 32 -Reserved */ + unsigned long long reserved2; /* */ + unsigned long long progusage1; /* 48 - reserved for programming use */ + unsigned long long progusage2; /* */ +} __packed; + /* Query counter information */ static inline int qctri(struct cpumf_ctr_info *info) { @@ -99,4 +189,95 @@ static inline int ecctr(u64 ctr, u64 *val) return cc; } +/* Query sampling information */ +static inline int qsi(struct hws_qsi_info_block *info) +{ + int cc; + cc = 1; + + asm volatile( + "0: .insn s,0xb2860000,0(%1)\n" + "1: lhi %0,0\n" + "2:\n" + 
EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) + : "=d" (cc), "+a" (info) + : "m" (*info) + : "cc", "memory"); + + return cc ? -EINVAL : 0; +} + +/* Load sampling controls */ +static inline int lsctl(struct hws_lsctl_request_block *req) +{ + int cc; + + cc = 1; + asm volatile( + "0: .insn s,0xb2870000,0(%1)\n" + "1: ipm %0\n" + " srl %0,28\n" + "2:\n" + EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) + : "+d" (cc), "+a" (req) + : "m" (*req) + : "cc", "memory"); + + return cc ? -EINVAL : 0; +} + +/* Sampling control helper functions */ + +#include <linux/time.h> + +static inline unsigned long freq_to_sample_rate(struct hws_qsi_info_block *qsi, + unsigned long freq) +{ + return (USEC_PER_SEC / freq) * qsi->cpu_speed; +} + +static inline unsigned long sample_rate_to_freq(struct hws_qsi_info_block *qsi, + unsigned long rate) +{ + return USEC_PER_SEC * qsi->cpu_speed / rate; +} + +#define SDB_TE_ALERT_REQ_MASK 0x4000000000000000UL +#define SDB_TE_BUFFER_FULL_MASK 0x8000000000000000UL + +/* Return TOD timestamp contained in an trailer entry */ +static inline unsigned long long trailer_timestamp(struct hws_trailer_entry *te) +{ + /* TOD in STCKE format */ + if (te->t) + return *((unsigned long long *) &te->timestamp[1]); + + /* TOD in STCK format */ + return *((unsigned long long *) &te->timestamp[0]); +} + +/* Return pointer to trailer entry of an sample data block */ +static inline unsigned long *trailer_entry_ptr(unsigned long v) +{ + void *ret; + + ret = (void *) v; + ret += PAGE_SIZE; + ret -= sizeof(struct hws_trailer_entry); + + return (unsigned long *) ret; +} + +/* Return if the entry in the sample data block table (sdbt) + * is a link to the next sdbt */ +static inline int is_link_entry(unsigned long *s) +{ + return *s & 0x1ul ? 1 : 0; +} + +/* Return pointer to the linked sdbt */ +static inline unsigned long *get_next_sdbt(unsigned long *s) +{ + return (unsigned long *) (*s & ~0x1ul); +} #endif /* _ASM_S390_CPU_MF_H */ diff --git a/arch/s390/include/asm/css_chars.h b/arch/s390/include/asm/css_chars.h index 7e1c917..09d1dd4 100644 --- a/arch/s390/include/asm/css_chars.h +++ b/arch/s390/include/asm/css_chars.h @@ -29,6 +29,8 @@ struct css_general_char { u32 fcx : 1; /* bit 88 */ u32 : 19; u32 alt_ssi : 1; /* bit 108 */ + u32:1; + u32 narf:1; /* bit 110 */ } __packed; extern struct css_general_char css_general_characteristics; diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 316c850..114258e 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -48,33 +48,21 @@ static inline void clear_page(void *page) : "memory", "cc"); } +/* + * copy_page uses the mvcl instruction with 0xb0 padding byte in order to + * bypass caches when copying a page. Especially when copying huge pages + * this keeps L1 and L2 data caches alive. 
+ */ static inline void copy_page(void *to, void *from) { - if (MACHINE_HAS_MVPG) { - register unsigned long reg0 asm ("0") = 0; - asm volatile( - " mvpg %0,%1" - : : "a" (to), "a" (from), "d" (reg0) - : "memory", "cc"); - } else - asm volatile( - " mvc 0(256,%0),0(%1)\n" - " mvc 256(256,%0),256(%1)\n" - " mvc 512(256,%0),512(%1)\n" - " mvc 768(256,%0),768(%1)\n" - " mvc 1024(256,%0),1024(%1)\n" - " mvc 1280(256,%0),1280(%1)\n" - " mvc 1536(256,%0),1536(%1)\n" - " mvc 1792(256,%0),1792(%1)\n" - " mvc 2048(256,%0),2048(%1)\n" - " mvc 2304(256,%0),2304(%1)\n" - " mvc 2560(256,%0),2560(%1)\n" - " mvc 2816(256,%0),2816(%1)\n" - " mvc 3072(256,%0),3072(%1)\n" - " mvc 3328(256,%0),3328(%1)\n" - " mvc 3584(256,%0),3584(%1)\n" - " mvc 3840(256,%0),3840(%1)\n" - : : "a" (to), "a" (from) : "memory"); + register void *reg2 asm ("2") = to; + register unsigned long reg3 asm ("3") = 0x1000; + register void *reg4 asm ("4") = from; + register unsigned long reg5 asm ("5") = 0xb0001000; + asm volatile( + " mvcl 2,4" + : "+d" (reg2), "+d" (reg3), "+d" (reg4), "+d" (reg5) + : : "memory", "cc"); } #define clear_user_page(page, vaddr, pg) clear_page(page) diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index c129ab2..2583466 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -144,6 +144,7 @@ int clp_disable_fh(struct zpci_dev *); void zpci_event_error(void *); void zpci_event_availability(void *); void zpci_rescan(void); +bool zpci_is_enabled(void); #else /* CONFIG_PCI */ static inline void zpci_event_error(void *e) {} static inline void zpci_event_availability(void *e) {} diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h index 1141fb3..159a8ec 100644 --- a/arch/s390/include/asm/perf_event.h +++ b/arch/s390/include/asm/perf_event.h @@ -1,21 +1,40 @@ /* * Performance event support - s390 specific definitions. * - * Copyright IBM Corp. 2009, 2012 + * Copyright IBM Corp. 
2009, 2013 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> * Hendrik Brueckner <brueckner@linux.vnet.ibm.com> */ -#include <asm/cpu_mf.h> +#ifndef _ASM_S390_PERF_EVENT_H +#define _ASM_S390_PERF_EVENT_H -/* CPU-measurement counter facility */ -#define PERF_CPUM_CF_MAX_CTR 256 +#ifdef CONFIG_64BIT + +#include <linux/perf_event.h> +#include <linux/device.h> +#include <asm/cpu_mf.h> /* Per-CPU flags for PMU states */ #define PMU_F_RESERVED 0x1000 #define PMU_F_ENABLED 0x2000 +#define PMU_F_IN_USE 0x4000 +#define PMU_F_ERR_IBE 0x0100 +#define PMU_F_ERR_LSDA 0x0200 +#define PMU_F_ERR_MASK (PMU_F_ERR_IBE|PMU_F_ERR_LSDA) + +/* Perf defintions for PMU event attributes in sysfs */ +extern __init const struct attribute_group **cpumf_cf_event_group(void); +extern ssize_t cpumf_events_sysfs_show(struct device *dev, + struct device_attribute *attr, + char *page); +#define EVENT_VAR(_cat, _name) event_attr_##_cat##_##_name +#define EVENT_PTR(_cat, _name) (&EVENT_VAR(_cat, _name).attr.attr) + +#define CPUMF_EVENT_ATTR(cat, name, id) \ + PMU_EVENT_ATTR(name, EVENT_VAR(cat, name), id, cpumf_events_sysfs_show) +#define CPUMF_EVENT_PTR(cat, name) EVENT_PTR(cat, name) -#ifdef CONFIG_64BIT /* Perf callbacks */ struct pt_regs; @@ -23,4 +42,55 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs); extern unsigned long perf_misc_flags(struct pt_regs *regs); #define perf_misc_flags(regs) perf_misc_flags(regs) +/* Perf pt_regs extension for sample-data-entry indicators */ +struct perf_sf_sde_regs { + unsigned char in_guest:1; /* guest sample */ + unsigned long reserved:63; /* reserved */ +}; + +/* Perf PMU definitions for the counter facility */ +#define PERF_CPUM_CF_MAX_CTR 256 + +/* Perf PMU definitions for the sampling facility */ +#define PERF_CPUM_SF_MAX_CTR 2 +#define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */ +#define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */ +#define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */ +#define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */ +#define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \ + PERF_CPUM_SF_DIAG_MODE) +#define PERF_CPUM_SF_FULL_BLOCKS 0x0004 /* Process full SDBs only */ + +#define REG_NONE 0 +#define REG_OVERFLOW 1 +#define OVERFLOW_REG(hwc) ((hwc)->extra_reg.config) +#define SFB_ALLOC_REG(hwc) ((hwc)->extra_reg.alloc) +#define RAWSAMPLE_REG(hwc) ((hwc)->config) +#define TEAR_REG(hwc) ((hwc)->last_tag) +#define SAMPL_RATE(hwc) ((hwc)->event_base) +#define SAMPL_FLAGS(hwc) ((hwc)->config_base) +#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE) +#define SDB_FULL_BLOCKS(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FULL_BLOCKS) + +/* Structure for sampling data entries to be passed as perf raw sample data + * to user space. Note that raw sample data must be aligned and, thus, might + * be padded with zeros. 
+ */ +struct sf_raw_sample { +#define SF_RAW_SAMPLE_BASIC PERF_CPUM_SF_BASIC_MODE +#define SF_RAW_SAMPLE_DIAG PERF_CPUM_SF_DIAG_MODE + u64 format; + u32 size; /* Size of sf_raw_sample */ + u16 bsdes; /* Basic-sampling data entry size */ + u16 dsdes; /* Diagnostic-sampling data entry size */ + struct hws_basic_entry basic; /* Basic-sampling data entry */ + struct hws_diag_entry diag; /* Diagnostic-sampling data entry */ + u8 padding[]; /* Padding to next multiple of 8 */ +} __packed; + +/* Perf hardware reserve and release functions */ +int perf_reserve_sampling(void); +void perf_release_sampling(void); + #endif /* CONFIG_64BIT */ +#endif /* _ASM_S390_PERF_EVENT_H */ diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index 57d0d7e..d786c63 100644 --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h @@ -336,7 +336,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int, #define QDIO_FLAG_CLEANUP_USING_HALT 0x02 /** - * struct qdio_initialize - qdio initalization data + * struct qdio_initialize - qdio initialization data * @cdev: associated ccw device * @q_format: queue format * @adapter_name: name for the adapter @@ -378,6 +378,34 @@ struct qdio_initialize { struct qdio_outbuf_state *output_sbal_state_array; }; +/** + * enum qdio_brinfo_entry_type - type of address entry for qdio_brinfo_desc() + * @l3_ipv6_addr: entry contains IPv6 address + * @l3_ipv4_addr: entry contains IPv4 address + * @l2_addr_lnid: entry contains MAC address and VLAN ID + */ +enum qdio_brinfo_entry_type {l3_ipv6_addr, l3_ipv4_addr, l2_addr_lnid}; + +/** + * struct qdio_brinfo_entry_XXX - Address entry for qdio_brinfo_desc() + * @nit: Network interface token + * @addr: Address of one of the three types + * + * The struct is passed to the callback function by qdio_brinfo_desc() + */ +struct qdio_brinfo_entry_l3_ipv6 { + u64 nit; + struct { unsigned char _s6_addr[16]; } addr; +} __packed; +struct qdio_brinfo_entry_l3_ipv4 { + u64 nit; + struct { uint32_t _s_addr; } addr; +} __packed; +struct qdio_brinfo_entry_l2 { + u64 nit; + struct { u8 mac[6]; u16 lnid; } addr_lnid; +} __packed; + #define QDIO_STATE_INACTIVE 0x00000002 /* after qdio_cleanup */ #define QDIO_STATE_ESTABLISHED 0x00000004 /* after qdio_establish */ #define QDIO_STATE_ACTIVE 0x00000008 /* after qdio_activate */ @@ -399,5 +427,10 @@ extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *); extern int qdio_shutdown(struct ccw_device *, int); extern int qdio_free(struct ccw_device *); extern int qdio_get_ssqd_desc(struct ccw_device *, struct qdio_ssqd_desc *); +extern int qdio_pnso_brinfo(struct subchannel_id schid, + int cnc, u16 *response, + void (*cb)(void *priv, enum qdio_brinfo_entry_type type, + void *entry), + void *priv); #endif /* __QDIO_H__ */ diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index 30ef748..220e171 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -8,6 +8,7 @@ #include <linux/types.h> #include <asm/chpid.h> +#include <asm/cpu.h> #define SCLP_CHP_INFO_MASK_SIZE 32 @@ -37,7 +38,7 @@ struct sclp_cpu_info { unsigned int standby; unsigned int combined; int has_cpu_type; - struct sclp_cpu_entry cpu[255]; + struct sclp_cpu_entry cpu[MAX_CPU_ADDRESS + 1]; }; int sclp_get_cpu_info(struct sclp_cpu_info *info); @@ -51,8 +52,8 @@ int sclp_chp_configure(struct chp_id chpid); int sclp_chp_deconfigure(struct chp_id chpid); int sclp_chp_read_info(struct sclp_chp_info *info); void sclp_get_ipl_info(struct sclp_ipl_info *info); 
-bool sclp_has_linemode(void); -bool sclp_has_vt220(void); +bool __init sclp_has_linemode(void); +bool __init sclp_has_vt220(void); int sclp_pci_configure(u32 fid); int sclp_pci_deconfigure(u32 fid); int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode); diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h index ac9bed8..1607793 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h @@ -31,6 +31,7 @@ extern void smp_yield(void); extern void smp_stop_cpu(void); extern void smp_cpu_set_polarization(int cpu, int val); extern int smp_cpu_get_polarization(int cpu); +extern void smp_fill_possible_mask(void); #else /* CONFIG_SMP */ @@ -50,6 +51,7 @@ static inline int smp_vcpu_scheduled(int cpu) { return 1; } static inline void smp_yield_cpu(int cpu) { } static inline void smp_yield(void) { } static inline void smp_stop_cpu(void) { } +static inline void smp_fill_possible_mask(void) { } #endif /* CONFIG_SMP */ diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h index a73eb2e..bc9746a 100644 --- a/arch/s390/include/asm/vdso.h +++ b/arch/s390/include/asm/vdso.h @@ -26,8 +26,9 @@ struct vdso_data { __u64 wtom_clock_nsec; /* 0x28 */ __u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */ __u32 tz_dsttime; /* Type of dst correction 0x34 */ - __u32 ectg_available; - __u32 ntp_mult; /* NTP adjusted multiplier 0x3C */ + __u32 ectg_available; /* ECTG instruction present 0x38 */ + __u32 tk_mult; /* Mult. used for xtime_nsec 0x3c */ + __u32 tk_shift; /* Shift used for xtime_nsec 0x40 */ }; struct vdso_per_cpu_data { diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h index e83fc11..f2b18ea 100644 --- a/arch/s390/include/uapi/asm/zcrypt.h +++ b/arch/s390/include/uapi/asm/zcrypt.h @@ -154,6 +154,67 @@ struct ica_xcRB { unsigned short priority_window; unsigned int status; } __attribute__((packed)); + +/** + * struct ep11_cprb - EP11 connectivity programming request block + * @cprb_len: CPRB header length [0x0020] + * @cprb_ver_id: CPRB version id. 
[0x04] + * @pad_000: Alignment pad bytes + * @flags: Admin cmd [0x80] or functional cmd [0x00] + * @func_id: Function id / subtype [0x5434] + * @source_id: Source id [originator id] + * @target_id: Target id [usage/ctrl domain id] + * @ret_code: Return code + * @reserved1: Reserved + * @reserved2: Reserved + * @payload_len: Payload length + */ +struct ep11_cprb { + uint16_t cprb_len; + unsigned char cprb_ver_id; + unsigned char pad_000[2]; + unsigned char flags; + unsigned char func_id[2]; + uint32_t source_id; + uint32_t target_id; + uint32_t ret_code; + uint32_t reserved1; + uint32_t reserved2; + uint32_t payload_len; +} __attribute__((packed)); + +/** + * struct ep11_target_dev - EP11 target device list + * @ap_id: AP device id + * @dom_id: Usage domain id + */ +struct ep11_target_dev { + uint16_t ap_id; + uint16_t dom_id; +}; + +/** + * struct ep11_urb - EP11 user request block + * @targets_num: Number of target adapters + * @targets: Addr to target adapter list + * @weight: Level of request priority + * @req_no: Request id/number + * @req_len: Request length + * @req: Addr to request block + * @resp_len: Response length + * @resp: Addr to response block + */ +struct ep11_urb { + uint16_t targets_num; + uint64_t targets; + uint64_t weight; + uint64_t req_no; + uint64_t req_len; + uint64_t req; + uint64_t resp_len; + uint64_t resp; +} __attribute__((packed)); + #define AUTOSELECT ((unsigned int)0xFFFFFFFF) #define ZCRYPT_IOCTL_MAGIC 'z' @@ -183,6 +244,9 @@ struct ica_xcRB { * ZSECSENDCPRB * Send an arbitrary CPRB to a crypto card. * + * ZSENDEP11CPRB + * Send an arbitrary EP11 CPRB to an EP11 coprocessor crypto card. + * * Z90STAT_STATUS_MASK * Return an 64 element array of unsigned chars for the status of * all devices. @@ -256,6 +320,7 @@ struct ica_xcRB { #define ICARSAMODEXPO _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x05, 0) #define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0) #define ZSECSENDCPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0) +#define ZSENDEP11CPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0) /* New status calls */ #define Z90STAT_TOTALCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x40, int) diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 2403303..1b3ac09 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -60,7 +60,8 @@ obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o ifdef CONFIG_64BIT -obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o +obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o \ + perf_cpum_cf_events.o obj-y += runtime_instr.o cache.o endif diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 2416138..e4c99a1 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -65,12 +65,14 @@ int main(void) DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest)); DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available)); - DEFINE(__VDSO_NTP_MULT, offsetof(struct vdso_data, ntp_mult)); + DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult)); + DEFINE(__VDSO_TK_SHIFT, offsetof(struct vdso_data, tk_shift)); DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base)); DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time)); /* constants used by the vdso */ DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME); DEFINE(__CLOCK_MONOTONIC, 
CLOCK_MONOTONIC); + DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID); DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); BLANK(); /* idle data offsets */ diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 6e24429..8b84bc3 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -194,7 +194,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) return -EINVAL; /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */ - regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | + regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) | (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 | (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 | (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE); @@ -412,8 +412,9 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, regs->gprs[14] = (__u64 __force) ka->sa.sa_restorer | PSW32_ADDR_AMODE; } else { regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE; - err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, - (u16 __force __user *)(frame->retcode)); + if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, + (u16 __force __user *)(frame->retcode))) + goto give_sigsegv; } /* Set up backchain. */ diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index e5b43c9..384e609 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -74,7 +74,7 @@ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ .endm .macro LPP newpp -#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +#if IS_ENABLED(CONFIG_KVM) tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP jz .+8 .insn s,0xb2800000,\newpp @@ -82,7 +82,7 @@ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ .endm .macro HANDLE_SIE_INTERCEPT scratch,reason -#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +#if IS_ENABLED(CONFIG_KVM) tmhh %r8,0x0001 # interrupting from user ? jnz .+62 lgr \scratch,%r9 @@ -946,7 +946,7 @@ cleanup_idle_insn: .quad __critical_end - __critical_start -#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +#if IS_ENABLED(CONFIG_KVM) /* * sie64a calling convention: * %r2 pointer to sie control block @@ -975,7 +975,7 @@ sie_done: lctlg %c1,%c1,__LC_USER_ASCE # load primary asce # some program checks are suppressing. C code (e.g. do_protection_exception) # will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other -# instructions beween sie64a and sie_done should not cause program +# instructions between sie64a and sie_done should not cause program # interrupts. So lets use a nop (47 00 00 00) as a landing pad. 
# See also HANDLE_SIE_INTERCEPT rewind_pad: diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index 1105502..f51214c 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c @@ -680,6 +680,7 @@ static int __init cpumf_pmu_init(void) goto out; } + cpumf_pmu.attr_groups = cpumf_cf_event_group(); rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW); if (rc) { pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc); diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c new file mode 100644 index 0000000..4554a4b --- /dev/null +++ b/arch/s390/kernel/perf_cpum_cf_events.c @@ -0,0 +1,322 @@ +/* + * Perf PMU sysfs events attributes for available CPU-measurement counters + * + */ + +#include <linux/slab.h> +#include <linux/perf_event.h> + + +/* BEGIN: CPUM_CF COUNTER DEFINITIONS =================================== */ + +CPUMF_EVENT_ATTR(cf, CPU_CYCLES, 0x0000); +CPUMF_EVENT_ATTR(cf, INSTRUCTIONS, 0x0001); +CPUMF_EVENT_ATTR(cf, L1I_DIR_WRITES, 0x0002); +CPUMF_EVENT_ATTR(cf, L1I_PENALTY_CYCLES, 0x0003); +CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_CPU_CYCLES, 0x0020); +CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_INSTRUCTIONS, 0x0021); +CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1I_DIR_WRITES, 0x0022); +CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1I_PENALTY_CYCLES, 0x0023); +CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1D_DIR_WRITES, 0x0024); +CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1D_PENALTY_CYCLES, 0x0025); +CPUMF_EVENT_ATTR(cf, L1D_DIR_WRITES, 0x0004); +CPUMF_EVENT_ATTR(cf, L1D_PENALTY_CYCLES, 0x0005); +CPUMF_EVENT_ATTR(cf, PRNG_FUNCTIONS, 0x0040); +CPUMF_EVENT_ATTR(cf, PRNG_CYCLES, 0x0041); +CPUMF_EVENT_ATTR(cf, PRNG_BLOCKED_FUNCTIONS, 0x0042); +CPUMF_EVENT_ATTR(cf, PRNG_BLOCKED_CYCLES, 0x0043); +CPUMF_EVENT_ATTR(cf, SHA_FUNCTIONS, 0x0044); +CPUMF_EVENT_ATTR(cf, SHA_CYCLES, 0x0045); +CPUMF_EVENT_ATTR(cf, SHA_BLOCKED_FUNCTIONS, 0x0046); +CPUMF_EVENT_ATTR(cf, SHA_BLOCKED_CYCLES, 0x0047); +CPUMF_EVENT_ATTR(cf, DEA_FUNCTIONS, 0x0048); +CPUMF_EVENT_ATTR(cf, DEA_CYCLES, 0x0049); +CPUMF_EVENT_ATTR(cf, DEA_BLOCKED_FUNCTIONS, 0x004a); +CPUMF_EVENT_ATTR(cf, DEA_BLOCKED_CYCLES, 0x004b); +CPUMF_EVENT_ATTR(cf, AES_FUNCTIONS, 0x004c); +CPUMF_EVENT_ATTR(cf, AES_CYCLES, 0x004d); +CPUMF_EVENT_ATTR(cf, AES_BLOCKED_FUNCTIONS, 0x004e); +CPUMF_EVENT_ATTR(cf, AES_BLOCKED_CYCLES, 0x004f); +CPUMF_EVENT_ATTR(cf_z10, L1I_L2_SOURCED_WRITES, 0x0080); +CPUMF_EVENT_ATTR(cf_z10, L1D_L2_SOURCED_WRITES, 0x0081); +CPUMF_EVENT_ATTR(cf_z10, L1I_L3_LOCAL_WRITES, 0x0082); +CPUMF_EVENT_ATTR(cf_z10, L1D_L3_LOCAL_WRITES, 0x0083); +CPUMF_EVENT_ATTR(cf_z10, L1I_L3_REMOTE_WRITES, 0x0084); +CPUMF_EVENT_ATTR(cf_z10, L1D_L3_REMOTE_WRITES, 0x0085); +CPUMF_EVENT_ATTR(cf_z10, L1D_LMEM_SOURCED_WRITES, 0x0086); +CPUMF_EVENT_ATTR(cf_z10, L1I_LMEM_SOURCED_WRITES, 0x0087); +CPUMF_EVENT_ATTR(cf_z10, L1D_RO_EXCL_WRITES, 0x0088); +CPUMF_EVENT_ATTR(cf_z10, L1I_CACHELINE_INVALIDATES, 0x0089); +CPUMF_EVENT_ATTR(cf_z10, ITLB1_WRITES, 0x008a); +CPUMF_EVENT_ATTR(cf_z10, DTLB1_WRITES, 0x008b); +CPUMF_EVENT_ATTR(cf_z10, TLB2_PTE_WRITES, 0x008c); +CPUMF_EVENT_ATTR(cf_z10, TLB2_CRSTE_WRITES, 0x008d); +CPUMF_EVENT_ATTR(cf_z10, TLB2_CRSTE_HPAGE_WRITES, 0x008e); +CPUMF_EVENT_ATTR(cf_z10, ITLB1_MISSES, 0x0091); +CPUMF_EVENT_ATTR(cf_z10, DTLB1_MISSES, 0x0092); +CPUMF_EVENT_ATTR(cf_z10, L2C_STORES_SENT, 0x0093); +CPUMF_EVENT_ATTR(cf_z196, L1D_L2_SOURCED_WRITES, 0x0080); +CPUMF_EVENT_ATTR(cf_z196, L1I_L2_SOURCED_WRITES, 0x0081); +CPUMF_EVENT_ATTR(cf_z196, DTLB1_MISSES, 0x0082); +CPUMF_EVENT_ATTR(cf_z196, 
ITLB1_MISSES, 0x0083); +CPUMF_EVENT_ATTR(cf_z196, L2C_STORES_SENT, 0x0085); +CPUMF_EVENT_ATTR(cf_z196, L1D_OFFBOOK_L3_SOURCED_WRITES, 0x0086); +CPUMF_EVENT_ATTR(cf_z196, L1D_ONBOOK_L4_SOURCED_WRITES, 0x0087); +CPUMF_EVENT_ATTR(cf_z196, L1I_ONBOOK_L4_SOURCED_WRITES, 0x0088); +CPUMF_EVENT_ATTR(cf_z196, L1D_RO_EXCL_WRITES, 0x0089); +CPUMF_EVENT_ATTR(cf_z196, L1D_OFFBOOK_L4_SOURCED_WRITES, 0x008a); +CPUMF_EVENT_ATTR(cf_z196, L1I_OFFBOOK_L4_SOURCED_WRITES, 0x008b); +CPUMF_EVENT_ATTR(cf_z196, DTLB1_HPAGE_WRITES, 0x008c); +CPUMF_EVENT_ATTR(cf_z196, L1D_LMEM_SOURCED_WRITES, 0x008d); +CPUMF_EVENT_ATTR(cf_z196, L1I_LMEM_SOURCED_WRITES, 0x008e); +CPUMF_EVENT_ATTR(cf_z196, L1I_OFFBOOK_L3_SOURCED_WRITES, 0x008f); +CPUMF_EVENT_ATTR(cf_z196, DTLB1_WRITES, 0x0090); +CPUMF_EVENT_ATTR(cf_z196, ITLB1_WRITES, 0x0091); +CPUMF_EVENT_ATTR(cf_z196, TLB2_PTE_WRITES, 0x0092); +CPUMF_EVENT_ATTR(cf_z196, TLB2_CRSTE_HPAGE_WRITES, 0x0093); +CPUMF_EVENT_ATTR(cf_z196, TLB2_CRSTE_WRITES, 0x0094); +CPUMF_EVENT_ATTR(cf_z196, L1D_ONCHIP_L3_SOURCED_WRITES, 0x0096); +CPUMF_EVENT_ATTR(cf_z196, L1D_OFFCHIP_L3_SOURCED_WRITES, 0x0098); +CPUMF_EVENT_ATTR(cf_z196, L1I_ONCHIP_L3_SOURCED_WRITES, 0x0099); +CPUMF_EVENT_ATTR(cf_z196, L1I_OFFCHIP_L3_SOURCED_WRITES, 0x009b); +CPUMF_EVENT_ATTR(cf_zec12, DTLB1_MISSES, 0x0080); +CPUMF_EVENT_ATTR(cf_zec12, ITLB1_MISSES, 0x0081); +CPUMF_EVENT_ATTR(cf_zec12, L1D_L2I_SOURCED_WRITES, 0x0082); +CPUMF_EVENT_ATTR(cf_zec12, L1I_L2I_SOURCED_WRITES, 0x0083); +CPUMF_EVENT_ATTR(cf_zec12, L1D_L2D_SOURCED_WRITES, 0x0084); +CPUMF_EVENT_ATTR(cf_zec12, DTLB1_WRITES, 0x0085); +CPUMF_EVENT_ATTR(cf_zec12, L1D_LMEM_SOURCED_WRITES, 0x0087); +CPUMF_EVENT_ATTR(cf_zec12, L1I_LMEM_SOURCED_WRITES, 0x0089); +CPUMF_EVENT_ATTR(cf_zec12, L1D_RO_EXCL_WRITES, 0x008a); +CPUMF_EVENT_ATTR(cf_zec12, DTLB1_HPAGE_WRITES, 0x008b); +CPUMF_EVENT_ATTR(cf_zec12, ITLB1_WRITES, 0x008c); +CPUMF_EVENT_ATTR(cf_zec12, TLB2_PTE_WRITES, 0x008d); +CPUMF_EVENT_ATTR(cf_zec12, TLB2_CRSTE_HPAGE_WRITES, 0x008e); +CPUMF_EVENT_ATTR(cf_zec12, TLB2_CRSTE_WRITES, 0x008f); +CPUMF_EVENT_ATTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES, 0x0090); +CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES, 0x0091); +CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES, 0x0092); +CPUMF_EVENT_ATTR(cf_zec12, L1D_ONBOOK_L4_SOURCED_WRITES, 0x0093); +CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFBOOK_L4_SOURCED_WRITES, 0x0094); +CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TEND, 0x0095); +CPUMF_EVENT_ATTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES_IV, 0x0096); +CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES_IV, 0x0097); +CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES_IV, 0x0098); +CPUMF_EVENT_ATTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES, 0x0099); +CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES, 0x009a); +CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES, 0x009b); +CPUMF_EVENT_ATTR(cf_zec12, L1I_ONBOOK_L4_SOURCED_WRITES, 0x009c); +CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L4_SOURCED_WRITES, 0x009d); +CPUMF_EVENT_ATTR(cf_zec12, TX_C_TEND, 0x009e); +CPUMF_EVENT_ATTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES_IV, 0x009f); +CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES_IV, 0x00a0); +CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV, 0x00a1); +CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TABORT, 0x00b1); +CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_NO_SPECIAL, 0x00b2); +CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_SPECIAL, 0x00b3); + +static struct attribute *cpumcf_pmu_event_attr[] = { + CPUMF_EVENT_PTR(cf, CPU_CYCLES), + CPUMF_EVENT_PTR(cf, INSTRUCTIONS), + 
CPUMF_EVENT_PTR(cf, L1I_DIR_WRITES), + CPUMF_EVENT_PTR(cf, L1I_PENALTY_CYCLES), + CPUMF_EVENT_PTR(cf, PROBLEM_STATE_CPU_CYCLES), + CPUMF_EVENT_PTR(cf, PROBLEM_STATE_INSTRUCTIONS), + CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1I_DIR_WRITES), + CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1I_PENALTY_CYCLES), + CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1D_DIR_WRITES), + CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1D_PENALTY_CYCLES), + CPUMF_EVENT_PTR(cf, L1D_DIR_WRITES), + CPUMF_EVENT_PTR(cf, L1D_PENALTY_CYCLES), + CPUMF_EVENT_PTR(cf, PRNG_FUNCTIONS), + CPUMF_EVENT_PTR(cf, PRNG_CYCLES), + CPUMF_EVENT_PTR(cf, PRNG_BLOCKED_FUNCTIONS), + CPUMF_EVENT_PTR(cf, PRNG_BLOCKED_CYCLES), + CPUMF_EVENT_PTR(cf, SHA_FUNCTIONS), + CPUMF_EVENT_PTR(cf, SHA_CYCLES), + CPUMF_EVENT_PTR(cf, SHA_BLOCKED_FUNCTIONS), + CPUMF_EVENT_PTR(cf, SHA_BLOCKED_CYCLES), + CPUMF_EVENT_PTR(cf, DEA_FUNCTIONS), + CPUMF_EVENT_PTR(cf, DEA_CYCLES), + CPUMF_EVENT_PTR(cf, DEA_BLOCKED_FUNCTIONS), + CPUMF_EVENT_PTR(cf, DEA_BLOCKED_CYCLES), + CPUMF_EVENT_PTR(cf, AES_FUNCTIONS), + CPUMF_EVENT_PTR(cf, AES_CYCLES), + CPUMF_EVENT_PTR(cf, AES_BLOCKED_FUNCTIONS), + CPUMF_EVENT_PTR(cf, AES_BLOCKED_CYCLES), + NULL, +}; + +static struct attribute *cpumcf_z10_pmu_event_attr[] __initdata = { + CPUMF_EVENT_PTR(cf_z10, L1I_L2_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1D_L2_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1I_L3_LOCAL_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1D_L3_LOCAL_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1I_L3_REMOTE_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1D_L3_REMOTE_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1D_LMEM_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1I_LMEM_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1D_RO_EXCL_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1I_CACHELINE_INVALIDATES), + CPUMF_EVENT_PTR(cf_z10, ITLB1_WRITES), + CPUMF_EVENT_PTR(cf_z10, DTLB1_WRITES), + CPUMF_EVENT_PTR(cf_z10, TLB2_PTE_WRITES), + CPUMF_EVENT_PTR(cf_z10, TLB2_CRSTE_WRITES), + CPUMF_EVENT_PTR(cf_z10, TLB2_CRSTE_HPAGE_WRITES), + CPUMF_EVENT_PTR(cf_z10, ITLB1_MISSES), + CPUMF_EVENT_PTR(cf_z10, DTLB1_MISSES), + CPUMF_EVENT_PTR(cf_z10, L2C_STORES_SENT), + NULL, +}; + +static struct attribute *cpumcf_z196_pmu_event_attr[] __initdata = { + CPUMF_EVENT_PTR(cf_z196, L1D_L2_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_L2_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, DTLB1_MISSES), + CPUMF_EVENT_PTR(cf_z196, ITLB1_MISSES), + CPUMF_EVENT_PTR(cf_z196, L2C_STORES_SENT), + CPUMF_EVENT_PTR(cf_z196, L1D_OFFBOOK_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1D_ONBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_ONBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1D_RO_EXCL_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1D_OFFBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_OFFBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, DTLB1_HPAGE_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1D_LMEM_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_LMEM_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_OFFBOOK_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, DTLB1_WRITES), + CPUMF_EVENT_PTR(cf_z196, ITLB1_WRITES), + CPUMF_EVENT_PTR(cf_z196, TLB2_PTE_WRITES), + CPUMF_EVENT_PTR(cf_z196, TLB2_CRSTE_HPAGE_WRITES), + CPUMF_EVENT_PTR(cf_z196, TLB2_CRSTE_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1D_ONCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1D_OFFCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_ONCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_OFFCHIP_L3_SOURCED_WRITES), + NULL, +}; + +static struct attribute *cpumcf_zec12_pmu_event_attr[] __initdata = { + CPUMF_EVENT_PTR(cf_zec12, DTLB1_MISSES), + CPUMF_EVENT_PTR(cf_zec12, 
ITLB1_MISSES), + CPUMF_EVENT_PTR(cf_zec12, L1D_L2I_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1I_L2I_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_L2D_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, DTLB1_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_LMEM_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1I_LMEM_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_RO_EXCL_WRITES), + CPUMF_EVENT_PTR(cf_zec12, DTLB1_HPAGE_WRITES), + CPUMF_EVENT_PTR(cf_zec12, ITLB1_WRITES), + CPUMF_EVENT_PTR(cf_zec12, TLB2_PTE_WRITES), + CPUMF_EVENT_PTR(cf_zec12, TLB2_CRSTE_HPAGE_WRITES), + CPUMF_EVENT_PTR(cf_zec12, TLB2_CRSTE_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_ONBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_OFFBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, TX_NC_TEND), + CPUMF_EVENT_PTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES_IV), + CPUMF_EVENT_PTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES_IV), + CPUMF_EVENT_PTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES_IV), + CPUMF_EVENT_PTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1I_ONBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1I_OFFBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, TX_C_TEND), + CPUMF_EVENT_PTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES_IV), + CPUMF_EVENT_PTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES_IV), + CPUMF_EVENT_PTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV), + CPUMF_EVENT_PTR(cf_zec12, TX_NC_TABORT), + CPUMF_EVENT_PTR(cf_zec12, TX_C_TABORT_NO_SPECIAL), + CPUMF_EVENT_PTR(cf_zec12, TX_C_TABORT_SPECIAL), + NULL, +}; + +/* END: CPUM_CF COUNTER DEFINITIONS ===================================== */ + +static struct attribute_group cpumsf_pmu_events_group = { + .name = "events", + .attrs = cpumcf_pmu_event_attr, +}; + +PMU_FORMAT_ATTR(event, "config:0-63"); + +static struct attribute *cpumsf_pmu_format_attr[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group cpumsf_pmu_format_group = { + .name = "format", + .attrs = cpumsf_pmu_format_attr, +}; + +static const struct attribute_group *cpumsf_pmu_attr_groups[] = { + &cpumsf_pmu_events_group, + &cpumsf_pmu_format_group, + NULL, +}; + + +static __init struct attribute **merge_attr(struct attribute **a, + struct attribute **b) +{ + struct attribute **new; + int j, i; + + for (j = 0; a[j]; j++) + ; + for (i = 0; b[i]; i++) + j++; + j++; + + new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL); + if (!new) + return NULL; + j = 0; + for (i = 0; a[i]; i++) + new[j++] = a[i]; + for (i = 0; b[i]; i++) + new[j++] = b[i]; + new[j] = NULL; + + return new; +} + +__init const struct attribute_group **cpumf_cf_event_group(void) +{ + struct attribute **combined, **model; + struct cpuid cpu_id; + + get_cpu_id(&cpu_id); + switch (cpu_id.machine) { + case 0x2097: + case 0x2098: + model = cpumcf_z10_pmu_event_attr; + break; + case 0x2817: + case 0x2818: + model = cpumcf_z196_pmu_event_attr; + break; + case 0x2827: + case 0x2828: + model = cpumcf_zec12_pmu_event_attr; + break; + default: + model = NULL; + break; + }; + + if (!model) + goto out; + + combined = merge_attr(cpumcf_pmu_event_attr, model); + if (combined) + cpumsf_pmu_events_group.attrs = combined; +out: + return cpumsf_pmu_attr_groups; +} diff --git a/arch/s390/kernel/perf_cpum_sf.c 
b/arch/s390/kernel/perf_cpum_sf.c new file mode 100644 index 0000000..6c0d298 --- /dev/null +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -0,0 +1,1641 @@ +/* + * Performance event support for the System z CPU-measurement Sampling Facility + * + * Copyright IBM Corp. 2013 + * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + */ +#define KMSG_COMPONENT "cpum_sf" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/kernel.h> +#include <linux/kernel_stat.h> +#include <linux/perf_event.h> +#include <linux/percpu.h> +#include <linux/notifier.h> +#include <linux/export.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/moduleparam.h> +#include <asm/cpu_mf.h> +#include <asm/irq.h> +#include <asm/debug.h> +#include <asm/timex.h> + +/* Minimum number of sample-data-block-tables: + * At least one table is required for the sampling buffer structure. + * A single table contains up to 511 pointers to sample-data-blocks. + */ +#define CPUM_SF_MIN_SDBT 1 + +/* Number of sample-data-blocks per sample-data-block-table (SDBT): + * A table contains SDB pointers (8 bytes) and one table-link entry + * that points to the origin of the next SDBT. + */ +#define CPUM_SF_SDB_PER_TABLE ((PAGE_SIZE - 8) / 8) + +/* Maximum page offset for an SDBT table-link entry: + * If this page offset is reached, a table-link entry to the next SDBT + * must be added. + */ +#define CPUM_SF_SDBT_TL_OFFSET (CPUM_SF_SDB_PER_TABLE * 8) +static inline int require_table_link(const void *sdbt) +{ + return ((unsigned long) sdbt & ~PAGE_MASK) == CPUM_SF_SDBT_TL_OFFSET; +} + +/* Minimum and maximum sampling buffer sizes: + * + * This number represents the maximum size of the sampling buffer taking + * the number of sample-data-block-tables into account. Note that these + * numbers apply to the basic-sampling function only. + * The maximum number of SDBs is increased by CPUM_SF_SDB_DIAG_FACTOR if + * the diagnostic-sampling function is active. 
+ * + * Sampling buffer size Buffer characteristics + * --------------------------------------------------- + * 64KB == 16 pages (4KB per page) + * 1 page for SDB-tables + * 15 pages for SDBs + * + * 32MB == 8192 pages (4KB per page) + * 16 pages for SDB-tables + * 8176 pages for SDBs + */ +static unsigned long __read_mostly CPUM_SF_MIN_SDB = 15; +static unsigned long __read_mostly CPUM_SF_MAX_SDB = 8176; +static unsigned long __read_mostly CPUM_SF_SDB_DIAG_FACTOR = 1; + +struct sf_buffer { + unsigned long *sdbt; /* Sample-data-block-table origin */ + /* buffer characteristics (required for buffer increments) */ + unsigned long num_sdb; /* Number of sample-data-blocks */ + unsigned long num_sdbt; /* Number of sample-data-block-tables */ + unsigned long *tail; /* last sample-data-block-table */ +}; + +struct cpu_hw_sf { + /* CPU-measurement sampling information block */ + struct hws_qsi_info_block qsi; + /* CPU-measurement sampling control block */ + struct hws_lsctl_request_block lsctl; + struct sf_buffer sfb; /* Sampling buffer */ + unsigned int flags; /* Status flags */ + struct perf_event *event; /* Scheduled perf event */ +}; +static DEFINE_PER_CPU(struct cpu_hw_sf, cpu_hw_sf); + +/* Debug feature */ +static debug_info_t *sfdbg; + +/* + * sf_disable() - Switch off sampling facility + */ +static int sf_disable(void) +{ + struct hws_lsctl_request_block sreq; + + memset(&sreq, 0, sizeof(sreq)); + return lsctl(&sreq); +} + +/* + * sf_buffer_available() - Check for an allocated sampling buffer + */ +static int sf_buffer_available(struct cpu_hw_sf *cpuhw) +{ + return !!cpuhw->sfb.sdbt; +} + +/* + * deallocate sampling facility buffer + */ +static void free_sampling_buffer(struct sf_buffer *sfb) +{ + unsigned long *sdbt, *curr; + + if (!sfb->sdbt) + return; + + sdbt = sfb->sdbt; + curr = sdbt; + + /* Free the SDBT after all SDBs are processed... */ + while (1) { + if (!*curr || !sdbt) + break; + + /* Process table-link entries */ + if (is_link_entry(curr)) { + curr = get_next_sdbt(curr); + if (sdbt) + free_page((unsigned long) sdbt); + + /* If the origin is reached, sampling buffer is freed */ + if (curr == sfb->sdbt) + break; + else + sdbt = curr; + } else { + /* Process SDB pointer */ + if (*curr) { + free_page(*curr); + curr++; + } + } + } + + debug_sprintf_event(sfdbg, 5, + "free_sampling_buffer: freed sdbt=%p\n", sfb->sdbt); + memset(sfb, 0, sizeof(*sfb)); +} + +static int alloc_sample_data_block(unsigned long *sdbt, gfp_t gfp_flags) +{ + unsigned long sdb, *trailer; + + /* Allocate and initialize sample-data-block */ + sdb = get_zeroed_page(gfp_flags); + if (!sdb) + return -ENOMEM; + trailer = trailer_entry_ptr(sdb); + *trailer = SDB_TE_ALERT_REQ_MASK; + + /* Link SDB into the sample-data-block-table */ + *sdbt = sdb; + + return 0; +} + +/* + * realloc_sampling_buffer() - extend sampler memory + * + * Allocates new sample-data-blocks and adds them to the specified sampling + * buffer memory. + * + * Important: This modifies the sampling buffer and must be called when the + * sampling facility is disabled. + * + * Returns zero on success, non-zero otherwise. + */ +static int realloc_sampling_buffer(struct sf_buffer *sfb, + unsigned long num_sdb, gfp_t gfp_flags) +{ + int i, rc; + unsigned long *new, *tail; + + if (!sfb->sdbt || !sfb->tail) + return -EINVAL; + + if (!is_link_entry(sfb->tail)) + return -EINVAL; + + /* Append to the existing sampling buffer, overwriting the table-link + * register. 
+ * The tail variables always points to the "tail" (last and table-link) + * entry in an SDB-table. + */ + tail = sfb->tail; + + /* Do a sanity check whether the table-link entry points to + * the sampling buffer origin. + */ + if (sfb->sdbt != get_next_sdbt(tail)) { + debug_sprintf_event(sfdbg, 3, "realloc_sampling_buffer: " + "sampling buffer is not linked: origin=%p" + "tail=%p\n", + (void *) sfb->sdbt, (void *) tail); + return -EINVAL; + } + + /* Allocate remaining SDBs */ + rc = 0; + for (i = 0; i < num_sdb; i++) { + /* Allocate a new SDB-table if it is full. */ + if (require_table_link(tail)) { + new = (unsigned long *) get_zeroed_page(gfp_flags); + if (!new) { + rc = -ENOMEM; + break; + } + sfb->num_sdbt++; + /* Link current page to tail of chain */ + *tail = (unsigned long)(void *) new + 1; + tail = new; + } + + /* Allocate a new sample-data-block. + * If there is not enough memory, stop the realloc process + * and simply use what was allocated. If this is a temporary + * issue, a new realloc call (if required) might succeed. + */ + rc = alloc_sample_data_block(tail, gfp_flags); + if (rc) + break; + sfb->num_sdb++; + tail++; + } + + /* Link sampling buffer to its origin */ + *tail = (unsigned long) sfb->sdbt + 1; + sfb->tail = tail; + + debug_sprintf_event(sfdbg, 4, "realloc_sampling_buffer: new buffer" + " settings: sdbt=%lu sdb=%lu\n", + sfb->num_sdbt, sfb->num_sdb); + return rc; +} + +/* + * allocate_sampling_buffer() - allocate sampler memory + * + * Allocates and initializes a sampling buffer structure using the + * specified number of sample-data-blocks (SDB). For each allocation, + * a 4K page is used. The number of sample-data-block-tables (SDBT) + * are calculated from SDBs. + * Also set the ALERT_REQ mask in each SDBs trailer. + * + * Returns zero on success, non-zero otherwise. + */ +static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb) +{ + int rc; + + if (sfb->sdbt) + return -EINVAL; + + /* Allocate the sample-data-block-table origin */ + sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL); + if (!sfb->sdbt) + return -ENOMEM; + sfb->num_sdb = 0; + sfb->num_sdbt = 1; + + /* Link the table origin to point to itself to prepare for + * realloc_sampling_buffer() invocation. + */ + sfb->tail = sfb->sdbt; + *sfb->tail = (unsigned long)(void *) sfb->sdbt + 1; + + /* Allocate requested number of sample-data-blocks */ + rc = realloc_sampling_buffer(sfb, num_sdb, GFP_KERNEL); + if (rc) { + free_sampling_buffer(sfb); + debug_sprintf_event(sfdbg, 4, "alloc_sampling_buffer: " + "realloc_sampling_buffer failed with rc=%i\n", rc); + } else + debug_sprintf_event(sfdbg, 4, + "alloc_sampling_buffer: tear=%p dear=%p\n", + sfb->sdbt, (void *) *sfb->sdbt); + return rc; +} + +static void sfb_set_limits(unsigned long min, unsigned long max) +{ + struct hws_qsi_info_block si; + + CPUM_SF_MIN_SDB = min; + CPUM_SF_MAX_SDB = max; + + memset(&si, 0, sizeof(si)); + if (!qsi(&si)) + CPUM_SF_SDB_DIAG_FACTOR = DIV_ROUND_UP(si.dsdes, si.bsdes); +} + +static unsigned long sfb_max_limit(struct hw_perf_event *hwc) +{ + return SAMPL_DIAG_MODE(hwc) ? 
CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR + : CPUM_SF_MAX_SDB; +} + +static unsigned long sfb_pending_allocs(struct sf_buffer *sfb, + struct hw_perf_event *hwc) +{ + if (!sfb->sdbt) + return SFB_ALLOC_REG(hwc); + if (SFB_ALLOC_REG(hwc) > sfb->num_sdb) + return SFB_ALLOC_REG(hwc) - sfb->num_sdb; + return 0; +} + +static int sfb_has_pending_allocs(struct sf_buffer *sfb, + struct hw_perf_event *hwc) +{ + return sfb_pending_allocs(sfb, hwc) > 0; +} + +static void sfb_account_allocs(unsigned long num, struct hw_perf_event *hwc) +{ + /* Limit the number of SDBs to not exceed the maximum */ + num = min_t(unsigned long, num, sfb_max_limit(hwc) - SFB_ALLOC_REG(hwc)); + if (num) + SFB_ALLOC_REG(hwc) += num; +} + +static void sfb_init_allocs(unsigned long num, struct hw_perf_event *hwc) +{ + SFB_ALLOC_REG(hwc) = 0; + sfb_account_allocs(num, hwc); +} + +static size_t event_sample_size(struct hw_perf_event *hwc) +{ + struct sf_raw_sample *sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(hwc); + size_t sample_size; + + /* The sample size depends on the sampling function: The basic-sampling + * function must be always enabled, diagnostic-sampling function is + * optional. + */ + sample_size = sfr->bsdes; + if (SAMPL_DIAG_MODE(hwc)) + sample_size += sfr->dsdes; + + return sample_size; +} + +static void deallocate_buffers(struct cpu_hw_sf *cpuhw) +{ + if (cpuhw->sfb.sdbt) + free_sampling_buffer(&cpuhw->sfb); +} + +static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc) +{ + unsigned long n_sdb, freq, factor; + size_t sfr_size, sample_size; + struct sf_raw_sample *sfr; + + /* Allocate raw sample buffer + * + * The raw sample buffer is used to temporarily store sampling data + * entries for perf raw sample processing. The buffer size mainly + * depends on the size of diagnostic-sampling data entries which is + * machine-specific. The exact size calculation includes: + * 1. The first 4 bytes of diagnostic-sampling data entries are + * already reflected in the sf_raw_sample structure. Subtract + * these bytes. + * 2. The perf raw sample data must be 8-byte aligned (u64) and + * perf's internal data size must be considered too. So add + * an additional u32 for correct alignment and subtract before + * allocating the buffer. + * 3. Store the raw sample buffer pointer in the perf event + * hardware structure. + */ + sfr_size = ALIGN((sizeof(*sfr) - sizeof(sfr->diag) + cpuhw->qsi.dsdes) + + sizeof(u32), sizeof(u64)); + sfr_size -= sizeof(u32); + sfr = kzalloc(sfr_size, GFP_KERNEL); + if (!sfr) + return -ENOMEM; + sfr->size = sfr_size; + sfr->bsdes = cpuhw->qsi.bsdes; + sfr->dsdes = cpuhw->qsi.dsdes; + RAWSAMPLE_REG(hwc) = (unsigned long) sfr; + + /* Calculate sampling buffers using 4K pages + * + * 1. Determine the sample data size which depends on the used + * sampling functions, for example, basic-sampling or + * basic-sampling with diagnostic-sampling. + * + * 2. Use the sampling frequency as input. The sampling buffer is + * designed for almost one second. This can be adjusted through + * the "factor" variable. + * In any case, alloc_sampling_buffer() sets the Alert Request + * Control indicator to trigger a measurement-alert to harvest + * sample-data-blocks (sdb). + * + * 3. Compute the number of sample-data-blocks and ensure a minimum + * of CPUM_SF_MIN_SDB. Also ensure the upper limit does not + * exceed a "calculated" maximum. The symbolic maximum is + * designed for basic-sampling only and needs to be increased if + * diagnostic-sampling is active. 
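+ * As an illustrative example (assuming the 32-byte basic-sampling
+ * data entry): at freq = 4000 samples per second, one SDB holds
+ * (4096 - 64) / 32 = 126 samples, so with factor = 1 the code below
+ * requests n_sdb = DIV_ROUND_UP(4000, 126) = 32 sample-data-blocks.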
+ * See also the remarks for these symbolic constants. + * + * 4. Compute the number of sample-data-block-tables (SDBT) and + * ensure a minimum of CPUM_SF_MIN_SDBT (one table can manage up + * to 511 SDBs). + */ + sample_size = event_sample_size(hwc); + freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc)); + factor = 1; + n_sdb = DIV_ROUND_UP(freq, factor * ((PAGE_SIZE-64) / sample_size)); + if (n_sdb < CPUM_SF_MIN_SDB) + n_sdb = CPUM_SF_MIN_SDB; + + /* If there is already a sampling buffer allocated, it is very likely + * that the sampling facility is enabled too. If the event to be + * initialized requires a greater sampling buffer, the allocation must + * be postponed. Changing the sampling buffer requires the sampling + * facility to be in the disabled state. So, account the number of + * required SDBs and let cpumsf_pmu_enable() resize the buffer just + * before the event is started. + */ + sfb_init_allocs(n_sdb, hwc); + if (sf_buffer_available(cpuhw)) + return 0; + + debug_sprintf_event(sfdbg, 3, + "allocate_buffers: rate=%lu f=%lu sdb=%lu/%lu" + " sample_size=%lu cpuhw=%p\n", + SAMPL_RATE(hwc), freq, n_sdb, sfb_max_limit(hwc), + sample_size, cpuhw); + + return alloc_sampling_buffer(&cpuhw->sfb, + sfb_pending_allocs(&cpuhw->sfb, hwc)); +} + +static unsigned long min_percent(unsigned int percent, unsigned long base, + unsigned long min) +{ + return min_t(unsigned long, min, DIV_ROUND_UP(percent * base, 100)); +} + +static unsigned long compute_sfb_extent(unsigned long ratio, unsigned long base) +{ + /* Use a percentage-based approach to extend the sampling facility + * buffer. Accept up to 5% sample data loss. + * Vary the extents between 1% to 5% of the current number of + * sample-data-blocks. + */ + if (ratio <= 5) + return 0; + if (ratio <= 25) + return min_percent(1, base, 1); + if (ratio <= 50) + return min_percent(1, base, 1); + if (ratio <= 75) + return min_percent(2, base, 2); + if (ratio <= 100) + return min_percent(3, base, 3); + if (ratio <= 250) + return min_percent(4, base, 4); + + return min_percent(5, base, 8); +} + +static void sfb_account_overflows(struct cpu_hw_sf *cpuhw, + struct hw_perf_event *hwc) +{ + unsigned long ratio, num; + + if (!OVERFLOW_REG(hwc)) + return; + + /* The sample_overflow contains the average number of sample data + * that has been lost because sample-data-blocks were full. + * + * Calculate the total number of sample data entries that has been + * discarded. Then calculate the ratio of lost samples to total samples + * per second in percent. + */ + ratio = DIV_ROUND_UP(100 * OVERFLOW_REG(hwc) * cpuhw->sfb.num_sdb, + sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc))); + + /* Compute number of sample-data-blocks */ + num = compute_sfb_extent(ratio, cpuhw->sfb.num_sdb); + if (num) + sfb_account_allocs(num, hwc); + + debug_sprintf_event(sfdbg, 5, "sfb: overflow: overflow=%llu ratio=%lu" + " num=%lu\n", OVERFLOW_REG(hwc), ratio, num); + OVERFLOW_REG(hwc) = 0; +} + +/* extend_sampling_buffer() - Extend sampling buffer + * @sfb: Sampling buffer structure (for local CPU) + * @hwc: Perf event hardware structure + * + * Use this function to extend the sampling buffer based on the overflow counter + * and postponed allocation extents stored in the specified Perf event hardware. + * + * Important: This function disables the sampling facility in order to safely + * change the sampling buffer structure. Do not call this function + * when the PMU is active. 
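+ *
+ * Illustrative numbers for the overflow-driven path: an average loss
+ * of 10 samples per SDB across 32 SDBs at 4000 samples per second
+ * gives ratio = DIV_ROUND_UP(100 * 10 * 32, 4000) = 8 in
+ * sfb_account_overflows(), for which compute_sfb_extent() requests a
+ * one-SDB extent that this function then tries to allocate.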
+ */ +static void extend_sampling_buffer(struct sf_buffer *sfb, + struct hw_perf_event *hwc) +{ + unsigned long num, num_old; + int rc; + + num = sfb_pending_allocs(sfb, hwc); + if (!num) + return; + num_old = sfb->num_sdb; + + /* Disable the sampling facility to reset any states and also + * clear pending measurement alerts. + */ + sf_disable(); + + /* Extend the sampling buffer. + * This memory allocation typically happens in an atomic context when + * called by perf. Because this is a reallocation, it is fine if the + * new SDB-request cannot be satisfied immediately. + */ + rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC); + if (rc) + debug_sprintf_event(sfdbg, 5, "sfb: extend: realloc " + "failed with rc=%i\n", rc); + + if (sfb_has_pending_allocs(sfb, hwc)) + debug_sprintf_event(sfdbg, 5, "sfb: extend: " + "req=%lu alloc=%lu remaining=%lu\n", + num, sfb->num_sdb - num_old, + sfb_pending_allocs(sfb, hwc)); +} + + +/* Number of perf events counting hardware events */ +static atomic_t num_events; +/* Used to avoid races in calling reserve/release_cpumf_hardware */ +static DEFINE_MUTEX(pmc_reserve_mutex); + +#define PMC_INIT 0 +#define PMC_RELEASE 1 +#define PMC_FAILURE 2 +static void setup_pmc_cpu(void *flags) +{ + int err; + struct cpu_hw_sf *cpusf = &__get_cpu_var(cpu_hw_sf); + + err = 0; + switch (*((int *) flags)) { + case PMC_INIT: + memset(cpusf, 0, sizeof(*cpusf)); + err = qsi(&cpusf->qsi); + if (err) + break; + cpusf->flags |= PMU_F_RESERVED; + err = sf_disable(); + if (err) + pr_err("Switching off the sampling facility failed " + "with rc=%i\n", err); + debug_sprintf_event(sfdbg, 5, + "setup_pmc_cpu: initialized: cpuhw=%p\n", cpusf); + break; + case PMC_RELEASE: + cpusf->flags &= ~PMU_F_RESERVED; + err = sf_disable(); + if (err) { + pr_err("Switching off the sampling facility failed " + "with rc=%i\n", err); + } else + deallocate_buffers(cpusf); + debug_sprintf_event(sfdbg, 5, + "setup_pmc_cpu: released: cpuhw=%p\n", cpusf); + break; + } + if (err) + *((int *) flags) |= PMC_FAILURE; +} + +static void release_pmc_hardware(void) +{ + int flags = PMC_RELEASE; + + irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); + on_each_cpu(setup_pmc_cpu, &flags, 1); + perf_release_sampling(); +} + +static int reserve_pmc_hardware(void) +{ + int flags = PMC_INIT; + int err; + + err = perf_reserve_sampling(); + if (err) + return err; + on_each_cpu(setup_pmc_cpu, &flags, 1); + if (flags & PMC_FAILURE) { + release_pmc_hardware(); + return -ENODEV; + } + irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); + + return 0; +} + +static void hw_perf_event_destroy(struct perf_event *event) +{ + /* Free raw sample buffer */ + if (RAWSAMPLE_REG(&event->hw)) + kfree((void *) RAWSAMPLE_REG(&event->hw)); + + /* Release PMC if this is the last perf event */ + if (!atomic_add_unless(&num_events, -1, 1)) { + mutex_lock(&pmc_reserve_mutex); + if (atomic_dec_return(&num_events) == 0) + release_pmc_hardware(); + mutex_unlock(&pmc_reserve_mutex); + } +} + +static void hw_init_period(struct hw_perf_event *hwc, u64 period) +{ + hwc->sample_period = period; + hwc->last_period = hwc->sample_period; + local64_set(&hwc->period_left, hwc->sample_period); +} + +static void hw_reset_registers(struct hw_perf_event *hwc, + unsigned long *sdbt_origin) +{ + struct sf_raw_sample *sfr; + + /* (Re)set to first sample-data-block-table */ + TEAR_REG(hwc) = (unsigned long) sdbt_origin; + + /* (Re)set raw sampling buffer register */ + sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(hwc); + memset(&sfr->basic, 0, 
sizeof(sfr->basic)); + memset(&sfr->diag, 0, sfr->dsdes); +} + +static unsigned long hw_limit_rate(const struct hws_qsi_info_block *si, + unsigned long rate) +{ + return clamp_t(unsigned long, rate, + si->min_sampl_rate, si->max_sampl_rate); +} + +static int __hw_perf_event_init(struct perf_event *event) +{ + struct cpu_hw_sf *cpuhw; + struct hws_qsi_info_block si; + struct perf_event_attr *attr = &event->attr; + struct hw_perf_event *hwc = &event->hw; + unsigned long rate; + int cpu, err; + + /* Reserve CPU-measurement sampling facility */ + err = 0; + if (!atomic_inc_not_zero(&num_events)) { + mutex_lock(&pmc_reserve_mutex); + if (atomic_read(&num_events) == 0 && reserve_pmc_hardware()) + err = -EBUSY; + else + atomic_inc(&num_events); + mutex_unlock(&pmc_reserve_mutex); + } + event->destroy = hw_perf_event_destroy; + + if (err) + goto out; + + /* Access per-CPU sampling information (query sampling info) */ + /* + * The event->cpu value can be -1 to count on every CPU, for example, + * when attaching to a task. If this is specified, use the query + * sampling info from the current CPU, otherwise use event->cpu to + * retrieve the per-CPU information. + * Later, cpuhw indicates whether to allocate sampling buffers for a + * particular CPU (cpuhw!=NULL) or each online CPU (cpuw==NULL). + */ + memset(&si, 0, sizeof(si)); + cpuhw = NULL; + if (event->cpu == -1) + qsi(&si); + else { + /* Event is pinned to a particular CPU, retrieve the per-CPU + * sampling structure for accessing the CPU-specific QSI. + */ + cpuhw = &per_cpu(cpu_hw_sf, event->cpu); + si = cpuhw->qsi; + } + + /* Check sampling facility authorization and, if not authorized, + * fall back to other PMUs. It is safe to check any CPU because + * the authorization is identical for all configured CPUs. + */ + if (!si.as) { + err = -ENOENT; + goto out; + } + + /* Always enable basic sampling */ + SAMPL_FLAGS(hwc) = PERF_CPUM_SF_BASIC_MODE; + + /* Check if diagnostic sampling is requested. Deny if the required + * sampling authorization is missing. + */ + if (attr->config == PERF_EVENT_CPUM_SF_DIAG) { + if (!si.ad) { + err = -EPERM; + goto out; + } + SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_DIAG_MODE; + } + + /* Check and set other sampling flags */ + if (attr->config1 & PERF_CPUM_SF_FULL_BLOCKS) + SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FULL_BLOCKS; + + /* The sampling information (si) contains information about the + * min/max sampling intervals and the CPU speed. So calculate the + * correct sampling interval and avoid the whole period adjust + * feedback loop. + */ + rate = 0; + if (attr->freq) { + rate = freq_to_sample_rate(&si, attr->sample_freq); + rate = hw_limit_rate(&si, rate); + attr->freq = 0; + attr->sample_period = rate; + } else { + /* The min/max sampling rates specifies the valid range + * of sample periods. If the specified sample period is + * out of range, limit the period to the range boundary. + */ + rate = hw_limit_rate(&si, hwc->sample_period); + + /* The perf core maintains a maximum sample rate that is + * configurable through the sysctl interface. Ensure the + * sampling rate does not exceed this value. This also helps + * to avoid throttling when pushing samples with + * perf_event_overflow(). 
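+ *
+ * (Concretely: if the requested period maps to a frequency above the
+ * kernel.perf_event_max_sample_rate sysctl, the initialization below
+ * fails with -EINVAL instead of silently throttling the event.)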
+ */ + if (sample_rate_to_freq(&si, rate) > + sysctl_perf_event_sample_rate) { + err = -EINVAL; + debug_sprintf_event(sfdbg, 1, "Sampling rate exceeds maximum perf sample rate\n"); + goto out; + } + } + SAMPL_RATE(hwc) = rate; + hw_init_period(hwc, SAMPL_RATE(hwc)); + + /* Initialize sample data overflow accounting */ + hwc->extra_reg.reg = REG_OVERFLOW; + OVERFLOW_REG(hwc) = 0; + + /* Allocate the per-CPU sampling buffer using the CPU information + * from the event. If the event is not pinned to a particular + * CPU (event->cpu == -1; or cpuhw == NULL), allocate sampling + * buffers for each online CPU. + */ + if (cpuhw) + /* Event is pinned to a particular CPU */ + err = allocate_buffers(cpuhw, hwc); + else { + /* Event is not pinned, allocate sampling buffer on + * each online CPU + */ + for_each_online_cpu(cpu) { + cpuhw = &per_cpu(cpu_hw_sf, cpu); + err = allocate_buffers(cpuhw, hwc); + if (err) + break; + } + } +out: + return err; +} + +static int cpumsf_pmu_event_init(struct perf_event *event) +{ + int err; + + /* No support for taken branch sampling */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + + switch (event->attr.type) { + case PERF_TYPE_RAW: + if ((event->attr.config != PERF_EVENT_CPUM_SF) && + (event->attr.config != PERF_EVENT_CPUM_SF_DIAG)) + return -ENOENT; + break; + case PERF_TYPE_HARDWARE: + /* Support sampling of CPU cycles in addition to the + * counter facility. However, the counter facility + * is more precise and, hence, restrict this PMU to + * sampling events only. + */ + if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES) + return -ENOENT; + if (!is_sampling_event(event)) + return -ENOENT; + break; + default: + return -ENOENT; + } + + /* Check online status of the CPU to which the event is pinned */ + if (event->cpu >= nr_cpumask_bits || + (event->cpu >= 0 && !cpu_online(event->cpu))) + return -ENODEV; + + /* Force reset of idle/hv excludes regardless of what the + * user requested. + */ + if (event->attr.exclude_hv) + event->attr.exclude_hv = 0; + if (event->attr.exclude_idle) + event->attr.exclude_idle = 0; + + err = __hw_perf_event_init(event); + if (unlikely(err)) + if (event->destroy) + event->destroy(event); + return err; +} + +static void cpumsf_pmu_enable(struct pmu *pmu) +{ + struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); + struct hw_perf_event *hwc; + int err; + + if (cpuhw->flags & PMU_F_ENABLED) + return; + + if (cpuhw->flags & PMU_F_ERR_MASK) + return; + + /* Check whether to extent the sampling buffer. + * + * Two conditions trigger an increase of the sampling buffer for a + * perf event: + * 1. Postponed buffer allocations from the event initialization. + * 2. Sampling overflows that contribute to pending allocations. + * + * Note that the extend_sampling_buffer() function disables the sampling + * facility, but it can be fully re-enabled using sampling controls that + * have been saved in cpumsf_pmu_disable(). 
+ */ + if (cpuhw->event) { + hwc = &cpuhw->event->hw; + /* Account number of overflow-designated buffer extents */ + sfb_account_overflows(cpuhw, hwc); + if (sfb_has_pending_allocs(&cpuhw->sfb, hwc)) + extend_sampling_buffer(&cpuhw->sfb, hwc); + } + + /* (Re)enable the PMU and sampling facility */ + cpuhw->flags |= PMU_F_ENABLED; + barrier(); + + err = lsctl(&cpuhw->lsctl); + if (err) { + cpuhw->flags &= ~PMU_F_ENABLED; + pr_err("Loading sampling controls failed: op=%i err=%i\n", + 1, err); + return; + } + + debug_sprintf_event(sfdbg, 6, "pmu_enable: es=%i cs=%i ed=%i cd=%i " + "tear=%p dear=%p\n", cpuhw->lsctl.es, cpuhw->lsctl.cs, + cpuhw->lsctl.ed, cpuhw->lsctl.cd, + (void *) cpuhw->lsctl.tear, (void *) cpuhw->lsctl.dear); +} + +static void cpumsf_pmu_disable(struct pmu *pmu) +{ + struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); + struct hws_lsctl_request_block inactive; + struct hws_qsi_info_block si; + int err; + + if (!(cpuhw->flags & PMU_F_ENABLED)) + return; + + if (cpuhw->flags & PMU_F_ERR_MASK) + return; + + /* Switch off sampling activation control */ + inactive = cpuhw->lsctl; + inactive.cs = 0; + inactive.cd = 0; + + err = lsctl(&inactive); + if (err) { + pr_err("Loading sampling controls failed: op=%i err=%i\n", + 2, err); + return; + } + + /* Save state of TEAR and DEAR register contents */ + if (!qsi(&si)) { + /* TEAR/DEAR values are valid only if the sampling facility is + * enabled. Note that cpumsf_pmu_disable() might be called even + * for a disabled sampling facility because cpumsf_pmu_enable() + * controls the enable/disable state. + */ + if (si.es) { + cpuhw->lsctl.tear = si.tear; + cpuhw->lsctl.dear = si.dear; + } + } else + debug_sprintf_event(sfdbg, 3, "cpumsf_pmu_disable: " + "qsi() failed with err=%i\n", err); + + cpuhw->flags &= ~PMU_F_ENABLED; +} + +/* perf_exclude_event() - Filter event + * @event: The perf event + * @regs: pt_regs structure + * @sde_regs: Sample-data-entry (sde) regs structure + * + * Filter perf events according to their exclude specification. + * + * Return non-zero if the event shall be excluded. + */ +static int perf_exclude_event(struct perf_event *event, struct pt_regs *regs, + struct perf_sf_sde_regs *sde_regs) +{ + if (event->attr.exclude_user && user_mode(regs)) + return 1; + if (event->attr.exclude_kernel && !user_mode(regs)) + return 1; + if (event->attr.exclude_guest && sde_regs->in_guest) + return 1; + if (event->attr.exclude_host && !sde_regs->in_guest) + return 1; + return 0; +} + +/* perf_push_sample() - Push samples to perf + * @event: The perf event + * @sample: Hardware sample data + * + * Use the hardware sample data to create perf event sample. The sample + * is the pushed to the event subsystem and the function checks for + * possible event overflows. If an event overflow occurs, the PMU is + * stopped. + * + * Return non-zero if an event overflow occurred. + */ +static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr) +{ + int overflow; + struct pt_regs regs; + struct perf_sf_sde_regs *sde_regs; + struct perf_sample_data data; + struct perf_raw_record raw; + + /* Setup perf sample */ + perf_sample_data_init(&data, 0, event->hw.last_period); + raw.size = sfr->size; + raw.data = sfr; + data.raw = &raw; + + /* Setup pt_regs to look like an CPU-measurement external interrupt + * using the Program Request Alert code. The regs.int_parm_long + * field which is unused contains additional sample-data-entry related + * indicators. 
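+ * (These synthetic regs are later recognized by perf_misc_flags() in
+ * perf_event.c through int_code 0x1407 and the CPU_MF_INT_SF_PRA
+ * interruption parameter.)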
+ */ + memset(®s, 0, sizeof(regs)); + regs.int_code = 0x1407; + regs.int_parm = CPU_MF_INT_SF_PRA; + sde_regs = (struct perf_sf_sde_regs *) ®s.int_parm_long; + + regs.psw.addr = sfr->basic.ia; + if (sfr->basic.T) + regs.psw.mask |= PSW_MASK_DAT; + if (sfr->basic.W) + regs.psw.mask |= PSW_MASK_WAIT; + if (sfr->basic.P) + regs.psw.mask |= PSW_MASK_PSTATE; + switch (sfr->basic.AS) { + case 0x0: + regs.psw.mask |= PSW_ASC_PRIMARY; + break; + case 0x1: + regs.psw.mask |= PSW_ASC_ACCREG; + break; + case 0x2: + regs.psw.mask |= PSW_ASC_SECONDARY; + break; + case 0x3: + regs.psw.mask |= PSW_ASC_HOME; + break; + } + + /* The host-program-parameter (hpp) contains the sie control + * block that is set by sie64a() in entry64.S. Check if hpp + * refers to a valid control block and set sde_regs flags + * accordingly. This would allow to use hpp values for other + * purposes too. + * For now, simply use a non-zero value as guest indicator. + */ + if (sfr->basic.hpp) + sde_regs->in_guest = 1; + + overflow = 0; + if (perf_exclude_event(event, ®s, sde_regs)) + goto out; + if (perf_event_overflow(event, &data, ®s)) { + overflow = 1; + event->pmu->stop(event, 0); + } + perf_event_update_userpage(event); +out: + return overflow; +} + +static void perf_event_count_update(struct perf_event *event, u64 count) +{ + local64_add(count, &event->count); +} + +static int sample_format_is_valid(struct hws_combined_entry *sample, + unsigned int flags) +{ + if (likely(flags & PERF_CPUM_SF_BASIC_MODE)) + /* Only basic-sampling data entries with data-entry-format + * version of 0x0001 can be processed. + */ + if (sample->basic.def != 0x0001) + return 0; + if (flags & PERF_CPUM_SF_DIAG_MODE) + /* The data-entry-format number of diagnostic-sampling data + * entries can vary. Because diagnostic data is just passed + * through, do only a sanity check on the DEF. + */ + if (sample->diag.def < 0x8001) + return 0; + return 1; +} + +static int sample_is_consistent(struct hws_combined_entry *sample, + unsigned long flags) +{ + /* This check applies only to basic-sampling data entries of potentially + * combined-sampling data entries. Invalid entries cannot be processed + * by the PMU and, thus, do not deliver an associated + * diagnostic-sampling data entry. + */ + if (unlikely(!(flags & PERF_CPUM_SF_BASIC_MODE))) + return 0; + /* + * Samples are skipped, if they are invalid or for which the + * instruction address is not predictable, i.e., the wait-state bit is + * set. + */ + if (sample->basic.I || sample->basic.W) + return 0; + return 1; +} + +static void reset_sample_slot(struct hws_combined_entry *sample, + unsigned long flags) +{ + if (likely(flags & PERF_CPUM_SF_BASIC_MODE)) + sample->basic.def = 0; + if (flags & PERF_CPUM_SF_DIAG_MODE) + sample->diag.def = 0; +} + +static void sfr_store_sample(struct sf_raw_sample *sfr, + struct hws_combined_entry *sample) +{ + if (likely(sfr->format & PERF_CPUM_SF_BASIC_MODE)) + sfr->basic = sample->basic; + if (sfr->format & PERF_CPUM_SF_DIAG_MODE) + memcpy(&sfr->diag, &sample->diag, sfr->dsdes); +} + +static void debug_sample_entry(struct hws_combined_entry *sample, + struct hws_trailer_entry *te, + unsigned long flags) +{ + debug_sprintf_event(sfdbg, 4, "hw_collect_samples: Found unknown " + "sampling data entry: te->f=%i basic.def=%04x (%p)" + " diag.def=%04x (%p)\n", te->f, + sample->basic.def, &sample->basic, + (flags & PERF_CPUM_SF_DIAG_MODE) + ? sample->diag.def : 0xFFFF, + (flags & PERF_CPUM_SF_DIAG_MODE) + ? 
&sample->diag : NULL); +} + +/* hw_collect_samples() - Walk through a sample-data-block and collect samples + * @event: The perf event + * @sdbt: Sample-data-block table + * @overflow: Event overflow counter + * + * Walks through a sample-data-block and collects sampling data entries that are + * then pushed to the perf event subsystem. Depending on the sampling function, + * there can be either basic-sampling or combined-sampling data entries. A + * combined-sampling data entry consists of a basic- and a diagnostic-sampling + * data entry. The sampling function is determined by the flags in the perf + * event hardware structure. The function always works with a combined-sampling + * data entry but ignores the the diagnostic portion if it is not available. + * + * Note that the implementation focuses on basic-sampling data entries and, if + * such an entry is not valid, the entire combined-sampling data entry is + * ignored. + * + * The overflow variables counts the number of samples that has been discarded + * due to a perf event overflow. + */ +static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt, + unsigned long long *overflow) +{ + unsigned long flags = SAMPL_FLAGS(&event->hw); + struct hws_combined_entry *sample; + struct hws_trailer_entry *te; + struct sf_raw_sample *sfr; + size_t sample_size; + + /* Prepare and initialize raw sample data */ + sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(&event->hw); + sfr->format = flags & PERF_CPUM_SF_MODE_MASK; + + sample_size = event_sample_size(&event->hw); + te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt); + sample = (struct hws_combined_entry *) *sdbt; + while ((unsigned long *) sample < (unsigned long *) te) { + /* Check for an empty sample */ + if (!sample->basic.def) + break; + + /* Update perf event period */ + perf_event_count_update(event, SAMPL_RATE(&event->hw)); + + /* Check sampling data entry */ + if (sample_format_is_valid(sample, flags)) { + /* If an event overflow occurred, the PMU is stopped to + * throttle event delivery. Remaining sample data is + * discarded. + */ + if (!*overflow) { + if (sample_is_consistent(sample, flags)) { + /* Deliver sample data to perf */ + sfr_store_sample(sfr, sample); + *overflow = perf_push_sample(event, sfr); + } + } else + /* Count discarded samples */ + *overflow += 1; + } else { + debug_sample_entry(sample, te, flags); + /* Sample slot is not yet written or other record. + * + * This condition can occur if the buffer was reused + * from a combined basic- and diagnostic-sampling. + * If only basic-sampling is then active, entries are + * written into the larger diagnostic entries. + * This is typically the case for sample-data-blocks + * that are not full. Stop processing if the first + * invalid format was detected. + */ + if (!te->f) + break; + } + + /* Reset sample slot and advance to next sample */ + reset_sample_slot(sample, flags); + sample += sample_size; + } +} + +/* hw_perf_event_update() - Process sampling buffer + * @event: The perf event + * @flush_all: Flag to also flush partially filled sample-data-blocks + * + * Processes the sampling buffer and create perf event samples. + * The sampling buffer position are retrieved and saved in the TEAR_REG + * register of the specified perf event. + * + * Only full sample-data-blocks are processed. Specify the flash_all flag + * to also walk through partially filled sample-data-blocks. It is ignored + * if PERF_CPUM_SF_FULL_BLOCKS is set. 
The PERF_CPUM_SF_FULL_BLOCKS flag + * enforces the processing of full sample-data-blocks only (trailer entries + * with the block-full-indicator bit set). + */ +static void hw_perf_event_update(struct perf_event *event, int flush_all) +{ + struct hw_perf_event *hwc = &event->hw; + struct hws_trailer_entry *te; + unsigned long *sdbt; + unsigned long long event_overflow, sampl_overflow, num_sdb, te_flags; + int done; + + if (flush_all && SDB_FULL_BLOCKS(hwc)) + flush_all = 0; + + sdbt = (unsigned long *) TEAR_REG(hwc); + done = event_overflow = sampl_overflow = num_sdb = 0; + while (!done) { + /* Get the trailer entry of the sample-data-block */ + te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt); + + /* Leave loop if no more work to do (block full indicator) */ + if (!te->f) { + done = 1; + if (!flush_all) + break; + } + + /* Check the sample overflow count */ + if (te->overflow) + /* Account sample overflows and, if a particular limit + * is reached, extend the sampling buffer. + * For details, see sfb_account_overflows(). + */ + sampl_overflow += te->overflow; + + /* Timestamps are valid for full sample-data-blocks only */ + debug_sprintf_event(sfdbg, 6, "hw_perf_event_update: sdbt=%p " + "overflow=%llu timestamp=0x%llx\n", + sdbt, te->overflow, + (te->f) ? trailer_timestamp(te) : 0ULL); + + /* Collect all samples from a single sample-data-block and + * flag if an (perf) event overflow happened. If so, the PMU + * is stopped and remaining samples will be discarded. + */ + hw_collect_samples(event, sdbt, &event_overflow); + num_sdb++; + + /* Reset trailer (using compare-double-and-swap) */ + do { + te_flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK; + te_flags |= SDB_TE_ALERT_REQ_MASK; + } while (!cmpxchg_double(&te->flags, &te->overflow, + te->flags, te->overflow, + te_flags, 0ULL)); + + /* Advance to next sample-data-block */ + sdbt++; + if (is_link_entry(sdbt)) + sdbt = get_next_sdbt(sdbt); + + /* Update event hardware registers */ + TEAR_REG(hwc) = (unsigned long) sdbt; + + /* Stop processing sample-data if all samples of the current + * sample-data-block were flushed even if it was not full. + */ + if (flush_all && done) + break; + + /* If an event overflow happened, discard samples by + * processing any remaining sample-data-blocks. + */ + if (event_overflow) + flush_all = 1; + } + + /* Account sample overflows in the event hardware structure */ + if (sampl_overflow) + OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) + + sampl_overflow, 1 + num_sdb); + if (sampl_overflow || event_overflow) + debug_sprintf_event(sfdbg, 4, "hw_perf_event_update: " + "overflow stats: sample=%llu event=%llu\n", + sampl_overflow, event_overflow); +} + +static void cpumsf_pmu_read(struct perf_event *event) +{ + /* Nothing to do ... updates are interrupt-driven */ +} + +/* Activate sampling control. + * Next call of pmu_enable() starts sampling. + */ +static void cpumsf_pmu_start(struct perf_event *event, int flags) +{ + struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); + + if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) + return; + + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); + + perf_pmu_disable(event->pmu); + event->hw.state = 0; + cpuhw->lsctl.cs = 1; + if (SAMPL_DIAG_MODE(&event->hw)) + cpuhw->lsctl.cd = 1; + perf_pmu_enable(event->pmu); +} + +/* Deactivate sampling control. + * Next call of pmu_enable() stops sampling. 
+ */ +static void cpumsf_pmu_stop(struct perf_event *event, int flags) +{ + struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); + + if (event->hw.state & PERF_HES_STOPPED) + return; + + perf_pmu_disable(event->pmu); + cpuhw->lsctl.cs = 0; + cpuhw->lsctl.cd = 0; + event->hw.state |= PERF_HES_STOPPED; + + if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { + hw_perf_event_update(event, 1); + event->hw.state |= PERF_HES_UPTODATE; + } + perf_pmu_enable(event->pmu); +} + +static int cpumsf_pmu_add(struct perf_event *event, int flags) +{ + struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); + int err; + + if (cpuhw->flags & PMU_F_IN_USE) + return -EAGAIN; + + if (!cpuhw->sfb.sdbt) + return -EINVAL; + + err = 0; + perf_pmu_disable(event->pmu); + + event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + /* Set up sampling controls. Always program the sampling register + * using the SDB-table start. Reset TEAR_REG event hardware register + * that is used by hw_perf_event_update() to store the sampling buffer + * position after samples have been flushed. + */ + cpuhw->lsctl.s = 0; + cpuhw->lsctl.h = 1; + cpuhw->lsctl.tear = (unsigned long) cpuhw->sfb.sdbt; + cpuhw->lsctl.dear = *(unsigned long *) cpuhw->sfb.sdbt; + cpuhw->lsctl.interval = SAMPL_RATE(&event->hw); + hw_reset_registers(&event->hw, cpuhw->sfb.sdbt); + + /* Ensure sampling functions are in the disabled state. If disabled, + * switch on sampling enable control. */ + if (WARN_ON_ONCE(cpuhw->lsctl.es == 1 || cpuhw->lsctl.ed == 1)) { + err = -EAGAIN; + goto out; + } + cpuhw->lsctl.es = 1; + if (SAMPL_DIAG_MODE(&event->hw)) + cpuhw->lsctl.ed = 1; + + /* Set in_use flag and store event */ + event->hw.idx = 0; /* only one sampling event per CPU supported */ + cpuhw->event = event; + cpuhw->flags |= PMU_F_IN_USE; + + if (flags & PERF_EF_START) + cpumsf_pmu_start(event, PERF_EF_RELOAD); +out: + perf_event_update_userpage(event); + perf_pmu_enable(event->pmu); + return err; +} + +static void cpumsf_pmu_del(struct perf_event *event, int flags) +{ + struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); + + perf_pmu_disable(event->pmu); + cpumsf_pmu_stop(event, PERF_EF_UPDATE); + + cpuhw->lsctl.es = 0; + cpuhw->lsctl.ed = 0; + cpuhw->flags &= ~PMU_F_IN_USE; + cpuhw->event = NULL; + + perf_event_update_userpage(event); + perf_pmu_enable(event->pmu); +} + +static int cpumsf_pmu_event_idx(struct perf_event *event) +{ + return event->hw.idx; +} + +CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF); +CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG); + +static struct attribute *cpumsf_pmu_events_attr[] = { + CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC), + CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG), + NULL, +}; + +PMU_FORMAT_ATTR(event, "config:0-63"); + +static struct attribute *cpumsf_pmu_format_attr[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group cpumsf_pmu_events_group = { + .name = "events", + .attrs = cpumsf_pmu_events_attr, +}; +static struct attribute_group cpumsf_pmu_format_group = { + .name = "format", + .attrs = cpumsf_pmu_format_attr, +}; +static const struct attribute_group *cpumsf_pmu_attr_groups[] = { + &cpumsf_pmu_events_group, + &cpumsf_pmu_format_group, + NULL, +}; + +static struct pmu cpumf_sampling = { + .pmu_enable = cpumsf_pmu_enable, + .pmu_disable = cpumsf_pmu_disable, + + .event_init = cpumsf_pmu_event_init, + .add = cpumsf_pmu_add, + .del = cpumsf_pmu_del, + + .start = cpumsf_pmu_start, + .stop = cpumsf_pmu_stop, + .read = cpumsf_pmu_read, + + .event_idx = 
cpumsf_pmu_event_idx, + .attr_groups = cpumsf_pmu_attr_groups, +}; + +static void cpumf_measurement_alert(struct ext_code ext_code, + unsigned int alert, unsigned long unused) +{ + struct cpu_hw_sf *cpuhw; + + if (!(alert & CPU_MF_INT_SF_MASK)) + return; + inc_irq_stat(IRQEXT_CMS); + cpuhw = &__get_cpu_var(cpu_hw_sf); + + /* Measurement alerts are shared and might happen when the PMU + * is not reserved. Ignore these alerts in this case. */ + if (!(cpuhw->flags & PMU_F_RESERVED)) + return; + + /* The processing below must take care of multiple alert events that + * might be indicated concurrently. */ + + /* Program alert request */ + if (alert & CPU_MF_INT_SF_PRA) { + if (cpuhw->flags & PMU_F_IN_USE) + hw_perf_event_update(cpuhw->event, 0); + else + WARN_ON_ONCE(!(cpuhw->flags & PMU_F_IN_USE)); + } + + /* Report measurement alerts only for non-PRA codes */ + if (alert != CPU_MF_INT_SF_PRA) + debug_sprintf_event(sfdbg, 6, "measurement alert: 0x%x\n", alert); + + /* Sampling authorization change request */ + if (alert & CPU_MF_INT_SF_SACA) + qsi(&cpuhw->qsi); + + /* Loss of sample data due to high-priority machine activities */ + if (alert & CPU_MF_INT_SF_LSDA) { + pr_err("Sample data was lost\n"); + cpuhw->flags |= PMU_F_ERR_LSDA; + sf_disable(); + } + + /* Invalid sampling buffer entry */ + if (alert & (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE)) { + pr_err("A sampling buffer entry is incorrect (alert=0x%x)\n", + alert); + cpuhw->flags |= PMU_F_ERR_IBE; + sf_disable(); + } +} + +static int cpumf_pmu_notifier(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (long) hcpu; + int flags; + + /* Ignore the notification if no events are scheduled on the PMU. + * This might be racy... + */ + if (!atomic_read(&num_events)) + return NOTIFY_OK; + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + flags = PMC_INIT; + smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1); + break; + case CPU_DOWN_PREPARE: + flags = PMC_RELEASE; + smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1); + break; + default: + break; + } + + return NOTIFY_OK; +} + +static int param_get_sfb_size(char *buffer, const struct kernel_param *kp) +{ + if (!cpum_sf_avail()) + return -ENODEV; + return sprintf(buffer, "%lu,%lu", CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); +} + +static int param_set_sfb_size(const char *val, const struct kernel_param *kp) +{ + int rc; + unsigned long min, max; + + if (!cpum_sf_avail()) + return -ENODEV; + if (!val || !strlen(val)) + return -EINVAL; + + /* Valid parameter values: "min,max" or "max" */ + min = CPUM_SF_MIN_SDB; + max = CPUM_SF_MAX_SDB; + if (strchr(val, ',')) + rc = (sscanf(val, "%lu,%lu", &min, &max) == 2) ? 
0 : -EINVAL; + else + rc = kstrtoul(val, 10, &max); + + if (min < 2 || min >= max || max > get_num_physpages()) + rc = -EINVAL; + if (rc) + return rc; + + sfb_set_limits(min, max); + pr_info("The sampling buffer limits have changed to: " + "min=%lu max=%lu (diag=x%lu)\n", + CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB, CPUM_SF_SDB_DIAG_FACTOR); + return 0; +} + +#define param_check_sfb_size(name, p) __param_check(name, p, void) +static struct kernel_param_ops param_ops_sfb_size = { + .set = param_set_sfb_size, + .get = param_get_sfb_size, +}; + +#define RS_INIT_FAILURE_QSI 0x0001 +#define RS_INIT_FAILURE_BSDES 0x0002 +#define RS_INIT_FAILURE_ALRT 0x0003 +#define RS_INIT_FAILURE_PERF 0x0004 +static void __init pr_cpumsf_err(unsigned int reason) +{ + pr_err("Sampling facility support for perf is not available: " + "reason=%04x\n", reason); +} + +static int __init init_cpum_sampling_pmu(void) +{ + struct hws_qsi_info_block si; + int err; + + if (!cpum_sf_avail()) + return -ENODEV; + + memset(&si, 0, sizeof(si)); + if (qsi(&si)) { + pr_cpumsf_err(RS_INIT_FAILURE_QSI); + return -ENODEV; + } + + if (si.bsdes != sizeof(struct hws_basic_entry)) { + pr_cpumsf_err(RS_INIT_FAILURE_BSDES); + return -EINVAL; + } + + if (si.ad) + sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); + + sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80); + if (!sfdbg) + pr_err("Registering for s390dbf failed\n"); + debug_register_view(sfdbg, &debug_sprintf_view); + + err = register_external_interrupt(0x1407, cpumf_measurement_alert); + if (err) { + pr_cpumsf_err(RS_INIT_FAILURE_ALRT); + goto out; + } + + err = perf_pmu_register(&cpumf_sampling, "cpum_sf", PERF_TYPE_RAW); + if (err) { + pr_cpumsf_err(RS_INIT_FAILURE_PERF); + unregister_external_interrupt(0x1407, cpumf_measurement_alert); + goto out; + } + perf_cpu_notifier(cpumf_pmu_notifier); +out: + return err; +} +arch_initcall(init_cpum_sampling_pmu); +core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0640); diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c index 2343c21..5d2dfa3 100644 --- a/arch/s390/kernel/perf_event.c +++ b/arch/s390/kernel/perf_event.c @@ -1,7 +1,7 @@ /* * Performance event support for s390x * - * Copyright IBM Corp. 2012 + * Copyright IBM Corp. 
2012, 2013 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> * * This program is free software; you can redistribute it and/or modify @@ -16,15 +16,19 @@ #include <linux/kvm_host.h> #include <linux/percpu.h> #include <linux/export.h> +#include <linux/seq_file.h> +#include <linux/spinlock.h> +#include <linux/sysfs.h> #include <asm/irq.h> #include <asm/cpu_mf.h> #include <asm/lowcore.h> #include <asm/processor.h> +#include <asm/sysinfo.h> const char *perf_pmu_name(void) { if (cpum_cf_avail() || cpum_sf_avail()) - return "CPU-measurement facilities (CPUMF)"; + return "CPU-Measurement Facilities (CPU-MF)"; return "pmu"; } EXPORT_SYMBOL(perf_pmu_name); @@ -35,6 +39,8 @@ int perf_num_counters(void) if (cpum_cf_avail()) num += PERF_CPUM_CF_MAX_CTR; + if (cpum_sf_avail()) + num += PERF_CPUM_SF_MAX_CTR; return num; } @@ -54,7 +60,7 @@ static bool is_in_guest(struct pt_regs *regs) { if (user_mode(regs)) return false; -#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +#if IS_ENABLED(CONFIG_KVM) return instruction_pointer(regs) == (unsigned long) &sie_exit; #else return false; @@ -83,8 +89,31 @@ static unsigned long perf_misc_guest_flags(struct pt_regs *regs) : PERF_RECORD_MISC_GUEST_KERNEL; } +static unsigned long perf_misc_flags_sf(struct pt_regs *regs) +{ + struct perf_sf_sde_regs *sde_regs; + unsigned long flags; + + sde_regs = (struct perf_sf_sde_regs *) ®s->int_parm_long; + if (sde_regs->in_guest) + flags = user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER + : PERF_RECORD_MISC_GUEST_KERNEL; + else + flags = user_mode(regs) ? PERF_RECORD_MISC_USER + : PERF_RECORD_MISC_KERNEL; + return flags; +} + unsigned long perf_misc_flags(struct pt_regs *regs) { + /* Check if the cpum_sf PMU has created the pt_regs structure. + * In this case, perf misc flags can be easily extracted. Otherwise, + * do regular checks on the pt_regs content. 
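+ * (The sampler builds such regs from scratch with memset(), so a zero
+ * gprs[15] (stack pointer) additionally distinguishes them from a
+ * genuine interrupt frame.)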
+ */ + if (regs->int_code == 0x1407 && regs->int_parm == CPU_MF_INT_SF_PRA) + if (!regs->gprs[15]) + return perf_misc_flags_sf(regs); + if (is_in_guest(regs)) return perf_misc_guest_flags(regs); @@ -92,27 +121,107 @@ unsigned long perf_misc_flags(struct pt_regs *regs) : PERF_RECORD_MISC_KERNEL; } -void perf_event_print_debug(void) +void print_debug_cf(void) { struct cpumf_ctr_info cf_info; - unsigned long flags; - int cpu; - - if (!cpum_cf_avail()) - return; - - local_irq_save(flags); + int cpu = smp_processor_id(); - cpu = smp_processor_id(); memset(&cf_info, 0, sizeof(cf_info)); if (!qctri(&cf_info)) pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n", cpu, cf_info.cfvn, cf_info.csvn, cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl); +} + +static void print_debug_sf(void) +{ + struct hws_qsi_info_block si; + int cpu = smp_processor_id(); + memset(&si, 0, sizeof(si)); + if (qsi(&si)) + return; + + pr_info("CPU[%i] CPUM_SF: basic=%i diag=%i min=%lu max=%lu cpu_speed=%u\n", + cpu, si.as, si.ad, si.min_sampl_rate, si.max_sampl_rate, + si.cpu_speed); + + if (si.as) + pr_info("CPU[%i] CPUM_SF: Basic-sampling: a=%i e=%i c=%i" + " bsdes=%i tear=%016lx dear=%016lx\n", cpu, + si.as, si.es, si.cs, si.bsdes, si.tear, si.dear); + if (si.ad) + pr_info("CPU[%i] CPUM_SF: Diagnostic-sampling: a=%i e=%i c=%i" + " dsdes=%i tear=%016lx dear=%016lx\n", cpu, + si.ad, si.ed, si.cd, si.dsdes, si.tear, si.dear); +} + +void perf_event_print_debug(void) +{ + unsigned long flags; + + local_irq_save(flags); + if (cpum_cf_avail()) + print_debug_cf(); + if (cpum_sf_avail()) + print_debug_sf(); local_irq_restore(flags); } +/* Service level infrastructure */ +static void sl_print_counter(struct seq_file *m) +{ + struct cpumf_ctr_info ci; + + memset(&ci, 0, sizeof(ci)); + if (qctri(&ci)) + return; + + seq_printf(m, "CPU-MF: Counter facility: version=%u.%u " + "authorization=%04x\n", ci.cfvn, ci.csvn, ci.auth_ctl); +} + +static void sl_print_sampling(struct seq_file *m) +{ + struct hws_qsi_info_block si; + + memset(&si, 0, sizeof(si)); + if (qsi(&si)) + return; + + if (!si.as && !si.ad) + return; + + seq_printf(m, "CPU-MF: Sampling facility: min_rate=%lu max_rate=%lu" + " cpu_speed=%u\n", si.min_sampl_rate, si.max_sampl_rate, + si.cpu_speed); + if (si.as) + seq_printf(m, "CPU-MF: Sampling facility: mode=basic" + " sample_size=%u\n", si.bsdes); + if (si.ad) + seq_printf(m, "CPU-MF: Sampling facility: mode=diagnostic" + " sample_size=%u\n", si.dsdes); +} + +static void service_level_perf_print(struct seq_file *m, + struct service_level *sl) +{ + if (cpum_cf_avail()) + sl_print_counter(m); + if (cpum_sf_avail()) + sl_print_sampling(m); +} + +static struct service_level service_level_perf = { + .seq_print = service_level_perf_print, +}; + +static int __init service_level_perf_register(void) +{ + return register_service_level(&service_level_perf); +} +arch_initcall(service_level_perf_register); + /* See also arch/s390/kernel/traps.c */ static unsigned long __store_trace(struct perf_callchain_entry *entry, unsigned long sp, @@ -172,3 +281,44 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry, __store_trace(entry, head, S390_lowcore.thread_info, S390_lowcore.thread_info + THREAD_SIZE); } + +/* Perf defintions for PMU event attributes in sysfs */ +ssize_t cpumf_events_sysfs_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); + return sprintf(page, 
"event=0x%04llx,name=%s\n", + pmu_attr->id, attr->attr.name); +} + +/* Reserve/release functions for sharing perf hardware */ +static DEFINE_SPINLOCK(perf_hw_owner_lock); +static void *perf_sampling_owner; + +int perf_reserve_sampling(void) +{ + int err; + + err = 0; + spin_lock(&perf_hw_owner_lock); + if (perf_sampling_owner) { + pr_warn("The sampling facility is already reserved by %p\n", + perf_sampling_owner); + err = -EBUSY; + } else + perf_sampling_owner = __builtin_return_address(0); + spin_unlock(&perf_hw_owner_lock); + return err; +} +EXPORT_SYMBOL(perf_reserve_sampling); + +void perf_release_sampling(void) +{ + spin_lock(&perf_hw_owner_lock); + WARN_ON(!perf_sampling_owner); + perf_sampling_owner = NULL; + spin_unlock(&perf_hw_owner_lock); +} +EXPORT_SYMBOL(perf_release_sampling); diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S index 4a460c4..813ec72 100644 --- a/arch/s390/kernel/pgm_check.S +++ b/arch/s390/kernel/pgm_check.S @@ -78,7 +78,7 @@ PGM_CHECK_DEFAULT /* 34 */ PGM_CHECK_DEFAULT /* 35 */ PGM_CHECK_DEFAULT /* 36 */ PGM_CHECK_DEFAULT /* 37 */ -PGM_CHECK_DEFAULT /* 38 */ +PGM_CHECK_64BIT(do_dat_exception) /* 38 */ PGM_CHECK_64BIT(do_dat_exception) /* 39 */ PGM_CHECK_64BIT(do_dat_exception) /* 3a */ PGM_CHECK_64BIT(do_dat_exception) /* 3b */ diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 7ed0d4e..dd14532 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -261,20 +261,18 @@ static inline unsigned long brk_rnd(void) unsigned long arch_randomize_brk(struct mm_struct *mm) { - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd()); + unsigned long ret; - if (ret < mm->brk) - return mm->brk; - return ret; + ret = PAGE_ALIGN(mm->brk + brk_rnd()); + return (ret > mm->brk) ? ret : mm->brk; } unsigned long randomize_et_dyn(unsigned long base) { - unsigned long ret = PAGE_ALIGN(base + brk_rnd()); + unsigned long ret; if (!(current->flags & PF_RANDOMIZE)) return base; - if (ret < base) - return base; - return ret; + ret = PAGE_ALIGN(base + brk_rnd()); + return (ret > base) ? ret : base; } diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index e65c91c..f6be608 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -56,25 +56,26 @@ void update_cr_regs(struct task_struct *task) #ifdef CONFIG_64BIT /* Take care of the enable/disable of transactional execution. */ if (MACHINE_HAS_TE) { - unsigned long cr[3], cr_new[3]; + unsigned long cr, cr_new; - __ctl_store(cr, 0, 2); - cr_new[1] = cr[1]; + __ctl_store(cr, 0, 0); /* Set or clear transaction execution TXC bit 8. */ + cr_new = cr | (1UL << 55); if (task->thread.per_flags & PER_FLAG_NO_TE) - cr_new[0] = cr[0] & ~(1UL << 55); - else - cr_new[0] = cr[0] | (1UL << 55); + cr_new &= ~(1UL << 55); + if (cr_new != cr) + __ctl_load(cr, 0, 0); /* Set or clear transaction execution TDC bits 62 and 63. 
*/ - cr_new[2] = cr[2] & ~3UL; + __ctl_store(cr, 2, 2); + cr_new = cr & ~3UL; if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND) - cr_new[2] |= 1UL; + cr_new |= 1UL; else - cr_new[2] |= 2UL; + cr_new |= 2UL; } - if (memcmp(&cr_new, &cr, sizeof(cr))) - __ctl_load(cr_new, 0, 2); + if (cr_new != cr) + __ctl_load(cr_new, 2, 2); } #endif /* Copy user specified PER registers */ @@ -107,15 +108,11 @@ void update_cr_regs(struct task_struct *task) void user_enable_single_step(struct task_struct *task) { set_tsk_thread_flag(task, TIF_SINGLE_STEP); - if (task == current) - update_cr_regs(task); } void user_disable_single_step(struct task_struct *task) { clear_tsk_thread_flag(task, TIF_SINGLE_STEP); - if (task == current) - update_cr_regs(task); } /* diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c index 3bac589..9f60467 100644 --- a/arch/s390/kernel/s390_ksyms.c +++ b/arch/s390/kernel/s390_ksyms.c @@ -5,7 +5,7 @@ #ifdef CONFIG_FUNCTION_TRACER EXPORT_SYMBOL(_mcount); #endif -#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +#if IS_ENABLED(CONFIG_KVM) EXPORT_SYMBOL(sie64a); EXPORT_SYMBOL(sie_exit); #endif diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 4444875..09e2f46 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -373,7 +373,7 @@ static void __init setup_lowcore(void) /* * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant - * restart data to the absolute zero lowcore. This is necesary if + * restart data to the absolute zero lowcore. This is necessary if * PSW restart is done on an offline CPU that has lowcore zero. */ lc->restart_stack = (unsigned long) restart_stack; @@ -1023,6 +1023,7 @@ void __init setup_arch(char **cmdline_p) setup_vmcoreinfo(); setup_lowcore(); + smp_fill_possible_mask(); cpu_init(); s390_init_cpu_topology(); diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index fb53587..d8fd508 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -94,7 +94,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) return -EINVAL; /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */ - regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | + regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) | (user_sregs.regs.psw.mask & (PSW_MASK_USER | PSW_MASK_RI)); /* Check for invalid user address space control. */ if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME) diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index dc4a534..a7125b6 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -59,7 +59,7 @@ enum { }; struct pcpu { - struct cpu cpu; + struct cpu *cpu; struct _lowcore *lowcore; /* lowcore page(s) for the cpu */ unsigned long async_stack; /* async stack for the cpu */ unsigned long panic_stack; /* panic stack for the cpu */ @@ -159,9 +159,9 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit) { int order; - set_bit(ec_bit, &pcpu->ec_mask); - order = pcpu_running(pcpu) ? - SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL; + if (test_and_set_bit(ec_bit, &pcpu->ec_mask)) + return; + order = pcpu_running(pcpu) ? 
SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL; pcpu_sigp_retry(pcpu, order, 0); } @@ -721,18 +721,14 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) return 0; } -static int __init setup_possible_cpus(char *s) -{ - int max, cpu; +static unsigned int setup_possible_cpus __initdata; - if (kstrtoint(s, 0, &max) < 0) - return 0; - init_cpu_possible(cpumask_of(0)); - for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++) - set_cpu_possible(cpu, true); +static int __init _setup_possible_cpus(char *s) +{ + get_option(&s, &setup_possible_cpus); return 0; } -early_param("possible_cpus", setup_possible_cpus); +early_param("possible_cpus", _setup_possible_cpus); #ifdef CONFIG_HOTPLUG_CPU @@ -775,6 +771,17 @@ void __noreturn cpu_die(void) #endif /* CONFIG_HOTPLUG_CPU */ +void __init smp_fill_possible_mask(void) +{ + unsigned int possible, cpu; + + possible = setup_possible_cpus; + if (!possible) + possible = MACHINE_IS_VM ? 64 : nr_cpu_ids; + for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++) + set_cpu_possible(cpu, true); +} + void __init smp_prepare_cpus(unsigned int max_cpus) { /* request the 0x1201 emergency signal external interrupt */ @@ -958,7 +965,7 @@ static int smp_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned int)(long)hcpu; - struct cpu *c = &pcpu_devices[cpu].cpu; + struct cpu *c = pcpu_devices[cpu].cpu; struct device *s = &c->dev; int err = 0; @@ -975,10 +982,15 @@ static int smp_cpu_notify(struct notifier_block *self, unsigned long action, static int smp_add_present_cpu(int cpu) { - struct cpu *c = &pcpu_devices[cpu].cpu; - struct device *s = &c->dev; + struct device *s; + struct cpu *c; int rc; + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return -ENOMEM; + pcpu_devices[cpu].cpu = c; + s = &c->dev; c->hotpluggable = 1; rc = register_cpu(c, cpu); if (rc) diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 064c308..dd95f163 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -108,20 +108,10 @@ static void fixup_clock_comparator(unsigned long long delta) set_clock_comparator(S390_lowcore.clock_comparator); } -static int s390_next_ktime(ktime_t expires, +static int s390_next_event(unsigned long delta, struct clock_event_device *evt) { - struct timespec ts; - u64 nsecs; - - ts.tv_sec = ts.tv_nsec = 0; - monotonic_to_bootbased(&ts); - nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires)); - do_div(nsecs, 125); - S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9); - /* Program the maximum value if we have an overflow (== year 2042) */ - if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc)) - S390_lowcore.clock_comparator = -1ULL; + S390_lowcore.clock_comparator = get_tod_clock() + delta; set_clock_comparator(S390_lowcore.clock_comparator); return 0; } @@ -146,15 +136,14 @@ void init_cpu_timer(void) cpu = smp_processor_id(); cd = &per_cpu(comparators, cpu); cd->name = "comparator"; - cd->features = CLOCK_EVT_FEAT_ONESHOT | - CLOCK_EVT_FEAT_KTIME; + cd->features = CLOCK_EVT_FEAT_ONESHOT; cd->mult = 16777; cd->shift = 12; cd->min_delta_ns = 1; cd->max_delta_ns = LONG_MAX; cd->rating = 400; cd->cpumask = cpumask_of(cpu); - cd->set_next_ktime = s390_next_ktime; + cd->set_next_event = s390_next_event; cd->set_mode = s390_set_mode; clockevents_register_device(cd); @@ -221,21 +210,30 @@ struct clocksource * __init clocksource_default_clock(void) return &clocksource_tod; } -void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm, - 
struct clocksource *clock, u32 mult) +void update_vsyscall(struct timekeeper *tk) { - if (clock != &clocksource_tod) + u64 nsecps; + + if (tk->clock != &clocksource_tod) return; /* Make userspace gettimeofday spin until we're done. */ ++vdso_data->tb_update_count; smp_wmb(); - vdso_data->xtime_tod_stamp = clock->cycle_last; - vdso_data->xtime_clock_sec = wall_time->tv_sec; - vdso_data->xtime_clock_nsec = wall_time->tv_nsec; - vdso_data->wtom_clock_sec = wtm->tv_sec; - vdso_data->wtom_clock_nsec = wtm->tv_nsec; - vdso_data->ntp_mult = mult; + vdso_data->xtime_tod_stamp = tk->clock->cycle_last; + vdso_data->xtime_clock_sec = tk->xtime_sec; + vdso_data->xtime_clock_nsec = tk->xtime_nsec; + vdso_data->wtom_clock_sec = + tk->xtime_sec + tk->wall_to_monotonic.tv_sec; + vdso_data->wtom_clock_nsec = tk->xtime_nsec + + + (tk->wall_to_monotonic.tv_nsec << tk->shift); + nsecps = (u64) NSEC_PER_SEC << tk->shift; + while (vdso_data->wtom_clock_nsec >= nsecps) { + vdso_data->wtom_clock_nsec -= nsecps; + vdso_data->wtom_clock_sec++; + } + vdso_data->tk_mult = tk->mult; + vdso_data->tk_shift = tk->shift; smp_wmb(); ++vdso_data->tb_update_count; } diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index a84476f..6136490 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -125,7 +125,7 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore) psal[i] = 0x80000000; lowcore->paste[4] = (u32)(addr_t) psal; - psal[0] = 0x20000000; + psal[0] = 0x02000000; psal[2] = (u32)(addr_t) aste; *(unsigned long *) (aste + 2) = segment_table + _ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT; diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S index b2224e0..65fc397 100644 --- a/arch/s390/kernel/vdso32/clock_gettime.S +++ b/arch/s390/kernel/vdso32/clock_gettime.S @@ -38,25 +38,21 @@ __kernel_clock_gettime: sl %r1,__VDSO_XTIME_STAMP+4(%r5) brc 3,2f ahi %r0,-1 -2: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */ +2: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */ lr %r2,%r0 - l %r0,__VDSO_NTP_MULT(%r5) + l %r0,__VDSO_TK_MULT(%r5) ltr %r1,%r1 mr %r0,%r0 jnm 3f - a %r0,__VDSO_NTP_MULT(%r5) + a %r0,__VDSO_TK_MULT(%r5) 3: alr %r0,%r2 - srdl %r0,12 - al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ - al %r1,__VDSO_XTIME_NSEC+4(%r5) - brc 12,4f - ahi %r0,1 -4: l %r2,__VDSO_XTIME_SEC+4(%r5) - al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */ + al %r0,__VDSO_WTOM_NSEC(%r5) al %r1,__VDSO_WTOM_NSEC+4(%r5) brc 12,5f ahi %r0,1 -5: al %r2,__VDSO_WTOM_SEC+4(%r5) +5: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ + srdl %r0,0(%r2) /* >> tk->shift */ + l %r2,__VDSO_WTOM_SEC+4(%r5) cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ jne 1b basr %r5,0 @@ -86,20 +82,21 @@ __kernel_clock_gettime: sl %r1,__VDSO_XTIME_STAMP+4(%r5) brc 3,12f ahi %r0,-1 -12: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */ +12: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */ lr %r2,%r0 - l %r0,__VDSO_NTP_MULT(%r5) + l %r0,__VDSO_TK_MULT(%r5) ltr %r1,%r1 mr %r0,%r0 jnm 13f - a %r0,__VDSO_NTP_MULT(%r5) + a %r0,__VDSO_TK_MULT(%r5) 13: alr %r0,%r2 - srdl %r0,12 - al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ + al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ al %r1,__VDSO_XTIME_NSEC+4(%r5) brc 12,14f ahi %r0,1 -14: l %r2,__VDSO_XTIME_SEC+4(%r5) +14: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ + srdl %r0,0(%r2) /* >> tk->shift */ + l %r2,__VDSO_XTIME_SEC+4(%r5) cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ jne 11b basr %r5,0 diff --git 
a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S index 2d36331..fd621a9 100644 --- a/arch/s390/kernel/vdso32/gettimeofday.S +++ b/arch/s390/kernel/vdso32/gettimeofday.S @@ -35,15 +35,14 @@ __kernel_gettimeofday: sl %r1,__VDSO_XTIME_STAMP+4(%r5) brc 3,3f ahi %r0,-1 -3: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */ +3: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */ st %r0,24(%r15) - l %r0,__VDSO_NTP_MULT(%r5) + l %r0,__VDSO_TK_MULT(%r5) ltr %r1,%r1 mr %r0,%r0 jnm 4f - a %r0,__VDSO_NTP_MULT(%r5) + a %r0,__VDSO_TK_MULT(%r5) 4: al %r0,24(%r15) - srdl %r0,12 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ al %r1,__VDSO_XTIME_NSEC+4(%r5) brc 12,5f @@ -51,6 +50,8 @@ __kernel_gettimeofday: 5: mvc 24(4,%r15),__VDSO_XTIME_SEC+4(%r5) cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ jne 1b + l %r4,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ + srdl %r0,0(%r4) /* >> tk->shift */ l %r4,24(%r15) /* get tv_sec from stack */ basr %r5,0 6: ltr %r0,%r0 diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S index 176e1f7..34deba7 100644 --- a/arch/s390/kernel/vdso64/clock_getres.S +++ b/arch/s390/kernel/vdso64/clock_getres.S @@ -23,7 +23,9 @@ __kernel_clock_getres: je 0f cghi %r2,__CLOCK_MONOTONIC je 0f - cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */ + cghi %r2,__CLOCK_THREAD_CPUTIME_ID + je 0f + cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */ jne 2f larl %r5,_vdso_data icm %r0,15,__LC_ECTG_OK(%r5) diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S index d46c95e..91940ed 100644 --- a/arch/s390/kernel/vdso64/clock_gettime.S +++ b/arch/s390/kernel/vdso64/clock_gettime.S @@ -22,7 +22,9 @@ __kernel_clock_gettime: larl %r5,_vdso_data cghi %r2,__CLOCK_REALTIME je 4f - cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */ + cghi %r2,__CLOCK_THREAD_CPUTIME_ID + je 9f + cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */ je 9f cghi %r2,__CLOCK_MONOTONIC jne 12f @@ -34,14 +36,13 @@ __kernel_clock_gettime: tmll %r4,0x0001 /* pending update ? loop */ jnz 0b stck 48(%r15) /* Store TOD clock */ + lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ + lg %r0,__VDSO_WTOM_SEC(%r5) lg %r1,48(%r15) sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ - msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */ - srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ - alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ - lg %r0,__VDSO_XTIME_SEC(%r5) - alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */ - alg %r0,__VDSO_WTOM_SEC(%r5) + msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ + alg %r1,__VDSO_WTOM_NSEC(%r5) + srlg %r1,%r1,0(%r2) /* >> tk->shift */ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ jne 0b larl %r5,13f @@ -62,12 +63,13 @@ __kernel_clock_gettime: tmll %r4,0x0001 /* pending update ? 
loop */ jnz 5b stck 48(%r15) /* Store TOD clock */ + lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ lg %r1,48(%r15) sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ - msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */ - srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ - alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ - lg %r0,__VDSO_XTIME_SEC(%r5) + msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ + alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ + srlg %r1,%r1,0(%r2) /* >> tk->shift */ + lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ jne 5b larl %r5,13f diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S index 36ee674..d0860d1 100644 --- a/arch/s390/kernel/vdso64/gettimeofday.S +++ b/arch/s390/kernel/vdso64/gettimeofday.S @@ -31,12 +31,13 @@ __kernel_gettimeofday: stck 48(%r15) /* Store TOD clock */ lg %r1,48(%r15) sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ - msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */ - srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ - alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */ - lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */ + msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ + alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ + lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ jne 0b + lgf %r5,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ + srlg %r1,%r1,0(%r5) /* >> tk->shift */ larl %r5,5f 2: clg %r1,0(%r5) jl 3f diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 2440602..d101dae 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -275,7 +275,7 @@ static int handle_io_inst(struct kvm_vcpu *vcpu) return -EOPNOTSUPP; } else { /* - * Set condition code 3 to stop the guest from issueing channel + * Set condition code 3 to stop the guest from issuing channel * I/O instructions. */ kvm_s390_set_psw_cc(vcpu, 3); diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index 97e03ca..0632dc5 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c @@ -74,15 +74,18 @@ static size_t copy_in_kernel(size_t count, void __user *to, /* * Returns kernel address for user virtual address. If the returned address is - * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address - * contains the (negative) exception code. + * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the + * address contains the (negative) exception code. */ #ifdef CONFIG_64BIT + static unsigned long follow_table(struct mm_struct *mm, unsigned long address, int write) { unsigned long *table = (unsigned long *)__pa(mm->pgd); + if (unlikely(address > mm->context.asce_limit - 1)) + return -0x38UL; switch (mm->context.asce_bits & _ASCE_TYPE_MASK) { case _ASCE_TYPE_REGION1: table = table + ((address >> 53) & 0x7ff); diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index e794c88..3584ed9 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -293,7 +293,7 @@ static int gmap_alloc_table(struct gmap *gmap, * @addr: address in the guest address space * @len: length of the memory area to unmap * - * Returns 0 if the unmap succeded, -EINVAL if not. + * Returns 0 if the unmap succeeded, -EINVAL if not. 
*/ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len) { @@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(gmap_unmap_segment); * @from: source address in the parent address space * @to: target address in the guest address space * - * Returns 0 if the mmap succeded, -EINVAL or -ENOMEM if not. + * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not. */ int gmap_map_segment(struct gmap *gmap, unsigned long from, unsigned long to, unsigned long len) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 16871da..708d60e 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -368,14 +368,16 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg)); /* lhi %r4,0 */ EMIT4(0xa7480000); - /* dr %r4,%r12 */ - EMIT2(0x1d4c); + /* dlr %r4,%r12 */ + EMIT4(0xb997004c); break; - case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K) */ - /* m %r4,<d(K)>(%r13) */ - EMIT4_DISP(0x5c40d000, EMIT_CONST(K)); - /* lr %r5,%r4 */ - EMIT2(0x1854); + case BPF_S_ALU_DIV_K: /* A /= K */ + if (K == 1) + break; + /* lhi %r4,0 */ + EMIT4(0xa7480000); + /* dl %r4,<d(K)>(%r13) */ + EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K)); break; case BPF_S_ALU_MOD_X: /* A %= X */ jit->seen |= SEEN_XREG | SEEN_RET0; @@ -385,16 +387,21 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg)); /* lhi %r4,0 */ EMIT4(0xa7480000); - /* dr %r4,%r12 */ - EMIT2(0x1d4c); + /* dlr %r4,%r12 */ + EMIT4(0xb997004c); /* lr %r5,%r4 */ EMIT2(0x1854); break; case BPF_S_ALU_MOD_K: /* A %= K */ + if (K == 1) { + /* lhi %r5,0 */ + EMIT4(0xa7580000); + break; + } /* lhi %r4,0 */ EMIT4(0xa7480000); - /* d %r4,<d(K)>(%r13) */ - EMIT4_DISP(0x5d40d000, EMIT_CONST(K)); + /* dl %r4,<d(K)>(%r13) */ + EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K)); /* lr %r5,%r4 */ EMIT2(0x1854); break; diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c index 231ceca..a32c967 100644 --- a/arch/s390/oprofile/hwsampler.c +++ b/arch/s390/oprofile/hwsampler.c @@ -26,9 +26,6 @@ #define MAX_NUM_SDB 511 #define MIN_NUM_SDB 1 -#define ALERT_REQ_MASK 0x4000000000000000ul -#define BUFFER_FULL_MASK 0x8000000000000000ul - DECLARE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer); struct hws_execute_parms { @@ -44,6 +41,7 @@ static DEFINE_MUTEX(hws_sem_oom); static unsigned char hws_flush_all; static unsigned int hws_oom; +static unsigned int hws_alert; static struct workqueue_struct *hws_wq; static unsigned int hws_state; @@ -65,43 +63,6 @@ static unsigned long interval; static unsigned long min_sampler_rate; static unsigned long max_sampler_rate; -static int ssctl(void *buffer) -{ - int cc; - - /* set in order to detect a program check */ - cc = 1; - - asm volatile( - "0: .insn s,0xB2870000,0(%1)\n" - "1: ipm %0\n" - " srl %0,28\n" - "2:\n" - EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) - : "+d" (cc), "+a" (buffer) - : "m" (*((struct hws_ssctl_request_block *)buffer)) - : "cc", "memory"); - - return cc ? -EINVAL : 0 ; -} - -static int qsi(void *buffer) -{ - int cc; - cc = 1; - - asm volatile( - "0: .insn s,0xB2860000,0(%1)\n" - "1: lhi %0,0\n" - "2:\n" - EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) - : "=d" (cc), "+a" (buffer) - : "m" (*((struct hws_qsi_info_block *)buffer)) - : "cc", "memory"); - - return cc ? 
-EINVAL : 0; -} - static void execute_qsi(void *parms) { struct hws_execute_parms *ep = parms; @@ -113,7 +74,7 @@ static void execute_ssctl(void *parms) { struct hws_execute_parms *ep = parms; - ep->rc = ssctl(ep->buffer); + ep->rc = lsctl(ep->buffer); } static int smp_ctl_ssctl_stop(int cpu) @@ -214,17 +175,6 @@ static int smp_ctl_qsi(int cpu) return ep.rc; } -static inline unsigned long *trailer_entry_ptr(unsigned long v) -{ - void *ret; - - ret = (void *)v; - ret += PAGE_SIZE; - ret -= sizeof(struct hws_trailer_entry); - - return (unsigned long *) ret; -} - static void hws_ext_handler(struct ext_code ext_code, unsigned int param32, unsigned long param64) { @@ -233,6 +183,9 @@ static void hws_ext_handler(struct ext_code ext_code, if (!(param32 & CPU_MF_INT_SF_MASK)) return; + if (!hws_alert) + return; + inc_irq_stat(IRQEXT_CMS); atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32); @@ -256,16 +209,6 @@ static void init_all_cpu_buffers(void) } } -static int is_link_entry(unsigned long *s) -{ - return *s & 0x1ul ? 1 : 0; -} - -static unsigned long *get_next_sdbt(unsigned long *s) -{ - return (unsigned long *) (*s & ~0x1ul); -} - static int prepare_cpu_buffers(void) { int cpu; @@ -353,7 +296,7 @@ static int allocate_sdbt(int cpu) } *sdbt = sdb; trailer = trailer_entry_ptr(*sdbt); - *trailer = ALERT_REQ_MASK; + *trailer = SDB_TE_ALERT_REQ_MASK; sdbt++; mutex_unlock(&hws_sem_oom); } @@ -829,7 +772,7 @@ static void worker_on_interrupt(unsigned int cpu) trailer = trailer_entry_ptr(*sdbt); /* leave loop if no more work to do */ - if (!(*trailer & BUFFER_FULL_MASK)) { + if (!(*trailer & SDB_TE_BUFFER_FULL_MASK)) { done = 1; if (!hws_flush_all) continue; @@ -856,7 +799,7 @@ static void worker_on_interrupt(unsigned int cpu) static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt, unsigned long *dear) { - struct hws_data_entry *sample_data_ptr; + struct hws_basic_entry *sample_data_ptr; unsigned long *trailer; trailer = trailer_entry_ptr(*sdbt); @@ -866,7 +809,7 @@ static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt, trailer = dear; } - sample_data_ptr = (struct hws_data_entry *)(*sdbt); + sample_data_ptr = (struct hws_basic_entry *)(*sdbt); while ((unsigned long *)sample_data_ptr < trailer) { struct pt_regs *regs = NULL; @@ -1002,6 +945,7 @@ int hwsampler_deallocate(void) goto deallocate_exit; irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); + hws_alert = 0; deallocate_sdbt(); hws_state = HWS_DEALLOCATED; @@ -1116,6 +1060,7 @@ int hwsampler_shutdown(void) if (hws_state == HWS_STOPPED) { irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); + hws_alert = 0; deallocate_sdbt(); } if (hws_wq) { @@ -1190,6 +1135,7 @@ start_all_exit: hws_oom = 1; hws_flush_all = 0; /* now let them in, 1407 CPUMF external interrupts */ + hws_alert = 1; irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); return 0; diff --git a/arch/s390/oprofile/hwsampler.h b/arch/s390/oprofile/hwsampler.h index 0022e1e..a483d06 100644 --- a/arch/s390/oprofile/hwsampler.h +++ b/arch/s390/oprofile/hwsampler.h @@ -9,27 +9,7 @@ #define HWSAMPLER_H_ #include <linux/workqueue.h> - -struct hws_qsi_info_block /* QUERY SAMPLING information block */ -{ /* Bit(s) */ - unsigned int b0_13:14; /* 0-13: zeros */ - unsigned int as:1; /* 14: sampling authorisation control*/ - unsigned int b15_21:7; /* 15-21: zeros */ - unsigned int es:1; /* 22: sampling enable control */ - unsigned int b23_29:7; /* 23-29: zeros */ - unsigned int cs:1; /* 30: sampling activation control */ - unsigned 
int:1; /* 31: reserved */ - unsigned int bsdes:16; /* 4-5: size of sampling entry */ - unsigned int:16; /* 6-7: reserved */ - unsigned long min_sampl_rate; /* 8-15: minimum sampling interval */ - unsigned long max_sampl_rate; /* 16-23: maximum sampling interval*/ - unsigned long tear; /* 24-31: TEAR contents */ - unsigned long dear; /* 32-39: DEAR contents */ - unsigned int rsvrd0; /* 40-43: reserved */ - unsigned int cpu_speed; /* 44-47: CPU speed */ - unsigned long long rsvrd1; /* 48-55: reserved */ - unsigned long long rsvrd2; /* 56-63: reserved */ -}; +#include <asm/cpu_mf.h> struct hws_ssctl_request_block /* SET SAMPLING CONTROLS req block */ { /* bytes 0 - 7 Bit(s) */ @@ -68,36 +48,6 @@ struct hws_cpu_buffer { unsigned int stop_mode:1; }; -struct hws_data_entry { - unsigned int def:16; /* 0-15 Data Entry Format */ - unsigned int R:4; /* 16-19 reserved */ - unsigned int U:4; /* 20-23 Number of unique instruct. */ - unsigned int z:2; /* zeros */ - unsigned int T:1; /* 26 PSW DAT mode */ - unsigned int W:1; /* 27 PSW wait state */ - unsigned int P:1; /* 28 PSW Problem state */ - unsigned int AS:2; /* 29-30 PSW address-space control */ - unsigned int I:1; /* 31 entry valid or invalid */ - unsigned int:16; - unsigned int prim_asn:16; /* primary ASN */ - unsigned long long ia; /* Instruction Address */ - unsigned long long gpp; /* Guest Program Parameter */ - unsigned long long hpp; /* Host Program Parameter */ -}; - -struct hws_trailer_entry { - unsigned int f:1; /* 0 - Block Full Indicator */ - unsigned int a:1; /* 1 - Alert request control */ - unsigned long:62; /* 2 - 63: Reserved */ - unsigned long overflow; /* 64 - sample Overflow count */ - unsigned long timestamp; /* 16 - time-stamp */ - unsigned long timestamp1; /* */ - unsigned long reserved1; /* 32 -Reserved */ - unsigned long reserved2; /* */ - unsigned long progusage1; /* 48 - reserved for programming use */ - unsigned long progusage2; /* */ -}; - int hwsampler_setup(void); int hwsampler_shutdown(void); int hwsampler_allocate(unsigned long sdbt, unsigned long sdb); diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index 04e1b6a..9ffe645 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c @@ -10,6 +10,7 @@ */ #include <linux/oprofile.h> +#include <linux/perf_event.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/fs.h> @@ -67,6 +68,21 @@ module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0); MODULE_PARM_DESC(cpu_type, "Force legacy basic mode sampling" "(report cpu_type \"timer\""); +static int __oprofile_hwsampler_start(void) +{ + int retval; + + retval = hwsampler_allocate(oprofile_sdbt_blocks, oprofile_sdb_blocks); + if (retval) + return retval; + + retval = hwsampler_start_all(oprofile_hw_interval); + if (retval) + hwsampler_deallocate(); + + return retval; +} + static int oprofile_hwsampler_start(void) { int retval; @@ -76,13 +92,13 @@ static int oprofile_hwsampler_start(void) if (!hwsampler_running) return timer_ops.start(); - retval = hwsampler_allocate(oprofile_sdbt_blocks, oprofile_sdb_blocks); + retval = perf_reserve_sampling(); if (retval) return retval; - retval = hwsampler_start_all(oprofile_hw_interval); + retval = __oprofile_hwsampler_start(); if (retval) - hwsampler_deallocate(); + perf_release_sampling(); return retval; } @@ -96,6 +112,7 @@ static void oprofile_hwsampler_stop(void) hwsampler_stop_all(); hwsampler_deallocate(); + perf_release_sampling(); return; } diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index bf7c73d..0820362 100644 --- 
a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -919,17 +919,23 @@ static void zpci_mem_exit(void) kmem_cache_destroy(zdev_fmb_cache); } -static unsigned int s390_pci_probe; +static unsigned int s390_pci_probe = 1; +static unsigned int s390_pci_initialized; char * __init pcibios_setup(char *str) { - if (!strcmp(str, "on")) { - s390_pci_probe = 1; + if (!strcmp(str, "off")) { + s390_pci_probe = 0; return NULL; } return str; } +bool zpci_is_enabled(void) +{ + return s390_pci_initialized; +} + static int __init pci_base_init(void) { int rc; @@ -961,6 +967,7 @@ static int __init pci_base_init(void) if (rc) goto out_find; + s390_pci_initialized = 1; return 0; out_find: @@ -978,5 +985,6 @@ subsys_initcall_sync(pci_base_init); void zpci_rescan(void) { - clp_rescan_pci_devices_simple(); + if (zpci_is_enabled()) + clp_rescan_pci_devices_simple(); } diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index 9b83d08..60c11a6 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c @@ -285,7 +285,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, flags |= ZPCI_TABLE_PROTECTED; if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) { - atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages); + atomic64_add(nr_pages, &zdev->fmb->mapped_pages); return dma_addr + (offset & ~PAGE_MASK); } @@ -313,7 +313,7 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr, zpci_err_hex(&dma_addr, sizeof(dma_addr)); } - atomic64_add(npages, (atomic64_t *) &zdev->fmb->unmapped_pages); + atomic64_add(npages, &zdev->fmb->unmapped_pages); iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT; dma_free_iommu(zdev, iommu_page_index, npages); } @@ -332,7 +332,6 @@ static void *s390_dma_alloc(struct device *dev, size_t size, if (!page) return NULL; - atomic64_add(size / PAGE_SIZE, (atomic64_t *) &zdev->fmb->allocated_pages); pa = page_to_phys(page); memset((void *) pa, 0, size); @@ -343,6 +342,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size, return NULL; } + atomic64_add(size / PAGE_SIZE, &zdev->fmb->allocated_pages); if (dma_handle) *dma_handle = map; return (void *) pa; @@ -352,8 +352,11 @@ static void s390_dma_free(struct device *dev, size_t size, void *pa, dma_addr_t dma_handle, struct dma_attrs *attrs) { - s390_dma_unmap_pages(dev, dma_handle, PAGE_ALIGN(size), - DMA_BIDIRECTIONAL, NULL); + struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); + + size = PAGE_ALIGN(size); + atomic64_sub(size / PAGE_SIZE, &zdev->fmb->allocated_pages); + s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL); free_pages((unsigned long) pa, get_order(size)); } diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index 800f064..01e251b 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c @@ -43,9 +43,8 @@ struct zpci_ccdf_avail { u16 pec; /* PCI event code */ } __packed; -void zpci_event_error(void *data) +static void __zpci_event_error(struct zpci_ccdf_err *ccdf) { - struct zpci_ccdf_err *ccdf = data; struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); zpci_err("error CCDF:\n"); @@ -58,9 +57,14 @@ void zpci_event_error(void *data) pci_name(zdev->pdev), ccdf->pec, ccdf->fid); } -void zpci_event_availability(void *data) +void zpci_event_error(void *data) +{ + if (zpci_is_enabled()) + __zpci_event_error(data); +} + +static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) { - struct zpci_ccdf_avail *ccdf = data; struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); struct pci_dev *pdev = 
zdev ? zdev->pdev : NULL; int ret; @@ -75,6 +79,7 @@ void zpci_event_availability(void *data) if (!zdev || zdev->state == ZPCI_FN_STATE_CONFIGURED) break; zdev->state = ZPCI_FN_STATE_CONFIGURED; + zdev->fh = ccdf->fh; ret = zpci_enable_device(zdev); if (ret) break; @@ -98,9 +103,14 @@ void zpci_event_availability(void *data) break; case 0x0304: /* Configured -> Standby */ - if (pdev) + if (pdev) { + /* Give the driver a hint that the function is + * already unusable. */ + pdev->error_state = pci_channel_io_perm_failure; pci_stop_and_remove_bus_device(pdev); + } + zdev->fh = ccdf->fh; zpci_disable_device(zdev); zdev->state = ZPCI_FN_STATE_STANDBY; break; @@ -108,6 +118,8 @@ void zpci_event_availability(void *data) clp_rescan_pci_devices(); break; case 0x0308: /* Standby -> Reserved */ + if (!zdev) + break; pci_stop_root_bus(zdev->bus); pci_remove_root_bus(zdev->bus); break; @@ -115,3 +127,9 @@ void zpci_event_availability(void *data) break; } } + +void zpci_event_availability(void *data) +{ + if (zpci_is_enabled()) + __zpci_event_availability(data); +} diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild index f3414ad..fe7471e 100644 --- a/arch/score/include/asm/Kbuild +++ b/arch/score/include/asm/Kbuild @@ -1,6 +1,7 @@ header-y += +generic-y += barrier.h generic-y += clkdev.h generic-y += trace_clock.h generic-y += xor.h diff --git a/arch/score/include/asm/barrier.h b/arch/score/include/asm/barrier.h deleted file mode 100644 index 0eacb64..0000000 --- a/arch/score/include/asm/barrier.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef _ASM_SCORE_BARRIER_H -#define _ASM_SCORE_BARRIER_H - -#define mb() barrier() -#define rmb() barrier() -#define wmb() barrier() -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() - -#define read_barrier_depends() do {} while (0) -#define smp_read_barrier_depends() do {} while (0) - -#define set_mb(var, value) do {var = value; wmb(); } while (0) - -#endif /* _ASM_SCORE_BARRIER_H */ diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 9b0979f..ce29831 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -66,6 +66,7 @@ config SUPERH32 select PERF_EVENTS select ARCH_HIBERNATION_POSSIBLE if MMU select SPARSE_IRQ + select HAVE_CC_STACKPROTECTOR config SUPERH64 def_bool ARCH = "sh64" @@ -695,20 +696,6 @@ config SECCOMP If unsure, say N. -config CC_STACKPROTECTOR - bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" - depends on SUPERH32 - help - This option turns on the -fstack-protector GCC feature. This - feature puts, at the beginning of functions, a canary value on - the stack just before the return address, and validates - the value just before actually returning. Stack based buffer - overflows (that need to overwrite this return address) now also - overwrite the canary, which gets detected and the attack is then - neutralized via a kernel panic. - - This feature requires gcc version 4.2 or above. 
- config SMP bool "Symmetric multi-processing support" depends on SYS_SUPPORTS_SMP diff --git a/arch/sh/Makefile b/arch/sh/Makefile index aed701c..d4d16e4 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile @@ -199,10 +199,6 @@ ifeq ($(CONFIG_DWARF_UNWINDER),y) KBUILD_CFLAGS += -fasynchronous-unwind-tables endif -ifeq ($(CONFIG_CC_STACKPROTECTOR),y) - KBUILD_CFLAGS += -fstack-protector -endif - libs-$(CONFIG_SUPERH32) := arch/sh/lib/ $(libs-y) libs-$(CONFIG_SUPERH64) := arch/sh/lib64/ $(libs-y) diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h index 72c103d..4371530 100644 --- a/arch/sh/include/asm/barrier.h +++ b/arch/sh/include/asm/barrier.h @@ -26,29 +26,14 @@ #if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5) #define mb() __asm__ __volatile__ ("synco": : :"memory") #define rmb() mb() -#define wmb() __asm__ __volatile__ ("synco": : :"memory") +#define wmb() mb() #define ctrl_barrier() __icbi(PAGE_OFFSET) -#define read_barrier_depends() do { } while(0) #else -#define mb() __asm__ __volatile__ ("": : :"memory") -#define rmb() mb() -#define wmb() __asm__ __volatile__ ("": : :"memory") #define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop") -#define read_barrier_depends() do { } while(0) -#endif - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while(0) #endif #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) +#include <asm-generic/barrier.h> + #endif /* __ASM_SH_BARRIER_H */ diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c index 2a0a596..d77f2f6 100644 --- a/arch/sh/kernel/sh_ksyms_32.c +++ b/arch/sh/kernel/sh_ksyms_32.c @@ -20,6 +20,11 @@ EXPORT_SYMBOL(csum_partial_copy_generic); EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(empty_zero_page); +#ifdef CONFIG_FLATMEM +/* need in pfn_valid macro */ +EXPORT_SYMBOL(min_low_pfn); +EXPORT_SYMBOL(max_low_pfn); +#endif #define DECLARE_EXPORT(name) \ extern void name(void);EXPORT_SYMBOL(name) diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile index 7b95f29..3baff31 100644 --- a/arch/sh/lib/Makefile +++ b/arch/sh/lib/Makefile @@ -6,7 +6,7 @@ lib-y = delay.o memmove.o memchr.o \ checksum.o strlen.o div64.o div64-generic.o # Extracted from libgcc -lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \ +obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \ ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \ udiv_qrnnd.o diff --git a/arch/sparc/include/asm/barrier_32.h b/arch/sparc/include/asm/barrier_32.h index c1b7665..ae69eda 100644 --- a/arch/sparc/include/asm/barrier_32.h +++ b/arch/sparc/include/asm/barrier_32.h @@ -1,15 +1,7 @@ #ifndef __SPARC_BARRIER_H #define __SPARC_BARRIER_H -/* XXX Change this if we ever use a PSO mode kernel. 
*/ -#define mb() __asm__ __volatile__ ("" : : : "memory") -#define rmb() mb() -#define wmb() mb() -#define read_barrier_depends() do { } while(0) -#define set_mb(__var, __value) do { __var = __value; mb(); } while(0) -#define smp_mb() __asm__ __volatile__("":::"memory") -#define smp_rmb() __asm__ __volatile__("":::"memory") -#define smp_wmb() __asm__ __volatile__("":::"memory") -#define smp_read_barrier_depends() do { } while(0) +#include <asm/processor.h> /* for nop() */ +#include <asm-generic/barrier.h> #endif /* !(__SPARC_BARRIER_H) */ diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h index 95d4598..b5aad96 100644 --- a/arch/sparc/include/asm/barrier_64.h +++ b/arch/sparc/include/asm/barrier_64.h @@ -53,4 +53,19 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \ #define smp_read_barrier_depends() do { } while(0) +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ___p1; \ +}) + #endif /* !(__SPARC64_BARRIER_H) */ diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 8358dc1..0f9e945 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -619,7 +619,7 @@ static inline unsigned long pte_present(pte_t pte) } #define pte_accessible pte_accessible -static inline unsigned long pte_accessible(pte_t a) +static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a) { return pte_val(a) & _PAGE_VALID; } @@ -847,7 +847,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U * and SUN4V pte layout, so this inline test is fine. 
*/ - if (likely(mm != &init_mm) && pte_accessible(orig)) + if (likely(mm != &init_mm) && pte_accessible(mm, orig)) tlb_batch_add(mm, addr, ptep, orig, fullmm); } diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index e562d3c..ad7e178 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -262,8 +262,8 @@ extern unsigned long __must_check __clear_user(void __user *, unsigned long); extern __must_check long strlen_user(const char __user *str); extern __must_check long strnlen_user(const char __user *str, long n); -#define __copy_to_user_inatomic ___copy_to_user -#define __copy_from_user_inatomic ___copy_from_user +#define __copy_to_user_inatomic __copy_to_user +#define __copy_from_user_inatomic __copy_from_user struct pt_regs; extern unsigned long compute_effective_address(struct pt_regs *, diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 070ed14..76663b0 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -854,7 +854,7 @@ int dma_supported(struct device *dev, u64 device_mask) return 1; #ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) + if (dev_is_pci(dev)) return pci64_dma_supported(to_pci_dev(dev), device_mask); #endif diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 2096468..e7e215d 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -666,10 +666,9 @@ EXPORT_SYMBOL(dma_ops); */ int dma_supported(struct device *dev, u64 mask) { -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) + if (dev_is_pci(dev)) return 1; -#endif + return 0; } EXPORT_SYMBOL(dma_supported); diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c index 60b19f5..b45fe3f 100644 --- a/arch/sparc/kernel/kgdb_64.c +++ b/arch/sparc/kernel/kgdb_64.c @@ -6,6 +6,7 @@ #include <linux/kgdb.h> #include <linux/kdebug.h> #include <linux/ftrace.h> +#include <linux/context_tracking.h> #include <asm/cacheflush.h> #include <asm/kdebug.h> diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index b66a533..b085311 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -123,11 +123,12 @@ void smp_callin(void) rmb(); set_cpu_online(cpuid, true); - local_irq_enable(); /* idle thread is expected to have preempt disabled */ preempt_disable(); + local_irq_enable(); + cpu_startup_entry(CPUHP_ONLINE); } diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c index 218b6b2..01fe994 100644 --- a/arch/sparc/net/bpf_jit_comp.c +++ b/arch/sparc/net/bpf_jit_comp.c @@ -497,9 +497,20 @@ void bpf_jit_compile(struct sk_filter *fp) case BPF_S_ALU_MUL_K: /* A *= K */ emit_alu_K(MUL, K); break; - case BPF_S_ALU_DIV_K: /* A /= K */ - emit_alu_K(MUL, K); - emit_read_y(r_A); + case BPF_S_ALU_DIV_K: /* A /= K with K != 0*/ + if (K == 1) + break; + emit_write_y(G0); +#ifdef CONFIG_SPARC32 + /* The Sparc v8 architecture requires + * three instructions between a %y + * register write and the first use. + */ + emit_nop(); + emit_nop(); + emit_nop(); +#endif + emit_alu_K(DIV, K); break; case BPF_S_ALU_DIV_X: /* A /= X; */ emit_cmpi(r_X, 0); diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h index a9a73da..b5a05d0 100644 --- a/arch/tile/include/asm/barrier.h +++ b/arch/tile/include/asm/barrier.h @@ -22,59 +22,6 @@ #include <arch/spr_def.h> #include <asm/timex.h> -/* - * read_barrier_depends - Flush all pending reads that subsequents reads - * depend on. 
- * - * No data-dependent reads from memory-like regions are ever reordered - * over this barrier. All reads preceding this primitive are guaranteed - * to access memory (but not necessarily other CPUs' caches) before any - * reads following this primitive that depend on the data return by - * any of the preceding reads. This primitive is much lighter weight than - * rmb() on most CPUs, and is never heavier weight than is - * rmb(). - * - * These ordering constraints are respected by both the local CPU - * and the compiler. - * - * Ordering is not guaranteed by anything other than these primitives, - * not even by data dependencies. See the documentation for - * memory_barrier() for examples and URLs to more information. - * - * For example, the following code would force ordering (the initial - * value of "a" is zero, "b" is one, and "p" is "&a"): - * - * <programlisting> - * CPU 0 CPU 1 - * - * b = 2; - * memory_barrier(); - * p = &b; q = p; - * read_barrier_depends(); - * d = *q; - * </programlisting> - * - * because the read of "*q" depends on the read of "p" and these - * two reads are separated by a read_barrier_depends(). However, - * the following code, with the same initial values for "a" and "b": - * - * <programlisting> - * CPU 0 CPU 1 - * - * a = 2; - * memory_barrier(); - * b = 3; y = b; - * read_barrier_depends(); - * x = a; - * </programlisting> - * - * does not enforce ordering, since there is no data dependency between - * the read of "a" and the read of "b". Therefore, on some CPUs, such - * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() - * in cases like this where there are no data dependencies. - */ -#define read_barrier_depends() do { } while (0) - #define __sync() __insn_mf() #include <hv/syscall_public.h> @@ -125,20 +72,7 @@ mb_incoherent(void) #define mb() fast_mb() #define iob() fast_iob() -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while (0) -#endif - -#define set_mb(var, value) \ - do { var = value; mb(); } while (0) +#include <asm-generic/barrier.h> #endif /* !__ASSEMBLY__ */ #endif /* _ASM_TILE_BARRIER_H */ diff --git a/arch/um/Makefile b/arch/um/Makefile index 48d92bb..36e658a 100644 --- a/arch/um/Makefile +++ b/arch/um/Makefile @@ -33,12 +33,11 @@ MODE_INCLUDE += -I$(srctree)/$(ARCH_DIR)/include/shared/skas HEADER_ARCH := $(SUBARCH) -# Additional ARCH settings for x86 -ifeq ($(SUBARCH),i386) - HEADER_ARCH := x86 +ifneq ($(filter $(SUBARCH),x86 x86_64 i386),) + HEADER_ARCH := x86 endif -ifeq ($(SUBARCH),x86_64) - HEADER_ARCH := x86 + +ifdef CONFIG_64BIT KBUILD_CFLAGS += -mcmodel=large endif diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c index 4d6fdf6..799d7e4 100644 --- a/arch/um/kernel/sysrq.c +++ b/arch/um/kernel/sysrq.c @@ -19,7 +19,7 @@ struct stack_frame { unsigned long return_address; }; -static void print_stack_trace(unsigned long *sp, unsigned long bp) +static void do_stack_trace(unsigned long *sp, unsigned long bp) { int reliable; unsigned long addr; @@ -94,5 +94,5 @@ void show_stack(struct task_struct *task, unsigned long *stack) } printk(KERN_CONT "\n"); - print_stack_trace(sp, bp); + do_stack_trace(sp, bp); } diff --git a/arch/unicore32/include/asm/barrier.h b/arch/unicore32/include/asm/barrier.h index a6620e5..83d6a52 100644 --- a/arch/unicore32/include/asm/barrier.h +++ 
b/arch/unicore32/include/asm/barrier.h @@ -14,15 +14,6 @@ #define dsb() __asm__ __volatile__ ("" : : : "memory") #define dmb() __asm__ __volatile__ ("" : : : "memory") -#define mb() barrier() -#define rmb() barrier() -#define wmb() barrier() -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define read_barrier_depends() do { } while (0) -#define smp_read_barrier_depends() do { } while (0) - -#define set_mb(var, value) do { var = value; smp_mb(); } while (0) +#include <asm-generic/barrier.h> #endif /* __UNICORE_BARRIER_H__ */ diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index e903c71..cd18b83 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -26,6 +26,7 @@ config X86 select HAVE_AOUT if X86_32 select HAVE_UNSTABLE_SCHED_CLOCK select ARCH_SUPPORTS_NUMA_BALANCING + select ARCH_SUPPORTS_INT128 if X86_64 select ARCH_WANTS_PROT_NUMA_PROT_NONE select HAVE_IDE select HAVE_OPROFILE @@ -124,6 +125,7 @@ config X86 select RTC_LIB select HAVE_DEBUG_STACKOVERFLOW select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64 + select HAVE_CC_STACKPROTECTOR config INSTRUCTION_DECODER def_bool y @@ -437,42 +439,26 @@ config X86_INTEL_CE This option compiles in support for the CE4100 SOC for settop boxes and media devices. -config X86_WANT_INTEL_MID +config X86_INTEL_MID bool "Intel MID platform support" depends on X86_32 depends on X86_EXTENDED_PLATFORM - ---help--- - Select to build a kernel capable of supporting Intel MID platform - systems which do not have the PCI legacy interfaces (Moorestown, - Medfield). If you are building for a PC class system say N here. - -if X86_WANT_INTEL_MID - -config X86_INTEL_MID - bool - -config X86_MDFLD - bool "Medfield MID platform" depends on PCI depends on PCI_GOANY depends on X86_IO_APIC - select X86_INTEL_MID select SFI + select I2C select DW_APB_TIMER select APB_TIMER - select I2C - select SPI select INTEL_SCU_IPC - select X86_PLATFORM_DEVICES select MFD_INTEL_MSIC ---help--- - Medfield is Intel's Low Power Intel Architecture (LPIA) based Moblin - Internet Device(MID) platform. - Unlike standard x86 PCs, Medfield does not have many legacy devices - nor standard legacy replacement devices/features. e.g. Medfield does - not contain i8259, i8254, HPET, legacy BIOS, most of the io ports. + Select to build a kernel capable of supporting Intel MID (Mobile + Internet Device) platform systems which do not have the PCI legacy + interfaces. If you are building for a PC class system say N here. -endif + Intel MID platforms are based on an Intel processor and chipset which + consume less power than most of the x86 derivatives. config X86_INTEL_LPSS bool "Intel Low Power Subsystem Support" @@ -1079,10 +1065,6 @@ config MICROCODE_OLD_INTERFACE def_bool y depends on MICROCODE -config MICROCODE_INTEL_LIB - def_bool y - depends on MICROCODE_INTEL - config MICROCODE_INTEL_EARLY def_bool n @@ -1616,22 +1598,6 @@ config SECCOMP If unsure, say Y. Only embedded should say N here. -config CC_STACKPROTECTOR - bool "Enable -fstack-protector buffer overflow detection" - ---help--- - This option turns on the -fstack-protector GCC feature. This - feature puts, at the beginning of functions, a canary value on - the stack just before the return address, and validates - the value just before actually returning. Stack based buffer - overflows (that need to overwrite this return address) now also - overwrite the canary, which gets detected and the attack is then - neutralized via a kernel panic. 
- - This feature requires gcc version 4.2 or above, or a distribution - gcc with the feature backported. Older versions are automatically - detected and for those versions, this configuration option is - ignored. (and a warning is printed during bootup) - source kernel/Kconfig.hz config KEXEC @@ -1727,16 +1693,67 @@ config RELOCATABLE Note: If CONFIG_RELOCATABLE=y, then the kernel runs from the address it has been loaded at and the compile time physical address - (CONFIG_PHYSICAL_START) is ignored. + (CONFIG_PHYSICAL_START) is used as the minimum location. -# Relocation on x86-32 needs some additional build support +config RANDOMIZE_BASE + bool "Randomize the address of the kernel image" + depends on RELOCATABLE + depends on !HIBERNATION + default n + ---help--- + Randomizes the physical and virtual address at which the + kernel image is decompressed, as a security feature that + deters exploit attempts relying on knowledge of the location + of kernel internals. + + Entropy is generated using the RDRAND instruction if it is + supported. If RDTSC is supported, it is used as well. If + neither RDRAND nor RDTSC are supported, then randomness is + read from the i8254 timer. + + The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET, + and aligned according to PHYSICAL_ALIGN. Since the kernel is + built using 2GiB addressing, and PHYSICAL_ALIGN must be at a + minimum of 2MiB, only 10 bits of entropy is theoretically + possible. At best, due to page table layouts, 64-bit can use + 9 bits of entropy and 32-bit uses 8 bits. + + If unsure, say N. + +config RANDOMIZE_BASE_MAX_OFFSET + hex "Maximum kASLR offset allowed" if EXPERT + depends on RANDOMIZE_BASE + range 0x0 0x20000000 if X86_32 + default "0x20000000" if X86_32 + range 0x0 0x40000000 if X86_64 + default "0x40000000" if X86_64 + ---help--- + The lesser of RANDOMIZE_BASE_MAX_OFFSET and available physical + memory is used to determine the maximal offset in bytes that will + be applied to the kernel when kernel Address Space Layout + Randomization (kASLR) is active. This must be a multiple of + PHYSICAL_ALIGN. + + On 32-bit this is limited to 512MiB by page table layouts. The + default is 512MiB. + + On 64-bit this is limited by how the kernel fixmap page table is + positioned, so this cannot be larger than 1GiB currently. Without + RANDOMIZE_BASE, there is a 512MiB to 1.5GiB split between kernel + and modules. When RANDOMIZE_BASE_MAX_OFFSET is above 512MiB, the + modules area will shrink to compensate, up to the current maximum + 1GiB to 1GiB split. The default is 1GiB. + + If unsure, leave at the default value. + +# Relocation on x86 needs some additional build support config X86_NEED_RELOCS def_bool y - depends on X86_32 && RELOCATABLE + depends on RANDOMIZE_BASE || (X86_32 && RELOCATABLE) config PHYSICAL_ALIGN hex "Alignment value to which kernel should be aligned" - default "0x1000000" + default "0x200000" range 0x2000 0x1000000 if X86_32 range 0x200000 0x1000000 if X86_64 ---help--- @@ -2392,6 +2409,14 @@ config X86_DMA_REMAP bool depends on STA2X11 +config IOSF_MBI + bool + depends on PCI + ---help--- + To be selected by modules requiring access to the Intel OnChip System + Fabric (IOSF) Sideband MailBox Interface (MBI). For MBI platforms + enumerable by PCI.
+ source "net/Kconfig" source "drivers/Kconfig" diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 41250fb..13b22e0 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -31,6 +31,9 @@ ifeq ($(CONFIG_X86_32),y) KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return + # Don't autogenerate MMX or SSE instructions + KBUILD_CFLAGS += -mno-mmx -mno-sse + # Never want PIC in a 32-bit kernel, prevent breakage with GCC built # with nonstandard options KBUILD_CFLAGS += -fno-pic @@ -57,8 +60,11 @@ else KBUILD_AFLAGS += -m64 KBUILD_CFLAGS += -m64 + # Don't autogenerate MMX or SSE instructions + KBUILD_CFLAGS += -mno-mmx -mno-sse + # Use -mpreferred-stack-boundary=3 if supported. - KBUILD_CFLAGS += $(call cc-option,-mno-sse -mpreferred-stack-boundary=3) + KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3) # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu) cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8) @@ -83,13 +89,11 @@ else KBUILD_CFLAGS += -maccumulate-outgoing-args endif +# Make sure compiler does not have buggy stack-protector support. ifdef CONFIG_CC_STACKPROTECTOR cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh - ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y) - stackp-y := -fstack-protector - KBUILD_CFLAGS += $(stackp-y) - else - $(warning stack protector enabled but no compiler support) + ifneq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y) + $(warning stack-protector enabled but compiler support broken) endif endif diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index dce69a2..de70669 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -20,7 +20,7 @@ targets := vmlinux.bin setup.bin setup.elf bzImage targets += fdimage fdimage144 fdimage288 image.iso mtools.conf subdir- := compressed -setup-y += a20.o bioscall.o cmdline.o copy.o cpu.o cpucheck.o +setup-y += a20.o bioscall.o cmdline.o copy.o cpu.o cpuflags.o cpucheck.o setup-y += early_serial_console.o edd.o header.o main.o mca.o memory.o setup-y += pm.o pmjump.o printf.o regs.o string.o tty.o video.o setup-y += video-mode.o version.o @@ -53,18 +53,18 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE # How to compile the 16-bit code. Note we always compile for -march=i386, # that way we can complain to the user if the CPU is insufficient. -KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \ +KBUILD_CFLAGS := $(USERINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ \ -DDISABLE_BRANCH_PROFILING \ -Wall -Wstrict-prototypes \ -march=i386 -mregparm=3 \ -include $(srctree)/$(src)/code16gcc.h \ -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ + -mno-mmx -mno-sse \ $(call cc-option, -ffreestanding) \ $(call cc-option, -fno-toplevel-reorder,\ - $(call cc-option, -fno-unit-at-a-time)) \ + $(call cc-option, -fno-unit-at-a-time)) \ $(call cc-option, -fno-stack-protector) \ $(call cc-option, -mpreferred-stack-boundary=2) -KBUILD_CFLAGS += $(call cc-option, -m32) KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ GCOV_PROFILE := n diff --git a/arch/x86/boot/bioscall.S b/arch/x86/boot/bioscall.S index 1dfbf64..d401b4a 100644 --- a/arch/x86/boot/bioscall.S +++ b/arch/x86/boot/bioscall.S @@ -1,6 +1,6 @@ /* ----------------------------------------------------------------------- * - * Copyright 2009 Intel Corporation; author H. Peter Anvin + * Copyright 2009-2014 Intel Corporation; author H. 
Peter Anvin * * This file is part of the Linux kernel, and is made available under * the terms of the GNU General Public License version 2 or (at your @@ -13,8 +13,8 @@ * touching registers they shouldn't be. */ - .code16gcc - .text + .code16 + .section ".inittext","ax" .globl intcall .type intcall, @function intcall: diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h index ef72bae..50f8c5e 100644 --- a/arch/x86/boot/boot.h +++ b/arch/x86/boot/boot.h @@ -26,9 +26,8 @@ #include <asm/boot.h> #include <asm/setup.h> #include "bitops.h" -#include <asm/cpufeature.h> -#include <asm/processor-flags.h> #include "ctype.h" +#include "cpuflags.h" /* Useful macros */ #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) @@ -307,14 +306,7 @@ static inline int cmdline_find_option_bool(const char *option) return __cmdline_find_option_bool(cmd_line_ptr, option); } - /* cpu.c, cpucheck.c */ -struct cpu_features { - int level; /* Family, or 64 for x86-64 */ - int model; - u32 flags[NCAPINTS]; -}; -extern struct cpu_features cpu; int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr); int validate_cpu(void); diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index dcd90df..0fcd913 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -13,6 +13,7 @@ KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING cflags-$(CONFIG_X86_32) := -march=i386 cflags-$(CONFIG_X86_64) := -mcmodel=small KBUILD_CFLAGS += $(cflags-y) +KBUILD_CFLAGS += -mno-mmx -mno-sse KBUILD_CFLAGS += $(call cc-option,-ffreestanding) KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) @@ -27,7 +28,7 @@ HOST_EXTRACFLAGS += -I$(srctree)/tools/include VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \ $(obj)/string.o $(obj)/cmdline.o $(obj)/early_serial_console.o \ - $(obj)/piggy.o + $(obj)/piggy.o $(obj)/cpuflags.o $(obj)/aslr.o $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c new file mode 100644 index 0000000..90a21f4 --- /dev/null +++ b/arch/x86/boot/compressed/aslr.c @@ -0,0 +1,316 @@ +#include "misc.h" + +#ifdef CONFIG_RANDOMIZE_BASE +#include <asm/msr.h> +#include <asm/archrandom.h> +#include <asm/e820.h> + +#include <generated/compile.h> +#include <linux/module.h> +#include <linux/uts.h> +#include <linux/utsname.h> +#include <generated/utsrelease.h> + +/* Simplified build-specific string for starting entropy. */ +static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@" + LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION; + +#define I8254_PORT_CONTROL 0x43 +#define I8254_PORT_COUNTER0 0x40 +#define I8254_CMD_READBACK 0xC0 +#define I8254_SELECT_COUNTER0 0x02 +#define I8254_STATUS_NOTREADY 0x40 +static inline u16 i8254(void) +{ + u16 status, timer; + + do { + outb(I8254_PORT_CONTROL, + I8254_CMD_READBACK | I8254_SELECT_COUNTER0); + status = inb(I8254_PORT_COUNTER0); + timer = inb(I8254_PORT_COUNTER0); + timer |= inb(I8254_PORT_COUNTER0) << 8; + } while (status & I8254_STATUS_NOTREADY); + + return timer; +} + +static unsigned long rotate_xor(unsigned long hash, const void *area, + size_t size) +{ + size_t i; + unsigned long *ptr = (unsigned long *)area; + + for (i = 0; i < size / sizeof(hash); i++) { + /* Rotate by odd number of bits and XOR. */ + hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7); + hash ^= ptr[i]; + } + + return hash; +} + +/* Attempt to create a simple but unpredictable starting entropy. 
*/ +static unsigned long get_random_boot(void) +{ + unsigned long hash = 0; + + hash = rotate_xor(hash, build_str, sizeof(build_str)); + hash = rotate_xor(hash, real_mode, sizeof(*real_mode)); + + return hash; +} + +static unsigned long get_random_long(void) +{ +#ifdef CONFIG_X86_64 + const unsigned long mix_const = 0x5d6008cbf3848dd3UL; +#else + const unsigned long mix_const = 0x3f39e593UL; +#endif + unsigned long raw, random = get_random_boot(); + bool use_i8254 = true; + + debug_putstr("KASLR using"); + + if (has_cpuflag(X86_FEATURE_RDRAND)) { + debug_putstr(" RDRAND"); + if (rdrand_long(&raw)) { + random ^= raw; + use_i8254 = false; + } + } + + if (has_cpuflag(X86_FEATURE_TSC)) { + debug_putstr(" RDTSC"); + rdtscll(raw); + + random ^= raw; + use_i8254 = false; + } + + if (use_i8254) { + debug_putstr(" i8254"); + random ^= i8254(); + } + + /* Circular multiply for better bit diffusion */ + asm("mul %3" + : "=a" (random), "=d" (raw) + : "a" (random), "rm" (mix_const)); + random += raw; + + debug_putstr("...\n"); + + return random; +} + +struct mem_vector { + unsigned long start; + unsigned long size; +}; + +#define MEM_AVOID_MAX 5 +struct mem_vector mem_avoid[MEM_AVOID_MAX]; + +static bool mem_contains(struct mem_vector *region, struct mem_vector *item) +{ + /* Item at least partially before region. */ + if (item->start < region->start) + return false; + /* Item at least partially after region. */ + if (item->start + item->size > region->start + region->size) + return false; + return true; +} + +static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two) +{ + /* Item one is entirely before item two. */ + if (one->start + one->size <= two->start) + return false; + /* Item one is entirely after item two. */ + if (one->start >= two->start + two->size) + return false; + return true; +} + +static void mem_avoid_init(unsigned long input, unsigned long input_size, + unsigned long output, unsigned long output_size) +{ + u64 initrd_start, initrd_size; + u64 cmd_line, cmd_line_size; + unsigned long unsafe, unsafe_len; + char *ptr; + + /* + * Avoid the region that is unsafe to overlap during + * decompression (see calculations at top of misc.c). + */ + unsafe_len = (output_size >> 12) + 32768 + 18; + unsafe = (unsigned long)input + input_size - unsafe_len; + mem_avoid[0].start = unsafe; + mem_avoid[0].size = unsafe_len; + + /* Avoid initrd. */ + initrd_start = (u64)real_mode->ext_ramdisk_image << 32; + initrd_start |= real_mode->hdr.ramdisk_image; + initrd_size = (u64)real_mode->ext_ramdisk_size << 32; + initrd_size |= real_mode->hdr.ramdisk_size; + mem_avoid[1].start = initrd_start; + mem_avoid[1].size = initrd_size; + + /* Avoid kernel command line. */ + cmd_line = (u64)real_mode->ext_cmd_line_ptr << 32; + cmd_line |= real_mode->hdr.cmd_line_ptr; + /* Calculate size of cmd_line. */ + ptr = (char *)(unsigned long)cmd_line; + for (cmd_line_size = 0; ptr[cmd_line_size++]; ) + ; + mem_avoid[2].start = cmd_line; + mem_avoid[2].size = cmd_line_size; + + /* Avoid heap memory. */ + mem_avoid[3].start = (unsigned long)free_mem_ptr; + mem_avoid[3].size = BOOT_HEAP_SIZE; + + /* Avoid stack memory. */ + mem_avoid[4].start = (unsigned long)free_mem_end_ptr; + mem_avoid[4].size = BOOT_STACK_SIZE; +} + +/* Does this memory vector overlap a known avoided area? 
*/ +bool mem_avoid_overlap(struct mem_vector *img) +{ + int i; + + for (i = 0; i < MEM_AVOID_MAX; i++) { + if (mem_overlaps(img, &mem_avoid[i])) + return true; + } + + return false; +} + +unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET / CONFIG_PHYSICAL_ALIGN]; +unsigned long slot_max = 0; + +static void slots_append(unsigned long addr) +{ + /* Overflowing the slots list should be impossible. */ + if (slot_max >= CONFIG_RANDOMIZE_BASE_MAX_OFFSET / + CONFIG_PHYSICAL_ALIGN) + return; + + slots[slot_max++] = addr; +} + +static unsigned long slots_fetch_random(void) +{ + /* Handle case of no slots stored. */ + if (slot_max == 0) + return 0; + + return slots[get_random_long() % slot_max]; +} + +static void process_e820_entry(struct e820entry *entry, + unsigned long minimum, + unsigned long image_size) +{ + struct mem_vector region, img; + + /* Skip non-RAM entries. */ + if (entry->type != E820_RAM) + return; + + /* Ignore entries entirely above our maximum. */ + if (entry->addr >= CONFIG_RANDOMIZE_BASE_MAX_OFFSET) + return; + + /* Ignore entries entirely below our minimum. */ + if (entry->addr + entry->size < minimum) + return; + + region.start = entry->addr; + region.size = entry->size; + + /* Potentially raise address to minimum location. */ + if (region.start < minimum) + region.start = minimum; + + /* Potentially raise address to meet alignment requirements. */ + region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN); + + /* Did we raise the address above the bounds of this e820 region? */ + if (region.start > entry->addr + entry->size) + return; + + /* Reduce size by any delta from the original address. */ + region.size -= region.start - entry->addr; + + /* Reduce maximum size to fit end of image within maximum limit. */ + if (region.start + region.size > CONFIG_RANDOMIZE_BASE_MAX_OFFSET) + region.size = CONFIG_RANDOMIZE_BASE_MAX_OFFSET - region.start; + + /* Walk each aligned slot and check for avoided areas. */ + for (img.start = region.start, img.size = image_size ; + mem_contains(&region, &img) ; + img.start += CONFIG_PHYSICAL_ALIGN) { + if (mem_avoid_overlap(&img)) + continue; + slots_append(img.start); + } +} + +static unsigned long find_random_addr(unsigned long minimum, + unsigned long size) +{ + int i; + unsigned long addr; + + /* Make sure minimum is aligned. */ + minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN); + + /* Verify potential e820 positions, appending to slots list. */ + for (i = 0; i < real_mode->e820_entries; i++) { + process_e820_entry(&real_mode->e820_map[i], minimum, size); + } + + return slots_fetch_random(); +} + +unsigned char *choose_kernel_location(unsigned char *input, + unsigned long input_size, + unsigned char *output, + unsigned long output_size) +{ + unsigned long choice = (unsigned long)output; + unsigned long random; + + if (cmdline_find_option_bool("nokaslr")) { + debug_putstr("KASLR disabled...\n"); + goto out; + } + + /* Record the various known unsafe memory ranges. */ + mem_avoid_init((unsigned long)input, input_size, + (unsigned long)output, output_size); + + /* Walk e820 and find a random address. */ + random = find_random_addr(choice, output_size); + if (!random) { + debug_putstr("KASLR could not find suitable E820 region...\n"); + goto out; + } + + /* Always enforce the minimum.
*/ + if (random < choice) + goto out; + + choice = random; +out: + return (unsigned char *)choice; +} + +#endif /* CONFIG_RANDOMIZE_BASE */ diff --git a/arch/x86/boot/compressed/cmdline.c b/arch/x86/boot/compressed/cmdline.c index bffd73b..b68e303 100644 --- a/arch/x86/boot/compressed/cmdline.c +++ b/arch/x86/boot/compressed/cmdline.c @@ -1,6 +1,6 @@ #include "misc.h" -#ifdef CONFIG_EARLY_PRINTK +#if CONFIG_EARLY_PRINTK || CONFIG_RANDOMIZE_BASE static unsigned long fs; static inline void set_fs(unsigned long seg) diff --git a/arch/x86/boot/compressed/cpuflags.c b/arch/x86/boot/compressed/cpuflags.c new file mode 100644 index 0000000..aa31346 --- /dev/null +++ b/arch/x86/boot/compressed/cpuflags.c @@ -0,0 +1,12 @@ +#ifdef CONFIG_RANDOMIZE_BASE + +#include "../cpuflags.c" + +bool has_cpuflag(int flag) +{ + get_cpuflags(); + + return test_bit(flag, cpu.flags); +} + +#endif diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index 5d6f6891..9116aac 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S @@ -117,9 +117,11 @@ preferred_addr: addl %eax, %ebx notl %eax andl %eax, %ebx -#else - movl $LOAD_PHYSICAL_ADDR, %ebx + cmpl $LOAD_PHYSICAL_ADDR, %ebx + jge 1f #endif + movl $LOAD_PHYSICAL_ADDR, %ebx +1: /* Target address to relocate to for decompression */ addl $z_extract_offset, %ebx @@ -191,14 +193,14 @@ relocated: leal boot_heap(%ebx), %eax pushl %eax /* heap area */ pushl %esi /* real mode pointer */ - call decompress_kernel + call decompress_kernel /* returns kernel location in %eax */ addl $24, %esp /* * Jump to the decompressed kernel. */ xorl %ebx, %ebx - jmp *%ebp + jmp *%eax /* * Stack and heap for uncompression diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index c337422..c5c1ae0 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -94,9 +94,11 @@ ENTRY(startup_32) addl %eax, %ebx notl %eax andl %eax, %ebx -#else - movl $LOAD_PHYSICAL_ADDR, %ebx + cmpl $LOAD_PHYSICAL_ADDR, %ebx + jge 1f #endif + movl $LOAD_PHYSICAL_ADDR, %ebx +1: /* Target address to relocate to for decompression */ addl $z_extract_offset, %ebx @@ -269,9 +271,11 @@ preferred_addr: addq %rax, %rbp notq %rax andq %rax, %rbp -#else - movq $LOAD_PHYSICAL_ADDR, %rbp + cmpq $LOAD_PHYSICAL_ADDR, %rbp + jge 1f #endif + movq $LOAD_PHYSICAL_ADDR, %rbp +1: /* Target address to relocate to for decompression */ leaq z_extract_offset(%rbp), %rbx @@ -339,13 +343,13 @@ relocated: movl $z_input_len, %ecx /* input_len */ movq %rbp, %r8 /* output target address */ movq $z_output_len, %r9 /* decompressed length */ - call decompress_kernel + call decompress_kernel /* returns kernel location in %rax */ popq %rsi /* * Jump to the decompressed kernel. 
*/ - jmp *%rbp + jmp *%rax .code32 no_longmode: diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 434f077..196eaf3 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -112,14 +112,8 @@ struct boot_params *real_mode; /* Pointer to real-mode data */ void *memset(void *s, int c, size_t n); void *memcpy(void *dest, const void *src, size_t n); -#ifdef CONFIG_X86_64 -#define memptr long -#else -#define memptr unsigned -#endif - -static memptr free_mem_ptr; -static memptr free_mem_end_ptr; +memptr free_mem_ptr; +memptr free_mem_end_ptr; static char *vidmem; static int vidport; @@ -395,7 +389,7 @@ static void parse_elf(void *output) free(phdrs); } -asmlinkage void decompress_kernel(void *rmode, memptr heap, +asmlinkage void *decompress_kernel(void *rmode, memptr heap, unsigned char *input_data, unsigned long input_len, unsigned char *output, @@ -422,6 +416,10 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap, free_mem_ptr = heap; /* Heap */ free_mem_end_ptr = heap + BOOT_HEAP_SIZE; + output = choose_kernel_location(input_data, input_len, + output, output_len); + + /* Validate memory location choices. */ if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1)) error("Destination address inappropriately aligned"); #ifdef CONFIG_X86_64 @@ -441,5 +439,5 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap, parse_elf(output); handle_relocations(output, output_len); debug_putstr("done.\nBooting the kernel.\n"); - return; + return output; } diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index 674019d..24e3e56 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -23,7 +23,15 @@ #define BOOT_BOOT_H #include "../ctype.h" +#ifdef CONFIG_X86_64 +#define memptr long +#else +#define memptr unsigned +#endif + /* misc.c */ +extern memptr free_mem_ptr; +extern memptr free_mem_end_ptr; extern struct boot_params *real_mode; /* Pointer to real-mode data */ void __putstr(const char *s); #define error_putstr(__x) __putstr(__x) @@ -39,23 +47,40 @@ static inline void debug_putstr(const char *s) #endif -#ifdef CONFIG_EARLY_PRINTK - +#if CONFIG_EARLY_PRINTK || CONFIG_RANDOMIZE_BASE /* cmdline.c */ int cmdline_find_option(const char *option, char *buffer, int bufsize); int cmdline_find_option_bool(const char *option); +#endif -/* early_serial_console.c */ -extern int early_serial_base; -void console_init(void); +#if CONFIG_RANDOMIZE_BASE +/* aslr.c */ +unsigned char *choose_kernel_location(unsigned char *input, + unsigned long input_size, + unsigned char *output, + unsigned long output_size); +/* cpuflags.c */ +bool has_cpuflag(int flag); #else +static inline +unsigned char *choose_kernel_location(unsigned char *input, + unsigned long input_size, + unsigned char *output, + unsigned long output_size) +{ + return output; +} +#endif +#ifdef CONFIG_EARLY_PRINTK /* early_serial_console.c */ +extern int early_serial_base; +void console_init(void); +#else static const int early_serial_base; static inline void console_init(void) { } - #endif #endif diff --git a/arch/x86/boot/copy.S b/arch/x86/boot/copy.S index 11f272c..1eb7d29 100644 --- a/arch/x86/boot/copy.S +++ b/arch/x86/boot/copy.S @@ -14,7 +14,7 @@ * Memory copy routines */ - .code16gcc + .code16 .text GLOBAL(memcpy) @@ -30,7 +30,7 @@ GLOBAL(memcpy) rep; movsb popw %di popw %si - ret + retl ENDPROC(memcpy) GLOBAL(memset) @@ -45,25 +45,25 @@ GLOBAL(memset) andw $3, %cx rep; stosb popw %di - ret + retl ENDPROC(memset) 
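Background on the copy.S hunks above and below: switching the file from .code16gcc to plain .code16 means gas no longer defaults call/ret to 32-bit operand size, so the explicit calll/retl forms are required. These helpers execute in real mode but are called from C code compiled with -m32, which pushes a 4-byte return address. A minimal sketch of the convention (illustrative only, not part of the patch; labels are hypothetical):

	.code16
caller:
	calll	helper		# 32-bit near call, pushes a 4-byte return address
	# ...
helper:
	# 16-bit real-mode body goes here
	retl			# 32-bit near return, pops the 4 bytes pushed by calll

This is why the ret instructions in these routines become retl and the memcpy call sites become calll: a plain 16-bit ret would pop only 2 bytes and return to the wrong address.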
GLOBAL(copy_from_fs) pushw %ds pushw %fs popw %ds - call memcpy + calll memcpy popw %ds - ret + retl ENDPROC(copy_from_fs) GLOBAL(copy_to_fs) pushw %es pushw %fs popw %es - call memcpy + calll memcpy popw %es - ret + retl ENDPROC(copy_to_fs) #if 0 /* Not currently used, but can be enabled as needed */ @@ -71,17 +71,17 @@ GLOBAL(copy_from_gs) pushw %ds pushw %gs popw %ds - call memcpy + calll memcpy popw %ds - ret + retl ENDPROC(copy_from_gs) GLOBAL(copy_to_gs) pushw %es pushw %gs popw %es - call memcpy + calll memcpy popw %es - ret + retl ENDPROC(copy_to_gs) #endif diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c index 4d3ff03..100a9a1 100644 --- a/arch/x86/boot/cpucheck.c +++ b/arch/x86/boot/cpucheck.c @@ -28,8 +28,6 @@ #include <asm/required-features.h> #include <asm/msr-index.h> -struct cpu_features cpu; -static u32 cpu_vendor[3]; static u32 err_flags[NCAPINTS]; static const int req_level = CONFIG_X86_MINIMUM_CPU_FAMILY; @@ -69,92 +67,8 @@ static int is_transmeta(void) cpu_vendor[2] == A32('M', 'x', '8', '6'); } -static int has_fpu(void) -{ - u16 fcw = -1, fsw = -1; - u32 cr0; - - asm("movl %%cr0,%0" : "=r" (cr0)); - if (cr0 & (X86_CR0_EM|X86_CR0_TS)) { - cr0 &= ~(X86_CR0_EM|X86_CR0_TS); - asm volatile("movl %0,%%cr0" : : "r" (cr0)); - } - - asm volatile("fninit ; fnstsw %0 ; fnstcw %1" - : "+m" (fsw), "+m" (fcw)); - - return fsw == 0 && (fcw & 0x103f) == 0x003f; -} - -static int has_eflag(u32 mask) -{ - u32 f0, f1; - - asm("pushfl ; " - "pushfl ; " - "popl %0 ; " - "movl %0,%1 ; " - "xorl %2,%1 ; " - "pushl %1 ; " - "popfl ; " - "pushfl ; " - "popl %1 ; " - "popfl" - : "=&r" (f0), "=&r" (f1) - : "ri" (mask)); - - return !!((f0^f1) & mask); -} - -static void get_flags(void) -{ - u32 max_intel_level, max_amd_level; - u32 tfms; - - if (has_fpu()) - set_bit(X86_FEATURE_FPU, cpu.flags); - - if (has_eflag(X86_EFLAGS_ID)) { - asm("cpuid" - : "=a" (max_intel_level), - "=b" (cpu_vendor[0]), - "=d" (cpu_vendor[1]), - "=c" (cpu_vendor[2]) - : "a" (0)); - - if (max_intel_level >= 0x00000001 && - max_intel_level <= 0x0000ffff) { - asm("cpuid" - : "=a" (tfms), - "=c" (cpu.flags[4]), - "=d" (cpu.flags[0]) - : "a" (0x00000001) - : "ebx"); - cpu.level = (tfms >> 8) & 15; - cpu.model = (tfms >> 4) & 15; - if (cpu.level >= 6) - cpu.model += ((tfms >> 16) & 0xf) << 4; - } - - asm("cpuid" - : "=a" (max_amd_level) - : "a" (0x80000000) - : "ebx", "ecx", "edx"); - - if (max_amd_level >= 0x80000001 && - max_amd_level <= 0x8000ffff) { - u32 eax = 0x80000001; - asm("cpuid" - : "+a" (eax), - "=c" (cpu.flags[6]), - "=d" (cpu.flags[1]) - : : "ebx"); - } - } -} - /* Returns a bitmask of which words we have error bits in */ -static int check_flags(void) +static int check_cpuflags(void) { u32 err; int i; @@ -187,8 +101,8 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) if (has_eflag(X86_EFLAGS_AC)) cpu.level = 4; - get_flags(); - err = check_flags(); + get_cpuflags(); + err = check_cpuflags(); if (test_bit(X86_FEATURE_LM, cpu.flags)) cpu.level = 64; @@ -207,8 +121,8 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) eax &= ~(1 << 15); asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); - get_flags(); /* Make sure it really did something */ - err = check_flags(); + get_cpuflags(); /* Make sure it really did something */ + err = check_cpuflags(); } else if (err == 0x01 && !(err_flags[0] & ~(1 << X86_FEATURE_CX8)) && is_centaur() && cpu.model >= 6) { @@ -223,7 +137,7 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) 
asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); set_bit(X86_FEATURE_CX8, cpu.flags); - err = check_flags(); + err = check_cpuflags(); } else if (err == 0x01 && is_transmeta()) { /* Transmeta might have masked feature bits in word 0 */ @@ -238,7 +152,7 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) : : "ecx", "ebx"); asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); - err = check_flags(); + err = check_cpuflags(); } if (err_flags_ptr) diff --git a/arch/x86/boot/cpuflags.c b/arch/x86/boot/cpuflags.c new file mode 100644 index 0000000..a9fcb7c --- /dev/null +++ b/arch/x86/boot/cpuflags.c @@ -0,0 +1,104 @@ +#include <linux/types.h> +#include "bitops.h" + +#include <asm/processor-flags.h> +#include <asm/required-features.h> +#include <asm/msr-index.h> +#include "cpuflags.h" + +struct cpu_features cpu; +u32 cpu_vendor[3]; + +static bool loaded_flags; + +static int has_fpu(void) +{ + u16 fcw = -1, fsw = -1; + unsigned long cr0; + + asm volatile("mov %%cr0,%0" : "=r" (cr0)); + if (cr0 & (X86_CR0_EM|X86_CR0_TS)) { + cr0 &= ~(X86_CR0_EM|X86_CR0_TS); + asm volatile("mov %0,%%cr0" : : "r" (cr0)); + } + + asm volatile("fninit ; fnstsw %0 ; fnstcw %1" + : "+m" (fsw), "+m" (fcw)); + + return fsw == 0 && (fcw & 0x103f) == 0x003f; +} + +int has_eflag(unsigned long mask) +{ + unsigned long f0, f1; + + asm volatile("pushf \n\t" + "pushf \n\t" + "pop %0 \n\t" + "mov %0,%1 \n\t" + "xor %2,%1 \n\t" + "push %1 \n\t" + "popf \n\t" + "pushf \n\t" + "pop %1 \n\t" + "popf" + : "=&r" (f0), "=&r" (f1) + : "ri" (mask)); + + return !!((f0^f1) & mask); +} + +/* Handle x86_32 PIC using ebx. */ +#if defined(__i386__) && defined(__PIC__) +# define EBX_REG "=r" +#else +# define EBX_REG "=b" +#endif + +static inline void cpuid(u32 id, u32 *a, u32 *b, u32 *c, u32 *d) +{ + asm volatile(".ifnc %%ebx,%3 ; movl %%ebx,%3 ; .endif \n\t" + "cpuid \n\t" + ".ifnc %%ebx,%3 ; xchgl %%ebx,%3 ; .endif \n\t" + : "=a" (*a), "=c" (*c), "=d" (*d), EBX_REG (*b) + : "a" (id) + ); +} + +void get_cpuflags(void) +{ + u32 max_intel_level, max_amd_level; + u32 tfms; + u32 ignored; + + if (loaded_flags) + return; + loaded_flags = true; + + if (has_fpu()) + set_bit(X86_FEATURE_FPU, cpu.flags); + + if (has_eflag(X86_EFLAGS_ID)) { + cpuid(0x0, &max_intel_level, &cpu_vendor[0], &cpu_vendor[2], + &cpu_vendor[1]); + + if (max_intel_level >= 0x00000001 && + max_intel_level <= 0x0000ffff) { + cpuid(0x1, &tfms, &ignored, &cpu.flags[4], + &cpu.flags[0]); + cpu.level = (tfms >> 8) & 15; + cpu.model = (tfms >> 4) & 15; + if (cpu.level >= 6) + cpu.model += ((tfms >> 16) & 0xf) << 4; + } + + cpuid(0x80000000, &max_amd_level, &ignored, &ignored, + &ignored); + + if (max_amd_level >= 0x80000001 && + max_amd_level <= 0x8000ffff) { + cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6], + &cpu.flags[1]); + } + } +} diff --git a/arch/x86/boot/cpuflags.h b/arch/x86/boot/cpuflags.h new file mode 100644 index 0000000..ea97697 --- /dev/null +++ b/arch/x86/boot/cpuflags.h @@ -0,0 +1,19 @@ +#ifndef BOOT_CPUFLAGS_H +#define BOOT_CPUFLAGS_H + +#include <asm/cpufeature.h> +#include <asm/processor-flags.h> + +struct cpu_features { + int level; /* Family, or 64 for x86-64 */ + int model; + u32 flags[NCAPINTS]; +}; + +extern struct cpu_features cpu; +extern u32 cpu_vendor[3]; + +int has_eflag(unsigned long mask); +void get_cpuflags(void); + +#endif diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index 9ec06a1..ec3b8ba 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S @@ -391,7 +391,14 @@ xloadflags: #else # define XLF23 0 
#endif - .word XLF0 | XLF1 | XLF23 + +#if defined(CONFIG_X86_64) && defined(CONFIG_EFI) && defined(CONFIG_KEXEC) +# define XLF4 XLF_EFI_KEXEC +#else +# define XLF4 0 +#endif + + .word XLF0 | XLF1 | XLF23 | XLF4 cmdline_size: .long COMMAND_LINE_SIZE-1 #length of the command line, #added with boot protocol diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 7d6ba9d..e0fc24d 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -3,8 +3,9 @@ # avx_supported := $(call as-instr,vpxor %xmm0$(comma)%xmm0$(comma)%xmm0,yes,no) +avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\ + $(comma)4)$(comma)%ymm2,yes,no) -obj-$(CONFIG_CRYPTO_ABLK_HELPER_X86) += ablk_helper.o obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o diff --git a/arch/x86/crypto/ablk_helper.c b/arch/x86/crypto/ablk_helper.c deleted file mode 100644 index 43282fe..0000000 --- a/arch/x86/crypto/ablk_helper.c +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Shared async block cipher helpers - * - * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> - * - * Based on aesni-intel_glue.c by: - * Copyright (C) 2008, Intel Corp. - * Author: Huang Ying <ying.huang@intel.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 - * USA - * - */ - -#include <linux/kernel.h> -#include <linux/crypto.h> -#include <linux/init.h> -#include <linux/module.h> -#include <crypto/algapi.h> -#include <crypto/cryptd.h> -#include <asm/i387.h> -#include <asm/crypto/ablk_helper.h> - -int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key, - unsigned int key_len) -{ - struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); - struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base; - int err; - - crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm) - & CRYPTO_TFM_REQ_MASK); - err = crypto_ablkcipher_setkey(child, key, key_len); - crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child) - & CRYPTO_TFM_RES_MASK); - return err; -} -EXPORT_SYMBOL_GPL(ablk_set_key); - -int __ablk_encrypt(struct ablkcipher_request *req) -{ - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); - struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); - struct blkcipher_desc desc; - - desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); - desc.info = req->info; - desc.flags = 0; - - return crypto_blkcipher_crt(desc.tfm)->encrypt( - &desc, req->dst, req->src, req->nbytes); -} -EXPORT_SYMBOL_GPL(__ablk_encrypt); - -int ablk_encrypt(struct ablkcipher_request *req) -{ - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); - struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); - - if (!irq_fpu_usable()) { - struct ablkcipher_request *cryptd_req = - ablkcipher_request_ctx(req); - - memcpy(cryptd_req, 
req, sizeof(*req)); - ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); - - return crypto_ablkcipher_encrypt(cryptd_req); - } else { - return __ablk_encrypt(req); - } -} -EXPORT_SYMBOL_GPL(ablk_encrypt); - -int ablk_decrypt(struct ablkcipher_request *req) -{ - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); - struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); - - if (!irq_fpu_usable()) { - struct ablkcipher_request *cryptd_req = - ablkcipher_request_ctx(req); - - memcpy(cryptd_req, req, sizeof(*req)); - ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); - - return crypto_ablkcipher_decrypt(cryptd_req); - } else { - struct blkcipher_desc desc; - - desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); - desc.info = req->info; - desc.flags = 0; - - return crypto_blkcipher_crt(desc.tfm)->decrypt( - &desc, req->dst, req->src, req->nbytes); - } -} -EXPORT_SYMBOL_GPL(ablk_decrypt); - -void ablk_exit(struct crypto_tfm *tfm) -{ - struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm); - - cryptd_free_ablkcipher(ctx->cryptd_tfm); -} -EXPORT_SYMBOL_GPL(ablk_exit); - -int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name) -{ - struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm); - struct cryptd_ablkcipher *cryptd_tfm; - - cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, 0, 0); - if (IS_ERR(cryptd_tfm)) - return PTR_ERR(cryptd_tfm); - - ctx->cryptd_tfm = cryptd_tfm; - tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) + - crypto_ablkcipher_reqsize(&cryptd_tfm->base); - - return 0; -} -EXPORT_SYMBOL_GPL(ablk_init_common); - -int ablk_init(struct crypto_tfm *tfm) -{ - char drv_name[CRYPTO_MAX_ALG_NAME]; - - snprintf(drv_name, sizeof(drv_name), "__driver-%s", - crypto_tfm_alg_driver_name(tfm)); - - return ablk_init_common(tfm, drv_name); -} -EXPORT_SYMBOL_GPL(ablk_init); - -MODULE_LICENSE("GPL"); diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index f80e668..835488b 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -34,7 +34,7 @@ #include <asm/cpu_device_id.h> #include <asm/i387.h> #include <asm/crypto/aes.h> -#include <asm/crypto/ablk_helper.h> +#include <crypto/ablk_helper.h> #include <crypto/scatterwalk.h> #include <crypto/internal/aead.h> #include <linux/workqueue.h> diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c index 414fe5d..4209a76 100644 --- a/arch/x86/crypto/camellia_aesni_avx2_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c @@ -14,6 +14,7 @@ #include <linux/types.h> #include <linux/crypto.h> #include <linux/err.h> +#include <crypto/ablk_helper.h> #include <crypto/algapi.h> #include <crypto/ctr.h> #include <crypto/lrw.h> @@ -21,7 +22,6 @@ #include <asm/xcr.h> #include <asm/xsave.h> #include <asm/crypto/camellia.h> -#include <asm/crypto/ablk_helper.h> #include <asm/crypto/glue_helper.h> #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16 diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c index 37fd0c0..87a041a 100644 --- a/arch/x86/crypto/camellia_aesni_avx_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx_glue.c @@ -14,6 +14,7 @@ #include <linux/types.h> #include <linux/crypto.h> #include <linux/err.h> +#include <crypto/ablk_helper.h> #include <crypto/algapi.h> #include <crypto/ctr.h> #include <crypto/lrw.h> @@ -21,7 +22,6 @@ #include <asm/xcr.h> #include <asm/xsave.h> #include <asm/crypto/camellia.h> -#include <asm/crypto/ablk_helper.h> #include 
<asm/crypto/glue_helper.h> #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16 diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c index c6631813..e6a3700 100644 --- a/arch/x86/crypto/cast5_avx_glue.c +++ b/arch/x86/crypto/cast5_avx_glue.c @@ -26,13 +26,13 @@ #include <linux/types.h> #include <linux/crypto.h> #include <linux/err.h> +#include <crypto/ablk_helper.h> #include <crypto/algapi.h> #include <crypto/cast5.h> #include <crypto/cryptd.h> #include <crypto/ctr.h> #include <asm/xcr.h> #include <asm/xsave.h> -#include <asm/crypto/ablk_helper.h> #include <asm/crypto/glue_helper.h> #define CAST5_PARALLEL_BLOCKS 16 diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c index 8d0dfb8..09f3677 100644 --- a/arch/x86/crypto/cast6_avx_glue.c +++ b/arch/x86/crypto/cast6_avx_glue.c @@ -28,6 +28,7 @@ #include <linux/types.h> #include <linux/crypto.h> #include <linux/err.h> +#include <crypto/ablk_helper.h> #include <crypto/algapi.h> #include <crypto/cast6.h> #include <crypto/cryptd.h> @@ -37,7 +38,6 @@ #include <crypto/xts.h> #include <asm/xcr.h> #include <asm/xsave.h> -#include <asm/crypto/ablk_helper.h> #include <asm/crypto/glue_helper.h> #define CAST6_PARALLEL_BLOCKS 8 diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c index 23aabc6..2fae489 100644 --- a/arch/x86/crypto/serpent_avx2_glue.c +++ b/arch/x86/crypto/serpent_avx2_glue.c @@ -14,6 +14,7 @@ #include <linux/types.h> #include <linux/crypto.h> #include <linux/err.h> +#include <crypto/ablk_helper.h> #include <crypto/algapi.h> #include <crypto/ctr.h> #include <crypto/lrw.h> @@ -22,7 +23,6 @@ #include <asm/xcr.h> #include <asm/xsave.h> #include <asm/crypto/serpent-avx.h> -#include <asm/crypto/ablk_helper.h> #include <asm/crypto/glue_helper.h> #define SERPENT_AVX2_PARALLEL_BLOCKS 16 diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c index 9ae83cf..ff48708 100644 --- a/arch/x86/crypto/serpent_avx_glue.c +++ b/arch/x86/crypto/serpent_avx_glue.c @@ -28,6 +28,7 @@ #include <linux/types.h> #include <linux/crypto.h> #include <linux/err.h> +#include <crypto/ablk_helper.h> #include <crypto/algapi.h> #include <crypto/serpent.h> #include <crypto/cryptd.h> @@ -38,7 +39,6 @@ #include <asm/xcr.h> #include <asm/xsave.h> #include <asm/crypto/serpent-avx.h> -#include <asm/crypto/ablk_helper.h> #include <asm/crypto/glue_helper.h> /* 8-way parallel cipher functions */ diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c index 97a356e..8c95f86 100644 --- a/arch/x86/crypto/serpent_sse2_glue.c +++ b/arch/x86/crypto/serpent_sse2_glue.c @@ -34,6 +34,7 @@ #include <linux/types.h> #include <linux/crypto.h> #include <linux/err.h> +#include <crypto/ablk_helper.h> #include <crypto/algapi.h> #include <crypto/serpent.h> #include <crypto/cryptd.h> @@ -42,7 +43,6 @@ #include <crypto/lrw.h> #include <crypto/xts.h> #include <asm/crypto/serpent-sse2.h> -#include <asm/crypto/ablk_helper.h> #include <asm/crypto/glue_helper.h> static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src) diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c index 50226c4..f248546 100644 --- a/arch/x86/crypto/sha256_ssse3_glue.c +++ b/arch/x86/crypto/sha256_ssse3_glue.c @@ -281,7 +281,7 @@ static int __init sha256_ssse3_mod_init(void) /* allow AVX to override SSSE3, it's a little faster */ if (avx_usable()) { #ifdef CONFIG_AS_AVX2 - if (boot_cpu_has(X86_FEATURE_AVX2)) + if (boot_cpu_has(X86_FEATURE_AVX2) && 
boot_cpu_has(X86_FEATURE_BMI2)) sha256_transform_asm = sha256_transform_rorx; else #endif @@ -319,4 +319,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated"); MODULE_ALIAS("sha256"); -MODULE_ALIAS("sha384"); +MODULE_ALIAS("sha224"); diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c index a62ba54..4e3c665 100644 --- a/arch/x86/crypto/twofish_avx_glue.c +++ b/arch/x86/crypto/twofish_avx_glue.c @@ -28,6 +28,7 @@ #include <linux/types.h> #include <linux/crypto.h> #include <linux/err.h> +#include <crypto/ablk_helper.h> #include <crypto/algapi.h> #include <crypto/twofish.h> #include <crypto/cryptd.h> @@ -39,7 +40,6 @@ #include <asm/xcr.h> #include <asm/xsave.h> #include <asm/crypto/twofish.h> -#include <asm/crypto/ablk_helper.h> #include <asm/crypto/glue_helper.h> #include <crypto/scatterwalk.h> #include <linux/workqueue.h> diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h index 0d9ec77..e6a9245 100644 --- a/arch/x86/include/asm/archrandom.h +++ b/arch/x86/include/asm/archrandom.h @@ -39,6 +39,20 @@ #ifdef CONFIG_ARCH_RANDOM +/* Instead of arch_get_random_long() when alternatives haven't run. */ +static inline int rdrand_long(unsigned long *v) +{ + int ok; + asm volatile("1: " RDRAND_LONG "\n\t" + "jc 2f\n\t" + "decl %0\n\t" + "jnz 1b\n\t" + "2:" + : "=r" (ok), "=a" (*v) + : "0" (RDRAND_RETRY_LOOPS)); + return ok; +} + #define GET_RANDOM(name, type, rdrand, nop) \ static inline int name(type *v) \ { \ @@ -68,6 +82,13 @@ GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP3); #endif /* CONFIG_X86_64 */ +#else + +static inline int rdrand_long(unsigned long *v) +{ + return 0; +} + #endif /* CONFIG_ARCH_RANDOM */ extern void x86_init_rdrand(struct cpuinfo_x86 *c); diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index da31c8b..b17f4f4 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -77,7 +77,7 @@ static inline void atomic_sub(int i, atomic_t *v) */ static inline int atomic_sub_and_test(int i, atomic_t *v) { - GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, i, "%0", "e"); + GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e"); } /** @@ -141,7 +141,7 @@ static inline int atomic_inc_and_test(atomic_t *v) */ static inline int atomic_add_negative(int i, atomic_t *v) { - GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, i, "%0", "s"); + GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s"); } /** diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h index 3f065c9..46e9052 100644 --- a/arch/x86/include/asm/atomic64_64.h +++ b/arch/x86/include/asm/atomic64_64.h @@ -72,7 +72,7 @@ static inline void atomic64_sub(long i, atomic64_t *v) */ static inline int atomic64_sub_and_test(long i, atomic64_t *v) { - GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, i, "%0", "e"); + GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e"); } /** @@ -138,7 +138,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v) */ static inline int atomic64_add_negative(long i, atomic64_t *v) { - GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, i, "%0", "s"); + GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s"); } /** diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index c6cd358..04a4890 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h @@ -92,12 +92,53 @@ #endif #define 
smp_read_barrier_depends() read_barrier_depends() #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) -#else +#else /* !SMP */ #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() #define smp_read_barrier_depends() do { } while (0) #define set_mb(var, value) do { var = value; barrier(); } while (0) +#endif /* SMP */ + +#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) + +/* + * For either of these options x86 doesn't have a strong TSO memory + * model and we should fall back to full barriers. + */ + +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ___p1; \ +}) + +#else /* regular x86 TSO memory ordering */ + +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ___p1; \ +}) + #endif /* diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 6d76d09..9fc1af7 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -205,7 +205,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr) */ static inline int test_and_set_bit(long nr, volatile unsigned long *addr) { - GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, nr, "%0", "c"); + GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c"); } /** @@ -251,7 +251,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr) */ static inline int test_and_clear_bit(long nr, volatile unsigned long *addr) { - GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, nr, "%0", "c"); + GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c"); } /** @@ -304,7 +304,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr) */ static inline int test_and_change_bit(long nr, volatile unsigned long *addr) { - GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, nr, "%0", "c"); + GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c"); } static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr) diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 89270b43..e099f95 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -216,6 +216,7 @@ #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */ #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */ #define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */ +#define X86_FEATURE_MPX (9*32+14) /* Memory Protection Extension */ #define X86_FEATURE_RDSEED (9*32+18) /* The RDSEED instruction */ #define X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */ #define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */ diff --git a/arch/x86/include/asm/crypto/ablk_helper.h b/arch/x86/include/asm/crypto/ablk_helper.h deleted file mode 100644 index 4f93df5..0000000 --- a/arch/x86/include/asm/crypto/ablk_helper.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Shared async block cipher helpers - */ - -#ifndef _CRYPTO_ABLK_HELPER_H -#define _CRYPTO_ABLK_HELPER_H - -#include <linux/crypto.h> -#include <linux/kernel.h> -#include <crypto/cryptd.h> - -struct async_helper_ctx { - 
struct cryptd_ablkcipher *cryptd_tfm; -}; - -extern int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key, - unsigned int key_len); - -extern int __ablk_encrypt(struct ablkcipher_request *req); - -extern int ablk_encrypt(struct ablkcipher_request *req); - -extern int ablk_decrypt(struct ablkcipher_request *req); - -extern void ablk_exit(struct crypto_tfm *tfm); - -extern int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name); - -extern int ablk_init(struct crypto_tfm *tfm); - -#endif /* _CRYPTO_ABLK_HELPER_H */ diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 65c6e6e..3b978c4 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -1,6 +1,24 @@ #ifndef _ASM_X86_EFI_H #define _ASM_X86_EFI_H +/* + * We map the EFI regions needed for runtime services non-contiguously, + * with preserved alignment on virtual addresses starting from -4G down + * for a total max space of 64G. This way, we provide for stable runtime + * services addresses across kernels so that a kexec'd kernel can still + * use them. + * + * This is the main reason why we're doing stable VA mappings for RT + * services. + * + * This flag is used in conjuction with a chicken bit called + * "efi=old_map" which can be used as a fallback to the old runtime + * services mapping method in case there's some b0rkage with a + * particular EFI implementation (haha, it is hard to hold up the + * sarcasm here...). + */ +#define EFI_OLD_MEMMAP EFI_ARCH_1 + #ifdef CONFIG_X86_32 #define EFI_LOADER_SIGNATURE "EL32" @@ -69,24 +87,31 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3, efi_call6((f), (u64)(a1), (u64)(a2), (u64)(a3), \ (u64)(a4), (u64)(a5), (u64)(a6)) +#define _efi_call_virtX(x, f, ...) \ +({ \ + efi_status_t __s; \ + \ + efi_sync_low_kernel_mappings(); \ + preempt_disable(); \ + __s = efi_call##x((void *)efi.systab->runtime->f, __VA_ARGS__); \ + preempt_enable(); \ + __s; \ +}) + #define efi_call_virt0(f) \ - efi_call0((efi.systab->runtime->f)) -#define efi_call_virt1(f, a1) \ - efi_call1((efi.systab->runtime->f), (u64)(a1)) -#define efi_call_virt2(f, a1, a2) \ - efi_call2((efi.systab->runtime->f), (u64)(a1), (u64)(a2)) -#define efi_call_virt3(f, a1, a2, a3) \ - efi_call3((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ - (u64)(a3)) -#define efi_call_virt4(f, a1, a2, a3, a4) \ - efi_call4((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ - (u64)(a3), (u64)(a4)) -#define efi_call_virt5(f, a1, a2, a3, a4, a5) \ - efi_call5((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ - (u64)(a3), (u64)(a4), (u64)(a5)) -#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \ - efi_call6((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ - (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) + _efi_call_virtX(0, f) +#define efi_call_virt1(f, a1) \ + _efi_call_virtX(1, f, (u64)(a1)) +#define efi_call_virt2(f, a1, a2) \ + _efi_call_virtX(2, f, (u64)(a1), (u64)(a2)) +#define efi_call_virt3(f, a1, a2, a3) \ + _efi_call_virtX(3, f, (u64)(a1), (u64)(a2), (u64)(a3)) +#define efi_call_virt4(f, a1, a2, a3, a4) \ + _efi_call_virtX(4, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4)) +#define efi_call_virt5(f, a1, a2, a3, a4, a5) \ + _efi_call_virtX(5, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4), (u64)(a5)) +#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \ + _efi_call_virtX(6, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, u32 type, u64 attribute); @@ -95,12 +120,28 @@ extern void __iomem 
*efi_ioremap(unsigned long addr, unsigned long size, extern int add_efi_memmap; extern unsigned long x86_efi_facility; +extern struct efi_scratch efi_scratch; extern void efi_set_executable(efi_memory_desc_t *md, bool executable); extern int efi_memblock_x86_reserve_range(void); extern void efi_call_phys_prelog(void); extern void efi_call_phys_epilog(void); extern void efi_unmap_memmap(void); extern void efi_memory_uc(u64 addr, unsigned long size); +extern void __init efi_map_region(efi_memory_desc_t *md); +extern void __init efi_map_region_fixed(efi_memory_desc_t *md); +extern void efi_sync_low_kernel_mappings(void); +extern void efi_setup_page_tables(void); +extern void __init old_map_region(efi_memory_desc_t *md); + +struct efi_setup_data { + u64 fw_vendor; + u64 runtime; + u64 tables; + u64 smbios; + u64 reserved[8]; +}; + +extern u64 efi_setup; #ifdef CONFIG_EFI @@ -110,7 +151,7 @@ static inline bool efi_is_native(void) } extern struct console early_efi_console; - +extern void parse_efi_setup(u64 phys_addr, u32 data_len); #else /* * IF EFI is not configured, have the EFI calls return -ENOSYS. @@ -122,6 +163,7 @@ extern struct console early_efi_console; #define efi_call4(_f, _a1, _a2, _a3, _a4) (-ENOSYS) #define efi_call5(_f, _a1, _a2, _a3, _a4, _a5) (-ENOSYS) #define efi_call6(_f, _a1, _a2, _a3, _a4, _a5, _a6) (-ENOSYS) +static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {} #endif /* CONFIG_EFI */ #endif /* _ASM_X86_EFI_H */ diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h index c49a613..cea1c76 100644 --- a/arch/x86/include/asm/fpu-internal.h +++ b/arch/x86/include/asm/fpu-internal.h @@ -293,12 +293,13 @@ static inline int restore_fpu_checking(struct task_struct *tsk) /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is pending. Clear the x87 state here by setting it to fixed values. 
"m" is a random variable that should be in L1 */ - alternative_input( - ASM_NOP8 ASM_NOP2, - "emms\n\t" /* clear stack tags */ - "fildl %P[addr]", /* set F?P to defined value */ - X86_FEATURE_FXSAVE_LEAK, - [addr] "m" (tsk->thread.fpu.has_fpu)); + if (unlikely(static_cpu_has(X86_FEATURE_FXSAVE_LEAK))) { + asm volatile( + "fnclex\n\t" + "emms\n\t" + "fildl %P[addr]" /* set F?P to defined value */ + : : [addr] "m" (tsk->thread.fpu.has_fpu)); + } return fpu_restore_checking(&tsk->thread.fpu); } diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h index be27ba1..b4c1f54 100644 --- a/arch/x86/include/asm/futex.h +++ b/arch/x86/include/asm/futex.h @@ -110,26 +110,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) { - int ret = 0; - - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) - return -EFAULT; - - asm volatile("\t" ASM_STAC "\n" - "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" - "2:\t" ASM_CLAC "\n" - "\t.section .fixup, \"ax\"\n" - "3:\tmov %3, %0\n" - "\tjmp 2b\n" - "\t.previous\n" - _ASM_EXTABLE(1b, 3b) - : "+r" (ret), "=a" (oldval), "+m" (*uaddr) - : "i" (-EFAULT), "r" (newval), "1" (oldval) - : "memory" - ); - - *uval = oldval; - return ret; + return user_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval); } #endif diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index cba45d9..67d69b8 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -191,6 +191,9 @@ extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void); #define trace_interrupt interrupt #endif +#define VECTOR_UNDEFINED -1 +#define VECTOR_RETRIGGERED -2 + typedef int vector_irq_t[NR_VECTORS]; DECLARE_PER_CPU(vector_irq_t, vector_irq); extern void setup_vector_irq(int cpu); diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h index 459769d..e34e097 100644 --- a/arch/x86/include/asm/intel-mid.h +++ b/arch/x86/include/asm/intel-mid.h @@ -51,10 +51,41 @@ struct devs_id { enum intel_mid_cpu_type { /* 1 was Moorestown */ INTEL_MID_CPU_CHIP_PENWELL = 2, + INTEL_MID_CPU_CHIP_CLOVERVIEW, + INTEL_MID_CPU_CHIP_TANGIER, }; extern enum intel_mid_cpu_type __intel_mid_cpu_chip; +/** + * struct intel_mid_ops - Interface between intel-mid & sub archs + * @arch_setup: arch_setup function to re-initialize platform + * structures (x86_init, x86_platform_init) + * + * This structure can be extended if any new interface is required + * between intel-mid & its sub arch files. + */ +struct intel_mid_ops { + void (*arch_setup)(void); +}; + +/* Helper API's for INTEL_MID_OPS_INIT */ +#define DECLARE_INTEL_MID_OPS_INIT(cpuname, cpuid) \ + [cpuid] = get_##cpuname##_ops + +/* Maximum number of CPU ops */ +#define MAX_CPU_OPS(a) (sizeof(a)/sizeof(void *)) + +/* + * For every new cpu addition, a weak get_<cpuname>_ops() function needs be + * declared in arch/x86/platform/intel_mid/intel_mid_weak_decls.h. 
+ */ +#define INTEL_MID_OPS_INIT {\ + DECLARE_INTEL_MID_OPS_INIT(penwell, INTEL_MID_CPU_CHIP_PENWELL), \ + DECLARE_INTEL_MID_OPS_INIT(cloverview, INTEL_MID_CPU_CHIP_CLOVERVIEW), \ + DECLARE_INTEL_MID_OPS_INIT(tangier, INTEL_MID_CPU_CHIP_TANGIER) \ +}; + #ifdef CONFIG_X86_INTEL_MID static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void) @@ -86,8 +117,21 @@ extern enum intel_mid_timer_options intel_mid_timer_options; * Penwell uses spread spectrum clock, so the freq number is not exactly * the same as reported by MSR based on SDM. */ -#define PENWELL_FSB_FREQ_83SKU 83200 -#define PENWELL_FSB_FREQ_100SKU 99840 +#define FSB_FREQ_83SKU 83200 +#define FSB_FREQ_100SKU 99840 +#define FSB_FREQ_133SKU 133000 + +#define FSB_FREQ_167SKU 167000 +#define FSB_FREQ_200SKU 200000 +#define FSB_FREQ_267SKU 267000 +#define FSB_FREQ_333SKU 333000 +#define FSB_FREQ_400SKU 400000 + +/* Bus Select SoC Fuse value */ +#define BSEL_SOC_FUSE_MASK 0x7 +#define BSEL_SOC_FUSE_001 0x1 /* FSB 133MHz */ +#define BSEL_SOC_FUSE_101 0x5 /* FSB 100MHz */ +#define BSEL_SOC_FUSE_111 0x7 /* FSB 83MHz */ #define SFI_MTMR_MAX_NUM 8 #define SFI_MRTC_MAX 8 diff --git a/arch/x86/include/asm/iosf_mbi.h b/arch/x86/include/asm/iosf_mbi.h new file mode 100644 index 0000000..8e71c79 --- /dev/null +++ b/arch/x86/include/asm/iosf_mbi.h @@ -0,0 +1,90 @@ +/* + * iosf_mbi.h: Intel OnChip System Fabric MailBox access support + */ + +#ifndef IOSF_MBI_SYMS_H +#define IOSF_MBI_SYMS_H + +#define MBI_MCR_OFFSET 0xD0 +#define MBI_MDR_OFFSET 0xD4 +#define MBI_MCRX_OFFSET 0xD8 + +#define MBI_RD_MASK 0xFEFFFFFF +#define MBI_WR_MASK 0X01000000 + +#define MBI_MASK_HI 0xFFFFFF00 +#define MBI_MASK_LO 0x000000FF +#define MBI_ENABLE 0xF0 + +/* Baytrail available units */ +#define BT_MBI_UNIT_AUNIT 0x00 +#define BT_MBI_UNIT_SMC 0x01 +#define BT_MBI_UNIT_CPU 0x02 +#define BT_MBI_UNIT_BUNIT 0x03 +#define BT_MBI_UNIT_PMC 0x04 +#define BT_MBI_UNIT_GFX 0x06 +#define BT_MBI_UNIT_SMI 0x0C +#define BT_MBI_UNIT_USB 0x43 +#define BT_MBI_UNIT_SATA 0xA3 +#define BT_MBI_UNIT_PCIE 0xA6 + +/* Baytrail read/write opcodes */ +#define BT_MBI_AUNIT_READ 0x10 +#define BT_MBI_AUNIT_WRITE 0x11 +#define BT_MBI_SMC_READ 0x10 +#define BT_MBI_SMC_WRITE 0x11 +#define BT_MBI_CPU_READ 0x10 +#define BT_MBI_CPU_WRITE 0x11 +#define BT_MBI_BUNIT_READ 0x10 +#define BT_MBI_BUNIT_WRITE 0x11 +#define BT_MBI_PMC_READ 0x06 +#define BT_MBI_PMC_WRITE 0x07 +#define BT_MBI_GFX_READ 0x00 +#define BT_MBI_GFX_WRITE 0x01 +#define BT_MBI_SMIO_READ 0x06 +#define BT_MBI_SMIO_WRITE 0x07 +#define BT_MBI_USB_READ 0x06 +#define BT_MBI_USB_WRITE 0x07 +#define BT_MBI_SATA_READ 0x00 +#define BT_MBI_SATA_WRITE 0x01 +#define BT_MBI_PCIE_READ 0x00 +#define BT_MBI_PCIE_WRITE 0x01 + +/** + * iosf_mbi_read() - MailBox Interface read command + * @port: port indicating subunit being accessed + * @opcode: port specific read or write opcode + * @offset: register address offset + * @mdr: register data to be read + * + * Locking is handled by spinlock - cannot sleep. + * Return: Nonzero on error + */ +int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr); + +/** + * iosf_mbi_write() - MailBox unmasked write command + * @port: port indicating subunit being accessed + * @opcode: port specific read or write opcode + * @offset: register address offset + * @mdr: register data to be written + * + * Locking is handled by spinlock - cannot sleep. 
+ * Return: Nonzero on error + */ +int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr); + +/** + * iosf_mbi_modify() - MailBox masked write command + * @port: port indicating subunit being accessed + * @opcode: port specific read or write opcode + * @offset: register address offset + * @mdr: register data being modified + * @mask: mask indicating bits in mdr to be modified + * + * Locking is handled by spinlock - cannot sleep. + * Return: Nonzero on error + */ +int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask); + +#endif /* IOSF_MBI_SYMS_H */ diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index 0ea10f27..cb6cfcd 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h @@ -25,6 +25,7 @@ extern void irq_ctx_init(int cpu); #ifdef CONFIG_HOTPLUG_CPU #include <linux/cpumask.h> +extern int check_irq_vectors_for_cpu_disable(void); extern void fixup_irqs(void); extern void irq_force_complete_move(int); #endif diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h index 5b23e60..4ad6560 100644 --- a/arch/x86/include/asm/local.h +++ b/arch/x86/include/asm/local.h @@ -52,7 +52,7 @@ static inline void local_sub(long i, local_t *l) */ static inline int local_sub_and_test(long i, local_t *l) { - GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, i, "%0", "e"); + GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e"); } /** @@ -92,7 +92,7 @@ static inline int local_inc_and_test(local_t *l) */ static inline int local_add_negative(long i, local_t *l) { - GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, i, "%0", "s"); + GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s"); } /** diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index c696a86..6e4ce2d 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -118,7 +118,6 @@ extern void mce_register_decode_chain(struct notifier_block *nb); extern void mce_unregister_decode_chain(struct notifier_block *nb); #include <linux/percpu.h> -#include <linux/init.h> #include <linux/atomic.h> extern int mce_p5_enabled; diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index f98bd66..b59827e 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -1,6 +1,21 @@ #ifndef _ASM_X86_MICROCODE_H #define _ASM_X86_MICROCODE_H +#define native_rdmsr(msr, val1, val2) \ +do { \ + u64 __val = native_read_msr((msr)); \ + (void)((val1) = (u32)__val); \ + (void)((val2) = (u32)(__val >> 32)); \ +} while (0) + +#define native_wrmsr(msr, low, high) \ + native_write_msr(msr, low, high) + +#define native_wrmsrl(msr, val) \ + native_write_msr((msr), \ + (u32)((u64)(val)), \ + (u32)((u64)(val) >> 32)) + struct cpu_signature { unsigned int sig; unsigned int pf; diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h index 4c01917..b7b10b8 100644 --- a/arch/x86/include/asm/microcode_amd.h +++ b/arch/x86/include/asm/microcode_amd.h @@ -61,11 +61,10 @@ extern int __apply_microcode_amd(struct microcode_amd *mc_amd); extern int apply_microcode_amd(int cpu); extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); +#define PATCH_MAX_SIZE PAGE_SIZE +extern u8 amd_ucode_patch[PATCH_MAX_SIZE]; + #ifdef CONFIG_MICROCODE_AMD_EARLY -#ifdef CONFIG_X86_32 -#define MPB_MAX_SIZE PAGE_SIZE -extern u8 amd_bsp_mpb[MPB_MAX_SIZE]; -#endif extern void __init load_ucode_amd_bsp(void); extern void load_ucode_amd_ap(void); extern int __init save_microcode_in_initrd_amd(void); diff 
--git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index 3142a94..3e6b492 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h @@ -1,7 +1,6 @@ #ifndef _ASM_X86_MPSPEC_H #define _ASM_X86_MPSPEC_H -#include <linux/init.h> #include <asm/mpspec_def.h> #include <asm/x86_init.h> diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index 2f366d0..1da25a5 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h @@ -1,6 +1,8 @@ #ifndef _ASM_X86_MWAIT_H #define _ASM_X86_MWAIT_H +#include <linux/sched.h> + #define MWAIT_SUBSTATE_MASK 0xf #define MWAIT_CSTATE_MASK 0xf #define MWAIT_SUBSTATE_SIZE 4 @@ -13,4 +15,45 @@ #define MWAIT_ECX_INTERRUPT_BREAK 0x1 +static inline void __monitor(const void *eax, unsigned long ecx, + unsigned long edx) +{ + /* "monitor %eax, %ecx, %edx;" */ + asm volatile(".byte 0x0f, 0x01, 0xc8;" + :: "a" (eax), "c" (ecx), "d"(edx)); +} + +static inline void __mwait(unsigned long eax, unsigned long ecx) +{ + /* "mwait %eax, %ecx;" */ + asm volatile(".byte 0x0f, 0x01, 0xc9;" + :: "a" (eax), "c" (ecx)); +} + +/* + * This uses new MONITOR/MWAIT instructions on P4 processors with PNI, + * which can obviate IPI to trigger checking of need_resched. + * We execute MONITOR against need_resched and enter optimized wait state + * through MWAIT. Whenever someone changes need_resched, we would be woken + * up from MWAIT (without an IPI). + * + * New with Core Duo processors, MWAIT can take some hints based on CPU + * capability. + */ +static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) +{ + if (!current_set_polling_and_test()) { + if (static_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) { + mb(); + clflush((void *)&current_thread_info()->flags); + mb(); + } + + __monitor((void *)&current_thread_info()->flags, 0, 0); + if (!need_resched()) + __mwait(eax, ecx); + } + current_clr_polling(); +} + #endif /* _ASM_X86_MWAIT_H */ diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h index c878924..775873d 100644 --- a/arch/x86/include/asm/page.h +++ b/arch/x86/include/asm/page.h @@ -71,6 +71,7 @@ extern bool __virt_addr_valid(unsigned long kaddr); #include <asm-generic/getorder.h> #define __HAVE_ARCH_GATE_AREA 1 +#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA #endif /* __KERNEL__ */ #endif /* _ASM_X86_PAGE_H */ diff --git a/arch/x86/include/asm/page_32.h b/arch/x86/include/asm/page_32.h index 4d550d0..904f528 100644 --- a/arch/x86/include/asm/page_32.h +++ b/arch/x86/include/asm/page_32.h @@ -5,10 +5,6 @@ #ifndef __ASSEMBLY__ -#ifdef CONFIG_HUGETLB_PAGE -#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA -#endif - #define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET) #ifdef CONFIG_DEBUG_VIRTUAL extern unsigned long __phys_addr(unsigned long); diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 43dcd80..8de6d9c 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -39,9 +39,18 @@ #define __VIRTUAL_MASK_SHIFT 47 /* - * Kernel image size is limited to 512 MB (see level2_kernel_pgt in - * arch/x86/kernel/head_64.S), and it is mapped here: + * Kernel image size is limited to 1GiB due to the fixmap living in the + * next 1GiB (see level2_kernel_pgt in arch/x86/kernel/head_64.S). Use + * 512MiB by default, leaving 1.5GiB for modules once the page tables + * are fully set up. If kernel ASLR is configured, it can extend the + * kernel page table mapping, reducing the size of the modules area.
*/ -#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) +#define KERNEL_IMAGE_SIZE_DEFAULT (512 * 1024 * 1024) +#if defined(CONFIG_RANDOMIZE_BASE) && \ + CONFIG_RANDOMIZE_BASE_MAX_OFFSET > KERNEL_IMAGE_SIZE_DEFAULT +#define KERNEL_IMAGE_SIZE CONFIG_RANDOMIZE_BASE_MAX_OFFSET +#else +#define KERNEL_IMAGE_SIZE KERNEL_IMAGE_SIZE_DEFAULT +#endif #endif /* _ASM_X86_PAGE_64_DEFS_H */ diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h index 3bf2dd0..0d193e2 100644 --- a/arch/x86/include/asm/pgtable-2level.h +++ b/arch/x86/include/asm/pgtable-2level.h @@ -55,6 +55,13 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) #endif +/* Bit manipulation helper on pte/pgoff entry */ +static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshift, + unsigned long mask, unsigned int leftshift) +{ + return ((value >> rightshift) & mask) << leftshift; +} + #ifdef CONFIG_MEM_SOFT_DIRTY /* @@ -71,31 +78,34 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) #define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1) #define PTE_FILE_BITS3 (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1) -#define pte_to_pgoff(pte) \ - ((((pte).pte_low >> (PTE_FILE_SHIFT1)) \ - & ((1U << PTE_FILE_BITS1) - 1))) \ - + ((((pte).pte_low >> (PTE_FILE_SHIFT2)) \ - & ((1U << PTE_FILE_BITS2) - 1)) \ - << (PTE_FILE_BITS1)) \ - + ((((pte).pte_low >> (PTE_FILE_SHIFT3)) \ - & ((1U << PTE_FILE_BITS3) - 1)) \ - << (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ - + ((((pte).pte_low >> (PTE_FILE_SHIFT4))) \ - << (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3)) - -#define pgoff_to_pte(off) \ - ((pte_t) { .pte_low = \ - ((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \ - + ((((off) >> PTE_FILE_BITS1) \ - & ((1U << PTE_FILE_BITS2) - 1)) \ - << PTE_FILE_SHIFT2) \ - + ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ - & ((1U << PTE_FILE_BITS3) - 1)) \ - << PTE_FILE_SHIFT3) \ - + ((((off) >> \ - (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))) \ - << PTE_FILE_SHIFT4) \ - + _PAGE_FILE }) +#define PTE_FILE_MASK1 ((1U << PTE_FILE_BITS1) - 1) +#define PTE_FILE_MASK2 ((1U << PTE_FILE_BITS2) - 1) +#define PTE_FILE_MASK3 ((1U << PTE_FILE_BITS3) - 1) + +#define PTE_FILE_LSHIFT2 (PTE_FILE_BITS1) +#define PTE_FILE_LSHIFT3 (PTE_FILE_BITS1 + PTE_FILE_BITS2) +#define PTE_FILE_LSHIFT4 (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3) + +static __always_inline pgoff_t pte_to_pgoff(pte_t pte) +{ + return (pgoff_t) + (pte_bitop(pte.pte_low, PTE_FILE_SHIFT1, PTE_FILE_MASK1, 0) + + pte_bitop(pte.pte_low, PTE_FILE_SHIFT2, PTE_FILE_MASK2, PTE_FILE_LSHIFT2) + + pte_bitop(pte.pte_low, PTE_FILE_SHIFT3, PTE_FILE_MASK3, PTE_FILE_LSHIFT3) + + pte_bitop(pte.pte_low, PTE_FILE_SHIFT4, -1UL, PTE_FILE_LSHIFT4)); +} + +static __always_inline pte_t pgoff_to_pte(pgoff_t off) +{ + return (pte_t){ + .pte_low = + pte_bitop(off, 0, PTE_FILE_MASK1, PTE_FILE_SHIFT1) + + pte_bitop(off, PTE_FILE_LSHIFT2, PTE_FILE_MASK2, PTE_FILE_SHIFT2) + + pte_bitop(off, PTE_FILE_LSHIFT3, PTE_FILE_MASK3, PTE_FILE_SHIFT3) + + pte_bitop(off, PTE_FILE_LSHIFT4, -1UL, PTE_FILE_SHIFT4) + + _PAGE_FILE, + }; +} #else /* CONFIG_MEM_SOFT_DIRTY */ @@ -115,22 +125,30 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) #define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1) #define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1) -#define pte_to_pgoff(pte) \ - ((((pte).pte_low >> PTE_FILE_SHIFT1) \ - & ((1U << PTE_FILE_BITS1) - 1)) \ - + ((((pte).pte_low 
>> PTE_FILE_SHIFT2) \ - & ((1U << PTE_FILE_BITS2) - 1)) << PTE_FILE_BITS1) \ - + (((pte).pte_low >> PTE_FILE_SHIFT3) \ - << (PTE_FILE_BITS1 + PTE_FILE_BITS2))) - -#define pgoff_to_pte(off) \ - ((pte_t) { .pte_low = \ - (((off) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \ - + ((((off) >> PTE_FILE_BITS1) & ((1U << PTE_FILE_BITS2) - 1)) \ - << PTE_FILE_SHIFT2) \ - + (((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ - << PTE_FILE_SHIFT3) \ - + _PAGE_FILE }) +#define PTE_FILE_MASK1 ((1U << PTE_FILE_BITS1) - 1) +#define PTE_FILE_MASK2 ((1U << PTE_FILE_BITS2) - 1) + +#define PTE_FILE_LSHIFT2 (PTE_FILE_BITS1) +#define PTE_FILE_LSHIFT3 (PTE_FILE_BITS1 + PTE_FILE_BITS2) + +static __always_inline pgoff_t pte_to_pgoff(pte_t pte) +{ + return (pgoff_t) + (pte_bitop(pte.pte_low, PTE_FILE_SHIFT1, PTE_FILE_MASK1, 0) + + pte_bitop(pte.pte_low, PTE_FILE_SHIFT2, PTE_FILE_MASK2, PTE_FILE_LSHIFT2) + + pte_bitop(pte.pte_low, PTE_FILE_SHIFT3, -1UL, PTE_FILE_LSHIFT3)); +} + +static __always_inline pte_t pgoff_to_pte(pgoff_t off) +{ + return (pte_t){ + .pte_low = + pte_bitop(off, 0, PTE_FILE_MASK1, PTE_FILE_SHIFT1) + + pte_bitop(off, PTE_FILE_LSHIFT2, PTE_FILE_MASK2, PTE_FILE_SHIFT2) + + pte_bitop(off, PTE_FILE_LSHIFT3, -1UL, PTE_FILE_SHIFT3) + + _PAGE_FILE, + }; +} #endif /* CONFIG_MEM_SOFT_DIRTY */ diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 3d19994..bbc8b12 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -452,9 +452,16 @@ static inline int pte_present(pte_t a) } #define pte_accessible pte_accessible -static inline int pte_accessible(pte_t a) +static inline bool pte_accessible(struct mm_struct *mm, pte_t a) { - return pte_flags(a) & _PAGE_PRESENT; + if (pte_flags(a) & _PAGE_PRESENT) + return true; + + if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) && + mm_tlb_flush_pending(mm)) + return true; + + return false; } static inline int pte_hidden(pte_t pte) diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 2d88344..c883bf7 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -58,7 +58,7 @@ typedef struct { pteval_t pte; } pte_t; #define VMALLOC_START _AC(0xffffc90000000000, UL) #define VMALLOC_END _AC(0xffffe8ffffffffff, UL) #define VMEMMAP_START _AC(0xffffea0000000000, UL) -#define MODULES_VADDR _AC(0xffffffffa0000000, UL) +#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) #define MODULES_END _AC(0xffffffffff000000, UL) #define MODULES_LEN (MODULES_END - MODULES_VADDR) diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 0ecac25..a83aa44 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -382,7 +382,8 @@ static inline void update_page_count(int level, unsigned long pages) { } */ extern pte_t *lookup_address(unsigned long address, unsigned int *level); extern phys_addr_t slow_virt_to_phys(void *__address); - +extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, + unsigned numpages, unsigned long page_flags); #endif /* !__ASSEMBLY__ */ #endif /* _ASM_X86_PGTABLE_DEFS_H */ diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h index 8729723..c8b0519 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h @@ -8,6 +8,12 @@ DECLARE_PER_CPU(int, __preempt_count); /* + * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such + * that a decrement hitting 0 
means we can and should reschedule. + */ +#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED) + +/* * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users * that think a non-zero value indicates we cannot preempt. */ @@ -74,6 +80,11 @@ static __always_inline void __preempt_count_sub(int val) __this_cpu_add_4(__preempt_count, -val); } +/* + * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule + * a decrement which hits zero means we have no preempt_count and should + * reschedule. + */ static __always_inline bool __preempt_count_dec_and_test(void) { GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e"); diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 7b034a4..fdedd38 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -27,7 +27,6 @@ struct mm_struct; #include <linux/cache.h> #include <linux/threads.h> #include <linux/math64.h> -#include <linux/init.h> #include <linux/err.h> #include <linux/irqflags.h> @@ -72,6 +71,7 @@ extern u16 __read_mostly tlb_lli_4m[NR_INFO]; extern u16 __read_mostly tlb_lld_4k[NR_INFO]; extern u16 __read_mostly tlb_lld_2m[NR_INFO]; extern u16 __read_mostly tlb_lld_4m[NR_INFO]; +extern u16 __read_mostly tlb_lld_1g[NR_INFO]; extern s8 __read_mostly tlb_flushall_shift; /* @@ -370,6 +370,20 @@ struct ymmh_struct { u32 ymmh_space[64]; }; +/* We don't support LWP yet: */ +struct lwp_struct { + u8 reserved[128]; +}; + +struct bndregs_struct { + u64 bndregs[8]; +} __packed; + +struct bndcsr_struct { + u64 cfg_reg_u; + u64 status_reg; +} __packed; + struct xsave_hdr_struct { u64 xstate_bv; u64 reserved1[2]; @@ -380,6 +394,9 @@ struct xsave_struct { struct i387_fxsave_struct i387; struct xsave_hdr_struct xsave_hdr; struct ymmh_struct ymmh; + struct lwp_struct lwp; + struct bndregs_struct bndregs; + struct bndcsr_struct bndcsr; /* new processor state extensions will go here */ } __attribute__ ((packed, aligned (64))); @@ -700,29 +717,6 @@ static inline void sync_core(void) #endif } -static inline void __monitor(const void *eax, unsigned long ecx, - unsigned long edx) -{ - /* "monitor %eax, %ecx, %edx;" */ - asm volatile(".byte 0x0f, 0x01, 0xc8;" - :: "a" (eax), "c" (ecx), "d"(edx)); -} - -static inline void __mwait(unsigned long eax, unsigned long ecx) -{ - /* "mwait %eax, %ecx;" */ - asm volatile(".byte 0x0f, 0x01, 0xc9;" - :: "a" (eax), "c" (ecx)); -} - -static inline void __sti_mwait(unsigned long eax, unsigned long ecx) -{ - trace_hardirqs_on(); - /* "mwait %eax, %ecx;" */ - asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" - :: "a" (eax), "c" (ecx)); -} - extern void select_idle_routine(const struct cpuinfo_x86 *c); extern void init_amd_e400_c1e_mask(void); diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 942a086..14fd6fd 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -60,7 +60,6 @@ struct pt_regs { #endif /* !__i386__ */ -#include <linux/init.h> #ifdef CONFIG_PARAVIRT #include <asm/paravirt_types.h> #endif diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h index 1ff990f..8f7866a 100644 --- a/arch/x86/include/asm/rmwcc.h +++ b/arch/x86/include/asm/rmwcc.h @@ -16,8 +16,8 @@ cc_label: \ #define GEN_UNARY_RMWcc(op, var, arg0, cc) \ __GEN_RMWcc(op " " arg0, var, cc) -#define GEN_BINARY_RMWcc(op, var, val, arg0, cc) \ - __GEN_RMWcc(op " %1, " arg0, var, cc, "er" (val)) +#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \ + __GEN_RMWcc(op " %1, " arg0, var, cc, 
vcon (val)) #else /* !CC_HAVE_ASM_GOTO */ @@ -33,8 +33,8 @@ do { \ #define GEN_UNARY_RMWcc(op, var, arg0, cc) \ __GEN_RMWcc(op " " arg0, var, cc) -#define GEN_BINARY_RMWcc(op, var, val, arg0, cc) \ - __GEN_RMWcc(op " %2, " arg0, var, cc, "er" (val)) +#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \ + __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val)) #endif /* CC_HAVE_ASM_GOTO */ diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 59bcf4e..d62c9f8 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -3,7 +3,6 @@ #include <uapi/asm/setup.h> - #define COMMAND_LINE_SIZE 2048 #include <linux/linkage.h> @@ -29,6 +28,8 @@ #include <asm/bootparam.h> #include <asm/x86_init.h> +extern u64 relocated_ramdisk; + /* Interrupt control for vSMPowered x86_64 systems */ #ifdef CONFIG_X86_64 void vsmp_init(void); diff --git a/arch/x86/include/asm/simd.h b/arch/x86/include/asm/simd.h new file mode 100644 index 0000000..ee80b92 --- /dev/null +++ b/arch/x86/include/asm/simd.h @@ -0,0 +1,11 @@ + +#include <asm/i387.h> + +/* + * may_use_simd - whether it is allowable at this time to issue SIMD + * instructions or access the SIMD register file + */ +static __must_check inline bool may_use_simd(void) +{ + return irq_fpu_usable(); +} diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 4137890..8cd27e0 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -2,7 +2,6 @@ #define _ASM_X86_SMP_H #ifndef __ASSEMBLY__ #include <linux/cpumask.h> -#include <linux/init.h> #include <asm/percpu.h> /* diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h index 34baa0e..a04eabd 100644 --- a/arch/x86/include/asm/timer.h +++ b/arch/x86/include/asm/timer.h @@ -1,9 +1,9 @@ #ifndef _ASM_X86_TIMER_H #define _ASM_X86_TIMER_H -#include <linux/init.h> #include <linux/pm.h> #include <linux/percpu.h> #include <linux/interrupt.h> +#include <linux/math64.h> #define TICK_SIZE (tick_nsec / 1000) @@ -12,68 +12,26 @@ extern int recalibrate_cpu_khz(void); extern int no_timer_check; -/* Accelerators for sched_clock() - * convert from cycles(64bits) => nanoseconds (64bits) - * basic equation: - * ns = cycles / (freq / ns_per_sec) - * ns = cycles * (ns_per_sec / freq) - * ns = cycles * (10^9 / (cpu_khz * 10^3)) - * ns = cycles * (10^6 / cpu_khz) +/* + * We use the full linear equation: f(x) = a + b*x, in order to allow + * a continuous function in the face of dynamic freq changes. * - * Then we use scaling math (suggested by george@mvista.com) to get: - * ns = cycles * (10^6 * SC / cpu_khz) / SC - * ns = cycles * cyc2ns_scale / SC + * Continuity means that when our frequency changes our slope (b); we want to + * ensure that: f(t) == f'(t), which gives: a + b*t == a' + b'*t. * - * And since SC is a constant power of two, we can convert the div - * into a shift. + * Without an offset (a) the above would not be possible. * - * We can use khz divisor instead of mhz to keep a better precision, since - * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. - * (mathieu.desnoyers@polymtl.ca) - * - * -johnstul@us.ibm.com "math is hard, lets go shopping!" - * - * In: - * - * ns = cycles * cyc2ns_scale / SC - * - * Although we may still have enough bits to store the value of ns, - * in some cases, we may not have enough bits to store cycles * cyc2ns_scale, - * leading to an incorrect result. - * - * To avoid this, we can decompose 'cycles' into quotient and remainder - * of division by SC. 
Then, - * - * ns = (quot * SC + rem) * cyc2ns_scale / SC - * = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC - * - * - sqazi@google.com + * See the comment near cycles_2_ns() for details on how we compute (b). */ - -DECLARE_PER_CPU(unsigned long, cyc2ns); -DECLARE_PER_CPU(unsigned long long, cyc2ns_offset); - -#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ - -static inline unsigned long long __cycles_2_ns(unsigned long long cyc) -{ - int cpu = smp_processor_id(); - unsigned long long ns = per_cpu(cyc2ns_offset, cpu); - ns += mult_frac(cyc, per_cpu(cyc2ns, cpu), - (1UL << CYC2NS_SCALE_FACTOR)); - return ns; -} - -static inline unsigned long long cycles_2_ns(unsigned long long cyc) -{ - unsigned long long ns; - unsigned long flags; - - local_irq_save(flags); - ns = __cycles_2_ns(cyc); - local_irq_restore(flags); - - return ns; -} +struct cyc2ns_data { + u32 cyc2ns_mul; + u32 cyc2ns_shift; + u64 cyc2ns_offset; + u32 __count; + /* u32 hole */ +}; /* 24 bytes -- do not grow */ + +extern struct cyc2ns_data *cyc2ns_read_begin(void); +extern void cyc2ns_read_end(struct cyc2ns_data *); #endif /* _ASM_X86_TIMER_H */ diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h index 2874df2..4cab890 100644 --- a/arch/x86/include/asm/trace/irq_vectors.h +++ b/arch/x86/include/asm/trace/irq_vectors.h @@ -72,6 +72,17 @@ DEFINE_IRQ_VECTOR_EVENT(x86_platform_ipi); DEFINE_IRQ_VECTOR_EVENT(irq_work); /* + * We must dis-allow sampling irq_work_exit() because perf event sampling + * itself can cause irq_work, which would lead to an infinite loop; + * + * 1) irq_work_exit happens + * 2) generates perf sample + * 3) generates irq_work + * 4) goto 1 + */ +TRACE_EVENT_PERF_PERM(irq_work_exit, is_sampling_event(p_event) ? -EPERM : 0); + +/* * call_function - called when entering/exiting a call function interrupt * vector handler */ diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index 235be70..57ae63c 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h @@ -65,4 +65,7 @@ extern int notsc_setup(char *); extern void tsc_save_sched_clock_state(void); extern void tsc_restore_sched_clock_state(void); +/* MSR based TSC calibration for Intel Atom SoC platforms */ +int try_msr_calibrate_tsc(unsigned long *fast_calibrate); + #endif /* _ASM_X86_TSC_H */ diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 8ec57c0..0d592e0 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -40,22 +40,30 @@ /* * Test whether a block of memory is a valid user space address. * Returns 0 if the range is valid, nonzero otherwise. - * - * This is equivalent to the following test: - * (u33)addr + (u33)size > (u33)current->addr_limit.seg (u65 for x86_64) - * - * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry... */ +static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit) +{ + /* + * If we have used "sizeof()" for the size, + * we know it won't overflow the limit (but + * it might overflow the 'addr', so it's + * important to subtract the size from the + * limit, not add it to the address). + */ + if (__builtin_constant_p(size)) + return addr > limit - size; + + /* Arbitrary sizes? 
Be careful about overflow */ + addr += size; + if (addr < size) + return true; + return addr > limit; +} #define __range_not_ok(addr, size, limit) \ ({ \ - unsigned long flag, roksum; \ __chk_user_ptr(addr); \ - asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \ - : "=&r" (flag), "=r" (roksum) \ - : "1" (addr), "g" ((long)(size)), \ - "rm" (limit)); \ - flag; \ + __chk_range_not_ok((unsigned long __force)(addr), size, limit); \ }) /** @@ -78,7 +86,7 @@ * this function, memory access functions may still return -EFAULT. */ #define access_ok(type, addr, size) \ - (likely(__range_not_ok(addr, size, user_addr_max()) == 0)) + likely(!__range_not_ok(addr, size, user_addr_max())) /* * The exception table consists of pairs of addresses relative to the @@ -525,6 +533,98 @@ extern __must_check long strnlen_user(const char __user *str, long n); unsigned long __must_check clear_user(void __user *mem, unsigned long len); unsigned long __must_check __clear_user(void __user *mem, unsigned long len); +extern void __cmpxchg_wrong_size(void) + __compiletime_error("Bad argument size for cmpxchg"); + +#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \ +({ \ + int __ret = 0; \ + __typeof__(ptr) __uval = (uval); \ + __typeof__(*(ptr)) __old = (old); \ + __typeof__(*(ptr)) __new = (new); \ + switch (size) { \ + case 1: \ + { \ + asm volatile("\t" ASM_STAC "\n" \ + "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \ + "2:\t" ASM_CLAC "\n" \ + "\t.section .fixup, \"ax\"\n" \ + "3:\tmov %3, %0\n" \ + "\tjmp 2b\n" \ + "\t.previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ + : "i" (-EFAULT), "q" (__new), "1" (__old) \ + : "memory" \ + ); \ + break; \ + } \ + case 2: \ + { \ + asm volatile("\t" ASM_STAC "\n" \ + "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \ + "2:\t" ASM_CLAC "\n" \ + "\t.section .fixup, \"ax\"\n" \ + "3:\tmov %3, %0\n" \ + "\tjmp 2b\n" \ + "\t.previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ + : "i" (-EFAULT), "r" (__new), "1" (__old) \ + : "memory" \ + ); \ + break; \ + } \ + case 4: \ + { \ + asm volatile("\t" ASM_STAC "\n" \ + "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \ + "2:\t" ASM_CLAC "\n" \ + "\t.section .fixup, \"ax\"\n" \ + "3:\tmov %3, %0\n" \ + "\tjmp 2b\n" \ + "\t.previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ + : "i" (-EFAULT), "r" (__new), "1" (__old) \ + : "memory" \ + ); \ + break; \ + } \ + case 8: \ + { \ + if (!IS_ENABLED(CONFIG_X86_64)) \ + __cmpxchg_wrong_size(); \ + \ + asm volatile("\t" ASM_STAC "\n" \ + "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \ + "2:\t" ASM_CLAC "\n" \ + "\t.section .fixup, \"ax\"\n" \ + "3:\tmov %3, %0\n" \ + "\tjmp 2b\n" \ + "\t.previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ + : "i" (-EFAULT), "r" (__new), "1" (__old) \ + : "memory" \ + ); \ + break; \ + } \ + default: \ + __cmpxchg_wrong_size(); \ + } \ + *__uval = __old; \ + __ret; \ +}) + +#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \ +({ \ + access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? 
\ + __user_atomic_cmpxchg_inatomic((uval), (ptr), \ + (old), (new), sizeof(*(ptr))) : \ + -EFAULT; \ +}) + /* * movsl can be slow when source and dest are not both 8-byte aligned */ diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 190413d..12a26b9 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -204,13 +204,13 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) static __must_check __always_inline int __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) { - return __copy_from_user_nocheck(dst, (__force const void *)src, size); + return __copy_from_user_nocheck(dst, src, size); } static __must_check __always_inline int __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) { - return __copy_to_user_nocheck((__force void *)dst, src, size); + return __copy_to_user_nocheck(dst, src, size); } extern long __copy_user_nocache(void *dst, const void __user *src, diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h index 0415cda..5547389 100644 --- a/arch/x86/include/asm/xsave.h +++ b/arch/x86/include/asm/xsave.h @@ -9,6 +9,8 @@ #define XSTATE_FP 0x1 #define XSTATE_SSE 0x2 #define XSTATE_YMM 0x4 +#define XSTATE_BNDREGS 0x8 +#define XSTATE_BNDCSR 0x10 #define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE) @@ -20,10 +22,14 @@ #define XSAVE_YMM_SIZE 256 #define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET) -/* - * These are the features that the OS can handle currently. - */ -#define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) +/* Supported features which support lazy state saving */ +#define XSTATE_LAZY (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) + +/* Supported features which require eager state saving */ +#define XSTATE_EAGER (XSTATE_BNDREGS | XSTATE_BNDCSR) + +/* All currently supported features */ +#define XCNTXT_MASK (XSTATE_LAZY | XSTATE_EAGER) #ifdef CONFIG_X86_64 #define REX_PREFIX "0x48, " diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index 9c3733c..225b098 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h @@ -6,6 +6,7 @@ #define SETUP_E820_EXT 1 #define SETUP_DTB 2 #define SETUP_PCI 3 +#define SETUP_EFI 4 /* ram_size flags */ #define RAMDISK_IMAGE_START_MASK 0x07FF @@ -23,6 +24,7 @@ #define XLF_CAN_BE_LOADED_ABOVE_4G (1<<1) #define XLF_EFI_HANDOVER_32 (1<<2) #define XLF_EFI_HANDOVER_64 (1<<3) +#define XLF_EFI_KEXEC (1<<4) #ifndef __ASSEMBLY__ diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h index 37813b5..59cea18 100644 --- a/arch/x86/include/uapi/asm/msr-index.h +++ b/arch/x86/include/uapi/asm/msr-index.h @@ -184,6 +184,7 @@ #define MSR_AMD64_PATCH_LOADER 0xc0010020 #define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140 #define MSR_AMD64_OSVW_STATUS 0xc0010141 +#define MSR_AMD64_LS_CFG 0xc0011020 #define MSR_AMD64_DC_CFG 0xc0011022 #define MSR_AMD64_BU_CFG2 0xc001102a #define MSR_AMD64_IBSFETCHCTL 0xc0011030 diff --git a/arch/x86/include/uapi/asm/stat.h b/arch/x86/include/uapi/asm/stat.h index 7b3ddc3..bc03eb5 100644 --- a/arch/x86/include/uapi/asm/stat.h +++ b/arch/x86/include/uapi/asm/stat.h @@ -1,6 +1,8 @@ #ifndef _ASM_X86_STAT_H #define _ASM_X86_STAT_H +#include <asm/posix_types.h> + #define STAT_HAVE_NSEC 1 #ifdef __i386__ @@ -78,26 +80,26 @@ struct stat64 { #else /* __i386__ */ struct stat { - unsigned long st_dev; - unsigned long st_ino; - unsigned long st_nlink; - - unsigned int st_mode; - unsigned 
int st_uid; - unsigned int st_gid; - unsigned int __pad0; - unsigned long st_rdev; - long st_size; - long st_blksize; - long st_blocks; /* Number 512-byte blocks allocated. */ - - unsigned long st_atime; - unsigned long st_atime_nsec; - unsigned long st_mtime; - unsigned long st_mtime_nsec; - unsigned long st_ctime; - unsigned long st_ctime_nsec; - long __unused[3]; + __kernel_ulong_t st_dev; + __kernel_ulong_t st_ino; + __kernel_ulong_t st_nlink; + + unsigned int st_mode; + unsigned int st_uid; + unsigned int st_gid; + unsigned int __pad0; + __kernel_ulong_t st_rdev; + __kernel_long_t st_size; + __kernel_long_t st_blksize; + __kernel_long_t st_blocks; /* Number 512-byte blocks allocated. */ + + __kernel_ulong_t st_atime; + __kernel_ulong_t st_atime_nsec; + __kernel_ulong_t st_mtime; + __kernel_ulong_t st_mtime_nsec; + __kernel_ulong_t st_ctime; + __kernel_ulong_t st_ctime_nsec; + __kernel_long_t __unused[3]; }; /* We don't need to memset the whole thing just to initialize the padding */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 9b0a34e..cb648c8 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -29,10 +29,11 @@ obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o obj-y += syscall_$(BITS).o obj-$(CONFIG_X86_64) += vsyscall_64.o obj-$(CONFIG_X86_64) += vsyscall_emu_64.o +obj-$(CONFIG_SYSFS) += ksysfs.o obj-y += bootflag.o e820.o obj-y += pci-dma.o quirks.o topology.o kdebugfs.o obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o -obj-y += tsc.o io_delay.o rtc.o +obj-y += tsc.o tsc_msr.o io_delay.o rtc.o obj-y += pci-iommu_table.o obj-y += resource.o @@ -91,15 +92,6 @@ obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o -obj-$(CONFIG_MICROCODE_EARLY) += microcode_core_early.o -obj-$(CONFIG_MICROCODE_INTEL_EARLY) += microcode_intel_early.o -obj-$(CONFIG_MICROCODE_INTEL_LIB) += microcode_intel_lib.o -microcode-y := microcode_core.o -microcode-$(CONFIG_MICROCODE_INTEL) += microcode_intel.o -microcode-$(CONFIG_MICROCODE_AMD) += microcode_amd.o -obj-$(CONFIG_MICROCODE_AMD_EARLY) += microcode_amd_early.o -obj-$(CONFIG_MICROCODE) += microcode.o - obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o @@ -111,6 +103,7 @@ obj-$(CONFIG_EFI) += sysfb_efi.o obj-$(CONFIG_PERF_EVENTS) += perf_regs.o obj-$(CONFIG_TRACING) += tracepoint.o +obj-$(CONFIG_IOSF_MBI) += iosf_mbi.o ### # 64 bit specific files diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index d2b7f27..e69182f 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -150,29 +150,6 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, } EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe); -/* - * This uses new MONITOR/MWAIT instructions on P4 processors with PNI, - * which can obviate IPI to trigger checking of need_resched. - * We execute MONITOR against need_resched and enter optimized wait state - * through MWAIT. Whenever someone changes need_resched, we would be woken - * up from MWAIT (without an IPI). - * - * New with Core Duo processors, MWAIT can take some hints based on CPU - * capability. 
- */ -void mwait_idle_with_hints(unsigned long ax, unsigned long cx) -{ - if (!need_resched()) { - if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) - clflush((void *)¤t_thread_info()->flags); - - __monitor((void *)¤t_thread_info()->flags, 0, 0); - smp_mb(); - if (!need_resched()) - __mwait(ax, cx); - } -} - void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx) { unsigned int cpu = smp_processor_id(); diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index d278736..7f26c9a 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -75,6 +75,13 @@ unsigned int max_physical_apicid; physid_mask_t phys_cpu_present_map; /* + * Processor to be disabled specified by kernel parameter + * disable_cpu_apicid=<int>, mostly used for the kdump 2nd kernel to + * avoid undefined behaviour caused by sending INIT from AP to BSP. + */ +static unsigned int disabled_cpu_apicid __read_mostly = BAD_APICID; + +/* * Map cpu index to physical APIC ID */ DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID); @@ -1968,7 +1975,7 @@ __visible void smp_trace_spurious_interrupt(struct pt_regs *regs) */ static inline void __smp_error_interrupt(struct pt_regs *regs) { - u32 v0, v1; + u32 v; u32 i = 0; static const char * const error_interrupt_reason[] = { "Send CS error", /* APIC Error Bit 0 */ @@ -1982,21 +1989,20 @@ static inline void __smp_error_interrupt(struct pt_regs *regs) }; /* First tickle the hardware, only then report what went on. -- REW */ - v0 = apic_read(APIC_ESR); apic_write(APIC_ESR, 0); - v1 = apic_read(APIC_ESR); + v = apic_read(APIC_ESR); ack_APIC_irq(); atomic_inc(&irq_err_count); - apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)", - smp_processor_id(), v0 , v1); + apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x", + smp_processor_id(), v); - v1 = v1 & 0xff; - while (v1) { - if (v1 & 0x1) + v &= 0xff; + while (v) { + if (v & 0x1) apic_printk(APIC_DEBUG, KERN_CONT " : %s", error_interrupt_reason[i]); i++; - v1 >>= 1; + v >>= 1; } apic_printk(APIC_DEBUG, KERN_CONT "\n"); @@ -2115,6 +2121,39 @@ int generic_processor_info(int apicid, int version) phys_cpu_present_map); /* + * boot_cpu_physical_apicid is designed to have the apicid + * returned by read_apic_id(), i.e, the apicid of the + * currently booting-up processor. However, on some platforms, + * it is temporarily modified by the apicid reported as BSP + * through MP table. Concretely: + * + * - arch/x86/kernel/mpparse.c: MP_processor_info() + * - arch/x86/mm/amdtopology.c: amd_numa_init() + * - arch/x86/platform/visws/visws_quirks.c: MP_processor_info() + * + * This function is executed with the modified + * boot_cpu_physical_apicid. So, disabled_cpu_apicid kernel + * parameter doesn't work to disable APs on kdump 2nd kernel. + * + * Since fixing handling of boot_cpu_physical_apicid requires + * another discussion and tests on each platform, we leave it + * for now and here we use read_apic_id() directly in this + * function, generic_processor_info(). + */ + if (disabled_cpu_apicid != BAD_APICID && + disabled_cpu_apicid != read_apic_id() && + disabled_cpu_apicid == apicid) { + int thiscpu = num_processors + disabled_cpus; + + pr_warning("APIC: Disabling requested cpu." 
+ " Processor %d/0x%x ignored.\n", + thiscpu, apicid); + + disabled_cpus++; + return -ENODEV; + } + + /* * If boot cpu has not been detected yet, then only allow upto * nr_cpu_ids - 1 processors and keep one slot free for boot cpu */ @@ -2592,3 +2631,12 @@ static int __init lapic_insert_resource(void) * that is using request_resource */ late_initcall(lapic_insert_resource); + +static int __init apic_set_disabled_cpu_apicid(char *arg) +{ + if (!arg || !get_option(&arg, &disabled_cpu_apicid)) + return -EINVAL; + + return 0; +} +early_param("disable_cpu_apicid", apic_set_disabled_cpu_apicid); diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index 00c77cf..5d5b9eb 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c @@ -14,7 +14,6 @@ #include <linux/string.h> #include <linux/kernel.h> #include <linux/ctype.h> -#include <linux/init.h> #include <linux/hardirq.h> #include <linux/module.h> #include <asm/smp.h> diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c index e145f28..191ce75 100644 --- a/arch/x86/kernel/apic/apic_noop.c +++ b/arch/x86/kernel/apic/apic_noop.c @@ -15,7 +15,6 @@ #include <linux/string.h> #include <linux/kernel.h> #include <linux/ctype.h> -#include <linux/init.h> #include <linux/errno.h> #include <asm/fixmap.h> #include <asm/mpspec.h> diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index e63a5bd..a43f068 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1142,9 +1142,10 @@ next: if (test_bit(vector, used_vectors)) goto next; - for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) - if (per_cpu(vector_irq, new_cpu)[vector] != -1) + for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) { + if (per_cpu(vector_irq, new_cpu)[vector] > VECTOR_UNDEFINED) goto next; + } /* Found one! 
*/ current_vector = vector; current_offset = offset; @@ -1183,7 +1184,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg) vector = cfg->vector; for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) - per_cpu(vector_irq, cpu)[vector] = -1; + per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; cfg->vector = 0; cpumask_clear(cfg->domain); @@ -1191,11 +1192,10 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg) if (likely(!cfg->move_in_progress)) return; for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { - for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; - vector++) { + for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { if (per_cpu(vector_irq, cpu)[vector] != irq) continue; - per_cpu(vector_irq, cpu)[vector] = -1; + per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; break; } } @@ -1228,12 +1228,12 @@ void __setup_vector_irq(int cpu) /* Mark the free vectors */ for (vector = 0; vector < NR_VECTORS; ++vector) { irq = per_cpu(vector_irq, cpu)[vector]; - if (irq < 0) + if (irq <= VECTOR_UNDEFINED) continue; cfg = irq_cfg(irq); if (!cpumask_test_cpu(cpu, cfg->domain)) - per_cpu(vector_irq, cpu)[vector] = -1; + per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; } raw_spin_unlock(&vector_lock); } @@ -2202,13 +2202,13 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) me = smp_processor_id(); for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { - unsigned int irq; + int irq; unsigned int irr; struct irq_desc *desc; struct irq_cfg *cfg; irq = __this_cpu_read(vector_irq[vector]); - if (irq == -1) + if (irq <= VECTOR_UNDEFINED) continue; desc = irq_to_desc(irq); diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c index 7434d85..6207156 100644 --- a/arch/x86/kernel/apic/ipi.c +++ b/arch/x86/kernel/apic/ipi.c @@ -1,6 +1,5 @@ #include <linux/cpumask.h> #include <linux/interrupt.h> -#include <linux/init.h> #include <linux/mm.h> #include <linux/delay.h> diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c index 77c95c0..00146f9 100644 --- a/arch/x86/kernel/apic/summit_32.c +++ b/arch/x86/kernel/apic/summit_32.c @@ -29,7 +29,6 @@ #define pr_fmt(fmt) "summit: %s: " fmt, __func__ #include <linux/mm.h> -#include <linux/init.h> #include <asm/io.h> #include <asm/bios_ebda.h> diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index 140e29d..cac85ee 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c @@ -3,7 +3,6 @@ #include <linux/string.h> #include <linux/kernel.h> #include <linux/ctype.h> -#include <linux/init.h> #include <linux/dmar.h> #include <linux/cpu.h> diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index 562a76d..de231e3 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -3,7 +3,6 @@ #include <linux/string.h> #include <linux/kernel.h> #include <linux/ctype.h> -#include <linux/init.h> #include <linux/dmar.h> #include <asm/smp.h> diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 47b56a7..7fd54f0 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -36,12 +36,13 @@ obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd_iommu.o endif obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o -obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o 
+obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o perf_event_intel_rapl.o endif obj-$(CONFIG_X86_MCE) += mcheck/ obj-$(CONFIG_MTRR) += mtrr/ +obj-$(CONFIG_MICROCODE) += microcode/ obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o perf_event_amd_ibs.o diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index bca023b..d3153e2 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -1,5 +1,4 @@ #include <linux/export.h> -#include <linux/init.h> #include <linux/bitops.h> #include <linux/elf.h> #include <linux/mm.h> @@ -487,7 +486,7 @@ static void early_init_amd(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); if (!check_tsc_unstable()) - sched_clock_stable = 1; + set_sched_clock_stable(); } #ifdef CONFIG_X86_64 @@ -508,6 +507,16 @@ static void early_init_amd(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_EXTD_APICID); } #endif + + /* F16h erratum 793, CVE-2013-6885 */ + if (c->x86 == 0x16 && c->x86_model <= 0xf) { + u64 val; + + rdmsrl(MSR_AMD64_LS_CFG, val); + if (!(val & BIT(15))) + wrmsrl(MSR_AMD64_LS_CFG, val | BIT(15)); + } + } static const int amd_erratum_383[]; @@ -790,14 +799,10 @@ static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c) } /* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */ - if (!((eax >> 16) & mask)) { - u32 a, b, c, d; - - cpuid(0x80000005, &a, &b, &c, &d); - tlb_lld_2m[ENTRIES] = (a >> 16) & 0xff; - } else { + if (!((eax >> 16) & mask)) + tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff; + else tlb_lld_2m[ENTRIES] = (eax >> 16) & mask; - } /* a 4M entry uses two 2M entries */ tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1; diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 8d5652d..8779eda 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -1,6 +1,5 @@ #include <linux/bitops.h> #include <linux/kernel.h> -#include <linux/init.h> #include <asm/processor.h> #include <asm/e820.h> diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 6abc172..24b6fd1 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -472,6 +472,7 @@ u16 __read_mostly tlb_lli_4m[NR_INFO]; u16 __read_mostly tlb_lld_4k[NR_INFO]; u16 __read_mostly tlb_lld_2m[NR_INFO]; u16 __read_mostly tlb_lld_4m[NR_INFO]; +u16 __read_mostly tlb_lld_1g[NR_INFO]; /* * tlb_flushall_shift shows the balance point in replacing cr3 write @@ -486,13 +487,13 @@ void cpu_detect_tlb(struct cpuinfo_x86 *c) if (this_cpu->c_detect_tlb) this_cpu->c_detect_tlb(c); - printk(KERN_INFO "Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n" \ - "Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d\n" \ + printk(KERN_INFO "Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n" + "Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n" "tlb_flushall_shift: %d\n", tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES], tlb_lli_4m[ENTRIES], tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES], tlb_lld_4m[ENTRIES], - tlb_flushall_shift); + tlb_lld_1g[ENTRIES], tlb_flushall_shift); } void detect_ht(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index d0969c7..aaf152e 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c @@ -1,4 +1,3 @@ -#include <linux/init.h> #include <linux/bitops.h> #include <linux/delay.h> #include <linux/pci.h> diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index dc1ec0d..3db61c6 100644 --- a/arch/x86/kernel/cpu/intel.c +++ 
b/arch/x86/kernel/cpu/intel.c @@ -1,4 +1,3 @@ -#include <linux/init.h> #include <linux/kernel.h> #include <linux/string.h> @@ -93,7 +92,7 @@ static void early_init_intel(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); if (!check_tsc_unstable()) - sched_clock_stable = 1; + set_sched_clock_stable(); } /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */ @@ -387,7 +386,8 @@ static void init_intel(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_PEBS); } - if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush) + if (c->x86 == 6 && cpu_has_clflush && + (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47)) set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR); #ifdef CONFIG_X86_64 @@ -505,6 +505,7 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) #define TLB_DATA0_2M_4M 0x23 #define STLB_4K 0x41 +#define STLB_4K_2M 0x42 static const struct _tlb_table intel_tlb_table[] = { { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" }, @@ -525,13 +526,20 @@ static const struct _tlb_table intel_tlb_table[] = { { 0x5b, TLB_DATA_4K_4M, 64, " TLB_DATA 4 KByte and 4 MByte pages" }, { 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" }, { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" }, + { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" }, + { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" }, + { 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" }, { 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" }, { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" }, { 0xb2, TLB_INST_4K, 64, " TLB_INST 4KByte pages, 4-way set associative" }, { 0xb3, TLB_DATA_4K, 128, " TLB_DATA 4 KByte pages, 4-way set associative" }, { 0xb4, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 4-way associative" }, + { 0xb5, TLB_INST_4K, 64, " TLB_INST 4 KByte pages, 8-way set ssociative" }, + { 0xb6, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 8-way set ssociative" }, { 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" }, { 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" }, + { 0xc1, STLB_4K_2M, 1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" }, + { 0xc2, TLB_DATA_2M_4M, 16, " DTLB 2 MByte/4MByte pages, 4-way associative" }, { 0xca, STLB_4K, 512, " STLB 4 KByte pages, 4-way associative" }, { 0x00, 0, 0 } }; @@ -557,6 +565,20 @@ static void intel_tlb_lookup(const unsigned char desc) if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; break; + case STLB_4K_2M: + if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) + tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; + if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) + tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; + if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries) + tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries; + if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries) + tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries; + if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries) + tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries; + if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries) + tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries; + break; case TLB_INST_ALL: if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; @@ -602,6 
+624,10 @@ static void intel_tlb_lookup(const unsigned char desc) if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries; break; + case TLB_DATA_1G: + if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries) + tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries; + break; } } diff --git a/arch/x86/kernel/cpu/mcheck/mce-apei.c b/arch/x86/kernel/cpu/mcheck/mce-apei.c index de8b60a..a1aef95 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-apei.c +++ b/arch/x86/kernel/cpu/mcheck/mce-apei.c @@ -33,22 +33,28 @@ #include <linux/acpi.h> #include <linux/cper.h> #include <acpi/apei.h> +#include <acpi/ghes.h> #include <asm/mce.h> #include "mce-internal.h" -void apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err) +void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err) { struct mce m; - /* Only corrected MC is reported */ - if (!corrected || !(mem_err->validation_bits & CPER_MEM_VALID_PA)) + if (!(mem_err->validation_bits & CPER_MEM_VALID_PA)) return; mce_setup(&m); m.bank = 1; - /* Fake a memory read corrected error with unknown channel */ + /* Fake a memory read error with unknown channel */ m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | 0x9f; + + if (severity >= GHES_SEV_RECOVERABLE) + m.status |= MCI_STATUS_UC; + if (severity >= GHES_SEV_PANIC) + m.status |= MCI_STATUS_PCC; + m.addr = mem_err->physical_addr; mce_log(&m); mce_notify_irq(); diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index b3218cd..4d5419b 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -1638,15 +1638,15 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) static void mce_start_timer(unsigned int cpu, struct timer_list *t) { - unsigned long iv = mce_adjust_timer(check_interval * HZ); - - __this_cpu_write(mce_next_interval, iv); + unsigned long iv = check_interval * HZ; if (mca_cfg.ignore_ce || !iv) return; + per_cpu(mce_next_interval, cpu) = iv; + t->expires = round_jiffies(jiffies + iv); - add_timer_on(t, smp_processor_id()); + add_timer_on(t, cpu); } static void __mcheck_cpu_init_timer(void) @@ -2272,8 +2272,10 @@ static int mce_device_create(unsigned int cpu) dev->release = &mce_device_release; err = device_register(dev); - if (err) + if (err) { + put_device(dev); return err; + } for (i = 0; mce_device_attrs[i]; i++) { err = device_create_file(dev, mce_device_attrs[i]); diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c index 4cfe045..fb6156f 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c @@ -6,7 +6,6 @@ */ #include <linux/gfp.h> -#include <linux/init.h> #include <linux/interrupt.h> #include <linux/percpu.h> #include <linux/sched.h> diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c index 1c044b1..a304298 100644 --- a/arch/x86/kernel/cpu/mcheck/p5.c +++ b/arch/x86/kernel/cpu/mcheck/p5.c @@ -5,7 +5,6 @@ #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/types.h> -#include <linux/init.h> #include <linux/smp.h> #include <asm/processor.h> diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c index e9a701a..7dc5564 100644 --- a/arch/x86/kernel/cpu/mcheck/winchip.c +++ b/arch/x86/kernel/cpu/mcheck/winchip.c @@ -5,7 +5,6 @@ #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/types.h> -#include <linux/init.h> #include <asm/processor.h> #include <asm/mce.h> diff 
--git a/arch/x86/kernel/cpu/microcode/Makefile b/arch/x86/kernel/cpu/microcode/Makefile new file mode 100644 index 0000000..285c854 --- /dev/null +++ b/arch/x86/kernel/cpu/microcode/Makefile @@ -0,0 +1,7 @@ +microcode-y := core.o +obj-$(CONFIG_MICROCODE) += microcode.o +microcode-$(CONFIG_MICROCODE_INTEL) += intel.o intel_lib.o +microcode-$(CONFIG_MICROCODE_AMD) += amd.o +obj-$(CONFIG_MICROCODE_EARLY) += core_early.o +obj-$(CONFIG_MICROCODE_INTEL_EARLY) += intel_early.o +obj-$(CONFIG_MICROCODE_AMD_EARLY) += amd_early.o diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/cpu/microcode/amd.c index c3d4cc9..8fffd84 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -182,10 +182,10 @@ int __apply_microcode_amd(struct microcode_amd *mc_amd) { u32 rev, dummy; - wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code); + native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code); /* verify patch application was successful */ - rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); + native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); if (rev != mc_amd->hdr.patch_id) return -1; @@ -332,6 +332,9 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover) patch->patch_id = mc_hdr->patch_id; patch->equiv_cpu = proc_id; + pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n", + __func__, patch->patch_id, proc_id); + /* ... and add to cache. */ update_cache(patch); @@ -390,9 +393,9 @@ enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size) if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) { struct ucode_patch *p = find_patch(smp_processor_id()); if (p) { - memset(amd_bsp_mpb, 0, MPB_MAX_SIZE); - memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data), - MPB_MAX_SIZE)); + memset(amd_ucode_patch, 0, PATCH_MAX_SIZE); + memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), + PATCH_MAX_SIZE)); } } #endif @@ -430,7 +433,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device, if (c->x86 >= 0x15) snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86); - if (request_firmware(&fw, (const char *)fw_name, device)) { + if (request_firmware_direct(&fw, (const char *)fw_name, device)) { pr_debug("failed to load file %s\n", fw_name); goto out; } diff --git a/arch/x86/kernel/microcode_amd_early.c b/arch/x86/kernel/cpu/microcode/amd_early.c index 6073104..8384c0f 100644 --- a/arch/x86/kernel/microcode_amd_early.c +++ b/arch/x86/kernel/cpu/microcode/amd_early.c @@ -2,6 +2,7 @@ * Copyright (C) 2013 Advanced Micro Devices, Inc. * * Author: Jacob Shin <jacob.shin@amd.com> + * Fixes: Borislav Petkov <bp@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -15,10 +16,18 @@ #include <asm/setup.h> #include <asm/microcode_amd.h> -static bool ucode_loaded; +/* + * This points to the current valid container of microcode patches which we will + * save from the initrd before jettisoning its contents. + */ +static u8 *container; +static size_t container_size; + static u32 ucode_new_rev; -static unsigned long ucode_offset; -static size_t ucode_size; +u8 amd_ucode_patch[PATCH_MAX_SIZE]; +static u16 this_equiv_id; + +struct cpio_data ucode_cpio; /* * Microcode patch container file is prepended to the initrd in cpio format. 
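(Editorial aside, not part of the patch: a minimal sketch of how a cpio-prepended microcode container is located in the initrd, mirroring find_ucode_in_initrd() in this hunk. It assumes the file-local ucode_path string and the UCODE_MAGIC check used elsewhere in amd_early.c, and it elides the 32-bit __pa_nodebug() address fixups; the helper name is hypothetical.)

#include <linux/earlycpio.h>

/* Hypothetical helper: find the AMD microcode container inside the initrd image */
static struct cpio_data locate_ucode_container(void *initrd, size_t initrd_size)
{
	long offset = 0;
	struct cpio_data cd;

	/* ucode_path names the container file inside the prepended cpio archive */
	cd = find_cpio_data(ucode_path, initrd, initrd_size, &offset);

	/* A valid AMD container starts with the UCODE_MAGIC marker */
	if (!cd.data || *(u32 *)cd.data != UCODE_MAGIC) {
		cd.data = NULL;
		cd.size = 0;
	}

	return cd;
}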
@@ -32,9 +41,6 @@ static struct cpio_data __init find_ucode_in_initrd(void) char *path; void *start; size_t size; - unsigned long *uoffset; - size_t *usize; - struct cpio_data cd; #ifdef CONFIG_X86_32 struct boot_params *p; @@ -47,30 +53,50 @@ static struct cpio_data __init find_ucode_in_initrd(void) path = (char *)__pa_nodebug(ucode_path); start = (void *)p->hdr.ramdisk_image; size = p->hdr.ramdisk_size; - uoffset = (unsigned long *)__pa_nodebug(&ucode_offset); - usize = (size_t *)__pa_nodebug(&ucode_size); #else path = ucode_path; start = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET); size = boot_params.hdr.ramdisk_size; - uoffset = &ucode_offset; - usize = &ucode_size; #endif - cd = find_cpio_data(path, start, size, &offset); - if (!cd.data) - return cd; + return find_cpio_data(path, start, size, &offset); +} - if (*(u32 *)cd.data != UCODE_MAGIC) { - cd.data = NULL; - cd.size = 0; - return cd; - } +static size_t compute_container_size(u8 *data, u32 total_size) +{ + size_t size = 0; + u32 *header = (u32 *)data; - *uoffset = (u8 *)cd.data - (u8 *)start; - *usize = cd.size; + if (header[0] != UCODE_MAGIC || + header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */ + header[2] == 0) /* size */ + return size; - return cd; + size = header[2] + CONTAINER_HDR_SZ; + total_size -= size; + data += size; + + while (total_size) { + u16 patch_size; + + header = (u32 *)data; + + if (header[0] != UCODE_UCODE_TYPE) + break; + + /* + * Sanity-check patch size. + */ + patch_size = header[1]; + if (patch_size > PATCH_MAX_SIZE) + break; + + size += patch_size + SECTION_HDR_SIZE; + data += patch_size + SECTION_HDR_SIZE; + total_size -= patch_size + SECTION_HDR_SIZE; + } + + return size; } /* @@ -85,23 +111,22 @@ static struct cpio_data __init find_ucode_in_initrd(void) static void apply_ucode_in_initrd(void *ucode, size_t size) { struct equiv_cpu_entry *eq; + size_t *cont_sz; u32 *header; - u8 *data; + u8 *data, **cont; u16 eq_id = 0; int offset, left; - u32 rev, eax; + u32 rev, eax, ebx, ecx, edx; u32 *new_rev; - unsigned long *uoffset; - size_t *usize; #ifdef CONFIG_X86_32 new_rev = (u32 *)__pa_nodebug(&ucode_new_rev); - uoffset = (unsigned long *)__pa_nodebug(&ucode_offset); - usize = (size_t *)__pa_nodebug(&ucode_size); + cont_sz = (size_t *)__pa_nodebug(&container_size); + cont = (u8 **)__pa_nodebug(&container); #else new_rev = &ucode_new_rev; - uoffset = &ucode_offset; - usize = &ucode_size; + cont_sz = &container_size; + cont = &container; #endif data = ucode; @@ -109,23 +134,37 @@ static void apply_ucode_in_initrd(void *ucode, size_t size) header = (u32 *)data; /* find equiv cpu table */ - - if (header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */ + if (header[0] != UCODE_MAGIC || + header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */ header[2] == 0) /* size */ return; - eax = cpuid_eax(0x00000001); + eax = 0x00000001; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); while (left > 0) { eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ); + *cont = data; + + /* Advance past the container header */ offset = header[2] + CONTAINER_HDR_SZ; data += offset; left -= offset; eq_id = find_equiv_id(eq, eax); - if (eq_id) + if (eq_id) { + this_equiv_id = eq_id; + *cont_sz = compute_container_size(*cont, left + offset); + + /* + * truncate how much we need to iterate over in the + * ucode update loop below + */ + left = *cont_sz - offset; break; + } /* * support multiple container files appended together. 
if this @@ -145,19 +184,18 @@ static void apply_ucode_in_initrd(void *ucode, size_t size) /* mark where the next microcode container file starts */ offset = data - (u8 *)ucode; - *uoffset += offset; - *usize -= offset; ucode = data; } if (!eq_id) { - *usize = 0; + *cont = NULL; + *cont_sz = 0; return; } /* find ucode and update if needed */ - rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); + native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); while (left > 0) { struct microcode_amd *mc; @@ -168,73 +206,83 @@ static void apply_ucode_in_initrd(void *ucode, size_t size) break; mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE); - if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) - if (__apply_microcode_amd(mc) == 0) { + + if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) { + + if (!__apply_microcode_amd(mc)) { rev = mc->hdr.patch_id; *new_rev = rev; + + /* save ucode patch */ + memcpy(amd_ucode_patch, mc, + min_t(u32, header[1], PATCH_MAX_SIZE)); } + } offset = header[1] + SECTION_HDR_SIZE; data += offset; left -= offset; } - - /* mark where this microcode container file ends */ - offset = *usize - (data - (u8 *)ucode); - *usize -= offset; - - if (!(*new_rev)) - *usize = 0; } void __init load_ucode_amd_bsp(void) { - struct cpio_data cd = find_ucode_in_initrd(); - if (!cd.data) + struct cpio_data cp; + void **data; + size_t *size; + +#ifdef CONFIG_X86_32 + data = (void **)__pa_nodebug(&ucode_cpio.data); + size = (size_t *)__pa_nodebug(&ucode_cpio.size); +#else + data = &ucode_cpio.data; + size = &ucode_cpio.size; +#endif + + cp = find_ucode_in_initrd(); + if (!cp.data) return; - apply_ucode_in_initrd(cd.data, cd.size); + *data = cp.data; + *size = cp.size; + + apply_ucode_in_initrd(cp.data, cp.size); } #ifdef CONFIG_X86_32 -u8 amd_bsp_mpb[MPB_MAX_SIZE]; - /* * On 32-bit, since AP's early load occurs before paging is turned on, we * cannot traverse cpu_equiv_table and pcache in kernel heap memory. So during * cold boot, AP will apply_ucode_in_initrd() just like the BSP. During - * save_microcode_in_initrd_amd() BSP's patch is copied to amd_bsp_mpb, which - * is used upon resume from suspend. + * save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch, + * which is used upon resume from suspend. */ void load_ucode_amd_ap(void) { struct microcode_amd *mc; - unsigned long *initrd; - unsigned long *uoffset; size_t *usize; - void *ucode; + void **ucode; - mc = (struct microcode_amd *)__pa(amd_bsp_mpb); + mc = (struct microcode_amd *)__pa(amd_ucode_patch); if (mc->hdr.patch_id && mc->hdr.processor_rev_id) { __apply_microcode_amd(mc); return; } - initrd = (unsigned long *)__pa(&initrd_start); - uoffset = (unsigned long *)__pa(&ucode_offset); - usize = (size_t *)__pa(&ucode_size); + ucode = (void *)__pa_nodebug(&container); + usize = (size_t *)__pa_nodebug(&container_size); - if (!*usize || !*initrd) + if (!*ucode || !*usize) return; - ucode = (void *)((unsigned long)__pa(*initrd) + *uoffset); - apply_ucode_in_initrd(ucode, *usize); + apply_ucode_in_initrd(*ucode, *usize); } static void __init collect_cpu_sig_on_bsp(void *arg) { unsigned int cpu = smp_processor_id(); struct ucode_cpu_info *uci = ucode_cpu_info + cpu; + uci->cpu_sig.sig = cpuid_eax(0x00000001); } #else @@ -242,36 +290,54 @@ void load_ucode_amd_ap(void) { unsigned int cpu = smp_processor_id(); struct ucode_cpu_info *uci = ucode_cpu_info + cpu; + struct equiv_cpu_entry *eq; + struct microcode_amd *mc; u32 rev, eax; + u16 eq_id; + + /* Exit if called on the BSP. 
*/ + if (!cpu) + return; + + if (!container) + return; rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); - eax = cpuid_eax(0x00000001); uci->cpu_sig.rev = rev; uci->cpu_sig.sig = eax; - if (cpu && !ucode_loaded) { - void *ucode; + eax = cpuid_eax(0x00000001); + eq = (struct equiv_cpu_entry *)(container + CONTAINER_HDR_SZ); - if (!ucode_size || !initrd_start) - return; + eq_id = find_equiv_id(eq, eax); + if (!eq_id) + return; + + if (eq_id == this_equiv_id) { + mc = (struct microcode_amd *)amd_ucode_patch; - ucode = (void *)(initrd_start + ucode_offset); - eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); - if (load_microcode_amd(eax, ucode, ucode_size) != UCODE_OK) + if (mc && rev < mc->hdr.patch_id) { + if (!__apply_microcode_amd(mc)) + ucode_new_rev = mc->hdr.patch_id; + } + + } else { + if (!ucode_cpio.data) return; - ucode_loaded = true; + /* + * AP has a different equivalence ID than BSP, looks like + * mixed-steppings silicon so go through the ucode blob anew. + */ + apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size); } - - apply_microcode_amd(cpu); } #endif int __init save_microcode_in_initrd_amd(void) { enum ucode_state ret; - void *ucode; u32 eax; #ifdef CONFIG_X86_32 @@ -280,22 +346,35 @@ int __init save_microcode_in_initrd_amd(void) if (!uci->cpu_sig.sig) smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1); + + /* + * Take into account the fact that the ramdisk might get relocated + * and therefore we need to recompute the container's position in + * virtual memory space. + */ + container = (u8 *)(__va((u32)relocated_ramdisk) + + ((u32)container - boot_params.hdr.ramdisk_image)); #endif if (ucode_new_rev) pr_info("microcode: updated early to new patch_level=0x%08x\n", ucode_new_rev); - if (ucode_loaded || !ucode_size || !initrd_start) - return 0; + if (!container) + return -EINVAL; - ucode = (void *)(initrd_start + ucode_offset); eax = cpuid_eax(0x00000001); eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); - ret = load_microcode_amd(eax, ucode, ucode_size); + ret = load_microcode_amd(eax, container, container_size); if (ret != UCODE_OK) return -EINVAL; - ucode_loaded = true; + /* + * This will be freed any msec now, stash patches for the current + * family and switch to patch cache for cpu hotplug, etc later. 
+ */ + container = NULL; + container_size = 0; + return 0; } diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/cpu/microcode/core.c index 15c9876..15c9876 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/cpu/microcode/core.c diff --git a/arch/x86/kernel/microcode_core_early.c b/arch/x86/kernel/cpu/microcode/core_early.c index be7f851..be7f851 100644 --- a/arch/x86/kernel/microcode_core_early.c +++ b/arch/x86/kernel/cpu/microcode/core_early.c diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 5fb2ceb..a276fa7 100644 --- a/arch/x86/kernel/microcode_intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -278,7 +278,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device, sprintf(name, "intel-ucode/%02x-%02x-%02x", c->x86, c->x86_model, c->x86_mask); - if (request_firmware(&firmware, name, device)) { + if (request_firmware_direct(&firmware, name, device)) { pr_debug("data file %s load failed\n", name); return UCODE_NFOUND; } diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c index 1575deb..18f7391 100644 --- a/arch/x86/kernel/microcode_intel_early.c +++ b/arch/x86/kernel/cpu/microcode/intel_early.c @@ -365,16 +365,6 @@ out: return state; } -#define native_rdmsr(msr, val1, val2) \ -do { \ - u64 __val = native_read_msr((msr)); \ - (void)((val1) = (u32)__val); \ - (void)((val2) = (u32)(__val >> 32)); \ -} while (0) - -#define native_wrmsr(msr, low, high) \ - native_write_msr(msr, low, high); - static int collect_cpu_info_early(struct ucode_cpu_info *uci) { unsigned int val[2]; diff --git a/arch/x86/kernel/microcode_intel_lib.c b/arch/x86/kernel/cpu/microcode/intel_lib.c index ce69320..ce69320 100644 --- a/arch/x86/kernel/microcode_intel_lib.c +++ b/arch/x86/kernel/cpu/microcode/intel_lib.c diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 8e13293..b886451 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1883,21 +1883,27 @@ static struct pmu pmu = { void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) { + struct cyc2ns_data *data; + userpg->cap_user_time = 0; userpg->cap_user_time_zero = 0; userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc; userpg->pmc_width = x86_pmu.cntval_bits; - if (!sched_clock_stable) + if (!sched_clock_stable()) return; + data = cyc2ns_read_begin(); + userpg->cap_user_time = 1; - userpg->time_mult = this_cpu_read(cyc2ns); - userpg->time_shift = CYC2NS_SCALE_FACTOR; - userpg->time_offset = this_cpu_read(cyc2ns_offset) - now; + userpg->time_mult = data->cyc2ns_mul; + userpg->time_shift = data->cyc2ns_shift; + userpg->time_offset = data->cyc2ns_offset - now; userpg->cap_user_time_zero = 1; - userpg->time_zero = this_cpu_read(cyc2ns_offset); + userpg->time_zero = data->cyc2ns_offset; + + cyc2ns_read_end(data); } /* diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index fd00bb2..c1a8618 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -262,11 +262,20 @@ struct cpu_hw_events { __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \ HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW) -#define EVENT_CONSTRAINT_END \ - EVENT_CONSTRAINT(0, 0, 0) +/* + * We define the end marker as having a weight of -1 + * to enable blacklisting of events using a counter bitmask + * of zero and thus a weight of zero. 
+ * The end marker has a weight that cannot possibly be + * obtained from counting the bits in the bitmask. + */ +#define EVENT_CONSTRAINT_END { .weight = -1 } +/* + * Check for end marker with weight == -1 + */ #define for_each_event_constraint(e, c) \ - for ((e) = (c); (e)->weight; (e)++) + for ((e) = (c); (e)->weight != -1; (e)++) /* * Extra registers for specific events. diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c index e09f0bf..4b8e4d3 100644 --- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c +++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c @@ -10,6 +10,7 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/ptrace.h> +#include <linux/syscore_ops.h> #include <asm/apic.h> @@ -816,6 +817,18 @@ out: return ret; } +static void ibs_eilvt_setup(void) +{ + /* + * Force LVT offset assignment for family 10h: The offsets are + * not assigned by the BIOS for this family, so the OS is + * responsible for doing it. If the OS assignment fails, fall + * back to BIOS settings and try to setup this. + */ + if (boot_cpu_data.x86 == 0x10) + force_ibs_eilvt_setup(); +} + static inline int get_ibs_lvt_offset(void) { u64 val; @@ -851,6 +864,36 @@ static void clear_APIC_ibs(void *dummy) setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); } +#ifdef CONFIG_PM + +static int perf_ibs_suspend(void) +{ + clear_APIC_ibs(NULL); + return 0; +} + +static void perf_ibs_resume(void) +{ + ibs_eilvt_setup(); + setup_APIC_ibs(NULL); +} + +static struct syscore_ops perf_ibs_syscore_ops = { + .resume = perf_ibs_resume, + .suspend = perf_ibs_suspend, +}; + +static void perf_ibs_pm_init(void) +{ + register_syscore_ops(&perf_ibs_syscore_ops); +} + +#else + +static inline void perf_ibs_pm_init(void) { } + +#endif + static int perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) { @@ -877,18 +920,12 @@ static __init int amd_ibs_init(void) if (!caps) return -ENODEV; /* ibs not supported by the cpu */ - /* - * Force LVT offset assignment for family 10h: The offsets are - * not assigned by the BIOS for this family, so the OS is - * responsible for doing it. If the OS assignment fails, fall - * back to BIOS settings and try to setup this. - */ - if (boot_cpu_data.x86 == 0x10) - force_ibs_eilvt_setup(); + ibs_eilvt_setup(); if (!ibs_eilvt_valid()) goto out; + perf_ibs_pm_init(); get_online_cpus(); ibs_caps = caps; /* make ibs_caps visible to other cpus: */ diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c new file mode 100644 index 0000000..5ad35ad --- /dev/null +++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c @@ -0,0 +1,679 @@ +/* + * perf_event_intel_rapl.c: support Intel RAPL energy consumption counters + * Copyright (C) 2013 Google, Inc., Stephane Eranian + * + * Intel RAPL interface is specified in the IA-32 Manual Vol3b + * section 14.7.1 (September 2013) + * + * RAPL provides more controls than just reporting energy consumption + * however here we only expose the 3 energy consumption free running + * counters (pp0, pkg, dram). + * + * Each of those counters increments in a power unit defined by the + * RAPL_POWER_UNIT MSR. On SandyBridge, this unit is 1/(2^16) Joules + * but it can vary. 
+ * + * Counter to rapl events mappings: + * + * pp0 counter: consumption of all physical cores (power plane 0) + * event: rapl_energy_cores + * perf code: 0x1 + * + * pkg counter: consumption of the whole processor package + * event: rapl_energy_pkg + * perf code: 0x2 + * + * dram counter: consumption of the dram domain (servers only) + * event: rapl_energy_dram + * perf code: 0x3 + * + * dram counter: consumption of the builtin-gpu domain (client only) + * event: rapl_energy_gpu + * perf code: 0x4 + * + * We manage those counters as free running (read-only). They may be + * use simultaneously by other tools, such as turbostat. + * + * The events only support system-wide mode counting. There is no + * sampling support because it does not make sense and is not + * supported by the RAPL hardware. + * + * Because we want to avoid floating-point operations in the kernel, + * the events are all reported in fixed point arithmetic (32.32). + * Tools must adjust the counts to convert them to Watts using + * the duration of the measurement. Tools may use a function such as + * ldexp(raw_count, -32); + */ +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/perf_event.h> +#include <asm/cpu_device_id.h> +#include "perf_event.h" + +/* + * RAPL energy status counters + */ +#define RAPL_IDX_PP0_NRG_STAT 0 /* all cores */ +#define INTEL_RAPL_PP0 0x1 /* pseudo-encoding */ +#define RAPL_IDX_PKG_NRG_STAT 1 /* entire package */ +#define INTEL_RAPL_PKG 0x2 /* pseudo-encoding */ +#define RAPL_IDX_RAM_NRG_STAT 2 /* DRAM */ +#define INTEL_RAPL_RAM 0x3 /* pseudo-encoding */ +#define RAPL_IDX_PP1_NRG_STAT 3 /* DRAM */ +#define INTEL_RAPL_PP1 0x4 /* pseudo-encoding */ + +/* Clients have PP0, PKG */ +#define RAPL_IDX_CLN (1<<RAPL_IDX_PP0_NRG_STAT|\ + 1<<RAPL_IDX_PKG_NRG_STAT|\ + 1<<RAPL_IDX_PP1_NRG_STAT) + +/* Servers have PP0, PKG, RAM */ +#define RAPL_IDX_SRV (1<<RAPL_IDX_PP0_NRG_STAT|\ + 1<<RAPL_IDX_PKG_NRG_STAT|\ + 1<<RAPL_IDX_RAM_NRG_STAT) + +/* + * event code: LSB 8 bits, passed in attr->config + * any other bit is reserved + */ +#define RAPL_EVENT_MASK 0xFFULL + +#define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format) \ +static ssize_t __rapl_##_var##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, \ + char *page) \ +{ \ + BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ + return sprintf(page, _format "\n"); \ +} \ +static struct kobj_attribute format_attr_##_var = \ + __ATTR(_name, 0444, __rapl_##_var##_show, NULL) + +#define RAPL_EVENT_DESC(_name, _config) \ +{ \ + .attr = __ATTR(_name, 0444, rapl_event_show, NULL), \ + .config = _config, \ +} + +#define RAPL_CNTR_WIDTH 32 /* 32-bit rapl counters */ + +struct rapl_pmu { + spinlock_t lock; + int hw_unit; /* 1/2^hw_unit Joule */ + int n_active; /* number of active events */ + struct list_head active_list; + struct pmu *pmu; /* pointer to rapl_pmu_class */ + ktime_t timer_interval; /* in ktime_t unit */ + struct hrtimer hrtimer; +}; + +static struct pmu rapl_pmu_class; +static cpumask_t rapl_cpu_mask; +static int rapl_cntr_mask; + +static DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu); +static DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu_to_free); + +static inline u64 rapl_read_counter(struct perf_event *event) +{ + u64 raw; + rdmsrl(event->hw.event_base, raw); + return raw; +} + +static inline u64 rapl_scale(u64 v) +{ + /* + * scale delta to smallest unit (1/2^32) + * users must then scale back: count * 1/(1e9*2^32) to get Joules + * or use ldexp(count, -32). 
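+ * For example, assuming hw_unit == 16 (2^-16 J per raw count), the
+ * shift below is 32 - 16 = 16, so each raw count becomes 2^16 units
+ * of 2^-32 J and ldexp(scaled_count, -32) gives back Joules.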
+ * Watts = Joules/Time delta + */ + return v << (32 - __get_cpu_var(rapl_pmu)->hw_unit); +} + +static u64 rapl_event_update(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u64 prev_raw_count, new_raw_count; + s64 delta, sdelta; + int shift = RAPL_CNTR_WIDTH; + +again: + prev_raw_count = local64_read(&hwc->prev_count); + rdmsrl(event->hw.event_base, new_raw_count); + + if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) { + cpu_relax(); + goto again; + } + + /* + * Now we have the new raw value and have updated the prev + * timestamp already. We can now calculate the elapsed delta + * (event-)time and add that to the generic event. + * + * Careful, not all hw sign-extends above the physical width + * of the count. + */ + delta = (new_raw_count << shift) - (prev_raw_count << shift); + delta >>= shift; + + sdelta = rapl_scale(delta); + + local64_add(sdelta, &event->count); + + return new_raw_count; +} + +static void rapl_start_hrtimer(struct rapl_pmu *pmu) +{ + __hrtimer_start_range_ns(&pmu->hrtimer, + pmu->timer_interval, 0, + HRTIMER_MODE_REL_PINNED, 0); +} + +static void rapl_stop_hrtimer(struct rapl_pmu *pmu) +{ + hrtimer_cancel(&pmu->hrtimer); +} + +static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer) +{ + struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); + struct perf_event *event; + unsigned long flags; + + if (!pmu->n_active) + return HRTIMER_NORESTART; + + spin_lock_irqsave(&pmu->lock, flags); + + list_for_each_entry(event, &pmu->active_list, active_entry) { + rapl_event_update(event); + } + + spin_unlock_irqrestore(&pmu->lock, flags); + + hrtimer_forward_now(hrtimer, pmu->timer_interval); + + return HRTIMER_RESTART; +} + +static void rapl_hrtimer_init(struct rapl_pmu *pmu) +{ + struct hrtimer *hr = &pmu->hrtimer; + + hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hr->function = rapl_hrtimer_handle; +} + +static void __rapl_pmu_event_start(struct rapl_pmu *pmu, + struct perf_event *event) +{ + if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) + return; + + event->hw.state = 0; + + list_add_tail(&event->active_entry, &pmu->active_list); + + local64_set(&event->hw.prev_count, rapl_read_counter(event)); + + pmu->n_active++; + if (pmu->n_active == 1) + rapl_start_hrtimer(pmu); +} + +static void rapl_pmu_event_start(struct perf_event *event, int mode) +{ + struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); + unsigned long flags; + + spin_lock_irqsave(&pmu->lock, flags); + __rapl_pmu_event_start(pmu, event); + spin_unlock_irqrestore(&pmu->lock, flags); +} + +static void rapl_pmu_event_stop(struct perf_event *event, int mode) +{ + struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); + struct hw_perf_event *hwc = &event->hw; + unsigned long flags; + + spin_lock_irqsave(&pmu->lock, flags); + + /* mark event as deactivated and stopped */ + if (!(hwc->state & PERF_HES_STOPPED)) { + WARN_ON_ONCE(pmu->n_active <= 0); + pmu->n_active--; + if (pmu->n_active == 0) + rapl_stop_hrtimer(pmu); + + list_del(&event->active_entry); + + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); + hwc->state |= PERF_HES_STOPPED; + } + + /* check if update of sw counter is necessary */ + if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { + /* + * Drain the remaining delta count out of a event + * that we are disabling: + */ + rapl_event_update(event); + hwc->state |= PERF_HES_UPTODATE; + } + + spin_unlock_irqrestore(&pmu->lock, flags); +} + +static int rapl_pmu_event_add(struct perf_event *event, int mode) +{ + struct 
rapl_pmu *pmu = __get_cpu_var(rapl_pmu); + struct hw_perf_event *hwc = &event->hw; + unsigned long flags; + + spin_lock_irqsave(&pmu->lock, flags); + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + if (mode & PERF_EF_START) + __rapl_pmu_event_start(pmu, event); + + spin_unlock_irqrestore(&pmu->lock, flags); + + return 0; +} + +static void rapl_pmu_event_del(struct perf_event *event, int flags) +{ + rapl_pmu_event_stop(event, PERF_EF_UPDATE); +} + +static int rapl_pmu_event_init(struct perf_event *event) +{ + u64 cfg = event->attr.config & RAPL_EVENT_MASK; + int bit, msr, ret = 0; + + /* only look at RAPL events */ + if (event->attr.type != rapl_pmu_class.type) + return -ENOENT; + + /* check only supported bits are set */ + if (event->attr.config & ~RAPL_EVENT_MASK) + return -EINVAL; + + /* + * check event is known (determines counter) + */ + switch (cfg) { + case INTEL_RAPL_PP0: + bit = RAPL_IDX_PP0_NRG_STAT; + msr = MSR_PP0_ENERGY_STATUS; + break; + case INTEL_RAPL_PKG: + bit = RAPL_IDX_PKG_NRG_STAT; + msr = MSR_PKG_ENERGY_STATUS; + break; + case INTEL_RAPL_RAM: + bit = RAPL_IDX_RAM_NRG_STAT; + msr = MSR_DRAM_ENERGY_STATUS; + break; + case INTEL_RAPL_PP1: + bit = RAPL_IDX_PP1_NRG_STAT; + msr = MSR_PP1_ENERGY_STATUS; + break; + default: + return -EINVAL; + } + /* check event supported */ + if (!(rapl_cntr_mask & (1 << bit))) + return -EINVAL; + + /* unsupported modes and filters */ + if (event->attr.exclude_user || + event->attr.exclude_kernel || + event->attr.exclude_hv || + event->attr.exclude_idle || + event->attr.exclude_host || + event->attr.exclude_guest || + event->attr.sample_period) /* no sampling */ + return -EINVAL; + + /* must be done before validate_group */ + event->hw.event_base = msr; + event->hw.config = cfg; + event->hw.idx = bit; + + return ret; +} + +static void rapl_pmu_event_read(struct perf_event *event) +{ + rapl_event_update(event); +} + +static ssize_t rapl_get_attr_cpumask(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &rapl_cpu_mask); + + buf[n++] = '\n'; + buf[n] = '\0'; + return n; +} + +static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL); + +static struct attribute *rapl_pmu_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static struct attribute_group rapl_pmu_attr_group = { + .attrs = rapl_pmu_attrs, +}; + +EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01"); +EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02"); +EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03"); +EVENT_ATTR_STR(energy-gpu , rapl_gpu, "event=0x04"); + +EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules"); +EVENT_ATTR_STR(energy-pkg.unit , rapl_pkg_unit, "Joules"); +EVENT_ATTR_STR(energy-ram.unit , rapl_ram_unit, "Joules"); +EVENT_ATTR_STR(energy-gpu.unit , rapl_gpu_unit, "Joules"); + +/* + * we compute in 0.23 nJ increments regardless of MSR + */ +EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10"); +EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890625e-10"); +EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890625e-10"); +EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10"); + +static struct attribute *rapl_events_srv_attr[] = { + EVENT_PTR(rapl_cores), + EVENT_PTR(rapl_pkg), + EVENT_PTR(rapl_ram), + + EVENT_PTR(rapl_cores_unit), + EVENT_PTR(rapl_pkg_unit), + EVENT_PTR(rapl_ram_unit), + + EVENT_PTR(rapl_cores_scale), + EVENT_PTR(rapl_pkg_scale), + EVENT_PTR(rapl_ram_scale), + NULL, 
+}; + +static struct attribute *rapl_events_cln_attr[] = { + EVENT_PTR(rapl_cores), + EVENT_PTR(rapl_pkg), + EVENT_PTR(rapl_gpu), + + EVENT_PTR(rapl_cores_unit), + EVENT_PTR(rapl_pkg_unit), + EVENT_PTR(rapl_gpu_unit), + + EVENT_PTR(rapl_cores_scale), + EVENT_PTR(rapl_pkg_scale), + EVENT_PTR(rapl_gpu_scale), + NULL, +}; + +static struct attribute_group rapl_pmu_events_group = { + .name = "events", + .attrs = NULL, /* patched at runtime */ +}; + +DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7"); +static struct attribute *rapl_formats_attr[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group rapl_pmu_format_group = { + .name = "format", + .attrs = rapl_formats_attr, +}; + +const struct attribute_group *rapl_attr_groups[] = { + &rapl_pmu_attr_group, + &rapl_pmu_format_group, + &rapl_pmu_events_group, + NULL, +}; + +static struct pmu rapl_pmu_class = { + .attr_groups = rapl_attr_groups, + .task_ctx_nr = perf_invalid_context, /* system-wide only */ + .event_init = rapl_pmu_event_init, + .add = rapl_pmu_event_add, /* must have */ + .del = rapl_pmu_event_del, /* must have */ + .start = rapl_pmu_event_start, + .stop = rapl_pmu_event_stop, + .read = rapl_pmu_event_read, +}; + +static void rapl_cpu_exit(int cpu) +{ + struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); + int i, phys_id = topology_physical_package_id(cpu); + int target = -1; + + /* find a new cpu on same package */ + for_each_online_cpu(i) { + if (i == cpu) + continue; + if (phys_id == topology_physical_package_id(i)) { + target = i; + break; + } + } + /* + * clear cpu from cpumask + * if was set in cpumask and still some cpu on package, + * then move to new cpu + */ + if (cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask) && target >= 0) + cpumask_set_cpu(target, &rapl_cpu_mask); + + WARN_ON(cpumask_empty(&rapl_cpu_mask)); + /* + * migrate events and context to new cpu + */ + if (target >= 0) + perf_pmu_migrate_context(pmu->pmu, cpu, target); + + /* cancel overflow polling timer for CPU */ + rapl_stop_hrtimer(pmu); +} + +static void rapl_cpu_init(int cpu) +{ + int i, phys_id = topology_physical_package_id(cpu); + + /* check if phys_is is already covered */ + for_each_cpu(i, &rapl_cpu_mask) { + if (phys_id == topology_physical_package_id(i)) + return; + } + /* was not found, so add it */ + cpumask_set_cpu(cpu, &rapl_cpu_mask); +} + +static int rapl_cpu_prepare(int cpu) +{ + struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); + int phys_id = topology_physical_package_id(cpu); + u64 ms; + + if (pmu) + return 0; + + if (phys_id < 0) + return -1; + + pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu)); + if (!pmu) + return -1; + + spin_lock_init(&pmu->lock); + + INIT_LIST_HEAD(&pmu->active_list); + + /* + * grab power unit as: 1/2^unit Joules + * + * we cache in local PMU instance + */ + rdmsrl(MSR_RAPL_POWER_UNIT, pmu->hw_unit); + pmu->hw_unit = (pmu->hw_unit >> 8) & 0x1FULL; + pmu->pmu = &rapl_pmu_class; + + /* + * use reference of 200W for scaling the timeout + * to avoid missing counter overflows. 
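+ * Worked example, assuming hw_unit == 16: the 32-bit counter wraps
+ * after 2^32 * 2^-16 J = 65536 J, i.e. roughly 328 seconds at 200 W;
+ * the formula below gives 5 * 2^15 = 163840 ms, about half of that.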
+ * 200W = 200 Joules/sec + * divide interval by 2 to avoid lockstep (2 * 100) + * if hw unit is 32, then we use 2 ms 1/200/2 + */ + if (pmu->hw_unit < 32) + ms = (1000 / (2 * 100)) * (1ULL << (32 - pmu->hw_unit - 1)); + else + ms = 2; + + pmu->timer_interval = ms_to_ktime(ms); + + rapl_hrtimer_init(pmu); + + /* set RAPL pmu for this cpu for now */ + per_cpu(rapl_pmu, cpu) = pmu; + per_cpu(rapl_pmu_to_free, cpu) = NULL; + + return 0; +} + +static void rapl_cpu_kfree(int cpu) +{ + struct rapl_pmu *pmu = per_cpu(rapl_pmu_to_free, cpu); + + kfree(pmu); + + per_cpu(rapl_pmu_to_free, cpu) = NULL; +} + +static int rapl_cpu_dying(int cpu) +{ + struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); + + if (!pmu) + return 0; + + per_cpu(rapl_pmu, cpu) = NULL; + + per_cpu(rapl_pmu_to_free, cpu) = pmu; + + return 0; +} + +static int rapl_cpu_notifier(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (long)hcpu; + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_UP_PREPARE: + rapl_cpu_prepare(cpu); + break; + case CPU_STARTING: + rapl_cpu_init(cpu); + break; + case CPU_UP_CANCELED: + case CPU_DYING: + rapl_cpu_dying(cpu); + break; + case CPU_ONLINE: + case CPU_DEAD: + rapl_cpu_kfree(cpu); + break; + case CPU_DOWN_PREPARE: + rapl_cpu_exit(cpu); + break; + default: + break; + } + + return NOTIFY_OK; +} + +static const struct x86_cpu_id rapl_cpu_match[] = { + [0] = { .vendor = X86_VENDOR_INTEL, .family = 6 }, + [1] = {}, +}; + +static int __init rapl_pmu_init(void) +{ + struct rapl_pmu *pmu; + int cpu, ret; + + /* + * check for Intel processor family 6 + */ + if (!x86_match_cpu(rapl_cpu_match)) + return 0; + + /* check supported CPU */ + switch (boot_cpu_data.x86_model) { + case 42: /* Sandy Bridge */ + case 58: /* Ivy Bridge */ + case 60: /* Haswell */ + case 69: /* Haswell-Celeron */ + rapl_cntr_mask = RAPL_IDX_CLN; + rapl_pmu_events_group.attrs = rapl_events_cln_attr; + break; + case 45: /* Sandy Bridge-EP */ + case 62: /* IvyTown */ + rapl_cntr_mask = RAPL_IDX_SRV; + rapl_pmu_events_group.attrs = rapl_events_srv_attr; + break; + + default: + /* unsupported */ + return 0; + } + get_online_cpus(); + + for_each_online_cpu(cpu) { + rapl_cpu_prepare(cpu); + rapl_cpu_init(cpu); + } + + perf_cpu_notifier(rapl_cpu_notifier); + + ret = perf_pmu_register(&rapl_pmu_class, "power", -1); + if (WARN_ON(ret)) { + pr_info("RAPL PMU detected, registration failed (%d), RAPL PMU disabled\n", ret); + put_online_cpus(); + return -1; + } + + pmu = __get_cpu_var(rapl_pmu); + + pr_info("RAPL PMU detected, hw unit 2^-%d Joules," + " API unit is 2^-32 Joules," + " %d fixed counters" + " %llu ms ovfl timer\n", + pmu->hw_unit, + hweight32(rapl_cntr_mask), + ktime_to_ms(pmu->timer_interval)); + + put_online_cpus(); + + return 0; +} +device_initcall(rapl_pmu_init); diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c index 88db010..384df51 100644 --- a/arch/x86/kernel/cpu/rdrand.c +++ b/arch/x86/kernel/cpu/rdrand.c @@ -31,20 +31,6 @@ static int __init x86_rdrand_setup(char *s) } __setup("nordrand", x86_rdrand_setup); -/* We can't use arch_get_random_long() here since alternatives haven't run */ -static inline int rdrand_long(unsigned long *v) -{ - int ok; - asm volatile("1: " RDRAND_LONG "\n\t" - "jc 2f\n\t" - "decl %0\n\t" - "jnz 1b\n\t" - "2:" - : "=r" (ok), "=a" (*v) - : "0" (RDRAND_RETRY_LOOPS)); - return ok; -} - /* * Force a reseed cycle; we are architecturally guaranteed a reseed * after no more than 512 128-bit chunks of random data. 
This also diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c index aa0430d..3fa0e5a 100644 --- a/arch/x86/kernel/cpu/transmeta.c +++ b/arch/x86/kernel/cpu/transmeta.c @@ -1,6 +1,5 @@ #include <linux/kernel.h> #include <linux/mm.h> -#include <linux/init.h> #include <asm/processor.h> #include <asm/msr.h> #include "cpu.h" diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c index 75c5ad5..ef9c2a0 100644 --- a/arch/x86/kernel/cpu/umc.c +++ b/arch/x86/kernel/cpu/umc.c @@ -1,5 +1,4 @@ #include <linux/kernel.h> -#include <linux/init.h> #include <asm/processor.h> #include "cpu.h" diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index 18677a9..a57902e 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -7,7 +7,6 @@ * */ -#include <linux/init.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/smp.h> diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c index 5d3fe8d..f6dfd93 100644 --- a/arch/x86/kernel/doublefault.c +++ b/arch/x86/kernel/doublefault.c @@ -1,6 +1,5 @@ #include <linux/mm.h> #include <linux/sched.h> -#include <linux/init.h> #include <linux/init_task.h> #include <linux/fs.h> diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 51e2988..a2a4f46 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -1082,7 +1082,7 @@ ENTRY(ftrace_caller) pushl $0 /* Pass NULL as regs pointer */ movl 4*4(%esp), %eax movl 0x4(%ebp), %edx - leal function_trace_op, %ecx + movl function_trace_op, %ecx subl $MCOUNT_INSN_SIZE, %eax .globl ftrace_call @@ -1140,7 +1140,7 @@ ENTRY(ftrace_regs_caller) movl 12*4(%esp), %eax /* Load ip (1st parameter) */ subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */ movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */ - leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ + movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ pushl %esp /* Save pt_regs as 4th parameter */ GLOBAL(ftrace_regs_call) diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index e21b078..1e96c36 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -88,7 +88,7 @@ END(function_hook) MCOUNT_SAVE_FRAME \skip /* Load the ftrace_ops into the 3rd parameter */ - leaq function_trace_op, %rdx + movq function_trace_op(%rip), %rdx /* Load ip into the first parameter */ movq RIP(%rsp), %rdi diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index f66ff16..a67b47c 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -38,7 +38,6 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> -#include <linux/init.h> #include <linux/smp.h> #include <asm/hw_breakpoint.h> diff --git a/arch/x86/kernel/iosf_mbi.c b/arch/x86/kernel/iosf_mbi.c new file mode 100644 index 0000000..c3aae66 --- /dev/null +++ b/arch/x86/kernel/iosf_mbi.c @@ -0,0 +1,226 @@ +/* + * IOSF-SB MailBox Interface Driver + * Copyright (c) 2013, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * + * The IOSF-SB is a fabric bus available on Atom based SOC's that uses a + * mailbox interface (MBI) to communicate with mutiple devices. This + * driver implements access to this interface for those platforms that can + * enumerate the device using PCI. + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/spinlock.h> +#include <linux/pci.h> + +#include <asm/iosf_mbi.h> + +static DEFINE_SPINLOCK(iosf_mbi_lock); + +static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset) +{ + return (op << 24) | (port << 16) | (offset << 8) | MBI_ENABLE; +} + +static struct pci_dev *mbi_pdev; /* one mbi device */ + +static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr) +{ + int result; + + if (!mbi_pdev) + return -ENODEV; + + if (mcrx) { + result = pci_write_config_dword(mbi_pdev, MBI_MCRX_OFFSET, + mcrx); + if (result < 0) + goto fail_read; + } + + result = pci_write_config_dword(mbi_pdev, MBI_MCR_OFFSET, mcr); + if (result < 0) + goto fail_read; + + result = pci_read_config_dword(mbi_pdev, MBI_MDR_OFFSET, mdr); + if (result < 0) + goto fail_read; + + return 0; + +fail_read: + dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result); + return result; +} + +static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr) +{ + int result; + + if (!mbi_pdev) + return -ENODEV; + + result = pci_write_config_dword(mbi_pdev, MBI_MDR_OFFSET, mdr); + if (result < 0) + goto fail_write; + + if (mcrx) { + result = pci_write_config_dword(mbi_pdev, MBI_MCRX_OFFSET, + mcrx); + if (result < 0) + goto fail_write; + } + + result = pci_write_config_dword(mbi_pdev, MBI_MCR_OFFSET, mcr); + if (result < 0) + goto fail_write; + + return 0; + +fail_write: + dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result); + return result; +} + +int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr) +{ + u32 mcr, mcrx; + unsigned long flags; + int ret; + + /*Access to the GFX unit is handled by GPU code */ + if (port == BT_MBI_UNIT_GFX) { + WARN_ON(1); + return -EPERM; + } + + mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO); + mcrx = offset & MBI_MASK_HI; + + spin_lock_irqsave(&iosf_mbi_lock, flags); + ret = iosf_mbi_pci_read_mdr(mcrx, mcr, mdr); + spin_unlock_irqrestore(&iosf_mbi_lock, flags); + + return ret; +} +EXPORT_SYMBOL(iosf_mbi_read); + +int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr) +{ + u32 mcr, mcrx; + unsigned long flags; + int ret; + + /*Access to the GFX unit is handled by GPU code */ + if (port == BT_MBI_UNIT_GFX) { + WARN_ON(1); + return -EPERM; + } + + mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO); + mcrx = offset & MBI_MASK_HI; + + spin_lock_irqsave(&iosf_mbi_lock, flags); + ret = iosf_mbi_pci_write_mdr(mcrx, mcr, mdr); + spin_unlock_irqrestore(&iosf_mbi_lock, flags); + + return ret; +} +EXPORT_SYMBOL(iosf_mbi_write); + +int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask) +{ + u32 mcr, mcrx; + u32 value; + unsigned long flags; + int ret; + + /*Access to the GFX unit is handled by GPU code */ + if (port == BT_MBI_UNIT_GFX) { + WARN_ON(1); + return -EPERM; + } + + mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO); + mcrx = offset & MBI_MASK_HI; + + spin_lock_irqsave(&iosf_mbi_lock, flags); + + /* Read current mdr value */ + ret = iosf_mbi_pci_read_mdr(mcrx, mcr & MBI_RD_MASK, &value); + if (ret < 0) { + spin_unlock_irqrestore(&iosf_mbi_lock, flags); + return ret; + } + + /* Apply mask */ + value &= ~mask; + mdr &= mask; + value |= mdr; + + /* Write back */ + ret = 
iosf_mbi_pci_write_mdr(mcrx, mcr | MBI_WR_MASK, value); + + spin_unlock_irqrestore(&iosf_mbi_lock, flags); + + return ret; +} +EXPORT_SYMBOL(iosf_mbi_modify); + +static int iosf_mbi_probe(struct pci_dev *pdev, + const struct pci_device_id *unused) +{ + int ret; + + ret = pci_enable_device(pdev); + if (ret < 0) { + dev_err(&pdev->dev, "error: could not enable device\n"); + return ret; + } + + mbi_pdev = pci_dev_get(pdev); + return 0; +} + +static DEFINE_PCI_DEVICE_TABLE(iosf_mbi_pci_ids) = { + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0F00) }, + { 0, }, +}; +MODULE_DEVICE_TABLE(pci, iosf_mbi_pci_ids); + +static struct pci_driver iosf_mbi_pci_driver = { + .name = "iosf_mbi_pci", + .probe = iosf_mbi_probe, + .id_table = iosf_mbi_pci_ids, +}; + +static int __init iosf_mbi_init(void) +{ + return pci_register_driver(&iosf_mbi_pci_driver); +} + +static void __exit iosf_mbi_exit(void) +{ + pci_unregister_driver(&iosf_mbi_pci_driver); + if (mbi_pdev) { + pci_dev_put(mbi_pdev); + mbi_pdev = NULL; + } +} + +module_init(iosf_mbi_init); +module_exit(iosf_mbi_exit); + +MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>"); +MODULE_DESCRIPTION("IOSF Mailbox Interface accessor"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 22d0687..dbb6087 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -193,9 +193,13 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs) if (!handle_irq(irq, regs)) { ack_APIC_irq(); - if (printk_ratelimit()) - pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n", - __func__, smp_processor_id(), vector, irq); + if (irq != VECTOR_RETRIGGERED) { + pr_emerg_ratelimited("%s: %d.%d No irq handler for vector (irq %d)\n", + __func__, smp_processor_id(), + vector, irq); + } else { + __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED); + } } irq_exit(); @@ -262,6 +266,76 @@ __visible void smp_trace_x86_platform_ipi(struct pt_regs *regs) EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); #ifdef CONFIG_HOTPLUG_CPU +/* + * This cpu is going to be removed and its vectors migrated to the remaining + * online cpus. Check to see if there are enough vectors in the remaining cpus. + * This function is protected by stop_machine(). + */ +int check_irq_vectors_for_cpu_disable(void) +{ + int irq, cpu; + unsigned int this_cpu, vector, this_count, count; + struct irq_desc *desc; + struct irq_data *data; + struct cpumask affinity_new, online_new; + + this_cpu = smp_processor_id(); + cpumask_copy(&online_new, cpu_online_mask); + cpu_clear(this_cpu, online_new); + + this_count = 0; + for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { + irq = __this_cpu_read(vector_irq[vector]); + if (irq >= 0) { + desc = irq_to_desc(irq); + data = irq_desc_get_irq_data(desc); + cpumask_copy(&affinity_new, data->affinity); + cpu_clear(this_cpu, affinity_new); + + /* Do not count inactive or per-cpu irqs. */ + if (!irq_has_action(irq) || irqd_is_per_cpu(data)) + continue; + + /* + * A single irq may be mapped to multiple + * cpu's vector_irq[] (for example IOAPIC cluster + * mode). In this case we have two + * possibilities: + * + * 1) the resulting affinity mask is empty; that is + * this the down'd cpu is the last cpu in the irq's + * affinity mask, or + * + * 2) the resulting affinity mask is no longer + * a subset of the online cpus but the affinity + * mask is not zero; that is the down'd cpu is the + * last online cpu in a user set affinity mask. 
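+ *
+ * For instance, an irq whose affinity mask contains only the
+ * outgoing cpu hits case 1); an irq pinned by the user to
+ * {2,3} with cpu 2 already offline hits case 2) when cpu 3
+ * goes down. Either way the vector must be migrated, so it is
+ * counted against the capacity of the remaining cpus.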
+ */ + if (cpumask_empty(&affinity_new) || + !cpumask_subset(&affinity_new, &online_new)) + this_count++; + } + } + + count = 0; + for_each_online_cpu(cpu) { + if (cpu == this_cpu) + continue; + for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; + vector++) { + if (per_cpu(vector_irq, cpu)[vector] < 0) + count++; + } + } + + if (count < this_count) { + pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n", + this_cpu, this_count, count); + return -ERANGE; + } + return 0; +} + /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ void fixup_irqs(void) { @@ -344,7 +418,7 @@ void fixup_irqs(void) for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { unsigned int irr; - if (__this_cpu_read(vector_irq[vector]) < 0) + if (__this_cpu_read(vector_irq[vector]) <= VECTOR_UNDEFINED) continue; irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); @@ -355,11 +429,14 @@ void fixup_irqs(void) data = irq_desc_get_irq_data(desc); chip = irq_data_get_irq_chip(data); raw_spin_lock(&desc->lock); - if (chip->irq_retrigger) + if (chip->irq_retrigger) { chip->irq_retrigger(data); + __this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED); + } raw_spin_unlock(&desc->lock); } - __this_cpu_write(vector_irq[vector], -1); + if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED) + __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED); } } #endif diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index a2a1fbc..7f50156 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c @@ -52,7 +52,7 @@ static struct irqaction irq2 = { }; DEFINE_PER_CPU(vector_irq_t, vector_irq) = { - [0 ... NR_VECTORS - 1] = -1, + [0 ... NR_VECTORS - 1] = VECTOR_UNDEFINED, }; int vector_used_by_percpu_irq(unsigned int vector) @@ -60,7 +60,7 @@ int vector_used_by_percpu_irq(unsigned int vector) int cpu; for_each_online_cpu(cpu) { - if (per_cpu(vector_irq, cpu)[vector] != -1) + if (per_cpu(vector_irq, cpu)[vector] > VECTOR_UNDEFINED) return 1; } diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 836f832..7ec1d5f 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -39,7 +39,6 @@ #include <linux/sched.h> #include <linux/delay.h> #include <linux/kgdb.h> -#include <linux/init.h> #include <linux/smp.h> #include <linux/nmi.h> #include <linux/hw_breakpoint.h> diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c new file mode 100644 index 0000000..c2bedae --- /dev/null +++ b/arch/x86/kernel/ksysfs.c @@ -0,0 +1,340 @@ +/* + * Architecture specific sysfs attributes in /sys/kernel + * + * Copyright (C) 2007, Intel Corp. + * Huang Ying <ying.huang@intel.com> + * Copyright (C) 2013, 2013 Red Hat, Inc. 
+ * Dave Young <dyoung@redhat.com> + * + * This file is released under the GPLv2 + */ + +#include <linux/kobject.h> +#include <linux/string.h> +#include <linux/sysfs.h> +#include <linux/init.h> +#include <linux/stat.h> +#include <linux/slab.h> +#include <linux/mm.h> + +#include <asm/io.h> +#include <asm/setup.h> + +static ssize_t version_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "0x%04x\n", boot_params.hdr.version); +} + +static struct kobj_attribute boot_params_version_attr = __ATTR_RO(version); + +static ssize_t boot_params_data_read(struct file *fp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + memcpy(buf, (void *)&boot_params + off, count); + return count; +} + +static struct bin_attribute boot_params_data_attr = { + .attr = { + .name = "data", + .mode = S_IRUGO, + }, + .read = boot_params_data_read, + .size = sizeof(boot_params), +}; + +static struct attribute *boot_params_version_attrs[] = { + &boot_params_version_attr.attr, + NULL, +}; + +static struct bin_attribute *boot_params_data_attrs[] = { + &boot_params_data_attr, + NULL, +}; + +static struct attribute_group boot_params_attr_group = { + .attrs = boot_params_version_attrs, + .bin_attrs = boot_params_data_attrs, +}; + +static int kobj_to_setup_data_nr(struct kobject *kobj, int *nr) +{ + const char *name; + + name = kobject_name(kobj); + return kstrtoint(name, 10, nr); +} + +static int get_setup_data_paddr(int nr, u64 *paddr) +{ + int i = 0; + struct setup_data *data; + u64 pa_data = boot_params.hdr.setup_data; + + while (pa_data) { + if (nr == i) { + *paddr = pa_data; + return 0; + } + data = ioremap_cache(pa_data, sizeof(*data)); + if (!data) + return -ENOMEM; + + pa_data = data->next; + iounmap(data); + i++; + } + return -EINVAL; +} + +static int __init get_setup_data_size(int nr, size_t *size) +{ + int i = 0; + struct setup_data *data; + u64 pa_data = boot_params.hdr.setup_data; + + while (pa_data) { + data = ioremap_cache(pa_data, sizeof(*data)); + if (!data) + return -ENOMEM; + if (nr == i) { + *size = data->len; + iounmap(data); + return 0; + } + + pa_data = data->next; + iounmap(data); + i++; + } + return -EINVAL; +} + +static ssize_t type_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + int nr, ret; + u64 paddr; + struct setup_data *data; + + ret = kobj_to_setup_data_nr(kobj, &nr); + if (ret) + return ret; + + ret = get_setup_data_paddr(nr, &paddr); + if (ret) + return ret; + data = ioremap_cache(paddr, sizeof(*data)); + if (!data) + return -ENOMEM; + + ret = sprintf(buf, "0x%x\n", data->type); + iounmap(data); + return ret; +} + +static ssize_t setup_data_data_read(struct file *fp, + struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, + loff_t off, size_t count) +{ + int nr, ret = 0; + u64 paddr; + struct setup_data *data; + void *p; + + ret = kobj_to_setup_data_nr(kobj, &nr); + if (ret) + return ret; + + ret = get_setup_data_paddr(nr, &paddr); + if (ret) + return ret; + data = ioremap_cache(paddr, sizeof(*data)); + if (!data) + return -ENOMEM; + + if (off > data->len) { + ret = -EINVAL; + goto out; + } + + if (count > data->len - off) + count = data->len - off; + + if (!count) + goto out; + + ret = count; + p = ioremap_cache(paddr + sizeof(*data), data->len); + if (!p) { + ret = -ENOMEM; + goto out; + } + memcpy(buf, p + off, count); + iounmap(p); +out: + iounmap(data); + return ret; +} + +static struct kobj_attribute type_attr = __ATTR_RO(type); + +static struct 
bin_attribute data_attr = { + .attr = { + .name = "data", + .mode = S_IRUGO, + }, + .read = setup_data_data_read, +}; + +static struct attribute *setup_data_type_attrs[] = { + &type_attr.attr, + NULL, +}; + +static struct bin_attribute *setup_data_data_attrs[] = { + &data_attr, + NULL, +}; + +static struct attribute_group setup_data_attr_group = { + .attrs = setup_data_type_attrs, + .bin_attrs = setup_data_data_attrs, +}; + +static int __init create_setup_data_node(struct kobject *parent, + struct kobject **kobjp, int nr) +{ + int ret = 0; + size_t size; + struct kobject *kobj; + char name[16]; /* should be enough for setup_data nodes numbers */ + snprintf(name, 16, "%d", nr); + + kobj = kobject_create_and_add(name, parent); + if (!kobj) + return -ENOMEM; + + ret = get_setup_data_size(nr, &size); + if (ret) + goto out_kobj; + + data_attr.size = size; + ret = sysfs_create_group(kobj, &setup_data_attr_group); + if (ret) + goto out_kobj; + *kobjp = kobj; + + return 0; +out_kobj: + kobject_put(kobj); + return ret; +} + +static void __init cleanup_setup_data_node(struct kobject *kobj) +{ + sysfs_remove_group(kobj, &setup_data_attr_group); + kobject_put(kobj); +} + +static int __init get_setup_data_total_num(u64 pa_data, int *nr) +{ + int ret = 0; + struct setup_data *data; + + *nr = 0; + while (pa_data) { + *nr += 1; + data = ioremap_cache(pa_data, sizeof(*data)); + if (!data) { + ret = -ENOMEM; + goto out; + } + pa_data = data->next; + iounmap(data); + } + +out: + return ret; +} + +static int __init create_setup_data_nodes(struct kobject *parent) +{ + struct kobject *setup_data_kobj, **kobjp; + u64 pa_data; + int i, j, nr, ret = 0; + + pa_data = boot_params.hdr.setup_data; + if (!pa_data) + return 0; + + setup_data_kobj = kobject_create_and_add("setup_data", parent); + if (!setup_data_kobj) { + ret = -ENOMEM; + goto out; + } + + ret = get_setup_data_total_num(pa_data, &nr); + if (ret) + goto out_setup_data_kobj; + + kobjp = kmalloc(sizeof(*kobjp) * nr, GFP_KERNEL); + if (!kobjp) { + ret = -ENOMEM; + goto out_setup_data_kobj; + } + + for (i = 0; i < nr; i++) { + ret = create_setup_data_node(setup_data_kobj, kobjp + i, i); + if (ret) + goto out_clean_nodes; + } + + kfree(kobjp); + return 0; + +out_clean_nodes: + for (j = i - 1; j > 0; j--) + cleanup_setup_data_node(*(kobjp + j)); + kfree(kobjp); +out_setup_data_kobj: + kobject_put(setup_data_kobj); +out: + return ret; +} + +static int __init boot_params_ksysfs_init(void) +{ + int ret; + struct kobject *boot_params_kobj; + + boot_params_kobj = kobject_create_and_add("boot_params", + kernel_kobj); + if (!boot_params_kobj) { + ret = -ENOMEM; + goto out; + } + + ret = sysfs_create_group(boot_params_kobj, &boot_params_attr_group); + if (ret) + goto out_boot_params_kobj; + + ret = create_setup_data_nodes(boot_params_kobj); + if (ret) + goto out_create_group; + + return 0; +out_create_group: + sysfs_remove_group(boot_params_kobj, &boot_params_attr_group); +out_boot_params_kobj: + kobject_put(boot_params_kobj); +out: + return ret; +} + +arch_initcall(boot_params_ksysfs_init); diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index 5b19e4d..1667b1d 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c @@ -9,7 +9,6 @@ #include <linux/mm.h> #include <linux/kexec.h> #include <linux/delay.h> -#include <linux/init.h> #include <linux/numa.h> #include <linux/ftrace.h> #include <linux/suspend.h> diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c index 871be4a..da15918 
100644 --- a/arch/x86/kernel/pci-nommu.c +++ b/arch/x86/kernel/pci-nommu.c @@ -3,7 +3,6 @@ #include <linux/dma-mapping.h> #include <linux/scatterlist.h> #include <linux/string.h> -#include <linux/init.h> #include <linux/gfp.h> #include <linux/pci.h> #include <linux/mm.h> diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 6f1236c..0de43e9 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -24,7 +24,6 @@ #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/reboot.h> -#include <linux/init.h> #include <linux/mc146818rtc.h> #include <linux/module.h> #include <linux/kallsyms.h> diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index da3c599..c752cb4 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -558,6 +558,17 @@ void native_machine_shutdown(void) { /* Stop the cpus and apics */ #ifdef CONFIG_X86_IO_APIC + /* + * Disabling IO APIC before local APIC is a workaround for + * erratum AVR31 in "Intel Atom Processor C2000 Product Family + * Specification Update". In this situation, interrupts that target + * a Logical Processor whose Local APIC is either in the process of + * being hardware disabled or software disabled are neither delivered + * nor discarded. When this erratum occurs, the processor may hang. + * + * Even without the erratum, it still makes sense to quiet IO APIC + * before disabling Local APIC. + */ disable_IO_APIC(); #endif diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index cb233bc..06853e6 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -295,6 +295,8 @@ static void __init reserve_brk(void) _brk_start = 0; } +u64 relocated_ramdisk; + #ifdef CONFIG_BLK_DEV_INITRD static u64 __init get_ramdisk_image(void) @@ -321,25 +323,24 @@ static void __init relocate_initrd(void) u64 ramdisk_image = get_ramdisk_image(); u64 ramdisk_size = get_ramdisk_size(); u64 area_size = PAGE_ALIGN(ramdisk_size); - u64 ramdisk_here; unsigned long slop, clen, mapaddr; char *p, *q; /* We need to move the initrd down into directly mapped mem */ - ramdisk_here = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped), - area_size, PAGE_SIZE); + relocated_ramdisk = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped), + area_size, PAGE_SIZE); - if (!ramdisk_here) + if (!relocated_ramdisk) panic("Cannot find place for new RAMDISK of size %lld\n", - ramdisk_size); + ramdisk_size); /* Note: this includes all the mem currently occupied by the initrd, we rely on that fact to keep the data intact. 
*/ - memblock_reserve(ramdisk_here, area_size); - initrd_start = ramdisk_here + PAGE_OFFSET; + memblock_reserve(relocated_ramdisk, area_size); + initrd_start = relocated_ramdisk + PAGE_OFFSET; initrd_end = initrd_start + ramdisk_size; printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n", - ramdisk_here, ramdisk_here + ramdisk_size - 1); + relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1); q = (char *)initrd_start; @@ -363,7 +364,7 @@ static void __init relocate_initrd(void) printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to" " [mem %#010llx-%#010llx]\n", ramdisk_image, ramdisk_image + ramdisk_size - 1, - ramdisk_here, ramdisk_here + ramdisk_size - 1); + relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1); } static void __init early_reserve_initrd(void) @@ -447,6 +448,9 @@ static void __init parse_setup_data(void) case SETUP_DTB: add_dtb(pa_data); break; + case SETUP_EFI: + parse_efi_setup(pa_data, data_len); + break; default: break; } @@ -824,6 +828,20 @@ static void __init trim_low_memory_range(void) } /* + * Dump out kernel offset information on panic. + */ +static int +dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p) +{ + pr_emerg("Kernel Offset: 0x%lx from 0x%lx " + "(relocation range: 0x%lx-0x%lx)\n", + (unsigned long)&_text - __START_KERNEL, __START_KERNEL, + __START_KERNEL_map, MODULES_VADDR-1); + + return 0; +} + +/* * Determine if we were loaded by an EFI loader. If so, then we have also been * passed the efi memmap, systab, etc., so we should use these data structures * for initialization. Note, the efi init code path is determined by the @@ -924,8 +942,6 @@ void __init setup_arch(char **cmdline_p) iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1; setup_memory_map(); parse_setup_data(); - /* update the e820_saved too */ - e820_reserve_setup_data(); copy_edd(); @@ -987,6 +1003,8 @@ void __init setup_arch(char **cmdline_p) early_dump_pci_devices(); #endif + /* update the e820_saved too */ + e820_reserve_setup_data(); finish_e820_parsing(); if (efi_enabled(EFI_BOOT)) @@ -1248,3 +1266,15 @@ void __init i386_reserve_resources(void) } #endif /* CONFIG_X86_32 */ + +static struct notifier_block kernel_offset_notifier = { + .notifier_call = dump_kernel_offset +}; + +static int __init register_kernel_offset_dumper(void) +{ + atomic_notifier_chain_register(&panic_notifier_list, + &kernel_offset_notifier); + return 0; +} +__initcall(register_kernel_offset_dumper); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 85dc05a..a32da80 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1312,6 +1312,12 @@ void cpu_disable_common(void) int native_cpu_disable(void) { + int ret; + + ret = check_irq_vectors_for_cpu_disable(); + if (ret) + return ret; + clear_local_APIC(); cpu_disable_common(); @@ -1417,7 +1423,9 @@ static inline void mwait_play_dead(void) * The WBINVD is insufficient due to the spurious-wakeup * case where we return around the loop. 
*/ + mb(); clflush(mwait_ptr); + mb(); __monitor(mwait_ptr, 0, 0); mb(); __mwait(eax, 0); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index b857ed8..57409f6 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -211,21 +211,17 @@ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ exception_exit(prev_state); \ } -DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV, - regs->ip) -DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow) -DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds) -DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, - regs->ip) -DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", - coprocessor_segment_overrun) -DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS) -DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present) +DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip ) +DO_ERROR (X86_TRAP_OF, SIGSEGV, "overflow", overflow ) +DO_ERROR (X86_TRAP_BR, SIGSEGV, "bounds", bounds ) +DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip ) +DO_ERROR (X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun ) +DO_ERROR (X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS ) +DO_ERROR (X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present ) #ifdef CONFIG_X86_32 -DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment) +DO_ERROR (X86_TRAP_SS, SIGBUS, "stack segment", stack_segment ) #endif -DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check, - BUS_ADRALN, 0) +DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0 ) #ifdef CONFIG_X86_64 /* Runs on IST stack */ diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 930e5d4..a3acbac 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -11,6 +11,7 @@ #include <linux/clocksource.h> #include <linux/percpu.h> #include <linux/timex.h> +#include <linux/static_key.h> #include <asm/hpet.h> #include <asm/timer.h> @@ -37,13 +38,244 @@ static int __read_mostly tsc_unstable; erroneous rdtsc usage on !cpu_has_tsc processors */ static int __read_mostly tsc_disabled = -1; +static struct static_key __use_tsc = STATIC_KEY_INIT; + int tsc_clocksource_reliable; + +/* + * Use a ring-buffer like data structure, where a writer advances the head by + * writing a new data entry and a reader advances the tail when it observes a + * new entry. + * + * Writers are made to wait on readers until there's space to write a new + * entry. + * + * This means that we can always use an {offset, mul} pair to compute a ns + * value that is 'roughly' in the right direction, even if we're writing a new + * {offset, mul} pair during the clock read. + * + * The down-side is that we can no longer guarantee strict monotonicity anymore + * (assuming the TSC was that to begin with), because while we compute the + * intersection point of the two clock slopes and make sure the time is + * continuous at the point of switching; we can no longer guarantee a reader is + * strictly before or after the switch point. + * + * It does mean a reader no longer needs to disable IRQs in order to avoid + * CPU-Freq updates messing with his times, and similarly an NMI reader will + * no longer run the risk of hitting half-written state. 
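+ *
+ * Reader sketch (illustrative; mirrors what arch_perf_update_userpage()
+ * and cycles_2_ns() below do), where "tsc" is a freshly read counter value:
+ *
+ *	data = cyc2ns_read_begin();
+ *	ns = data->cyc2ns_offset +
+ *	     mul_u64_u32_shr(tsc, data->cyc2ns_mul, data->cyc2ns_shift);
+ *	cyc2ns_read_end(data);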
+ */ + +struct cyc2ns { + struct cyc2ns_data data[2]; /* 0 + 2*24 = 48 */ + struct cyc2ns_data *head; /* 48 + 8 = 56 */ + struct cyc2ns_data *tail; /* 56 + 8 = 64 */ +}; /* exactly fits one cacheline */ + +static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns); + +struct cyc2ns_data *cyc2ns_read_begin(void) +{ + struct cyc2ns_data *head; + + preempt_disable(); + + head = this_cpu_read(cyc2ns.head); + /* + * Ensure we observe the entry when we observe the pointer to it. + * matches the wmb from cyc2ns_write_end(). + */ + smp_read_barrier_depends(); + head->__count++; + barrier(); + + return head; +} + +void cyc2ns_read_end(struct cyc2ns_data *head) +{ + barrier(); + /* + * If we're the outer most nested read; update the tail pointer + * when we're done. This notifies possible pending writers + * that we've observed the head pointer and that the other + * entry is now free. + */ + if (!--head->__count) { + /* + * x86-TSO does not reorder writes with older reads; + * therefore once this write becomes visible to another + * cpu, we must be finished reading the cyc2ns_data. + * + * matches with cyc2ns_write_begin(). + */ + this_cpu_write(cyc2ns.tail, head); + } + preempt_enable(); +} + +/* + * Begin writing a new @data entry for @cpu. + * + * Assumes some sort of write side lock; currently 'provided' by the assumption + * that cpufreq will call its notifiers sequentially. + */ +static struct cyc2ns_data *cyc2ns_write_begin(int cpu) +{ + struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu); + struct cyc2ns_data *data = c2n->data; + + if (data == c2n->head) + data++; + + /* XXX send an IPI to @cpu in order to guarantee a read? */ + + /* + * When we observe the tail write from cyc2ns_read_end(), + * the cpu must be done with that entry and its safe + * to start writing to it. + */ + while (c2n->tail == data) + cpu_relax(); + + return data; +} + +static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data) +{ + struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu); + + /* + * Ensure the @data writes are visible before we publish the + * entry. Matches the data-depencency in cyc2ns_read_begin(). + */ + smp_wmb(); + + ACCESS_ONCE(c2n->head) = data; +} + +/* + * Accelerators for sched_clock() + * convert from cycles(64bits) => nanoseconds (64bits) + * basic equation: + * ns = cycles / (freq / ns_per_sec) + * ns = cycles * (ns_per_sec / freq) + * ns = cycles * (10^9 / (cpu_khz * 10^3)) + * ns = cycles * (10^6 / cpu_khz) + * + * Then we use scaling math (suggested by george@mvista.com) to get: + * ns = cycles * (10^6 * SC / cpu_khz) / SC + * ns = cycles * cyc2ns_scale / SC + * + * And since SC is a constant power of two, we can convert the div + * into a shift. + * + * We can use khz divisor instead of mhz to keep a better precision, since + * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. + * (mathieu.desnoyers@polymtl.ca) + * + * -johnstul@us.ibm.com "math is hard, lets go shopping!" 
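+ *
+ * Worked example: for a 2 GHz TSC, cpu_khz = 2000000, so
+ *	cyc2ns_mul = DIV_ROUND(10^6 << 10, 2000000) = 512
+ * and ns = (cycles * 512) >> 10 = cycles / 2, i.e. 0.5 ns per cycle.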
+ */ + +#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ + +static void cyc2ns_data_init(struct cyc2ns_data *data) +{ + data->cyc2ns_mul = 1U << CYC2NS_SCALE_FACTOR; + data->cyc2ns_shift = CYC2NS_SCALE_FACTOR; + data->cyc2ns_offset = 0; + data->__count = 0; +} + +static void cyc2ns_init(int cpu) +{ + struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu); + + cyc2ns_data_init(&c2n->data[0]); + cyc2ns_data_init(&c2n->data[1]); + + c2n->head = c2n->data; + c2n->tail = c2n->data; +} + +static inline unsigned long long cycles_2_ns(unsigned long long cyc) +{ + struct cyc2ns_data *data, *tail; + unsigned long long ns; + + /* + * See cyc2ns_read_*() for details; replicated in order to avoid + * an extra few instructions that came with the abstraction. + * Notable, it allows us to only do the __count and tail update + * dance when its actually needed. + */ + + preempt_disable(); + data = this_cpu_read(cyc2ns.head); + tail = this_cpu_read(cyc2ns.tail); + + if (likely(data == tail)) { + ns = data->cyc2ns_offset; + ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR); + } else { + data->__count++; + + barrier(); + + ns = data->cyc2ns_offset; + ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR); + + barrier(); + + if (!--data->__count) + this_cpu_write(cyc2ns.tail, data); + } + preempt_enable(); + + return ns; +} + +/* XXX surely we already have this someplace in the kernel?! */ +#define DIV_ROUND(n, d) (((n) + ((d) / 2)) / (d)) + +static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) +{ + unsigned long long tsc_now, ns_now; + struct cyc2ns_data *data; + unsigned long flags; + + local_irq_save(flags); + sched_clock_idle_sleep_event(); + + if (!cpu_khz) + goto done; + + data = cyc2ns_write_begin(cpu); + + rdtscll(tsc_now); + ns_now = cycles_2_ns(tsc_now); + + /* + * Compute a new multiplier as per the above comment and ensure our + * time function is continuous; see the comment near struct + * cyc2ns_data. + */ + data->cyc2ns_mul = DIV_ROUND(NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR, cpu_khz); + data->cyc2ns_shift = CYC2NS_SCALE_FACTOR; + data->cyc2ns_offset = ns_now - + mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR); + + cyc2ns_write_end(cpu, data); + +done: + sched_clock_idle_wakeup_event(0); + local_irq_restore(flags); +} /* * Scheduler clock - returns current time in nanosec units. */ u64 native_sched_clock(void) { - u64 this_offset; + u64 tsc_now; /* * Fall back to jiffies if there's no TSC available: @@ -53,16 +285,16 @@ u64 native_sched_clock(void) * very important for it to be as fast as the platform * can achieve it. 
) */ - if (unlikely(tsc_disabled)) { + if (!static_key_false(&__use_tsc)) { /* No locking but a rare wrong value is not a big deal: */ return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ); } /* read the Time Stamp Counter: */ - rdtscll(this_offset); + rdtscll(tsc_now); /* return the value in ns */ - return __cycles_2_ns(this_offset); + return cycles_2_ns(tsc_now); } /* We need to define a real function for sched_clock, to override the @@ -419,6 +651,16 @@ unsigned long native_calibrate_tsc(void) unsigned long flags, latch, ms, fast_calibrate; int hpet = is_hpet_enabled(), i, loopmin; + /* Calibrate TSC using MSR for Intel Atom SoCs */ + local_irq_save(flags); + i = try_msr_calibrate_tsc(&fast_calibrate); + local_irq_restore(flags); + if (i >= 0) { + if (i == 0) + pr_warn("Fast TSC calibration using MSR failed\n"); + return fast_calibrate; + } + local_irq_save(flags); fast_calibrate = quick_pit_calibrate(); local_irq_restore(flags); @@ -589,61 +831,11 @@ int recalibrate_cpu_khz(void) EXPORT_SYMBOL(recalibrate_cpu_khz); -/* Accelerators for sched_clock() - * convert from cycles(64bits) => nanoseconds (64bits) - * basic equation: - * ns = cycles / (freq / ns_per_sec) - * ns = cycles * (ns_per_sec / freq) - * ns = cycles * (10^9 / (cpu_khz * 10^3)) - * ns = cycles * (10^6 / cpu_khz) - * - * Then we use scaling math (suggested by george@mvista.com) to get: - * ns = cycles * (10^6 * SC / cpu_khz) / SC - * ns = cycles * cyc2ns_scale / SC - * - * And since SC is a constant power of two, we can convert the div - * into a shift. - * - * We can use khz divisor instead of mhz to keep a better precision, since - * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. - * (mathieu.desnoyers@polymtl.ca) - * - * -johnstul@us.ibm.com "math is hard, lets go shopping!" - */ - -DEFINE_PER_CPU(unsigned long, cyc2ns); -DEFINE_PER_CPU(unsigned long long, cyc2ns_offset); - -static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) -{ - unsigned long long tsc_now, ns_now, *offset; - unsigned long flags, *scale; - - local_irq_save(flags); - sched_clock_idle_sleep_event(); - - scale = &per_cpu(cyc2ns, cpu); - offset = &per_cpu(cyc2ns_offset, cpu); - - rdtscll(tsc_now); - ns_now = __cycles_2_ns(tsc_now); - - if (cpu_khz) { - *scale = ((NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) + - cpu_khz / 2) / cpu_khz; - *offset = ns_now - mult_frac(tsc_now, *scale, - (1UL << CYC2NS_SCALE_FACTOR)); - } - - sched_clock_idle_wakeup_event(0); - local_irq_restore(flags); -} - static unsigned long long cyc2ns_suspend; void tsc_save_sched_clock_state(void) { - if (!sched_clock_stable) + if (!sched_clock_stable()) return; cyc2ns_suspend = sched_clock(); @@ -663,16 +855,26 @@ void tsc_restore_sched_clock_state(void) unsigned long flags; int cpu; - if (!sched_clock_stable) + if (!sched_clock_stable()) return; local_irq_save(flags); - __this_cpu_write(cyc2ns_offset, 0); + /* + * We're comming out of suspend, there's no concurrency yet; don't + * bother being nice about the RCU stuff, just write to both + * data fields. 
+ */ + + this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0); + this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0); + offset = cyc2ns_suspend - sched_clock(); - for_each_possible_cpu(cpu) - per_cpu(cyc2ns_offset, cpu) = offset; + for_each_possible_cpu(cpu) { + per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset; + per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset; + } local_irq_restore(flags); } @@ -795,7 +997,7 @@ void mark_tsc_unstable(char *reason) { if (!tsc_unstable) { tsc_unstable = 1; - sched_clock_stable = 0; + clear_sched_clock_stable(); disable_sched_clock_irqtime(); pr_info("Marking TSC unstable due to %s\n", reason); /* Change only the rating, when not registered */ @@ -995,14 +1197,18 @@ void __init tsc_init(void) * speed as the bootup CPU. (cpufreq notifiers will fix this * up if their speed diverges) */ - for_each_possible_cpu(cpu) + for_each_possible_cpu(cpu) { + cyc2ns_init(cpu); set_cyc2ns_scale(cpu_khz, cpu); + } if (tsc_disabled > 0) return; /* now allow native_sched_clock() to use rdtsc */ + tsc_disabled = 0; + static_key_slow_inc(&__use_tsc); if (!no_sched_irq_time) enable_sched_clock_irqtime(); diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c new file mode 100644 index 0000000..8b5434f --- /dev/null +++ b/arch/x86/kernel/tsc_msr.c @@ -0,0 +1,127 @@ +/* + * tsc_msr.c - MSR based TSC calibration on Intel Atom SoC platforms. + * + * TSC in Intel Atom SoC runs at a constant rate which can be figured + * by this formula: + * <maximum core-clock to bus-clock ratio> * <maximum resolved frequency> + * See Intel 64 and IA-32 System Programming Guid section 16.12 and 30.11.5 + * for details. + * Especially some Intel Atom SoCs don't have PIT(i8254) or HPET, so MSR + * based calibration is the only option. + * + * + * Copyright (C) 2013 Intel Corporation + * Author: Bin Gao <bin.gao@intel.com> + * + * This file is released under the GPLv2. + */ + +#include <linux/kernel.h> +#include <asm/processor.h> +#include <asm/setup.h> +#include <asm/apic.h> +#include <asm/param.h> + +/* CPU reference clock frequency: in KHz */ +#define FREQ_83 83200 +#define FREQ_100 99840 +#define FREQ_133 133200 +#define FREQ_166 166400 + +#define MAX_NUM_FREQS 8 + +/* + * According to Intel 64 and IA-32 System Programming Guide, + * if MSR_PERF_STAT[31] is set, the maximum resolved bus ratio can be + * read in MSR_PLATFORM_ID[12:8], otherwise in MSR_PERF_STAT[44:40]. + * Unfortunately some Intel Atom SoCs aren't quite compliant to this, + * so we need manually differentiate SoC families. This is what the + * field msr_plat does. 
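+ *
+ * Worked example with hypothetical values: a SoC reporting a maximum
+ * bus ratio of 16 and FSB freq ID 1 (FREQ_100 = 99840 kHz) would get
+ * a TSC frequency of 16 * 99840 = 1597440 kHz, roughly 1.6 GHz.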
+ */ +struct freq_desc { + u8 x86_family; /* CPU family */ + u8 x86_model; /* model */ + u8 msr_plat; /* 1: use MSR_PLATFORM_INFO, 0: MSR_IA32_PERF_STATUS */ + u32 freqs[MAX_NUM_FREQS]; +}; + +static struct freq_desc freq_desc_tables[] = { + /* PNW */ + { 6, 0x27, 0, { 0, 0, 0, 0, 0, FREQ_100, 0, FREQ_83 } }, + /* CLV+ */ + { 6, 0x35, 0, { 0, FREQ_133, 0, 0, 0, FREQ_100, 0, FREQ_83 } }, + /* TNG */ + { 6, 0x4a, 1, { 0, FREQ_100, FREQ_133, 0, 0, 0, 0, 0 } }, + /* VLV2 */ + { 6, 0x37, 1, { 0, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } }, + /* ANN */ + { 6, 0x5a, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_100, 0, 0, 0, 0 } }, +}; + +static int match_cpu(u8 family, u8 model) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(freq_desc_tables); i++) { + if ((family == freq_desc_tables[i].x86_family) && + (model == freq_desc_tables[i].x86_model)) + return i; + } + + return -1; +} + +/* Map CPU reference clock freq ID(0-7) to CPU reference clock freq(KHz) */ +#define id_to_freq(cpu_index, freq_id) \ + (freq_desc_tables[cpu_index].freqs[freq_id]) + +/* + * Do MSR calibration only for known/supported CPUs. + * Return values: + * -1: CPU is unknown/unsupported for MSR based calibration + * 0: CPU is known/supported, but calibration failed + * 1: CPU is known/supported, and calibration succeeded + */ +int try_msr_calibrate_tsc(unsigned long *fast_calibrate) +{ + int cpu_index; + u32 lo, hi, ratio, freq_id, freq; + + cpu_index = match_cpu(boot_cpu_data.x86, boot_cpu_data.x86_model); + if (cpu_index < 0) + return -1; + + *fast_calibrate = 0; + + if (freq_desc_tables[cpu_index].msr_plat) { + rdmsr(MSR_PLATFORM_INFO, lo, hi); + ratio = (lo >> 8) & 0x1f; + } else { + rdmsr(MSR_IA32_PERF_STATUS, lo, hi); + ratio = (hi >> 8) & 0x1f; + } + pr_info("Maximum core-clock to bus-clock ratio: 0x%x\n", ratio); + + if (!ratio) + return 0; + + /* Get FSB FREQ ID */ + rdmsr(MSR_FSB_FREQ, lo, hi); + freq_id = lo & 0x7; + freq = id_to_freq(cpu_index, freq_id); + pr_info("Resolved frequency ID: %u, frequency: %u KHz\n", + freq_id, freq); + if (!freq) + return 0; + + /* TSC frequency = maximum resolved freq * maximum resolved bus ratio */ + *fast_calibrate = freq * ratio; + pr_info("TSC runs at %lu KHz\n", *fast_calibrate); + +#ifdef CONFIG_X86_LOCAL_APIC + lapic_timer_frequency = (freq * 1000) / HZ; + pr_info("lapic_timer_frequency = %d\n", lapic_timer_frequency); +#endif + + return 1; +} diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index adfdf56..2648848 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c @@ -16,7 +16,6 @@ */ #include <linux/spinlock.h> #include <linux/kernel.h> -#include <linux/init.h> #include <linux/smp.h> #include <linux/nmi.h> #include <asm/tsc.h> diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index 422fd82..a4b451c 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c @@ -562,6 +562,16 @@ static void __init xstate_enable_boot_cpu(void) if (cpu_has_xsaveopt && eagerfpu != DISABLE) eagerfpu = ENABLE; + if (pcntxt_mask & XSTATE_EAGER) { + if (eagerfpu == DISABLE) { + pr_err("eagerfpu not present, disabling some xstate features: 0x%llx\n", + pcntxt_mask & XSTATE_EAGER); + pcntxt_mask &= ~XSTATE_EAGER; + } else { + eagerfpu = ENABLE; + } + } + pr_info("enabled xstate_bv 0x%llx, cntxt size 0x%x\n", pcntxt_mask, xstate_size); } diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 5439117..775702f 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -143,6 +143,8 @@ static inline int kvm_apic_id(struct kvm_lapic *apic) 
return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff; } +#define KVM_X2APIC_CID_BITS 0 + static void recalculate_apic_map(struct kvm *kvm) { struct kvm_apic_map *new, *old = NULL; @@ -180,7 +182,8 @@ static void recalculate_apic_map(struct kvm *kvm) if (apic_x2apic_mode(apic)) { new->ldr_bits = 32; new->cid_shift = 16; - new->cid_mask = new->lid_mask = 0xffff; + new->cid_mask = (1 << KVM_X2APIC_CID_BITS) - 1; + new->lid_mask = 0xffff; } else if (kvm_apic_sw_enabled(apic) && !new->cid_mask /* flat mode */ && kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) { @@ -841,7 +844,8 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic) ASSERT(apic != NULL); /* if initial count is 0, current count should also be 0 */ - if (kvm_apic_get_reg(apic, APIC_TMICT) == 0) + if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 || + apic->lapic_timer.period == 0) return 0; remaining = hrtimer_get_remaining(&apic->lapic_timer.timer); @@ -1346,8 +1350,12 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) return; } + if (!kvm_vcpu_is_bsp(apic->vcpu)) + value &= ~MSR_IA32_APICBASE_BSP; + vcpu->arch.apic_base = value; + /* update jump label if enable bit changes */ - if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) { + if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) { if (value & MSR_IA32_APICBASE_ENABLE) static_key_slow_dec_deferred(&apic_hw_disabled); else @@ -1355,10 +1363,6 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) recalculate_apic_map(vcpu->kvm); } - if (!kvm_vcpu_is_bsp(apic->vcpu)) - value &= ~MSR_IA32_APICBASE_BSP; - - vcpu->arch.apic_base = value; if ((old_value ^ value) & X2APIC_ENABLE) { if (value & X2APIC_ENABLE) { u32 id = kvm_apic_id(apic); @@ -1691,7 +1695,6 @@ static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu, void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) { u32 data; - void *vapic; if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention)) apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic); @@ -1699,9 +1702,8 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) return; - vapic = kmap_atomic(vcpu->arch.apic->vapic_page); - data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)); - kunmap_atomic(vapic); + kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, + sizeof(u32)); apic_set_tpr(vcpu->arch.apic, data & 0xff); } @@ -1737,7 +1739,6 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu) u32 data, tpr; int max_irr, max_isr; struct kvm_lapic *apic = vcpu->arch.apic; - void *vapic; apic_sync_pv_eoi_to_guest(vcpu, apic); @@ -1753,18 +1754,24 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu) max_isr = 0; data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24); - vapic = kmap_atomic(vcpu->arch.apic->vapic_page); - *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data; - kunmap_atomic(vapic); + kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, + sizeof(u32)); } -void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) +int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) { - vcpu->arch.apic->vapic_addr = vapic_addr; - if (vapic_addr) + if (vapic_addr) { + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, + &vcpu->arch.apic->vapic_cache, + vapic_addr, sizeof(u32))) + return -EINVAL; __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); - else + } else { __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); + } + + vcpu->arch.apic->vapic_addr = 
vapic_addr; + return 0; } int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data) diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index c730ac9..c8b0d0d 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -34,7 +34,7 @@ struct kvm_lapic { */ void *regs; gpa_t vapic_addr; - struct page *vapic_page; + struct gfn_to_hva_cache vapic_cache; unsigned long pending_events; unsigned int sipi_vector; }; @@ -76,7 +76,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data); void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset); void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector); -void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr); +int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr); void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu); void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index b2fe1c2..da7837e 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -8283,8 +8283,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); kvm_set_cr4(vcpu, vmcs12->host_cr4); - if (nested_cpu_has_ept(vmcs12)) - nested_ept_uninit_mmu_context(vcpu); + nested_ept_uninit_mmu_context(vcpu); kvm_set_cr3(vcpu, vmcs12->host_cr3); kvm_mmu_reset_context(vcpu); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 21ef1ba..5d004da 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; } diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index a30ca15..dee945d 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -186,7 +186,7 @@ ENTRY(copy_user_generic_unrolled) 30: shll $6,%ecx addl %ecx,%edx jmp 60f -40: lea (%rdx,%rcx,8),%rdx +40: leal (%rdx,%rcx,8),%edx jmp 60f 50: movl %ecx,%edx 60: jmp copy_user_handle_tail /* ecx is zerorest also */ @@ -236,8 +236,6 @@ ENDPROC(copy_user_generic_unrolled) 
ENTRY(copy_user_generic_string) CFI_STARTPROC ASM_STAC - andl %edx,%edx - jz 4f cmpl $8,%edx jb 2f /* less than 8 bytes, go to byte copy loop */ ALIGN_DESTINATION @@ -249,12 +247,12 @@ ENTRY(copy_user_generic_string) 2: movl %edx,%ecx 3: rep movsb -4: xorl %eax,%eax + xorl %eax,%eax ASM_CLAC ret .section .fixup,"ax" -11: lea (%rdx,%rcx,8),%rcx +11: leal (%rdx,%rcx,8),%ecx 12: movl %ecx,%edx /* ecx is zerorest also */ jmp copy_user_handle_tail .previous @@ -279,12 +277,10 @@ ENDPROC(copy_user_generic_string) ENTRY(copy_user_enhanced_fast_string) CFI_STARTPROC ASM_STAC - andl %edx,%edx - jz 2f movl %edx,%ecx 1: rep movsb -2: xorl %eax,%eax + xorl %eax,%eax ASM_CLAC ret diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c index 7c3bee6..39d6a3d 100644 --- a/arch/x86/lib/delay.c +++ b/arch/x86/lib/delay.c @@ -16,7 +16,6 @@ #include <linux/timex.h> #include <linux/preempt.h> #include <linux/delay.h> -#include <linux/init.h> #include <asm/processor.h> #include <asm/delay.h> diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index 533a85e..1a2be7c 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt @@ -346,8 +346,8 @@ AVXcode: 1 17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1) 18: Grp16 (1A) 19: -1a: -1b: +1a: BNDCL Ev,Gv | BNDCU Ev,Gv | BNDMOV Gv,Ev | BNDLDX Gv,Ev,Gv +1b: BNDCN Ev,Gv | BNDMOV Ev,Gv | BNDMK Gv,Ev | BNDSTX Ev,GV,Gv 1c: 1d: 1e: diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 9ff85bb..9d591c8 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -641,6 +641,20 @@ no_context(struct pt_regs *regs, unsigned long error_code, /* Are we prepared to handle this kernel fault? */ if (fixup_exception(regs)) { + /* + * Any interrupt that takes a fault gets the fixup. This makes + * the below recursive fault logic only apply to a faults from + * task context. + */ + if (in_interrupt()) + return; + + /* + * Per the above we're !in_interrupt(), aka. task context. + * + * In this case we need to make sure we're not recursively + * faulting through the emulate_vsyscall() logic. + */ if (current_thread_info()->sig_on_uaccess_error && signal) { tsk->thread.trap_nr = X86_TRAP_PF; tsk->thread.error_code = error_code | PF_USER; @@ -649,6 +663,10 @@ no_context(struct pt_regs *regs, unsigned long error_code, /* XXX: hwpoison faults will set the wrong code. */ force_sig_info_fault(signal, si_code, address, tsk, 0); } + + /* + * Barring that, we can do the fixup and be happy. + */ return; } diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index dd74e46..0596e8e 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c @@ -83,6 +83,12 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, pte_t pte = gup_get_pte(ptep); struct page *page; + /* Similar to the PMD case, NUMA hinting must take slow path */ + if (pte_numa(pte)) { + pte_unmap(ptep); + return 0; + } + if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) { pte_unmap(ptep); return 0; @@ -167,6 +173,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, if (pmd_none(pmd) || pmd_trans_splitting(pmd)) return 0; if (unlikely(pmd_large(pmd))) { + /* + * NUMA hinting faults need to be handled in the GUP + * slowpath for accounting purposes and so that they + * can be serialised against THP migration. 
+ */ + if (pmd_numa(pmd)) + return 0; if (!gup_huge_pmd(pmd, addr, next, write, pages, nr)) return 0; } else { diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index 9d980d8..8c9f647 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -87,9 +87,7 @@ int pmd_huge_support(void) } #endif -/* x86_64 also uses this file */ - -#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA +#ifdef CONFIG_HUGETLB_PAGE static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) @@ -99,7 +97,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, info.flags = 0; info.length = len; - info.low_limit = TASK_UNMAPPED_BASE; + info.low_limit = current->mm->mmap_legacy_base; info.high_limit = TASK_SIZE; info.align_mask = PAGE_MASK & ~huge_page_mask(h); info.align_offset = 0; @@ -172,8 +170,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, return hugetlb_get_unmapped_area_topdown(file, addr, len, pgoff, flags); } - -#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/ +#endif /* CONFIG_HUGETLB_PAGE */ #ifdef CONFIG_X86_64 static __init int setup_hugepagesz(char *opt) diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 4287f1f..5bdc543 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -806,6 +806,9 @@ void __init mem_init(void) BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END); #undef high_memory #undef __FIXADDR_TOP +#ifdef CONFIG_RANDOMIZE_BASE + BUILD_BUG_ON(CONFIG_RANDOMIZE_BASE_MAX_OFFSET > KERNEL_IMAGE_SIZE); +#endif #ifdef CONFIG_HIGHMEM BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START); diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c index e5d5e2c..637ab34 100644 --- a/arch/x86/mm/kmmio.c +++ b/arch/x86/mm/kmmio.c @@ -11,7 +11,6 @@ #include <linux/rculist.h> #include <linux/spinlock.h> #include <linux/hash.h> -#include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/uaccess.h> diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 24aec58..c85da7b 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -211,9 +211,13 @@ static void __init setup_node_data(int nid, u64 start, u64 end) */ nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid); if (!nd_pa) { - pr_err("Cannot find %zu bytes in node %d\n", - nd_size, nid); - return; + nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES, + MEMBLOCK_ALLOC_ACCESSIBLE); + if (!nd_pa) { + pr_err("Cannot find %zu bytes in node %d\n", + nd_size, nid); + return; + } } nd = __va(nd_pa); diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c index d0b1773..461bc82 100644 --- a/arch/x86/mm/pageattr-test.c +++ b/arch/x86/mm/pageattr-test.c @@ -8,7 +8,6 @@ #include <linux/kthread.h> #include <linux/random.h> #include <linux/kernel.h> -#include <linux/init.h> #include <linux/mm.h> #include <asm/cacheflush.h> diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index bb32480..b3b19f4 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -30,6 +30,7 @@ */ struct cpa_data { unsigned long *vaddr; + pgd_t *pgd; pgprot_t mask_set; pgprot_t mask_clr; int numpages; @@ -322,17 +323,9 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, return prot; } -/* - * Lookup the page table entry for a virtual address. Return a pointer - * to the entry and the level of the mapping. - * - * Note: We return pud and pmd either when the entry is marked large - * or when the present bit is not set. 
Otherwise we would return a - * pointer to a nonexisting mapping. - */ -pte_t *lookup_address(unsigned long address, unsigned int *level) +static pte_t *__lookup_address_in_pgd(pgd_t *pgd, unsigned long address, + unsigned int *level) { - pgd_t *pgd = pgd_offset_k(address); pud_t *pud; pmd_t *pmd; @@ -361,8 +354,31 @@ pte_t *lookup_address(unsigned long address, unsigned int *level) return pte_offset_kernel(pmd, address); } + +/* + * Lookup the page table entry for a virtual address. Return a pointer + * to the entry and the level of the mapping. + * + * Note: We return pud and pmd either when the entry is marked large + * or when the present bit is not set. Otherwise we would return a + * pointer to a nonexisting mapping. + */ +pte_t *lookup_address(unsigned long address, unsigned int *level) +{ + return __lookup_address_in_pgd(pgd_offset_k(address), address, level); +} EXPORT_SYMBOL_GPL(lookup_address); +static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address, + unsigned int *level) +{ + if (cpa->pgd) + return __lookup_address_in_pgd(cpa->pgd + pgd_index(address), + address, level); + + return lookup_address(address, level); +} + /* * This is necessary because __pa() does not work on some * kinds of memory, like vmalloc() or the alloc_remap() @@ -437,7 +453,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, * Check for races, another CPU might have split this page * up already: */ - tmp = lookup_address(address, &level); + tmp = _lookup_address_cpa(cpa, address, &level); if (tmp != kpte) goto out_unlock; @@ -543,7 +559,8 @@ out_unlock: } static int -__split_large_page(pte_t *kpte, unsigned long address, struct page *base) +__split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, + struct page *base) { pte_t *pbase = (pte_t *)page_address(base); unsigned long pfn, pfninc = 1; @@ -556,7 +573,7 @@ __split_large_page(pte_t *kpte, unsigned long address, struct page *base) * Check for races, another CPU might have split this page * up for us already: */ - tmp = lookup_address(address, &level); + tmp = _lookup_address_cpa(cpa, address, &level); if (tmp != kpte) { spin_unlock(&pgd_lock); return 1; @@ -632,7 +649,8 @@ __split_large_page(pte_t *kpte, unsigned long address, struct page *base) return 0; } -static int split_large_page(pte_t *kpte, unsigned long address) +static int split_large_page(struct cpa_data *cpa, pte_t *kpte, + unsigned long address) { struct page *base; @@ -644,15 +662,390 @@ static int split_large_page(pte_t *kpte, unsigned long address) if (!base) return -ENOMEM; - if (__split_large_page(kpte, address, base)) + if (__split_large_page(cpa, kpte, address, base)) __free_page(base); return 0; } +static bool try_to_free_pte_page(pte_t *pte) +{ + int i; + + for (i = 0; i < PTRS_PER_PTE; i++) + if (!pte_none(pte[i])) + return false; + + free_page((unsigned long)pte); + return true; +} + +static bool try_to_free_pmd_page(pmd_t *pmd) +{ + int i; + + for (i = 0; i < PTRS_PER_PMD; i++) + if (!pmd_none(pmd[i])) + return false; + + free_page((unsigned long)pmd); + return true; +} + +static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end) +{ + pte_t *pte = pte_offset_kernel(pmd, start); + + while (start < end) { + set_pte(pte, __pte(0)); + + start += PAGE_SIZE; + pte++; + } + + if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) { + pmd_clear(pmd); + return true; + } + return false; +} + +static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd, + unsigned long start, unsigned long end) +{ + if 
(unmap_pte_range(pmd, start, end)) + if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud))) + pud_clear(pud); +} + +static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end) +{ + pmd_t *pmd = pmd_offset(pud, start); + + /* + * Not on a 2MB page boundary? + */ + if (start & (PMD_SIZE - 1)) { + unsigned long next_page = (start + PMD_SIZE) & PMD_MASK; + unsigned long pre_end = min_t(unsigned long, end, next_page); + + __unmap_pmd_range(pud, pmd, start, pre_end); + + start = pre_end; + pmd++; + } + + /* + * Try to unmap in 2M chunks. + */ + while (end - start >= PMD_SIZE) { + if (pmd_large(*pmd)) + pmd_clear(pmd); + else + __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE); + + start += PMD_SIZE; + pmd++; + } + + /* + * 4K leftovers? + */ + if (start < end) + return __unmap_pmd_range(pud, pmd, start, end); + + /* + * Try again to free the PMD page if haven't succeeded above. + */ + if (!pud_none(*pud)) + if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud))) + pud_clear(pud); +} + +static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end) +{ + pud_t *pud = pud_offset(pgd, start); + + /* + * Not on a GB page boundary? + */ + if (start & (PUD_SIZE - 1)) { + unsigned long next_page = (start + PUD_SIZE) & PUD_MASK; + unsigned long pre_end = min_t(unsigned long, end, next_page); + + unmap_pmd_range(pud, start, pre_end); + + start = pre_end; + pud++; + } + + /* + * Try to unmap in 1G chunks? + */ + while (end - start >= PUD_SIZE) { + + if (pud_large(*pud)) + pud_clear(pud); + else + unmap_pmd_range(pud, start, start + PUD_SIZE); + + start += PUD_SIZE; + pud++; + } + + /* + * 2M leftovers? + */ + if (start < end) + unmap_pmd_range(pud, start, end); + + /* + * No need to try to free the PUD page because we'll free it in + * populate_pgd's error path + */ +} + +static int alloc_pte_page(pmd_t *pmd) +{ + pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); + if (!pte) + return -1; + + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE)); + return 0; +} + +static int alloc_pmd_page(pud_t *pud) +{ + pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); + if (!pmd) + return -1; + + set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); + return 0; +} + +static void populate_pte(struct cpa_data *cpa, + unsigned long start, unsigned long end, + unsigned num_pages, pmd_t *pmd, pgprot_t pgprot) +{ + pte_t *pte; + + pte = pte_offset_kernel(pmd, start); + + while (num_pages-- && start < end) { + + /* deal with the NX bit */ + if (!(pgprot_val(pgprot) & _PAGE_NX)) + cpa->pfn &= ~_PAGE_NX; + + set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot)); + + start += PAGE_SIZE; + cpa->pfn += PAGE_SIZE; + pte++; + } +} + +static int populate_pmd(struct cpa_data *cpa, + unsigned long start, unsigned long end, + unsigned num_pages, pud_t *pud, pgprot_t pgprot) +{ + unsigned int cur_pages = 0; + pmd_t *pmd; + + /* + * Not on a 2M boundary? + */ + if (start & (PMD_SIZE - 1)) { + unsigned long pre_end = start + (num_pages << PAGE_SHIFT); + unsigned long next_page = (start + PMD_SIZE) & PMD_MASK; + + pre_end = min_t(unsigned long, pre_end, next_page); + cur_pages = (pre_end - start) >> PAGE_SHIFT; + cur_pages = min_t(unsigned int, num_pages, cur_pages); + + /* + * Need a PTE page? + */ + pmd = pmd_offset(pud, start); + if (pmd_none(*pmd)) + if (alloc_pte_page(pmd)) + return -1; + + populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot); + + start = pre_end; + } + + /* + * We mapped them all? 
+ */ + if (num_pages == cur_pages) + return cur_pages; + + while (end - start >= PMD_SIZE) { + + /* + * We cannot use a 1G page so allocate a PMD page if needed. + */ + if (pud_none(*pud)) + if (alloc_pmd_page(pud)) + return -1; + + pmd = pmd_offset(pud, start); + + set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot))); + + start += PMD_SIZE; + cpa->pfn += PMD_SIZE; + cur_pages += PMD_SIZE >> PAGE_SHIFT; + } + + /* + * Map trailing 4K pages. + */ + if (start < end) { + pmd = pmd_offset(pud, start); + if (pmd_none(*pmd)) + if (alloc_pte_page(pmd)) + return -1; + + populate_pte(cpa, start, end, num_pages - cur_pages, + pmd, pgprot); + } + return num_pages; +} + +static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd, + pgprot_t pgprot) +{ + pud_t *pud; + unsigned long end; + int cur_pages = 0; + + end = start + (cpa->numpages << PAGE_SHIFT); + + /* + * Not on a Gb page boundary? => map everything up to it with + * smaller pages. + */ + if (start & (PUD_SIZE - 1)) { + unsigned long pre_end; + unsigned long next_page = (start + PUD_SIZE) & PUD_MASK; + + pre_end = min_t(unsigned long, end, next_page); + cur_pages = (pre_end - start) >> PAGE_SHIFT; + cur_pages = min_t(int, (int)cpa->numpages, cur_pages); + + pud = pud_offset(pgd, start); + + /* + * Need a PMD page? + */ + if (pud_none(*pud)) + if (alloc_pmd_page(pud)) + return -1; + + cur_pages = populate_pmd(cpa, start, pre_end, cur_pages, + pud, pgprot); + if (cur_pages < 0) + return cur_pages; + + start = pre_end; + } + + /* We mapped them all? */ + if (cpa->numpages == cur_pages) + return cur_pages; + + pud = pud_offset(pgd, start); + + /* + * Map everything starting from the Gb boundary, possibly with 1G pages + */ + while (end - start >= PUD_SIZE) { + set_pud(pud, __pud(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot))); + + start += PUD_SIZE; + cpa->pfn += PUD_SIZE; + cur_pages += PUD_SIZE >> PAGE_SHIFT; + pud++; + } + + /* Map trailing leftover */ + if (start < end) { + int tmp; + + pud = pud_offset(pgd, start); + if (pud_none(*pud)) + if (alloc_pmd_page(pud)) + return -1; + + tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages, + pud, pgprot); + if (tmp < 0) + return cur_pages; + + cur_pages += tmp; + } + return cur_pages; +} + +/* + * Restrictions for kernel page table do not necessarily apply when mapping in + * an alternate PGD. + */ +static int populate_pgd(struct cpa_data *cpa, unsigned long addr) +{ + pgprot_t pgprot = __pgprot(_KERNPG_TABLE); + bool allocd_pgd = false; + pgd_t *pgd_entry; + pud_t *pud = NULL; /* shut up gcc */ + int ret; + + pgd_entry = cpa->pgd + pgd_index(addr); + + /* + * Allocate a PUD page and hand it down for mapping. + */ + if (pgd_none(*pgd_entry)) { + pud = (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); + if (!pud) + return -1; + + set_pgd(pgd_entry, __pgd(__pa(pud) | _KERNPG_TABLE)); + allocd_pgd = true; + } + + pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr); + pgprot_val(pgprot) |= pgprot_val(cpa->mask_set); + + ret = populate_pud(cpa, addr, pgd_entry, pgprot); + if (ret < 0) { + unmap_pud_range(pgd_entry, addr, + addr + (cpa->numpages << PAGE_SHIFT)); + + if (allocd_pgd) { + /* + * If I allocated this PUD page, I can just as well + * free it in this error path. 
+ */ + pgd_clear(pgd_entry); + free_page((unsigned long)pud); + } + return ret; + } + cpa->numpages = ret; + return 0; +} + static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr, int primary) { + if (cpa->pgd) + return populate_pgd(cpa, vaddr); + /* * Ignore all non primary paths. */ @@ -697,7 +1090,7 @@ static int __change_page_attr(struct cpa_data *cpa, int primary) else address = *cpa->vaddr; repeat: - kpte = lookup_address(address, &level); + kpte = _lookup_address_cpa(cpa, address, &level); if (!kpte) return __cpa_process_fault(cpa, address, primary); @@ -761,7 +1154,7 @@ repeat: /* * We have to split the large page: */ - err = split_large_page(kpte, address); + err = split_large_page(cpa, kpte, address); if (!err) { /* * Do a global flush tlb after splitting the large page @@ -910,6 +1303,8 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, int ret, cache, checkalias; unsigned long baddr = 0; + memset(&cpa, 0, sizeof(cpa)); + /* * Check, if we are requested to change a not supported * feature: @@ -1356,6 +1751,7 @@ static int __set_pages_p(struct page *page, int numpages) { unsigned long tempaddr = (unsigned long) page_address(page); struct cpa_data cpa = { .vaddr = &tempaddr, + .pgd = NULL, .numpages = numpages, .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW), .mask_clr = __pgprot(0), @@ -1374,6 +1770,7 @@ static int __set_pages_np(struct page *page, int numpages) { unsigned long tempaddr = (unsigned long) page_address(page); struct cpa_data cpa = { .vaddr = &tempaddr, + .pgd = NULL, .numpages = numpages, .mask_set = __pgprot(0), .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW), @@ -1434,6 +1831,36 @@ bool kernel_page_present(struct page *page) #endif /* CONFIG_DEBUG_PAGEALLOC */ +int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, + unsigned numpages, unsigned long page_flags) +{ + int retval = -EINVAL; + + struct cpa_data cpa = { + .vaddr = &address, + .pfn = pfn, + .pgd = pgd, + .numpages = numpages, + .mask_set = __pgprot(0), + .mask_clr = __pgprot(0), + .flags = 0, + }; + + if (!(__supported_pte_mask & _PAGE_NX)) + goto out; + + if (!(page_flags & _PAGE_NX)) + cpa.mask_clr = __pgprot(_PAGE_NX); + + cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags); + + retval = __change_page_attr_set_clr(&cpa, 0); + __flush_tlb_all(); + +out: + return retval; +} + /* * The testcases use internal knowledge of the implementation that shouldn't * be exposed to the rest of the kernel. Include these directly here. 
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 26328e8..4ed75dd 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -359,15 +359,21 @@ void bpf_jit_compile(struct sk_filter *fp) EMIT2(0x89, 0xd0); /* mov %edx,%eax */ break; case BPF_S_ALU_MOD_K: /* A %= K; */ + if (K == 1) { + CLEAR_A(); + break; + } EMIT2(0x31, 0xd2); /* xor %edx,%edx */ EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */ EMIT2(0xf7, 0xf1); /* div %ecx */ EMIT2(0x89, 0xd0); /* mov %edx,%eax */ break; - case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */ - EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */ - EMIT(K, 4); - EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */ + case BPF_S_ALU_DIV_K: /* A /= K */ + if (K == 1) + break; + EMIT2(0x31, 0xd2); /* xor %edx,%edx */ + EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */ + EMIT2(0xf7, 0xf1); /* div %ecx */ break; case BPF_S_ALU_AND_X: seen |= SEEN_XREG; diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index b046e07..bca9e85 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -5,7 +5,6 @@ #include <linux/delay.h> #include <linux/dmi.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/vgaarb.h> #include <asm/pci_x86.h> diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c index 51384ca..84b9d67 100644 --- a/arch/x86/pci/intel_mid_pci.c +++ b/arch/x86/pci/intel_mid_pci.c @@ -31,6 +31,7 @@ #include <asm/pci_x86.h> #include <asm/hw_irq.h> #include <asm/io_apic.h> +#include <asm/intel-mid.h> #define PCIE_CAP_OFFSET 0x100 @@ -219,7 +220,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev) irq_attr.ioapic = mp_find_ioapic(dev->irq); irq_attr.ioapic_pin = dev->irq; irq_attr.trigger = 1; /* level */ - irq_attr.polarity = 1; /* active low */ + if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) + irq_attr.polarity = 0; /* active high */ + else + irq_attr.polarity = 1; /* active low */ io_apic_set_pci_routing(&dev->dev, dev->irq, &irq_attr); return 0; diff --git a/arch/x86/platform/efi/early_printk.c b/arch/x86/platform/efi/early_printk.c index 6599a00..81b506d 100644 --- a/arch/x86/platform/efi/early_printk.c +++ b/arch/x86/platform/efi/early_printk.c @@ -142,7 +142,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num) efi_y += font->height; } - if (efi_y + font->height >= si->lfb_height) { + if (efi_y + font->height > si->lfb_height) { u32 i; efi_y -= font->height; diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 92c0234..d62ec87 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -12,6 +12,8 @@ * Bibo Mao <bibo.mao@intel.com> * Chandramouli Narayanan <mouli@linux.intel.com> * Huang Ying <ying.huang@intel.com> + * Copyright (C) 2013 SuSE Labs + * Borislav Petkov <bp@suse.de> - runtime services VA mapping * * Copied from efi_32.c to eliminate the duplicated code between EFI * 32/64 support code. --ying 2007-10-26 @@ -51,7 +53,7 @@ #include <asm/x86_init.h> #include <asm/rtc.h> -#define EFI_DEBUG 1 +#define EFI_DEBUG #define EFI_MIN_RESERVE 5120 @@ -74,6 +76,8 @@ static __initdata efi_config_table_type_t arch_tables[] = { {NULL_GUID, NULL, NULL}, }; +u64 efi_setup; /* efi setup_data physical address */ + /* * Returns 1 if 'facility' is enabled, 0 otherwise. 
*/ @@ -110,7 +114,6 @@ static int __init setup_storage_paranoia(char *arg) } early_param("efi_no_storage_paranoia", setup_storage_paranoia); - static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc) { unsigned long flags; @@ -398,9 +401,9 @@ int __init efi_memblock_x86_reserve_range(void) return 0; } -#if EFI_DEBUG static void __init print_efi_memmap(void) { +#ifdef EFI_DEBUG efi_memory_desc_t *md; void *p; int i; @@ -415,8 +418,8 @@ static void __init print_efi_memmap(void) md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT), (md->num_pages >> (20 - EFI_PAGE_SHIFT))); } -} #endif /* EFI_DEBUG */ +} void __init efi_reserve_boot_services(void) { @@ -436,7 +439,7 @@ void __init efi_reserve_boot_services(void) * - Not within any part of the kernel * - Not the bios reserved area */ - if ((start+size >= __pa_symbol(_text) + if ((start + size > __pa_symbol(_text) && start <= __pa_symbol(_end)) || !e820_all_mapped(start, start+size, E820_RAM) || memblock_is_region_reserved(start, size)) { @@ -489,18 +492,27 @@ static int __init efi_systab_init(void *phys) { if (efi_enabled(EFI_64BIT)) { efi_system_table_64_t *systab64; + struct efi_setup_data *data = NULL; u64 tmp = 0; + if (efi_setup) { + data = early_memremap(efi_setup, sizeof(*data)); + if (!data) + return -ENOMEM; + } systab64 = early_ioremap((unsigned long)phys, sizeof(*systab64)); if (systab64 == NULL) { pr_err("Couldn't map the system table!\n"); + if (data) + early_iounmap(data, sizeof(*data)); return -ENOMEM; } efi_systab.hdr = systab64->hdr; - efi_systab.fw_vendor = systab64->fw_vendor; - tmp |= systab64->fw_vendor; + efi_systab.fw_vendor = data ? (unsigned long)data->fw_vendor : + systab64->fw_vendor; + tmp |= data ? data->fw_vendor : systab64->fw_vendor; efi_systab.fw_revision = systab64->fw_revision; efi_systab.con_in_handle = systab64->con_in_handle; tmp |= systab64->con_in_handle; @@ -514,15 +526,20 @@ static int __init efi_systab_init(void *phys) tmp |= systab64->stderr_handle; efi_systab.stderr = systab64->stderr; tmp |= systab64->stderr; - efi_systab.runtime = (void *)(unsigned long)systab64->runtime; - tmp |= systab64->runtime; + efi_systab.runtime = data ? + (void *)(unsigned long)data->runtime : + (void *)(unsigned long)systab64->runtime; + tmp |= data ? data->runtime : systab64->runtime; efi_systab.boottime = (void *)(unsigned long)systab64->boottime; tmp |= systab64->boottime; efi_systab.nr_tables = systab64->nr_tables; - efi_systab.tables = systab64->tables; - tmp |= systab64->tables; + efi_systab.tables = data ? (unsigned long)data->tables : + systab64->tables; + tmp |= data ? data->tables : systab64->tables; early_iounmap(systab64, sizeof(*systab64)); + if (data) + early_iounmap(data, sizeof(*data)); #ifdef CONFIG_X86_32 if (tmp >> 32) { pr_err("EFI data located above 4GB, disabling EFI.\n"); @@ -626,6 +643,62 @@ static int __init efi_memmap_init(void) return 0; } +/* + * A number of config table entries get remapped to virtual addresses + * after entering EFI virtual mode. However, the kexec kernel requires + * their physical addresses therefore we pass them via setup_data and + * correct those entries to their respective physical addresses here. + * + * Currently only handles smbios which is necessary for some firmware + * implementation. 
+ */ +static int __init efi_reuse_config(u64 tables, int nr_tables) +{ + int i, sz, ret = 0; + void *p, *tablep; + struct efi_setup_data *data; + + if (!efi_setup) + return 0; + + if (!efi_enabled(EFI_64BIT)) + return 0; + + data = early_memremap(efi_setup, sizeof(*data)); + if (!data) { + ret = -ENOMEM; + goto out; + } + + if (!data->smbios) + goto out_memremap; + + sz = sizeof(efi_config_table_64_t); + + p = tablep = early_memremap(tables, nr_tables * sz); + if (!p) { + pr_err("Could not map Configuration table!\n"); + ret = -ENOMEM; + goto out_memremap; + } + + for (i = 0; i < efi.systab->nr_tables; i++) { + efi_guid_t guid; + + guid = ((efi_config_table_64_t *)p)->guid; + + if (!efi_guidcmp(guid, SMBIOS_TABLE_GUID)) + ((efi_config_table_64_t *)p)->table = data->smbios; + p += sz; + } + early_iounmap(tablep, nr_tables * sz); + +out_memremap: + early_iounmap(data, sizeof(*data)); +out: + return ret; +} + void __init efi_init(void) { efi_char16_t *c16; @@ -651,6 +724,10 @@ void __init efi_init(void) set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility); + efi.config_table = (unsigned long)efi.systab->tables; + efi.fw_vendor = (unsigned long)efi.systab->fw_vendor; + efi.runtime = (unsigned long)efi.systab->runtime; + /* * Show what we know for posterity */ @@ -667,6 +744,9 @@ void __init efi_init(void) efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor); + if (efi_reuse_config(efi.systab->tables, efi.systab->nr_tables)) + return; + if (efi_config_init(arch_tables)) return; @@ -684,22 +764,12 @@ void __init efi_init(void) return; set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility); } - if (efi_memmap_init()) return; set_bit(EFI_MEMMAP, &x86_efi_facility); -#ifdef CONFIG_X86_32 - if (efi_is_native()) { - x86_platform.get_wallclock = efi_get_time; - x86_platform.set_wallclock = efi_set_rtc_mmss; - } -#endif - -#if EFI_DEBUG print_efi_memmap(); -#endif } void __init efi_late_init(void) @@ -748,36 +818,38 @@ void efi_memory_uc(u64 addr, unsigned long size) set_memory_uc(addr, npages); } -/* - * This function will switch the EFI runtime services to virtual mode. - * Essentially, look through the EFI memmap and map every region that - * has the runtime attribute bit set in its memory descriptor and update - * that memory descriptor with the virtual address obtained from ioremap(). - * This enables the runtime services to be called without having to - * thunk back into physical mode for every invocation. 
- */ -void __init efi_enter_virtual_mode(void) +void __init old_map_region(efi_memory_desc_t *md) { - efi_memory_desc_t *md, *prev_md = NULL; - efi_status_t status; + u64 start_pfn, end_pfn, end; unsigned long size; - u64 end, systab, start_pfn, end_pfn; - void *p, *va, *new_memmap = NULL; - int count = 0; + void *va; - efi.systab = NULL; + start_pfn = PFN_DOWN(md->phys_addr); + size = md->num_pages << PAGE_SHIFT; + end = md->phys_addr + size; + end_pfn = PFN_UP(end); - /* - * We don't do virtual mode, since we don't do runtime services, on - * non-native EFI - */ + if (pfn_range_is_mapped(start_pfn, end_pfn)) { + va = __va(md->phys_addr); - if (!efi_is_native()) { - efi_unmap_memmap(); - return; - } + if (!(md->attribute & EFI_MEMORY_WB)) + efi_memory_uc((u64)(unsigned long)va, size); + } else + va = efi_ioremap(md->phys_addr, size, + md->type, md->attribute); + + md->virt_addr = (u64) (unsigned long) va; + if (!va) + pr_err("ioremap of 0x%llX failed!\n", + (unsigned long long)md->phys_addr); +} + +/* Merge contiguous regions of the same type and attribute */ +static void __init efi_merge_regions(void) +{ + void *p; + efi_memory_desc_t *md, *prev_md = NULL; - /* Merge contiguous regions of the same type and attribute */ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { u64 prev_size; md = p; @@ -803,6 +875,77 @@ void __init efi_enter_virtual_mode(void) } prev_md = md; } +} + +static void __init get_systab_virt_addr(efi_memory_desc_t *md) +{ + unsigned long size; + u64 end, systab; + + size = md->num_pages << EFI_PAGE_SHIFT; + end = md->phys_addr + size; + systab = (u64)(unsigned long)efi_phys.systab; + if (md->phys_addr <= systab && systab < end) { + systab += md->virt_addr - md->phys_addr; + efi.systab = (efi_system_table_t *)(unsigned long)systab; + } +} + +static int __init save_runtime_map(void) +{ + efi_memory_desc_t *md; + void *tmp, *p, *q = NULL; + int count = 0; + + for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { + md = p; + + if (!(md->attribute & EFI_MEMORY_RUNTIME) || + (md->type == EFI_BOOT_SERVICES_CODE) || + (md->type == EFI_BOOT_SERVICES_DATA)) + continue; + tmp = krealloc(q, (count + 1) * memmap.desc_size, GFP_KERNEL); + if (!tmp) + goto out; + q = tmp; + + memcpy(q + count * memmap.desc_size, md, memmap.desc_size); + count++; + } + + efi_runtime_map_setup(q, count, memmap.desc_size); + + return 0; +out: + kfree(q); + return -ENOMEM; +} + +/* + * Map efi regions which were passed via setup_data. The virt_addr is a fixed + * addr which was used in first kernel of a kexec boot. + */ +static void __init efi_map_regions_fixed(void) +{ + void *p; + efi_memory_desc_t *md; + + for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { + md = p; + efi_map_region_fixed(md); /* FIXME: add error handling */ + get_systab_virt_addr(md); + } + +} + +/* + * Map efi memory ranges for runtime serivce and update new_memmap with virtual + * addresses. 
+ */ +static void * __init efi_map_regions(int *count) +{ + efi_memory_desc_t *md; + void *p, *tmp, *new_memmap = NULL; for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { md = p; @@ -814,53 +957,95 @@ void __init efi_enter_virtual_mode(void) continue; } - size = md->num_pages << EFI_PAGE_SHIFT; - end = md->phys_addr + size; + efi_map_region(md); + get_systab_virt_addr(md); - start_pfn = PFN_DOWN(md->phys_addr); - end_pfn = PFN_UP(end); - if (pfn_range_is_mapped(start_pfn, end_pfn)) { - va = __va(md->phys_addr); + tmp = krealloc(new_memmap, (*count + 1) * memmap.desc_size, + GFP_KERNEL); + if (!tmp) + goto out; + new_memmap = tmp; + memcpy(new_memmap + (*count * memmap.desc_size), md, + memmap.desc_size); + (*count)++; + } - if (!(md->attribute & EFI_MEMORY_WB)) - efi_memory_uc((u64)(unsigned long)va, size); - } else - va = efi_ioremap(md->phys_addr, size, - md->type, md->attribute); + return new_memmap; +out: + kfree(new_memmap); + return NULL; +} + +/* + * This function will switch the EFI runtime services to virtual mode. + * Essentially, we look through the EFI memmap and map every region that + * has the runtime attribute bit set in its memory descriptor into the + * ->trampoline_pgd page table using a top-down VA allocation scheme. + * + * The old method which used to update that memory descriptor with the + * virtual address obtained from ioremap() is still supported when the + * kernel is booted with efi=old_map on its command line. Same old + * method enabled the runtime services to be called without having to + * thunk back into physical mode for every invocation. + * + * The new method does a pagetable switch in a preemption-safe manner + * so that we're in a different address space when calling a runtime + * function. For function arguments passing we do copy the PGDs of the + * kernel page table into ->trampoline_pgd prior to each call. + * + * Specially for kexec boot, efi runtime maps in previous kernel should + * be passed in via setup_data. In that case runtime ranges will be mapped + * to the same virtual addresses as the first kernel. 
+ */ +void __init efi_enter_virtual_mode(void) +{ + efi_status_t status; + void *new_memmap = NULL; + int err, count = 0; - md->virt_addr = (u64) (unsigned long) va; + efi.systab = NULL; - if (!va) { - pr_err("ioremap of 0x%llX failed!\n", - (unsigned long long)md->phys_addr); - continue; - } + /* + * We don't do virtual mode, since we don't do runtime services, on + * non-native EFI + */ + if (!efi_is_native()) { + efi_unmap_memmap(); + return; + } - systab = (u64) (unsigned long) efi_phys.systab; - if (md->phys_addr <= systab && systab < end) { - systab += md->virt_addr - md->phys_addr; - efi.systab = (efi_system_table_t *) (unsigned long) systab; + if (efi_setup) { + efi_map_regions_fixed(); + } else { + efi_merge_regions(); + new_memmap = efi_map_regions(&count); + if (!new_memmap) { + pr_err("Error reallocating memory, EFI runtime non-functional!\n"); + return; } - new_memmap = krealloc(new_memmap, - (count + 1) * memmap.desc_size, - GFP_KERNEL); - memcpy(new_memmap + (count * memmap.desc_size), md, - memmap.desc_size); - count++; } + err = save_runtime_map(); + if (err) + pr_err("Error saving runtime map, efi runtime on kexec non-functional!!\n"); + BUG_ON(!efi.systab); - status = phys_efi_set_virtual_address_map( - memmap.desc_size * count, - memmap.desc_size, - memmap.desc_version, - (efi_memory_desc_t *)__pa(new_memmap)); + efi_setup_page_tables(); + efi_sync_low_kernel_mappings(); - if (status != EFI_SUCCESS) { - pr_alert("Unable to switch EFI into virtual mode " - "(status=%lx)!\n", status); - panic("EFI call to SetVirtualAddressMap() failed!"); + if (!efi_setup) { + status = phys_efi_set_virtual_address_map( + memmap.desc_size * count, + memmap.desc_size, + memmap.desc_version, + (efi_memory_desc_t *)__pa(new_memmap)); + + if (status != EFI_SUCCESS) { + pr_alert("Unable to switch EFI into virtual mode (status=%lx)!\n", + status); + panic("EFI call to SetVirtualAddressMap() failed!"); + } } /* @@ -883,7 +1068,8 @@ void __init efi_enter_virtual_mode(void) efi.query_variable_info = virt_efi_query_variable_info; efi.update_capsule = virt_efi_update_capsule; efi.query_capsule_caps = virt_efi_query_capsule_caps; - if (__supported_pte_mask & _PAGE_NX) + + if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX)) runtime_code_page_mkexec(); kfree(new_memmap); @@ -1013,3 +1199,15 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) return EFI_SUCCESS; } EXPORT_SYMBOL_GPL(efi_query_variable_store); + +static int __init parse_efi_cmdline(char *str) +{ + if (*str == '=') + str++; + + if (!strncmp(str, "old_map", 7)) + set_bit(EFI_OLD_MEMMAP, &x86_efi_facility); + + return 0; +} +early_param("efi", parse_efi_cmdline); diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c index 40e4469..249b183 100644 --- a/arch/x86/platform/efi/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c @@ -37,9 +37,19 @@ * claim EFI runtime service handler exclusively and to duplicate a memory in * low memory space say 0 - 3G. 
*/ - static unsigned long efi_rt_eflags; +void efi_sync_low_kernel_mappings(void) {} +void efi_setup_page_tables(void) {} + +void __init efi_map_region(efi_memory_desc_t *md) +{ + old_map_region(md); +} + +void __init efi_map_region_fixed(efi_memory_desc_t *md) {} +void __init parse_efi_setup(u64 phys_addr, u32 data_len) {} + void efi_call_phys_prelog(void) { struct desc_ptr gdt_descr; diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 39a0e7f..6284f15 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -38,10 +38,28 @@ #include <asm/efi.h> #include <asm/cacheflush.h> #include <asm/fixmap.h> +#include <asm/realmode.h> static pgd_t *save_pgd __initdata; static unsigned long efi_flags __initdata; +/* + * We allocate runtime services regions bottom-up, starting from -4G, i.e. + * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G. + */ +static u64 efi_va = -4 * (1UL << 30); +#define EFI_VA_END (-68 * (1UL << 30)) + +/* + * Scratch space used for switching the pagetable in the EFI stub + */ +struct efi_scratch { + u64 r15; + u64 prev_cr3; + pgd_t *efi_pgt; + bool use_pgd; +}; + static void __init early_code_mapping_set_exec(int executable) { efi_memory_desc_t *md; @@ -65,6 +83,9 @@ void __init efi_call_phys_prelog(void) int pgd; int n_pgds; + if (!efi_enabled(EFI_OLD_MEMMAP)) + return; + early_code_mapping_set_exec(1); local_irq_save(efi_flags); @@ -86,6 +107,10 @@ void __init efi_call_phys_epilog(void) */ int pgd; int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE); + + if (!efi_enabled(EFI_OLD_MEMMAP)) + return; + for (pgd = 0; pgd < n_pgds; pgd++) set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]); kfree(save_pgd); @@ -94,6 +119,96 @@ void __init efi_call_phys_epilog(void) early_code_mapping_set_exec(0); } +/* + * Add low kernel mappings for passing arguments to EFI functions. + */ +void efi_sync_low_kernel_mappings(void) +{ + unsigned num_pgds; + pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd); + + if (efi_enabled(EFI_OLD_MEMMAP)) + return; + + num_pgds = pgd_index(MODULES_END - 1) - pgd_index(PAGE_OFFSET); + + memcpy(pgd + pgd_index(PAGE_OFFSET), + init_mm.pgd + pgd_index(PAGE_OFFSET), + sizeof(pgd_t) * num_pgds); +} + +void efi_setup_page_tables(void) +{ + efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd; + + if (!efi_enabled(EFI_OLD_MEMMAP)) + efi_scratch.use_pgd = true; +} + +static void __init __map_region(efi_memory_desc_t *md, u64 va) +{ + pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd); + unsigned long pf = 0; + + if (!(md->attribute & EFI_MEMORY_WB)) + pf |= _PAGE_PCD; + + if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf)) + pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n", + md->phys_addr, va); +} + +void __init efi_map_region(efi_memory_desc_t *md) +{ + unsigned long size = md->num_pages << PAGE_SHIFT; + u64 pa = md->phys_addr; + + if (efi_enabled(EFI_OLD_MEMMAP)) + return old_map_region(md); + + /* + * Make sure the 1:1 mappings are present as a catch-all for b0rked + * firmware which doesn't update all internal pointers after switching + * to virtual mode and would otherwise crap on us. + */ + __map_region(md, md->phys_addr); + + efi_va -= size; + + /* Is PA 2M-aligned? 
*/ + if (!(pa & (PMD_SIZE - 1))) { + efi_va &= PMD_MASK; + } else { + u64 pa_offset = pa & (PMD_SIZE - 1); + u64 prev_va = efi_va; + + /* get us the same offset within this 2M page */ + efi_va = (efi_va & PMD_MASK) + pa_offset; + + if (efi_va > prev_va) + efi_va -= PMD_SIZE; + } + + if (efi_va < EFI_VA_END) { + pr_warn(FW_WARN "VA address range overflow!\n"); + return; + } + + /* Do the VA map */ + __map_region(md, efi_va); + md->virt_addr = efi_va; +} + +/* + * kexec kernel will use efi_map_region_fixed to map efi runtime memory ranges. + * md->virt_addr is the original virtual address which had been mapped in kexec + * 1st kernel. + */ +void __init efi_map_region_fixed(efi_memory_desc_t *md) +{ + __map_region(md, md->virt_addr); +} + void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size, u32 type, u64 attribute) { @@ -113,3 +228,8 @@ void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size, return (void __iomem *)__va(phys_addr); } + +void __init parse_efi_setup(u64 phys_addr, u32 data_len) +{ + efi_setup = phys_addr + sizeof(struct setup_data); +} diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S index 4c07cca..88073b1 100644 --- a/arch/x86/platform/efi/efi_stub_64.S +++ b/arch/x86/platform/efi/efi_stub_64.S @@ -34,10 +34,47 @@ mov %rsi, %cr0; \ mov (%rsp), %rsp + /* stolen from gcc */ + .macro FLUSH_TLB_ALL + movq %r15, efi_scratch(%rip) + movq %r14, efi_scratch+8(%rip) + movq %cr4, %r15 + movq %r15, %r14 + andb $0x7f, %r14b + movq %r14, %cr4 + movq %r15, %cr4 + movq efi_scratch+8(%rip), %r14 + movq efi_scratch(%rip), %r15 + .endm + + .macro SWITCH_PGT + cmpb $0, efi_scratch+24(%rip) + je 1f + movq %r15, efi_scratch(%rip) # r15 + # save previous CR3 + movq %cr3, %r15 + movq %r15, efi_scratch+8(%rip) # prev_cr3 + movq efi_scratch+16(%rip), %r15 # EFI pgt + movq %r15, %cr3 + 1: + .endm + + .macro RESTORE_PGT + cmpb $0, efi_scratch+24(%rip) + je 2f + movq efi_scratch+8(%rip), %r15 + movq %r15, %cr3 + movq efi_scratch(%rip), %r15 + FLUSH_TLB_ALL + 2: + .endm + ENTRY(efi_call0) SAVE_XMM subq $32, %rsp + SWITCH_PGT call *%rdi + RESTORE_PGT addq $32, %rsp RESTORE_XMM ret @@ -47,7 +84,9 @@ ENTRY(efi_call1) SAVE_XMM subq $32, %rsp mov %rsi, %rcx + SWITCH_PGT call *%rdi + RESTORE_PGT addq $32, %rsp RESTORE_XMM ret @@ -57,7 +96,9 @@ ENTRY(efi_call2) SAVE_XMM subq $32, %rsp mov %rsi, %rcx + SWITCH_PGT call *%rdi + RESTORE_PGT addq $32, %rsp RESTORE_XMM ret @@ -68,7 +109,9 @@ ENTRY(efi_call3) subq $32, %rsp mov %rcx, %r8 mov %rsi, %rcx + SWITCH_PGT call *%rdi + RESTORE_PGT addq $32, %rsp RESTORE_XMM ret @@ -80,7 +123,9 @@ ENTRY(efi_call4) mov %r8, %r9 mov %rcx, %r8 mov %rsi, %rcx + SWITCH_PGT call *%rdi + RESTORE_PGT addq $32, %rsp RESTORE_XMM ret @@ -93,7 +138,9 @@ ENTRY(efi_call5) mov %r8, %r9 mov %rcx, %r8 mov %rsi, %rcx + SWITCH_PGT call *%rdi + RESTORE_PGT addq $48, %rsp RESTORE_XMM ret @@ -109,8 +156,15 @@ ENTRY(efi_call6) mov %r8, %r9 mov %rcx, %r8 mov %rsi, %rcx + SWITCH_PGT call *%rdi + RESTORE_PGT addq $48, %rsp RESTORE_XMM ret ENDPROC(efi_call6) + + .data +ENTRY(efi_scratch) + .fill 3,8,0 + .byte 0 diff --git a/arch/x86/platform/intel-mid/Makefile b/arch/x86/platform/intel-mid/Makefile index 01cc29e..0a8ee70 100644 --- a/arch/x86/platform/intel-mid/Makefile +++ b/arch/x86/platform/intel-mid/Makefile @@ -1,6 +1,6 @@ -obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o -obj-$(CONFIG_X86_INTEL_MID) += intel_mid_vrtc.o +obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o intel_mid_vrtc.o mfld.o mrfl.o 
obj-$(CONFIG_EARLY_PRINTK_INTEL_MID) += early_printk_intel_mid.o + # SFI specific code ifdef CONFIG_X86_INTEL_MID obj-$(CONFIG_SFI) += sfi.o device_libs/ diff --git a/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c b/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c index 0d942c1..69a7836 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c @@ -22,7 +22,9 @@ static void __init *emc1403_platform_data(void *info) int intr = get_gpio_by_name("thermal_int"); int intr2nd = get_gpio_by_name("thermal_alert"); - if (intr == -1 || intr2nd == -1) + if (intr < 0) + return NULL; + if (intr2nd < 0) return NULL; i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET; diff --git a/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c index a013a48..dccae6b 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c @@ -66,7 +66,7 @@ static int __init pb_keys_init(void) gb[i].gpio = get_gpio_by_name(gb[i].desc); pr_debug("info[%2d]: name = %s, gpio = %d\n", i, gb[i].desc, gb[i].gpio); - if (gb[i].gpio == -1) + if (gb[i].gpio < 0) continue; if (i != good) diff --git a/arch/x86/platform/intel-mid/device_libs/platform_lis331.c b/arch/x86/platform/intel-mid/device_libs/platform_lis331.c index 15278c1..54226de 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_lis331.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_lis331.c @@ -21,7 +21,9 @@ static void __init *lis331dl_platform_data(void *info) int intr = get_gpio_by_name("accel_int"); int intr2nd = get_gpio_by_name("accel_2"); - if (intr == -1 || intr2nd == -1) + if (intr < 0) + return NULL; + if (intr2nd < 0) return NULL; i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET; diff --git a/arch/x86/platform/intel-mid/device_libs/platform_max7315.c b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c index 94ade10..2c8acbc 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_max7315.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c @@ -48,7 +48,7 @@ static void __init *max7315_platform_data(void *info) gpio_base = get_gpio_by_name(base_pin_name); intr = get_gpio_by_name(intr_pin_name); - if (gpio_base == -1) + if (gpio_base < 0) return NULL; max7315->gpio_base = gpio_base; if (intr != -1) { diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c b/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c index dd28d63..cfe9a47 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c @@ -19,7 +19,7 @@ static void *mpu3050_platform_data(void *info) struct i2c_board_info *i2c_info = info; int intr = get_gpio_by_name("mpu3050_int"); - if (intr == -1) + if (intr < 0) return NULL; i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET; diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c b/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c index d87182a..65c2a9a 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c @@ -26,7 +26,7 @@ static void __init *pmic_gpio_platform_data(void *info) static struct intel_pmic_gpio_platform_data pmic_gpio_pdata; int gpio_base = get_gpio_by_name("pmic_gpio_base"); - if (gpio_base == -1) + if (gpio_base < 0) gpio_base = 64; pmic_gpio_pdata.gpio_base 
= gpio_base; pmic_gpio_pdata.irq_base = gpio_base + INTEL_MID_IRQ_OFFSET; diff --git a/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c b/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c index 22881c9..33be0b3 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c @@ -34,10 +34,10 @@ static void *tca6416_platform_data(void *info) gpio_base = get_gpio_by_name(base_pin_name); intr = get_gpio_by_name(intr_pin_name); - if (gpio_base == -1) + if (gpio_base < 0) return NULL; tca6416.gpio_base = gpio_base; - if (intr != -1) { + if (intr >= 0) { i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET; tca6416.irq_base = gpio_base + INTEL_MID_IRQ_OFFSET; } else { diff --git a/arch/x86/platform/intel-mid/early_printk_intel_mid.c b/arch/x86/platform/intel-mid/early_printk_intel_mid.c index 4f702f5..e0bd082 100644 --- a/arch/x86/platform/intel-mid/early_printk_intel_mid.c +++ b/arch/x86/platform/intel-mid/early_printk_intel_mid.c @@ -22,7 +22,6 @@ #include <linux/console.h> #include <linux/kernel.h> #include <linux/delay.h> -#include <linux/init.h> #include <linux/io.h> #include <asm/fixmap.h> diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c index f90e290..1bbedc4 100644 --- a/arch/x86/platform/intel-mid/intel-mid.c +++ b/arch/x86/platform/intel-mid/intel-mid.c @@ -35,6 +35,8 @@ #include <asm/apb_timer.h> #include <asm/reboot.h> +#include "intel_mid_weak_decls.h" + /* * the clockevent devices on Moorestown/Medfield can be APBT or LAPIC clock, * cmdline option x86_intel_mid_timer can be used to override the configuration @@ -58,12 +60,16 @@ enum intel_mid_timer_options intel_mid_timer_options; +/* intel_mid_ops to store sub arch ops */ +struct intel_mid_ops *intel_mid_ops; +/* getter function for sub arch ops*/ +static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT; enum intel_mid_cpu_type __intel_mid_cpu_chip; EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip); static void intel_mid_power_off(void) { -} +}; static void intel_mid_reboot(void) { @@ -72,32 +78,6 @@ static void intel_mid_reboot(void) static unsigned long __init intel_mid_calibrate_tsc(void) { - unsigned long fast_calibrate; - u32 lo, hi, ratio, fsb; - - rdmsr(MSR_IA32_PERF_STATUS, lo, hi); - pr_debug("IA32 perf status is 0x%x, 0x%0x\n", lo, hi); - ratio = (hi >> 8) & 0x1f; - pr_debug("ratio is %d\n", ratio); - if (!ratio) { - pr_err("read a zero ratio, should be incorrect!\n"); - pr_err("force tsc ratio to 16 ...\n"); - ratio = 16; - } - rdmsr(MSR_FSB_FREQ, lo, hi); - if ((lo & 0x7) == 0x7) - fsb = PENWELL_FSB_FREQ_83SKU; - else - fsb = PENWELL_FSB_FREQ_100SKU; - fast_calibrate = ratio * fsb; - pr_debug("read penwell tsc %lu khz\n", fast_calibrate); - lapic_timer_frequency = fsb * 1000 / HZ; - /* mark tsc clocksource as reliable */ - set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE); - - if (fast_calibrate) - return fast_calibrate; - return 0; } @@ -125,13 +105,37 @@ static void __init intel_mid_time_init(void) static void intel_mid_arch_setup(void) { - if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27) - __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL; - else { + if (boot_cpu_data.x86 != 6) { pr_err("Unknown Intel MID CPU (%d:%d), default to Penwell\n", boot_cpu_data.x86, boot_cpu_data.x86_model); __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL; + goto out; } + + switch (boot_cpu_data.x86_model) { + case 0x35: + __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_CLOVERVIEW; + break; + case 0x3C: 
+ case 0x4A: + __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_TANGIER; + break; + case 0x27: + default: + __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL; + break; + } + + if (__intel_mid_cpu_chip < MAX_CPU_OPS(get_intel_mid_ops)) + intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip](); + else { + intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL](); + pr_info("ARCH: Unknown SoC, assuming PENWELL!\n"); + } + +out: + if (intel_mid_ops->arch_setup) + intel_mid_ops->arch_setup(); } /* MID systems don't have i8042 controller */ diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h new file mode 100644 index 0000000..a537ffc --- /dev/null +++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h @@ -0,0 +1,19 @@ +/* + * intel_mid_weak_decls.h: Weak declarations of intel-mid.c + * + * (C) Copyright 2013 Intel Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ + + +/* __attribute__((weak)) makes these declarations overridable */ +/* For every CPU addition a new get_<cpuname>_ops interface needs + * to be added. + */ +extern void * __cpuinit get_penwell_ops(void) __attribute__((weak)); +extern void * __cpuinit get_cloverview_ops(void) __attribute__((weak)); +extern void * __init get_tangier_ops(void) __attribute__((weak)); diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c new file mode 100644 index 0000000..4f7884e --- /dev/null +++ b/arch/x86/platform/intel-mid/mfld.c @@ -0,0 +1,75 @@ +/* + * mfld.c: Intel Medfield platform setup code + * + * (C) Copyright 2013 Intel Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License.
+ */ + +#include <linux/init.h> + +#include <asm/apic.h> +#include <asm/intel-mid.h> +#include <asm/intel_mid_vrtc.h> + +#include "intel_mid_weak_decls.h" + +static void penwell_arch_setup(void); +/* penwell arch ops */ +static struct intel_mid_ops penwell_ops = { + .arch_setup = penwell_arch_setup, +}; + +static void mfld_power_off(void) +{ +} + +static unsigned long __init mfld_calibrate_tsc(void) +{ + unsigned long fast_calibrate; + u32 lo, hi, ratio, fsb; + + rdmsr(MSR_IA32_PERF_STATUS, lo, hi); + pr_debug("IA32 perf status is 0x%x, 0x%0x\n", lo, hi); + ratio = (hi >> 8) & 0x1f; + pr_debug("ratio is %d\n", ratio); + if (!ratio) { + pr_err("read a zero ratio, should be incorrect!\n"); + pr_err("force tsc ratio to 16 ...\n"); + ratio = 16; + } + rdmsr(MSR_FSB_FREQ, lo, hi); + if ((lo & 0x7) == 0x7) + fsb = FSB_FREQ_83SKU; + else + fsb = FSB_FREQ_100SKU; + fast_calibrate = ratio * fsb; + pr_debug("read penwell tsc %lu khz\n", fast_calibrate); + lapic_timer_frequency = fsb * 1000 / HZ; + /* mark tsc clocksource as reliable */ + set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE); + + if (fast_calibrate) + return fast_calibrate; + + return 0; +} + +static void __init penwell_arch_setup() +{ + x86_platform.calibrate_tsc = mfld_calibrate_tsc; + pm_power_off = mfld_power_off; +} + +void * __cpuinit get_penwell_ops() +{ + return &penwell_ops; +} + +void * __cpuinit get_cloverview_ops() +{ + return &penwell_ops; +} diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c new file mode 100644 index 0000000..09d1015 --- /dev/null +++ b/arch/x86/platform/intel-mid/mrfl.c @@ -0,0 +1,103 @@ +/* + * mrfl.c: Intel Merrifield platform specific setup code + * + * (C) Copyright 2013 Intel Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ + +#include <linux/init.h> + +#include <asm/apic.h> +#include <asm/intel-mid.h> + +#include "intel_mid_weak_decls.h" + +static unsigned long __init tangier_calibrate_tsc(void) +{ + unsigned long fast_calibrate; + u32 lo, hi, ratio, fsb, bus_freq; + + /* *********************** */ + /* Compute TSC:Ratio * FSB */ + /* *********************** */ + + /* Compute Ratio */ + rdmsr(MSR_PLATFORM_INFO, lo, hi); + pr_debug("IA32 PLATFORM_INFO is 0x%x : %x\n", hi, lo); + + ratio = (lo >> 8) & 0xFF; + pr_debug("ratio is %d\n", ratio); + if (!ratio) { + pr_err("Read a zero ratio, force tsc ratio to 4 ...\n"); + ratio = 4; + } + + /* Compute FSB */ + rdmsr(MSR_FSB_FREQ, lo, hi); + pr_debug("Actual FSB frequency detected by SOC 0x%x : %x\n", + hi, lo); + + bus_freq = lo & 0x7; + pr_debug("bus_freq = 0x%x\n", bus_freq); + + if (bus_freq == 0) + fsb = FSB_FREQ_100SKU; + else if (bus_freq == 1) + fsb = FSB_FREQ_100SKU; + else if (bus_freq == 2) + fsb = FSB_FREQ_133SKU; + else if (bus_freq == 3) + fsb = FSB_FREQ_167SKU; + else if (bus_freq == 4) + fsb = FSB_FREQ_83SKU; + else if (bus_freq == 5) + fsb = FSB_FREQ_400SKU; + else if (bus_freq == 6) + fsb = FSB_FREQ_267SKU; + else if (bus_freq == 7) + fsb = FSB_FREQ_333SKU; + else { + BUG(); + pr_err("Invalid bus_freq! 
Setting to minimal value!\n"); + fsb = FSB_FREQ_100SKU; + } + + /* TSC = FSB Freq * Resolved HFM Ratio */ + fast_calibrate = ratio * fsb; + pr_debug("calculate tangier tsc %lu KHz\n", fast_calibrate); + + /* ************************************ */ + /* Calculate Local APIC Timer Frequency */ + /* ************************************ */ + lapic_timer_frequency = (fsb * 1000) / HZ; + + pr_debug("Setting lapic_timer_frequency = %d\n", + lapic_timer_frequency); + + /* mark tsc clocksource as reliable */ + set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE); + + if (fast_calibrate) + return fast_calibrate; + + return 0; +} + +static void __init tangier_arch_setup(void) +{ + x86_platform.calibrate_tsc = tangier_calibrate_tsc; +} + +/* tangier arch ops */ +static struct intel_mid_ops tangier_ops = { + .arch_setup = tangier_arch_setup, +}; + +void * __cpuinit get_tangier_ops() +{ + return &tangier_ops; +} diff --git a/arch/x86/platform/intel-mid/sfi.c b/arch/x86/platform/intel-mid/sfi.c index c84c1ca..994c40b 100644 --- a/arch/x86/platform/intel-mid/sfi.c +++ b/arch/x86/platform/intel-mid/sfi.c @@ -224,7 +224,7 @@ int get_gpio_by_name(const char *name) if (!strncmp(name, pentry->pin_name, SFI_NAME_LEN)) return pentry->pin_no; } - return -1; + return -EINVAL; } void __init intel_scu_device_register(struct platform_device *pdev) @@ -250,7 +250,7 @@ static void __init intel_scu_spi_device_register(struct spi_board_info *sdev) sdev->modalias); return; } - memcpy(new_dev, sdev, sizeof(*sdev)); + *new_dev = *sdev; spi_devs[spi_next_dev++] = new_dev; } @@ -271,7 +271,7 @@ static void __init intel_scu_i2c_device_register(int bus, idev->type); return; } - memcpy(new_dev, idev, sizeof(*idev)); + *new_dev = *idev; i2c_bus[i2c_next_dev] = bus; i2c_devs[i2c_next_dev++] = new_dev; @@ -337,6 +337,8 @@ static void __init sfi_handle_ipc_dev(struct sfi_device_table_entry *pentry, pr_debug("IPC bus, name = %16.16s, irq = 0x%2x\n", pentry->name, pentry->irq); pdata = intel_mid_sfi_get_pdata(dev, pentry); + if (IS_ERR(pdata)) + return; pdev = platform_device_alloc(pentry->name, 0); if (pdev == NULL) { @@ -370,6 +372,8 @@ static void __init sfi_handle_spi_dev(struct sfi_device_table_entry *pentry, spi_info.chip_select); pdata = intel_mid_sfi_get_pdata(dev, &spi_info); + if (IS_ERR(pdata)) + return; spi_info.platform_data = pdata; if (dev->delay) @@ -395,6 +399,8 @@ static void __init sfi_handle_i2c_dev(struct sfi_device_table_entry *pentry, i2c_info.addr); pdata = intel_mid_sfi_get_pdata(dev, &i2c_info); i2c_info.platform_data = pdata; + if (IS_ERR(pdata)) + return; if (dev->delay) intel_scu_i2c_device_register(pentry->host_num, &i2c_info); @@ -443,13 +449,35 @@ static int __init sfi_parse_devs(struct sfi_table_header *table) * so we have to enable them one by one here */ ioapic = mp_find_ioapic(irq); - irq_attr.ioapic = ioapic; - irq_attr.ioapic_pin = irq; - irq_attr.trigger = 1; - irq_attr.polarity = 1; - io_apic_set_pci_routing(NULL, irq, &irq_attr); - } else + if (ioapic >= 0) { + irq_attr.ioapic = ioapic; + irq_attr.ioapic_pin = irq; + irq_attr.trigger = 1; + if (intel_mid_identify_cpu() == + INTEL_MID_CPU_CHIP_TANGIER) { + if (!strncmp(pentry->name, + "r69001-ts-i2c", 13)) + /* active low */ + irq_attr.polarity = 1; + else if (!strncmp(pentry->name, + "synaptics_3202", 14)) + /* active low */ + irq_attr.polarity = 1; + else if (irq == 41) + /* fast_int_1 */ + irq_attr.polarity = 1; + else + /* active high */ + irq_attr.polarity = 0; + } else { + /* PNW and CLV go with active low */ + irq_attr.polarity = 1; + } 
+ io_apic_set_pci_routing(NULL, irq, &irq_attr); + } + } else { irq = 0; /* No irq */ + } dev = get_device_id(pentry->type, pentry->name); diff --git a/arch/x86/platform/iris/iris.c b/arch/x86/platform/iris/iris.c index e6cb80f..4d171e8 100644 --- a/arch/x86/platform/iris/iris.c +++ b/arch/x86/platform/iris/iris.c @@ -27,7 +27,6 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/delay.h> -#include <linux/init.h> #include <linux/pm.h> #include <asm/io.h> diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index 0f92173..dfe605a 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -433,15 +433,49 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp) return; } -static inline unsigned long cycles_2_us(unsigned long long cyc) +/* + * Not to be confused with cycles_2_ns() from tsc.c; this gives a relative + * number, not an absolute. It converts a duration in cycles to a duration in + * ns. + */ +static inline unsigned long long cycles_2_ns(unsigned long long cyc) { + struct cyc2ns_data *data = cyc2ns_read_begin(); unsigned long long ns; - unsigned long us; - int cpu = smp_processor_id(); - ns = (cyc * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR; - us = ns / 1000; - return us; + ns = mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift); + + cyc2ns_read_end(data); + return ns; +} + +/* + * The reverse of the above; converts a duration in ns to a duration in cycles. + */ +static inline unsigned long long ns_2_cycles(unsigned long long ns) +{ + struct cyc2ns_data *data = cyc2ns_read_begin(); + unsigned long long cyc; + + cyc = (ns << data->cyc2ns_shift) / data->cyc2ns_mul; + + cyc2ns_read_end(data); + return cyc; +} + +static inline unsigned long cycles_2_us(unsigned long long cyc) +{ + return cycles_2_ns(cyc) / NSEC_PER_USEC; +} + +static inline cycles_t sec_2_cycles(unsigned long sec) +{ + return ns_2_cycles(sec * NSEC_PER_SEC); +} + +static inline unsigned long long usec_2_cycles(unsigned long usec) +{ + return ns_2_cycles(usec * NSEC_PER_USEC); } /* @@ -668,16 +702,6 @@ static int wait_completion(struct bau_desc *bau_desc, bcp, try); } -static inline cycles_t sec_2_cycles(unsigned long sec) -{ - unsigned long ns; - cycles_t cyc; - - ns = sec * 1000000000; - cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id())); - return cyc; -} - /* * Our retries are blocked by all destination sw ack resources being * in use, and a timeout is pending. 
In that case hardware immediately @@ -1070,12 +1094,13 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, unsigned long status; bcp = &per_cpu(bau_control, cpu); - stat = bcp->statp; - stat->s_enters++; if (bcp->nobau) return cpumask; + stat = bcp->statp; + stat->s_enters++; + if (bcp->busy) { descriptor_status = read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0); @@ -1326,16 +1351,6 @@ static void ptc_seq_stop(struct seq_file *file, void *data) { } -static inline unsigned long long usec_2_cycles(unsigned long microsec) -{ - unsigned long ns; - unsigned long long cyc; - - ns = microsec * 1000; - cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id())); - return cyc; -} - /* * Display the statistics thru /proc/sgi_uv/ptc_statistics * 'data' points to the cpu number diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c index a44f457..bad628a 100644 --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c @@ -29,12 +29,10 @@ void __init reserve_real_mode(void) void __init setup_real_mode(void) { u16 real_mode_seg; - u32 *rel; + const u32 *rel; u32 count; - u32 *ptr; - u16 *seg; - int i; unsigned char *base; + unsigned long phys_base; struct trampoline_header *trampoline_header; size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob); #ifdef CONFIG_X86_64 @@ -46,23 +44,23 @@ void __init setup_real_mode(void) memcpy(base, real_mode_blob, size); - real_mode_seg = __pa(base) >> 4; + phys_base = __pa(base); + real_mode_seg = phys_base >> 4; + rel = (u32 *) real_mode_relocs; /* 16-bit segment relocations. */ - count = rel[0]; - rel = &rel[1]; - for (i = 0; i < count; i++) { - seg = (u16 *) (base + rel[i]); + count = *rel++; + while (count--) { + u16 *seg = (u16 *) (base + *rel++); *seg = real_mode_seg; } /* 32-bit linear relocations. */ - count = rel[i]; - rel = &rel[i + 1]; - for (i = 0; i < count; i++) { - ptr = (u32 *) (base + rel[i]); - *ptr += __pa(base); + count = *rel++; + while (count--) { + u32 *ptr = (u32 *) (base + *rel++); + *ptr += phys_base; } /* Must be perfomed *after* relocation. 
*/ diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile index 8869287..9cac825 100644 --- a/arch/x86/realmode/rm/Makefile +++ b/arch/x86/realmode/rm/Makefile @@ -73,9 +73,10 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \ -march=i386 -mregparm=3 \ -include $(srctree)/$(src)/../../boot/code16gcc.h \ -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ + -mno-mmx -mno-sse \ $(call cc-option, -ffreestanding) \ $(call cc-option, -fno-toplevel-reorder,\ - $(call cc-option, -fno-unit-at-a-time)) \ + $(call cc-option, -fno-unit-at-a-time)) \ $(call cc-option, -fno-stack-protector) \ $(call cc-option, -mpreferred-stack-boundary=2) KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S index f932ea6..d66c607 100644 --- a/arch/x86/realmode/rm/reboot.S +++ b/arch/x86/realmode/rm/reboot.S @@ -1,5 +1,4 @@ #include <linux/linkage.h> -#include <linux/init.h> #include <asm/segment.h> #include <asm/page_types.h> #include <asm/processor-flags.h> diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S index c1b2791..48ddd76 100644 --- a/arch/x86/realmode/rm/trampoline_32.S +++ b/arch/x86/realmode/rm/trampoline_32.S @@ -20,7 +20,6 @@ */ #include <linux/linkage.h> -#include <linux/init.h> #include <asm/segment.h> #include <asm/page_types.h> #include "realmode.h" diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S index bb360dc..dac7b20 100644 --- a/arch/x86/realmode/rm/trampoline_64.S +++ b/arch/x86/realmode/rm/trampoline_64.S @@ -25,7 +25,6 @@ */ #include <linux/linkage.h> -#include <linux/init.h> #include <asm/pgtable_types.h> #include <asm/page_types.h> #include <asm/msr.h> diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl index aabfb83..96bc506 100644 --- a/arch/x86/syscalls/syscall_32.tbl +++ b/arch/x86/syscalls/syscall_32.tbl @@ -357,3 +357,5 @@ 348 i386 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev 349 i386 kcmp sys_kcmp 350 i386 finit_module sys_finit_module +351 i386 sched_setattr sys_sched_setattr +352 i386 sched_getattr sys_sched_getattr diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl index 38ae65d..a12bddc 100644 --- a/arch/x86/syscalls/syscall_64.tbl +++ b/arch/x86/syscalls/syscall_64.tbl @@ -320,6 +320,8 @@ 311 64 process_vm_writev sys_process_vm_writev 312 common kcmp sys_kcmp 313 common finit_module sys_finit_module +314 common sched_setattr sys_sched_setattr +315 common sched_getattr sys_sched_getattr # # x32-specific system call numbers start at 512 to avoid cache impact diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index f7bab68..11f9285 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c @@ -722,15 +722,25 @@ static void percpu_init(void) /* * Check to see if a symbol lies in the .data..percpu section. - * For some as yet not understood reason the "__init_begin" - * symbol which immediately preceeds the .data..percpu section - * also shows up as it it were part of it so we do an explict - * check for that symbol name and ignore it. + * + * The linker incorrectly associates some symbols with the + * .data..percpu section so we also need to check the symbol + * name to make sure that we classify the symbol correctly. 
+ * + * The GNU linker incorrectly associates: + * __init_begin + * __per_cpu_load + * + * The "gold" linker incorrectly associates: + * init_per_cpu__irq_stack_union + * init_per_cpu__gdt_page */ static int is_percpu_sym(ElfW(Sym) *sym, const char *symname) { return (sym->st_shndx == per_cpu_shndx) && - strcmp(symname, "__init_begin"); + strcmp(symname, "__init_begin") && + strcmp(symname, "__per_cpu_load") && + strncmp(symname, "init_per_cpu_", 13); } diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index 2ada505..eb5d7a5 100644 --- a/arch/x86/vdso/vclock_gettime.c +++ b/arch/x86/vdso/vclock_gettime.c @@ -178,7 +178,7 @@ notrace static int __always_inline do_realtime(struct timespec *ts) ts->tv_nsec = 0; do { - seq = read_seqcount_begin_no_lockdep(>od->seq); + seq = raw_read_seqcount_begin(>od->seq); mode = gtod->clock.vclock_mode; ts->tv_sec = gtod->wall_time_sec; ns = gtod->wall_time_snsec; @@ -198,7 +198,7 @@ notrace static int do_monotonic(struct timespec *ts) ts->tv_nsec = 0; do { - seq = read_seqcount_begin_no_lockdep(>od->seq); + seq = raw_read_seqcount_begin(>od->seq); mode = gtod->clock.vclock_mode; ts->tv_sec = gtod->monotonic_time_sec; ns = gtod->monotonic_time_snsec; @@ -214,7 +214,7 @@ notrace static int do_realtime_coarse(struct timespec *ts) { unsigned long seq; do { - seq = read_seqcount_begin_no_lockdep(>od->seq); + seq = raw_read_seqcount_begin(>od->seq); ts->tv_sec = gtod->wall_time_coarse.tv_sec; ts->tv_nsec = gtod->wall_time_coarse.tv_nsec; } while (unlikely(read_seqcount_retry(>od->seq, seq))); @@ -225,7 +225,7 @@ notrace static int do_monotonic_coarse(struct timespec *ts) { unsigned long seq; do { - seq = read_seqcount_begin_no_lockdep(>od->seq); + seq = raw_read_seqcount_begin(>od->seq); ts->tv_sec = gtod->monotonic_time_coarse.tv_sec; ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec; } while (unlikely(read_seqcount_retry(>od->seq, seq))); diff --git a/arch/x86/vdso/vdso.S b/arch/x86/vdso/vdso.S index 01f5e3b..1e13eb8 100644 --- a/arch/x86/vdso/vdso.S +++ b/arch/x86/vdso/vdso.S @@ -1,6 +1,5 @@ #include <asm/page_types.h> #include <linux/linkage.h> -#include <linux/init.h> __PAGE_ALIGNED_DATA diff --git a/arch/x86/vdso/vdsox32.S b/arch/x86/vdso/vdsox32.S index d6b9a7f..295f1c7 100644 --- a/arch/x86/vdso/vdsox32.S +++ b/arch/x86/vdso/vdsox32.S @@ -1,6 +1,5 @@ #include <asm/page_types.h> #include <linux/linkage.h> -#include <linux/init.h> __PAGE_ALIGNED_DATA diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 8d24dcb..f8df0cc 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -64,6 +64,9 @@ config MMU config VARIANT_IRQ_SWITCH def_bool n +config HAVE_XTENSA_GPIO32 + def_bool n + menu "Processor type and features" choice @@ -73,16 +76,19 @@ choice config XTENSA_VARIANT_FSF bool "fsf - default (not generic) configuration" select MMU + select HAVE_XTENSA_GPIO32 config XTENSA_VARIANT_DC232B bool "dc232b - Diamond 232L Standard Core Rev.B (LE)" select MMU + select HAVE_XTENSA_GPIO32 help This variant refers to Tensilica's Diamond 232L Standard core Rev.B (LE). config XTENSA_VARIANT_DC233C bool "dc233c - Diamond 233L Standard Core Rev.C (LE)" select MMU + select HAVE_XTENSA_GPIO32 help This variant refers to Tensilica's Diamond 233L Standard core Rev.C (LE). 
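The vclock_gettime() readers touched above copy the time fields inside a sequence-counter loop and retry whenever the counter changed underneath them. The following is a minimal user-space sketch of that retry pattern, assuming C11 atomics; it is not the kernel's seqcount implementation, and the clock_snapshot/snap_read_* names are invented for the illustration.

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-in for the clock data a vDSO reader would copy. */
struct clock_snapshot {
	atomic_uint seq;	/* even: stable, odd: writer mid-update */
	long tv_sec;
	long tv_nsec;
};

/* Wait until no update is in flight and remember the counter we saw. */
static unsigned snap_read_begin(struct clock_snapshot *s)
{
	unsigned seq;

	while ((seq = atomic_load_explicit(&s->seq, memory_order_acquire)) & 1)
		;	/* writer active, spin until it finishes */
	return seq;
}

/* True if the counter moved while the fields were being copied. */
static int snap_read_retry(struct clock_snapshot *s, unsigned seq)
{
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&s->seq, memory_order_relaxed) != seq;
}

static void snap_read(struct clock_snapshot *s, long *sec, long *nsec)
{
	unsigned seq;

	do {
		seq = snap_read_begin(s);
		*sec = s->tv_sec;	/* plain loads, kept simple for the sketch */
		*nsec = s->tv_nsec;
	} while (snap_read_retry(s, seq));
}

int main(void)
{
	/* No writer runs in this demo, so the loop completes on its first pass. */
	struct clock_snapshot s = { .seq = 0, .tv_sec = 1389744000, .tv_nsec = 500 };
	long sec, nsec;

	snap_read(&s, &sec, &nsec);
	printf("%ld.%09ld\n", sec, nsec);
	return 0;
}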
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h index ef02167..e1ee6b5 100644 --- a/arch/xtensa/include/asm/barrier.h +++ b/arch/xtensa/include/asm/barrier.h @@ -9,21 +9,14 @@ #ifndef _XTENSA_SYSTEM_H #define _XTENSA_SYSTEM_H -#define smp_read_barrier_depends() do { } while(0) -#define read_barrier_depends() do { } while(0) - #define mb() ({ __asm__ __volatile__("memw" : : : "memory"); }) #define rmb() barrier() #define wmb() mb() #ifdef CONFIG_SMP #error smp_* not defined -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() #endif -#define set_mb(var, value) do { var = value; mb(); } while (0) +#include <asm-generic/barrier.h> #endif /* _XTENSA_SYSTEM_H */
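The xtensa barrier.h change above keeps only the barriers the architecture must define itself and pulls everything else from asm-generic/barrier.h. The fragment below is a stand-alone sketch of that ifndef-fallback idiom, not the kernel's actual generic header; the demo_* names are invented, and the empty asm statements are compiler-only barriers standing in for real fence instructions.

#include <stdio.h>

/*
 * Part 1: what a minimal architecture header might define for itself.
 * An empty asm with a "memory" clobber is only a compiler barrier; a real
 * port would emit an actual fence instruction here.
 */
#define demo_mb()	__asm__ __volatile__("" ::: "memory")

/* Part 2: generic fallbacks fill in whatever is still undefined. */
#ifndef demo_mb
#define demo_mb()	__asm__ __volatile__("" ::: "memory")
#endif

#ifndef demo_rmb	/* not defined in part 1, so it falls back to demo_mb() */
#define demo_rmb()	demo_mb()
#endif

#ifndef demo_wmb
#define demo_wmb()	demo_mb()
#endif

#ifndef demo_smp_mb	/* a UP-only configuration can use a compiler barrier */
#define demo_smp_mb()	__asm__ __volatile__("" ::: "memory")
#endif

#ifndef demo_set_mb	/* store a value, then order it against later accesses */
#define demo_set_mb(var, value)	do { (var) = (value); demo_mb(); } while (0)
#endif

int main(void)
{
	int flag = 0;

	demo_set_mb(flag, 1);	/* uses the fallback definition from part 2 */
	demo_rmb();
	printf("flag=%d\n", flag);
	return 0;
}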