| author | Takashi Iwai <tiwai@suse.de> | 2014-02-25 12:12:17 +0100 |
|---|---|---|
| committer | Takashi Iwai <tiwai@suse.de> | 2014-02-25 12:12:17 +0100 |
| commit | d01a838c86b60fdce4fbc9e51d5d14d6cfe0a902 (patch) | |
| tree | 4fe17a547c8c4e403c18cdc6a6c5e30c6f437bc9 /arch | |
| parent | bf68665d7a5647591258c120382fd64465db7d63 (diff) | |
| parent | 37c367ecdb9a01c9acc980e6e17913570a1788a7 (diff) | |
Merge branch 'for-linus' into HEAD
Diffstat (limited to 'arch')
125 files changed, 1207 insertions, 356 deletions
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index b9d6a8b..6d1e43d 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -38,6 +38,7 @@ dtb-$(CONFIG_ARCH_AT91) += at91sam9g35ek.dtb
 dtb-$(CONFIG_ARCH_AT91) += at91sam9x25ek.dtb
 dtb-$(CONFIG_ARCH_AT91) += at91sam9x35ek.dtb
 # sama5d3
+dtb-$(CONFIG_ARCH_AT91) += at91-sama5d3_xplained.dtb
 dtb-$(CONFIG_ARCH_AT91) += sama5d31ek.dtb
 dtb-$(CONFIG_ARCH_AT91) += sama5d33ek.dtb
 dtb-$(CONFIG_ARCH_AT91) += sama5d34ek.dtb
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
new file mode 100644
index 0000000..ce13755
--- /dev/null
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -0,0 +1,229 @@
+/*
+ * at91-sama5d3_xplained.dts - Device Tree file for the SAMA5D3 Xplained board
+ *
+ * Copyright (C) 2014 Atmel,
+ *               2014 Nicolas Ferre <nicolas.ferre@atmel.com>
+ *
+ * Licensed under GPLv2 or later.
+ */
+/dts-v1/;
+#include "sama5d36.dtsi"
+
+/ {
+	model = "SAMA5D3 Xplained";
+	compatible = "atmel,sama5d3-xplained", "atmel,sama5d3", "atmel,sama5";
+
+	chosen {
+		bootargs = "console=ttyS0,115200";
+	};
+
+	memory {
+		reg = <0x20000000 0x10000000>;
+	};
+
+	ahb {
+		apb {
+			mmc0: mmc@f0000000 {
+				pinctrl-0 = <&pinctrl_mmc0_clk_cmd_dat0 &pinctrl_mmc0_dat1_3 &pinctrl_mmc0_dat4_7 &pinctrl_mmc0_cd>;
+				status = "okay";
+				slot@0 {
+					reg = <0>;
+					bus-width = <8>;
+					cd-gpios = <&pioE 0 GPIO_ACTIVE_LOW>;
+				};
+			};
+
+			spi0: spi@f0004000 {
+				cs-gpios = <&pioD 13 0>;
+				status = "okay";
+			};
+
+			can0: can@f000c000 {
+				status = "okay";
+			};
+
+			i2c0: i2c@f0014000 {
+				status = "okay";
+			};
+
+			i2c1: i2c@f0018000 {
+				status = "okay";
+			};
+
+			macb0: ethernet@f0028000 {
+				phy-mode = "rgmii";
+				status = "okay";
+			};
+
+			usart0: serial@f001c000 {
+				status = "okay";
+			};
+
+			usart1: serial@f0020000 {
+				pinctrl-0 = <&pinctrl_usart1 &pinctrl_usart1_rts_cts>;
+				status = "okay";
+			};
+
+			uart0: serial@f0024000 {
+				status = "okay";
+			};
+
+			mmc1: mmc@f8000000 {
+				pinctrl-0 = <&pinctrl_mmc1_clk_cmd_dat0 &pinctrl_mmc1_dat1_3 &pinctrl_mmc1_cd>;
+				status = "okay";
+				slot@0 {
+					reg = <0>;
+					bus-width = <4>;
+					cd-gpios = <&pioE 1 GPIO_ACTIVE_HIGH>;
+				};
+			};
+
+			spi1: spi@f8008000 {
+				cs-gpios = <&pioC 25 0>, <0>, <0>, <&pioD 16 0>;
+				status = "okay";
+			};
+
+			adc0: adc@f8018000 {
+				pinctrl-0 = <
+					&pinctrl_adc0_adtrg
+					&pinctrl_adc0_ad0
+					&pinctrl_adc0_ad1
+					&pinctrl_adc0_ad2
+					&pinctrl_adc0_ad3
+					&pinctrl_adc0_ad4
+					&pinctrl_adc0_ad5
+					&pinctrl_adc0_ad6
+					&pinctrl_adc0_ad7
+					&pinctrl_adc0_ad8
+					&pinctrl_adc0_ad9
+					>;
+				status = "okay";
+			};
+
+			i2c2: i2c@f801c000 {
+				dmas = <0>, <0>;	/* Do not use DMA for i2c2 */
+				status = "okay";
+			};
+
+			macb1: ethernet@f802c000 {
+				phy-mode = "rmii";
+				status = "okay";
+			};
+
+			dbgu: serial@ffffee00 {
+				status = "okay";
+			};
+
+			pinctrl@fffff200 {
+				board {
+					pinctrl_mmc0_cd: mmc0_cd {
+						atmel,pins =
+							<AT91_PIOE 0 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
+					};
+
+					pinctrl_mmc1_cd: mmc1_cd {
+						atmel,pins =
+							<AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
+					};
+
+					pinctrl_usba_vbus: usba_vbus {
+						atmel,pins =
+							<AT91_PIOE 9 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>;	/* PE9, conflicts with A9 */
+					};
+				};
+			};
+
+			pmc: pmc@fffffc00 {
+				main: mainck {
+					clock-frequency = <12000000>;
+				};
+			};
+		};
+
+		nand0: nand@60000000 {
+			nand-bus-width = <8>;
+			nand-ecc-mode = "hw";
+			atmel,has-pmecc;
+			atmel,pmecc-cap = <4>;
+			atmel,pmecc-sector-size = <512>;
+			nand-on-flash-bbt;
+			status = "okay";
+
+			at91bootstrap@0 {
+				label = "at91bootstrap";
+				reg = <0x0 0x40000>;
+			};
+
+			bootloader@40000 {
+				label = "bootloader";
+				reg = <0x40000 0x80000>;
+			};
+
+			bootloaderenv@c0000 {
+				label = "bootloader env";
+				reg = <0xc0000 0xc0000>;
+			};
+
+			dtb@180000 {
+				label = "device tree";
+				reg = <0x180000 0x80000>;
+			};
+
+			kernel@200000 {
+				label = "kernel";
+				reg = <0x200000 0x600000>;
+			};
+
+			rootfs@800000 {
+				label = "rootfs";
+				reg = <0x800000 0x0f800000>;
+			};
+		};
+
+		usb0: gadget@00500000 {
+			atmel,vbus-gpio = <&pioE 9 GPIO_ACTIVE_HIGH>;	/* PE9, conflicts with A9 */
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_usba_vbus>;
+			status = "okay";
+		};
+
+		usb1: ohci@00600000 {
+			num-ports = <3>;
+			atmel,vbus-gpio = <0
+					   &pioE 3 GPIO_ACTIVE_LOW
+					   &pioE 4 GPIO_ACTIVE_LOW
+					  >;
+			status = "okay";
+		};
+
+		usb2: ehci@00700000 {
+			status = "okay";
+		};
+	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+
+		bp3 {
+			label = "PB_USER";
+			gpios = <&pioE 29 GPIO_ACTIVE_LOW>;
+			linux,code = <0x104>;
+			gpio-key,wakeup;
+		};
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		d2 {
+			label = "d2";
+			gpios = <&pioE 23 GPIO_ACTIVE_LOW>;	/* PE23, conflicts with A23, CTS2 */
+			linux,default-trigger = "heartbeat";
+		};
+
+		d3 {
+			label = "d3";
+			gpios = <&pioE 24 GPIO_ACTIVE_HIGH>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index 0042f73..fece866 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -523,7 +523,7 @@
 			};
 
 			i2c0: i2c@fff88000 {
-				compatible = "atmel,at91sam9263-i2c";
+				compatible = "atmel,at91sam9260-i2c";
 				reg = <0xfff88000 0x100>;
 				interrupts = <13 IRQ_TYPE_LEVEL_HIGH 6>;
 				#address-cells = <1>;
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index e9487f6..924a6a6 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -124,6 +124,10 @@
 			nand-on-flash-bbt;
 			status = "okay";
 		};
+
+		usb0: ohci@00500000 {
+			status = "okay";
+		};
 	};
 
 	leds {
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index 52447c1..3d5faf8 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -1228,7 +1228,7 @@
 			compatible = "atmel,at91rm9200-ohci", "usb-ohci";
 			reg = <0x00600000 0x100000>;
 			interrupts = <32 IRQ_TYPE_LEVEL_HIGH 2>;
-			clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>,
+			clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>,
				 <&uhpck>;
 			clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
 			status = "disabled";
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
index 0c1e8d8..6cb9b68 100644
--- a/arch/arm/boot/dts/ste-href.dtsi
+++ b/arch/arm/boot/dts/ste-href.dtsi
@@ -188,7 +188,6 @@
 		msp2: msp@80117000 {
 			pinctrl-names = "default";
 			pinctrl-0 = <&msp2_default_mode>;
-			status = "okay";
 		};
 
 		msp3: msp@80125000 {
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 040bb0e..10666ca 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -315,7 +315,7 @@
 		ranges;
 
 		emac: ethernet@01c0b000 {
-			compatible = "allwinner,sun4i-emac";
+			compatible = "allwinner,sun4i-a10-emac";
 			reg = <0x01c0b000 0x1000>;
 			interrupts = <55>;
 			clocks = <&ahb_gates 17>;
@@ -323,7 +323,7 @@
 		};
 
 		mdio@01c0b080 {
-			compatible = "allwinner,sun4i-mdio";
+			compatible = "allwinner,sun4i-a10-mdio";
 			reg = <0x01c0b080 0x14>;
 			status = "disabled";
 			#address-cells = <1>;
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index ea16054..6496159 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -278,7 +278,7 @@
 		ranges;
 
 		emac: ethernet@01c0b000 {
-			compatible = "allwinner,sun4i-emac";
+			compatible = "allwinner,sun4i-a10-emac";
 			reg = <0x01c0b000 0x1000>;
 			interrupts = <55>;
 			clocks = <&ahb_gates 17>;
@@ -286,7 +286,7 @@
 		};
 
 		mdio@01c0b080 {
-			compatible = "allwinner,sun4i-mdio";
+			compatible = "allwinner,sun4i-a10-mdio";
 			reg = <0x01c0b080 0x14>;
 			status = "disabled";
 			#address-cells = <1>;
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 119f066..9ff0948 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -340,7 +340,7 @@
 		ranges;
 
 		emac: ethernet@01c0b000 {
-			compatible = "allwinner,sun4i-emac";
+			compatible = "allwinner,sun4i-a10-emac";
 			reg = <0x01c0b000 0x1000>;
 			interrupts = <0 55 4>;
 			clocks = <&ahb_gates 17>;
@@ -348,7 +348,7 @@
 		};
 
 		mdio@01c0b080 {
-			compatible = "allwinner,sun4i-mdio";
+			compatible = "allwinner,sun4i-a10-mdio";
 			reg = <0x01c0b080 0x14>;
 			status = "disabled";
 			#address-cells = <1>;
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 845bc74..ee69829 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -29,6 +29,7 @@ CONFIG_ARCH_OMAP3=y
 CONFIG_ARCH_OMAP4=y
 CONFIG_SOC_OMAP5=y
 CONFIG_SOC_AM33XX=y
+CONFIG_SOC_DRA7XX=y
 CONFIG_SOC_AM43XX=y
 CONFIG_ARCH_ROCKCHIP=y
 CONFIG_ARCH_SOCFPGA=y
diff --git a/arch/arm/mach-hisi/Kconfig b/arch/arm/mach-hisi/Kconfig
index 8f4649b..1abae5f 100644
--- a/arch/arm/mach-hisi/Kconfig
+++ b/arch/arm/mach-hisi/Kconfig
@@ -8,7 +8,7 @@ config ARCH_HI3xxx
 	select CLKSRC_OF
 	select GENERIC_CLOCKEVENTS
 	select HAVE_ARM_SCU
-	select HAVE_ARM_TWD
+	select HAVE_ARM_TWD if SMP
 	select HAVE_SMP
 	select PINCTRL
 	select PINCTRL_SINGLE
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index af2e582..4d677f4 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -482,6 +482,9 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
 	if (IS_ENABLED(CONFIG_PCI_IMX6))
 		clk_set_parent(clk[lvds1_sel], clk[sata_ref]);
 
+	/* Set initial power mode */
+	imx6q_set_lpm(WAIT_CLOCKED);
+
 	np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
 	base = of_iomap(np, 0);
 	WARN_ON(!base);
diff --git a/arch/arm/mach-imx/clk-imx6sl.c b/arch/arm/mach-imx/clk-imx6sl.c
index 3781a18..4c86f30 100644
--- a/arch/arm/mach-imx/clk-imx6sl.c
+++ b/arch/arm/mach-imx/clk-imx6sl.c
@@ -266,6 +266,9 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
 	/* Audio-related clocks configuration */
 	clk_set_parent(clks[IMX6SL_CLK_SPDIF0_SEL], clks[IMX6SL_CLK_PLL3_PFD3]);
 
+	/* Set initial power mode */
+	imx6q_set_lpm(WAIT_CLOCKED);
+
 	np = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-gpt");
 	base = of_iomap(np, 0);
 	WARN_ON(!base);
diff --git a/arch/arm/mach-imx/pm-imx6q.c b/arch/arm/mach-imx/pm-imx6q.c
index 9d47adc..7a9b985 100644
--- a/arch/arm/mach-imx/pm-imx6q.c
+++ b/arch/arm/mach-imx/pm-imx6q.c
@@ -236,8 +236,6 @@ void __init imx6q_pm_init(void)
 	regmap_update_bits(gpr, IOMUXC_GPR1, IMX6Q_GPR1_GINT,
			   IMX6Q_GPR1_GINT);
 
-	/* Set initial power mode */
-	imx6q_set_lpm(WAIT_CLOCKED);
 	suspend_set_ops(&imx6q_pm_ops);
 }
diff --git a/arch/arm/mach-moxart/Kconfig b/arch/arm/mach-moxart/Kconfig
index ba470d6..3795ae2 100644
--- a/arch/arm/mach-moxart/Kconfig
+++ b/arch/arm/mach-moxart/Kconfig
@@ -2,7 +2,6 @@ config ARCH_MOXART
 	bool "MOXA ART SoC" if ARCH_MULTI_V4T
 	select CPU_FA526
 	select ARM_DMA_MEM_BUFFERABLE
-	select DMA_OF
 	select USE_OF
 	select CLKSRC_OF
 	select CLKSRC_MMIO
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 653b489..e2ce4f8 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -54,7 +54,7 @@ config SOC_OMAP5
 	select ARM_GIC
 	select CPU_V7
 	select HAVE_ARM_SCU if SMP
-	select HAVE_ARM_TWD if LOCAL_TIMERS
+	select HAVE_ARM_TWD if SMP
 	select HAVE_SMP
 	select HAVE_ARM_ARCH_TIMER
 	select ARM_ERRATA_798181 if SMP
diff --git a/arch/arm/mach-pxa/am300epd.c b/arch/arm/mach-pxa/am300epd.c
index c9f309a..8b90c4f 100644
--- a/arch/arm/mach-pxa/am300epd.c
+++ b/arch/arm/mach-pxa/am300epd.c
@@ -30,6 +30,7 @@
 
 #include <mach/gumstix.h>
 #include <mach/mfp-pxa25x.h>
+#include <mach/irqs.h>
 #include <linux/platform_data/video-pxafb.h>
 
 #include "generic.h"
diff --git a/arch/arm/mach-pxa/include/mach/balloon3.h b/arch/arm/mach-pxa/include/mach/balloon3.h
index 954641e..1b08259 100644
--- a/arch/arm/mach-pxa/include/mach/balloon3.h
+++ b/arch/arm/mach-pxa/include/mach/balloon3.h
@@ -14,6 +14,8 @@
 #ifndef ASM_ARCH_BALLOON3_H
 #define ASM_ARCH_BALLOON3_H
 
+#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */
+
 enum balloon3_features {
 	BALLOON3_FEATURE_OHCI,
 	BALLOON3_FEATURE_MMC,
diff --git a/arch/arm/mach-pxa/include/mach/corgi.h b/arch/arm/mach-pxa/include/mach/corgi.h
index f3c3493..c030d95 100644
--- a/arch/arm/mach-pxa/include/mach/corgi.h
+++ b/arch/arm/mach-pxa/include/mach/corgi.h
@@ -13,6 +13,7 @@
 #ifndef __ASM_ARCH_CORGI_H
 #define __ASM_ARCH_CORGI_H 1
 
+#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */
 
 /*
  * Corgi (Non Standard) GPIO Definitions
diff --git a/arch/arm/mach-pxa/include/mach/csb726.h b/arch/arm/mach-pxa/include/mach/csb726.h
index 2628e7b..00cfbbb 100644
--- a/arch/arm/mach-pxa/include/mach/csb726.h
+++ b/arch/arm/mach-pxa/include/mach/csb726.h
@@ -11,6 +11,8 @@
 #ifndef CSB726_H
 #define CSB726_H
 
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
+
 #define CSB726_GPIO_IRQ_LAN 52
 #define CSB726_GPIO_IRQ_SM501 53
 #define CSB726_GPIO_MMC_DETECT 100
diff --git a/arch/arm/mach-pxa/include/mach/gumstix.h b/arch/arm/mach-pxa/include/mach/gumstix.h
index dba14b6..f7df27b 100644
--- a/arch/arm/mach-pxa/include/mach/gumstix.h
+++ b/arch/arm/mach-pxa/include/mach/gumstix.h
@@ -6,6 +6,7 @@
  * published by the Free Software Foundation.
  */
 
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
 
 /* BTRESET - Reset line to Bluetooth module, active low signal. */
 #define GPIO_GUMSTIX_BTRESET 7
diff --git a/arch/arm/mach-pxa/include/mach/idp.h b/arch/arm/mach-pxa/include/mach/idp.h
index 22a96f8..7e63f46 100644
--- a/arch/arm/mach-pxa/include/mach/idp.h
+++ b/arch/arm/mach-pxa/include/mach/idp.h
@@ -23,6 +23,7 @@
  * IDP hardware.
*/ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ #define IDP_FLASH_PHYS (PXA_CS0_PHYS) #define IDP_ALT_FLASH_PHYS (PXA_CS1_PHYS) diff --git a/arch/arm/mach-pxa/include/mach/palmld.h b/arch/arm/mach-pxa/include/mach/palmld.h index 2c447133..b184f29 100644 --- a/arch/arm/mach-pxa/include/mach/palmld.h +++ b/arch/arm/mach-pxa/include/mach/palmld.h @@ -13,6 +13,8 @@ #ifndef _INCLUDE_PALMLD_H_ #define _INCLUDE_PALMLD_H_ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /** HERE ARE GPIOs **/ /* GPIOs */ diff --git a/arch/arm/mach-pxa/include/mach/palmt5.h b/arch/arm/mach-pxa/include/mach/palmt5.h index 0bd4f03..e342c59 100644 --- a/arch/arm/mach-pxa/include/mach/palmt5.h +++ b/arch/arm/mach-pxa/include/mach/palmt5.h @@ -15,6 +15,8 @@ #ifndef _INCLUDE_PALMT5_H_ #define _INCLUDE_PALMT5_H_ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /** HERE ARE GPIOs **/ /* GPIOs */ diff --git a/arch/arm/mach-pxa/include/mach/palmtc.h b/arch/arm/mach-pxa/include/mach/palmtc.h index c383a21..81c727b 100644 --- a/arch/arm/mach-pxa/include/mach/palmtc.h +++ b/arch/arm/mach-pxa/include/mach/palmtc.h @@ -16,6 +16,8 @@ #ifndef _INCLUDE_PALMTC_H_ #define _INCLUDE_PALMTC_H_ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /** HERE ARE GPIOs **/ /* GPIOs */ diff --git a/arch/arm/mach-pxa/include/mach/palmtx.h b/arch/arm/mach-pxa/include/mach/palmtx.h index f2e5303..92bc1f0 100644 --- a/arch/arm/mach-pxa/include/mach/palmtx.h +++ b/arch/arm/mach-pxa/include/mach/palmtx.h @@ -16,6 +16,8 @@ #ifndef _INCLUDE_PALMTX_H_ #define _INCLUDE_PALMTX_H_ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /** HERE ARE GPIOs **/ /* GPIOs */ diff --git a/arch/arm/mach-pxa/include/mach/pcm027.h b/arch/arm/mach-pxa/include/mach/pcm027.h index 6bf28de..86ebd7b 100644 --- a/arch/arm/mach-pxa/include/mach/pcm027.h +++ b/arch/arm/mach-pxa/include/mach/pcm027.h @@ -23,6 +23,8 @@ * Definitions of CPU card resources only */ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /* phyCORE-PXA270 (PCM027) Interrupts */ #define PCM027_IRQ(x) (IRQ_BOARD_START + (x)) #define PCM027_BTDET_IRQ PCM027_IRQ(0) diff --git a/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h b/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h index 0260aaa..7e544c1 100644 --- a/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h +++ b/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h @@ -20,6 +20,7 @@ */ #include <mach/pcm027.h> +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ /* * definitions relevant only when the PCM-990 diff --git a/arch/arm/mach-pxa/include/mach/poodle.h b/arch/arm/mach-pxa/include/mach/poodle.h index f32ff75..b56b193 100644 --- a/arch/arm/mach-pxa/include/mach/poodle.h +++ b/arch/arm/mach-pxa/include/mach/poodle.h @@ -15,6 +15,8 @@ #ifndef __ASM_ARCH_POODLE_H #define __ASM_ARCH_POODLE_H 1 +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /* * GPIOs */ diff --git a/arch/arm/mach-pxa/include/mach/spitz.h b/arch/arm/mach-pxa/include/mach/spitz.h index 0bfe650..25c9f62 100644 --- a/arch/arm/mach-pxa/include/mach/spitz.h +++ b/arch/arm/mach-pxa/include/mach/spitz.h @@ -15,8 +15,8 @@ #define __ASM_ARCH_SPITZ_H 1 #endif +#include "irqs.h" /* PXA_NR_BUILTIN_GPIO, PXA_GPIO_TO_IRQ */ #include <linux/fb.h> -#include <linux/gpio.h> /* Spitz/Akita GPIOs */ diff --git a/arch/arm/mach-pxa/include/mach/tosa.h b/arch/arm/mach-pxa/include/mach/tosa.h index 2bb0e86..0497d95 100644 --- a/arch/arm/mach-pxa/include/mach/tosa.h +++ b/arch/arm/mach-pxa/include/mach/tosa.h @@ -13,6 +13,8 @@ #ifndef _ASM_ARCH_TOSA_H_ #define _ASM_ARCH_TOSA_H_ 1 +#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */ + /* TOSA Chip selects */ #define 
TOSA_LCDC_PHYS PXA_CS4_PHYS /* Internel Scoop */ diff --git a/arch/arm/mach-pxa/include/mach/trizeps4.h b/arch/arm/mach-pxa/include/mach/trizeps4.h index d2ca010..ae3ca01 100644 --- a/arch/arm/mach-pxa/include/mach/trizeps4.h +++ b/arch/arm/mach-pxa/include/mach/trizeps4.h @@ -10,6 +10,8 @@ #ifndef _TRIPEPS4_H_ #define _TRIPEPS4_H_ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /* physical memory regions */ #define TRIZEPS4_FLASH_PHYS (PXA_CS0_PHYS) /* Flash region */ #define TRIZEPS4_DISK_PHYS (PXA_CS1_PHYS) /* Disk On Chip region */ diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig index 3386406..05fa505 100644 --- a/arch/arm/mach-shmobile/Kconfig +++ b/arch/arm/mach-shmobile/Kconfig @@ -8,7 +8,7 @@ config ARCH_SHMOBILE_MULTI select CPU_V7 select GENERIC_CLOCKEVENTS select HAVE_ARM_SCU if SMP - select HAVE_ARM_TWD if LOCAL_TIMERS + select HAVE_ARM_TWD if SMP select HAVE_SMP select ARM_GIC select MIGHT_HAVE_CACHE_L2X0 diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c index 1db2a5ca..8c09a83 100644 --- a/arch/arm/mach-zynq/common.c +++ b/arch/arm/mach-zynq/common.c @@ -25,6 +25,7 @@ #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/of.h> +#include <linux/memblock.h> #include <linux/irqchip.h> #include <linux/irqchip/arm-gic.h> @@ -41,6 +42,18 @@ void __iomem *zynq_scu_base; +/** + * zynq_memory_init - Initialize special memory + * + * We need to stop things allocating the low memory as DMA can't work in + * the 1st 512K of memory. + */ +static void __init zynq_memory_init(void) +{ + if (!__pa(PAGE_OFFSET)) + memblock_reserve(__pa(PAGE_OFFSET), __pa(swapper_pg_dir)); +} + static struct platform_device zynq_cpuidle_device = { .name = "cpuidle-zynq", }; @@ -117,5 +130,6 @@ DT_MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform") .init_machine = zynq_init_machine, .init_time = zynq_timer_init, .dt_compat = zynq_dt_match, + .reserve = zynq_memory_init, .restart = zynq_system_reset, MACHINE_END diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index dd4327f..27bbcfc 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -36,6 +36,7 @@ config ARM64 select HAVE_GENERIC_DMA_COHERENT select HAVE_HW_BREAKPOINT if PERF_EVENTS select HAVE_MEMBLOCK + select HAVE_PATA_PLATFORM select HAVE_PERF_EVENTS select IRQ_DOMAIN select MODULES_USE_ELF_RELA diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 84139be..7959dd0 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set # CONFIG_SWAP is not set CONFIG_SYSVIPC=y @@ -19,6 +18,7 @@ CONFIG_BLK_DEV_INITRD=y CONFIG_KALLSYMS_ALL=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y +CONFIG_JUMP_LABEL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set @@ -27,6 +27,7 @@ CONFIG_ARCH_VEXPRESS=y CONFIG_ARCH_XGENE=y CONFIG_SMP=y CONFIG_PREEMPT=y +CONFIG_CMA=y CONFIG_CMDLINE="console=ttyAMA0" # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_COMPAT=y @@ -42,14 +43,17 @@ CONFIG_IP_PNP_BOOTP=y # CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y -CONFIG_BLK_DEV=y +CONFIG_DMA_CMA=y CONFIG_SCSI=y # CONFIG_SCSI_PROC_FS is not set CONFIG_BLK_DEV_SD=y # CONFIG_SCSI_LOWLEVEL is not set +CONFIG_ATA=y +CONFIG_PATA_PLATFORM=y +CONFIG_PATA_OF_PLATFORM=y CONFIG_NETDEVICES=y -CONFIG_MII=y CONFIG_SMC91X=y +CONFIG_SMSC911X=y # CONFIG_WLAN is not set CONFIG_INPUT_EVDEV=y # CONFIG_SERIO_I8042 is not set @@ -62,13 +66,19 @@ 
CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set +CONFIG_REGULATOR=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_FB=y # CONFIG_VGA_CONSOLE is not set CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set -# CONFIG_USB_SUPPORT is not set +CONFIG_USB=y +CONFIG_USB_ISP1760_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_MMC=y +CONFIG_MMC_ARMMMCI=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index 01de5aa..0237f08 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h @@ -54,8 +54,7 @@ static inline void atomic_add(int i, atomic_t *v) " stxr %w1, %w0, %2\n" " cbnz %w1, 1b" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) - : "Ir" (i) - : "cc"); + : "Ir" (i)); } static inline int atomic_add_return(int i, atomic_t *v) @@ -64,14 +63,15 @@ static inline int atomic_add_return(int i, atomic_t *v) int result; asm volatile("// atomic_add_return\n" -"1: ldaxr %w0, %2\n" +"1: ldxr %w0, %2\n" " add %w0, %w0, %w3\n" " stlxr %w1, %w0, %2\n" " cbnz %w1, 1b" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) : "Ir" (i) - : "cc", "memory"); + : "memory"); + smp_mb(); return result; } @@ -86,8 +86,7 @@ static inline void atomic_sub(int i, atomic_t *v) " stxr %w1, %w0, %2\n" " cbnz %w1, 1b" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) - : "Ir" (i) - : "cc"); + : "Ir" (i)); } static inline int atomic_sub_return(int i, atomic_t *v) @@ -96,14 +95,15 @@ static inline int atomic_sub_return(int i, atomic_t *v) int result; asm volatile("// atomic_sub_return\n" -"1: ldaxr %w0, %2\n" +"1: ldxr %w0, %2\n" " sub %w0, %w0, %w3\n" " stlxr %w1, %w0, %2\n" " cbnz %w1, 1b" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) : "Ir" (i) - : "cc", "memory"); + : "memory"); + smp_mb(); return result; } @@ -112,17 +112,20 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) unsigned long tmp; int oldval; + smp_mb(); + asm volatile("// atomic_cmpxchg\n" -"1: ldaxr %w1, %2\n" +"1: ldxr %w1, %2\n" " cmp %w1, %w3\n" " b.ne 2f\n" -" stlxr %w0, %w4, %2\n" +" stxr %w0, %w4, %2\n" " cbnz %w0, 1b\n" "2:" : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter) : "Ir" (old), "r" (new) - : "cc", "memory"); + : "cc"); + smp_mb(); return oldval; } @@ -173,8 +176,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v) " stxr %w1, %0, %2\n" " cbnz %w1, 1b" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) - : "Ir" (i) - : "cc"); + : "Ir" (i)); } static inline long atomic64_add_return(long i, atomic64_t *v) @@ -183,14 +185,15 @@ static inline long atomic64_add_return(long i, atomic64_t *v) unsigned long tmp; asm volatile("// atomic64_add_return\n" -"1: ldaxr %0, %2\n" +"1: ldxr %0, %2\n" " add %0, %0, %3\n" " stlxr %w1, %0, %2\n" " cbnz %w1, 1b" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) : "Ir" (i) - : "cc", "memory"); + : "memory"); + smp_mb(); return result; } @@ -205,8 +208,7 @@ static inline void atomic64_sub(u64 i, atomic64_t *v) " stxr %w1, %0, %2\n" " cbnz %w1, 1b" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) - : "Ir" (i) - : "cc"); + : "Ir" (i)); } static inline long atomic64_sub_return(long i, atomic64_t *v) @@ -215,14 +217,15 @@ static inline long atomic64_sub_return(long i, atomic64_t *v) unsigned long tmp; asm volatile("// atomic64_sub_return\n" -"1: ldaxr %0, %2\n" +"1: ldxr %0, %2\n" " sub %0, %0, %3\n" " stlxr %w1, %0, %2\n" " cbnz %w1, 1b" : "=&r" (result), "=&r" 
(tmp), "+Q" (v->counter) : "Ir" (i) - : "cc", "memory"); + : "memory"); + smp_mb(); return result; } @@ -231,17 +234,20 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new) long oldval; unsigned long res; + smp_mb(); + asm volatile("// atomic64_cmpxchg\n" -"1: ldaxr %1, %2\n" +"1: ldxr %1, %2\n" " cmp %1, %3\n" " b.ne 2f\n" -" stlxr %w0, %4, %2\n" +" stxr %w0, %4, %2\n" " cbnz %w0, 1b\n" "2:" : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter) : "Ir" (old), "r" (new) - : "cc", "memory"); + : "cc"); + smp_mb(); return oldval; } @@ -253,11 +259,12 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) unsigned long tmp; asm volatile("// atomic64_dec_if_positive\n" -"1: ldaxr %0, %2\n" +"1: ldxr %0, %2\n" " subs %0, %0, #1\n" " b.mi 2f\n" " stlxr %w1, %0, %2\n" " cbnz %w1, 1b\n" +" dmb ish\n" "2:" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) : diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 78e20ba..409ca37 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -25,7 +25,7 @@ #define wfi() asm volatile("wfi" : : : "memory") #define isb() asm volatile("isb" : : : "memory") -#define dsb() asm volatile("dsb sy" : : : "memory") +#define dsb(opt) asm volatile("dsb sy" : : : "memory") #define mb() dsb() #define rmb() asm volatile("dsb ld" : : : "memory") diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index fea9ee3..88932498 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -116,6 +116,7 @@ extern void flush_dcache_page(struct page *); static inline void __flush_icache_all(void) { asm("ic ialluis"); + dsb(); } #define flush_dcache_mmap_lock(mapping) \ diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index 56166d7..57c0fa7 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -29,44 +29,45 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size switch (size) { case 1: asm volatile("// __xchg1\n" - "1: ldaxrb %w0, %2\n" + "1: ldxrb %w0, %2\n" " stlxrb %w1, %w3, %2\n" " cbnz %w1, 1b\n" : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) : "r" (x) - : "cc", "memory"); + : "memory"); break; case 2: asm volatile("// __xchg2\n" - "1: ldaxrh %w0, %2\n" + "1: ldxrh %w0, %2\n" " stlxrh %w1, %w3, %2\n" " cbnz %w1, 1b\n" : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr) : "r" (x) - : "cc", "memory"); + : "memory"); break; case 4: asm volatile("// __xchg4\n" - "1: ldaxr %w0, %2\n" + "1: ldxr %w0, %2\n" " stlxr %w1, %w3, %2\n" " cbnz %w1, 1b\n" : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr) : "r" (x) - : "cc", "memory"); + : "memory"); break; case 8: asm volatile("// __xchg8\n" - "1: ldaxr %0, %2\n" + "1: ldxr %0, %2\n" " stlxr %w1, %3, %2\n" " cbnz %w1, 1b\n" : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr) : "r" (x) - : "cc", "memory"); + : "memory"); break; default: BUILD_BUG(); } + smp_mb(); return ret; } diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 7883412..c4a7f94 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -42,7 +42,7 @@ #define ESR_EL1_EC_SP_ALIGN (0x26) #define ESR_EL1_EC_FP_EXC32 (0x28) #define ESR_EL1_EC_FP_EXC64 (0x2C) -#define ESR_EL1_EC_SERRROR (0x2F) +#define ESR_EL1_EC_SERROR (0x2F) #define ESR_EL1_EC_BREAKPT_EL0 (0x30) #define ESR_EL1_EC_BREAKPT_EL1 (0x31) #define ESR_EL1_EC_SOFTSTP_EL0 (0x32) diff --git a/arch/arm64/include/asm/futex.h 
b/arch/arm64/include/asm/futex.h index 78cc3ab..5f750dc 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h @@ -24,10 +24,11 @@ #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \ asm volatile( \ -"1: ldaxr %w1, %2\n" \ +"1: ldxr %w1, %2\n" \ insn "\n" \ "2: stlxr %w3, %w0, %2\n" \ " cbnz %w3, 1b\n" \ +" dmb ish\n" \ "3:\n" \ " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ @@ -40,7 +41,7 @@ " .popsection\n" \ : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \ : "r" (oparg), "Ir" (-EFAULT) \ - : "cc", "memory") + : "memory") static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) @@ -111,11 +112,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, return -EFAULT; asm volatile("// futex_atomic_cmpxchg_inatomic\n" -"1: ldaxr %w1, %2\n" +"1: ldxr %w1, %2\n" " sub %w3, %w1, %w4\n" " cbnz %w3, 3f\n" "2: stlxr %w3, %w5, %2\n" " cbnz %w3, 1b\n" +" dmb ish\n" "3:\n" " .pushsection .fixup,\"ax\"\n" "4: mov %w0, %w6\n" @@ -127,7 +129,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, " .popsection\n" : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp) : "r" (oldval), "r" (newval), "Ir" (-EFAULT) - : "cc", "memory"); + : "memory"); *uval = val; return ret; diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index c98ef47..0eb3986 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -231,7 +231,7 @@ #define ESR_EL2_EC_SP_ALIGN (0x26) #define ESR_EL2_EC_FP_EXC32 (0x28) #define ESR_EL2_EC_FP_EXC64 (0x2C) -#define ESR_EL2_EC_SERRROR (0x2F) +#define ESR_EL2_EC_SERROR (0x2F) #define ESR_EL2_EC_BREAKPT (0x30) #define ESR_EL2_EC_BREAKPT_HYP (0x31) #define ESR_EL2_EC_SOFTSTP (0x32) diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index 3d5cf06..c45b7b1 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h @@ -132,7 +132,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw) " cbnz %w0, 2b\n" : "=&r" (tmp), "+Q" (rw->lock) : "r" (0x80000000) - : "cc", "memory"); + : "memory"); } static inline int arch_write_trylock(arch_rwlock_t *rw) @@ -146,7 +146,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) "1:\n" : "=&r" (tmp), "+Q" (rw->lock) : "r" (0x80000000) - : "cc", "memory"); + : "memory"); return !tmp; } @@ -187,7 +187,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw) " cbnz %w1, 2b\n" : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) : - : "cc", "memory"); + : "memory"); } static inline void arch_read_unlock(arch_rwlock_t *rw) @@ -201,7 +201,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) " cbnz %w1, 1b\n" : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) : - : "cc", "memory"); + : "memory"); } static inline int arch_read_trylock(arch_rwlock_t *rw) @@ -216,7 +216,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) "1:\n" : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock) : - : "cc", "memory"); + : "memory"); return !tmp2; } diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 58125bf..bb8eb8a 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -399,7 +399,10 @@ __SYSCALL(374, compat_sys_sendmmsg) __SYSCALL(375, sys_setns) __SYSCALL(376, compat_sys_process_vm_readv) __SYSCALL(377, compat_sys_process_vm_writev) -__SYSCALL(378, sys_ni_syscall) /* 378 for kcmp */ +__SYSCALL(378, sys_kcmp) +__SYSCALL(379, sys_finit_module) +__SYSCALL(380, sys_sched_setattr) +__SYSCALL(381, 
sys_sched_getattr) #define __NR_compat_syscalls 379 diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 495ab6f..eaf54a3 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -148,6 +148,15 @@ struct kvm_arch_memory_slot { #define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2) #define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2) +/* Device Control API: ARM VGIC */ +#define KVM_DEV_ARM_VGIC_GRP_ADDR 0 +#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 +#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2 +#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32 +#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT) +#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0 +#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) + /* KVM_IRQ_LINE irq field index values */ #define KVM_ARM_IRQ_TYPE_SHIFT 24 #define KVM_ARM_IRQ_TYPE_MASK 0xff diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S index 63c48ff..7787208 100644 --- a/arch/arm64/kernel/kuser32.S +++ b/arch/arm64/kernel/kuser32.S @@ -38,12 +38,13 @@ __kuser_cmpxchg64: // 0xffff0f60 .inst 0xe92d00f0 // push {r4, r5, r6, r7} .inst 0xe1c040d0 // ldrd r4, r5, [r0] .inst 0xe1c160d0 // ldrd r6, r7, [r1] - .inst 0xe1b20e9f // 1: ldaexd r0, r1, [r2] + .inst 0xe1b20f9f // 1: ldrexd r0, r1, [r2] .inst 0xe0303004 // eors r3, r0, r4 .inst 0x00313005 // eoreqs r3, r1, r5 .inst 0x01a23e96 // stlexdeq r3, r6, [r2] .inst 0x03330001 // teqeq r3, #1 .inst 0x0afffff9 // beq 1b + .inst 0xf57ff05b // dmb ish .inst 0xe2730000 // rsbs r0, r3, #0 .inst 0xe8bd00f0 // pop {r4, r5, r6, r7} .inst 0xe12fff1e // bx lr @@ -55,11 +56,12 @@ __kuser_memory_barrier: // 0xffff0fa0 .align 5 __kuser_cmpxchg: // 0xffff0fc0 - .inst 0xe1923e9f // 1: ldaex r3, [r2] + .inst 0xe1923f9f // 1: ldrex r3, [r2] .inst 0xe0533000 // subs r3, r3, r0 .inst 0x01823e91 // stlexeq r3, r1, [r2] .inst 0x03330001 // teqeq r3, #1 .inst 0x0afffffa // beq 1b + .inst 0xf57ff05b // dmb ish .inst 0xe2730000 // rsbs r0, r3, #0 .inst 0xe12fff1e // bx lr diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 65d40cf..a7149ca 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -238,6 +238,8 @@ void update_vsyscall(struct timekeeper *tk) vdso_data->use_syscall = use_syscall; vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec; vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec; + vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec; + vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; if (!use_syscall) { vdso_data->cs_cycle_last = tk->clock->cycle_last; @@ -245,8 +247,6 @@ void update_vsyscall(struct timekeeper *tk) vdso_data->xtime_clock_nsec = tk->xtime_nsec; vdso_data->cs_mult = tk->mult; vdso_data->cs_shift = tk->shift; - vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec; - vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; } smp_wmb(); diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index d8064af..6d20b7d 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -48,7 +48,7 @@ $(obj-vdso): %.o: %.S # Actual build commands quiet_cmd_vdsold = VDSOL $@ - cmd_vdsold = $(CC) $(c_flags) -Wl,-T $^ -o $@ + cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@ quiet_cmd_vdsoas = VDSOA $@ cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $< diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S index f0a6d10..fe652ff 100644 --- a/arch/arm64/kernel/vdso/gettimeofday.S 
+++ b/arch/arm64/kernel/vdso/gettimeofday.S @@ -103,6 +103,8 @@ ENTRY(__kernel_clock_gettime) bl __do_get_tspec seqcnt_check w9, 1b + mov x30, x2 + cmp w0, #CLOCK_MONOTONIC b.ne 6f @@ -118,6 +120,9 @@ ENTRY(__kernel_clock_gettime) ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne b.ne 8f + /* xtime_coarse_nsec is already right-shifted */ + mov x12, #0 + /* Get coarse timespec. */ adr vdso_data, _vdso_data 3: seqcnt_acquire @@ -156,7 +161,7 @@ ENTRY(__kernel_clock_gettime) lsr x11, x11, x12 stp x10, x11, [x1, #TSPEC_TV_SEC] mov x0, xzr - ret x2 + ret 7: mov x30, x2 8: /* Syscall fallback. */ diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S index e5db797..7dac371 100644 --- a/arch/arm64/lib/bitops.S +++ b/arch/arm64/lib/bitops.S @@ -46,11 +46,12 @@ ENTRY( \name ) mov x2, #1 add x1, x1, x0, lsr #3 // Get word offset lsl x4, x2, x3 // Create mask -1: ldaxr x2, [x1] +1: ldxr x2, [x1] lsr x0, x2, x3 // Save old value of bit \instr x2, x2, x4 // toggle bit stlxr w5, x2, [x1] cbnz w5, 1b + dmb ish and x0, x0, #1 3: ret ENDPROC(\name ) diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 45b5ab5..fbd7678 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -45,6 +45,7 @@ static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size, if (IS_ENABLED(CONFIG_DMA_CMA)) { struct page *page; + size = PAGE_ALIGN(size); page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, get_order(size)); if (!page) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index f557ebb..f8dc7e8 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -203,10 +203,18 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, do { next = pmd_addr_end(addr, end); /* try section mapping first */ - if (((addr | next | phys) & ~SECTION_MASK) == 0) + if (((addr | next | phys) & ~SECTION_MASK) == 0) { + pmd_t old_pmd =*pmd; set_pmd(pmd, __pmd(phys | prot_sect_kernel)); - else + /* + * Check for previous table entries created during + * boot (__create_page_tables) and flush them. 
+ */ + if (!pmd_none(old_pmd)) + flush_tlb_all(); + } else { alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys)); + } phys += next - addr; } while (pmd++, addr = next, addr != end); } diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c index 7083cda..62c6101 100644 --- a/arch/arm64/mm/pgd.c +++ b/arch/arm64/mm/pgd.c @@ -32,17 +32,10 @@ pgd_t *pgd_alloc(struct mm_struct *mm) { - pgd_t *new_pgd; - if (PGD_SIZE == PAGE_SIZE) - new_pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL); + return (pgd_t *)get_zeroed_page(GFP_KERNEL); else - new_pgd = kzalloc(PGD_SIZE, GFP_KERNEL); - - if (!new_pgd) - return NULL; - - return new_pgd; + return kzalloc(PGD_SIZE, GFP_KERNEL); } void pgd_free(struct mm_struct *mm, pgd_t *pgd) diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index afd45e0..ae763d8b 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h @@ -11,7 +11,7 @@ -#define NR_syscalls 312 /* length of syscall table */ +#define NR_syscalls 314 /* length of syscall table */ /* * The following defines stop scripts/checksyscalls.sh from complaining about diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h index 34fd6fe..715e85f 100644 --- a/arch/ia64/include/uapi/asm/unistd.h +++ b/arch/ia64/include/uapi/asm/unistd.h @@ -325,5 +325,7 @@ #define __NR_process_vm_writev 1333 #define __NR_accept4 1334 #define __NR_finit_module 1335 +#define __NR_sched_setattr 1336 +#define __NR_sched_getattr 1337 #endif /* _UAPI_ASM_IA64_UNISTD_H */ diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index ddea607f..fa8d61a 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -1773,6 +1773,8 @@ sys_call_table: data8 sys_process_vm_writev data8 sys_accept4 data8 sys_finit_module // 1335 + data8 sys_sched_setattr + data8 sys_sched_getattr .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ diff --git a/arch/microblaze/include/asm/delay.h b/arch/microblaze/include/asm/delay.h index 05b7d39..66fc24c 100644 --- a/arch/microblaze/include/asm/delay.h +++ b/arch/microblaze/include/asm/delay.h @@ -13,6 +13,8 @@ #ifndef _ASM_MICROBLAZE_DELAY_H #define _ASM_MICROBLAZE_DELAY_H +#include <linux/param.h> + extern inline void __delay(unsigned long loops) { asm volatile ("# __delay \n\t" \ diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h index a2cea72..3fbb7f1 100644 --- a/arch/microblaze/include/asm/io.h +++ b/arch/microblaze/include/asm/io.h @@ -89,6 +89,11 @@ static inline unsigned int readl(const volatile void __iomem *addr) { return le32_to_cpu(*(volatile unsigned int __force *)addr); } +#define readq readq +static inline u64 readq(const volatile void __iomem *addr) +{ + return le64_to_cpu(__raw_readq(addr)); +} static inline void writeb(unsigned char v, volatile void __iomem *addr) { *(volatile unsigned char __force *)addr = v; @@ -101,6 +106,7 @@ static inline void writel(unsigned int v, volatile void __iomem *addr) { *(volatile unsigned int __force *)addr = cpu_to_le32(v); } +#define writeq(b, addr) __raw_writeq(cpu_to_le64(b), addr) /* ioread and iowrite variants. thease are for now same as __raw_ * variants of accessors. 
we might check for endianess in the feature diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S index b7fb043..17645b2 100644 --- a/arch/microblaze/kernel/head.S +++ b/arch/microblaze/kernel/head.S @@ -66,7 +66,7 @@ real_start: mts rmsr, r0 /* Disable stack protection from bootloader */ mts rslr, r0 - addi r8, r0, 0xFFFFFFF + addi r8, r0, 0xFFFFFFFF mts rshr, r8 /* * According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc' diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c index 11f3ad2..5483906 100644 --- a/arch/mips/alchemy/devboards/db1000.c +++ b/arch/mips/alchemy/devboards/db1000.c @@ -534,13 +534,10 @@ static int __init db1000_dev_init(void) s0 = AU1100_GPIO1_INT; s1 = AU1100_GPIO4_INT; + gpio_request(19, "sd0_cd"); + gpio_request(20, "sd1_cd"); gpio_direction_input(19); /* sd0 cd# */ gpio_direction_input(20); /* sd1 cd# */ - gpio_direction_input(21); /* touch pendown# */ - gpio_direction_input(207); /* SPI MISO */ - gpio_direction_output(208, 0); /* SPI MOSI */ - gpio_direction_output(209, 1); /* SPI SCK */ - gpio_direction_output(210, 1); /* SPI CS# */ /* spi_gpio on SSI0 pins */ pfc = __raw_readl((void __iomem *)SYS_PINFUNC); diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index cfe092f..6b97495 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h @@ -74,6 +74,8 @@ static inline int __enable_fpu(enum fpu_mode mode) default: BUG(); } + + return SIGFPE; } #define __disable_fpu() \ diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index 1dee279..d6e154a 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h @@ -369,16 +369,18 @@ #define __NR_process_vm_writev (__NR_Linux + 346) #define __NR_kcmp (__NR_Linux + 347) #define __NR_finit_module (__NR_Linux + 348) +#define __NR_sched_setattr (__NR_Linux + 349) +#define __NR_sched_getattr (__NR_Linux + 350) /* * Offset of the last Linux o32 flavoured syscall */ -#define __NR_Linux_syscalls 348 +#define __NR_Linux_syscalls 350 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #define __NR_O32_Linux 4000 -#define __NR_O32_Linux_syscalls 348 +#define __NR_O32_Linux_syscalls 350 #if _MIPS_SIM == _MIPS_SIM_ABI64 @@ -695,16 +697,18 @@ #define __NR_kcmp (__NR_Linux + 306) #define __NR_finit_module (__NR_Linux + 307) #define __NR_getdents64 (__NR_Linux + 308) +#define __NR_sched_setattr (__NR_Linux + 309) +#define __NR_sched_getattr (__NR_Linux + 310) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 308 +#define __NR_Linux_syscalls 310 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 308 +#define __NR_64_Linux_syscalls 310 #if _MIPS_SIM == _MIPS_SIM_NABI32 @@ -1025,15 +1029,17 @@ #define __NR_process_vm_writev (__NR_Linux + 310) #define __NR_kcmp (__NR_Linux + 311) #define __NR_finit_module (__NR_Linux + 312) +#define __NR_sched_setattr (__NR_Linux + 313) +#define __NR_sched_getattr (__NR_Linux + 314) /* * Offset of the last N32 flavoured syscall */ -#define __NR_Linux_syscalls 312 +#define __NR_Linux_syscalls 314 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ #define __NR_N32_Linux 6000 -#define __NR_N32_Linux_syscalls 312 +#define __NR_N32_Linux_syscalls 314 #endif /* _UAPI_ASM_UNISTD_H */ diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index e8e541b..a5b14f4 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -563,3 +563,5 
@@ EXPORT(sys_call_table) PTR sys_process_vm_writev PTR sys_kcmp PTR sys_finit_module + PTR sys_sched_setattr + PTR sys_sched_getattr /* 4350 */ diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 57e3742..b56e254 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -425,4 +425,6 @@ EXPORT(sys_call_table) PTR sys_kcmp PTR sys_finit_module PTR sys_getdents64 + PTR sys_sched_setattr + PTR sys_sched_getattr /* 5310 */ .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 2f48f59..f7e5b72 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -418,4 +418,6 @@ EXPORT(sysn32_call_table) PTR compat_sys_process_vm_writev /* 6310 */ PTR sys_kcmp PTR sys_finit_module + PTR sys_sched_setattr + PTR sys_sched_getattr .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index f1acdb4..6788727d 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -541,4 +541,6 @@ EXPORT(sys32_call_table) PTR compat_sys_process_vm_writev PTR sys_kcmp PTR sys_finit_module + PTR sys_sched_setattr + PTR sys_sched_getattr /* 4350 */ .size sys32_call_table,.-sys32_call_table diff --git a/arch/parisc/hpux/fs.c b/arch/parisc/hpux/fs.c index 88d0962..2bedafe 100644 --- a/arch/parisc/hpux/fs.c +++ b/arch/parisc/hpux/fs.c @@ -33,22 +33,9 @@ int hpux_execve(struct pt_regs *regs) { - int error; - struct filename *filename; - - filename = getname((const char __user *) regs->gr[26]); - error = PTR_ERR(filename); - if (IS_ERR(filename)) - goto out; - - error = do_execve(filename->name, + return do_execve(getname((const char __user *) regs->gr[26]), (const char __user *const __user *) regs->gr[25], (const char __user *const __user *) regs->gr[24]); - - putname(filename); - -out: - return error; } struct hpux_dirent { diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index e27e9ad..150866b 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -134,6 +134,7 @@ static inline int dma_supported(struct device *dev, u64 mask) } extern int dma_set_mask(struct device *dev, u64 dma_mask); +extern int __dma_set_mask(struct device *dev, u64 dma_mask); #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index f7a8036..42632c7 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -77,6 +77,7 @@ struct iommu_table { #ifdef CONFIG_IOMMU_API struct iommu_group *it_group; #endif + void (*set_bypass)(struct iommu_table *tbl, bool enable); }; /* Pure 2^n version of get_order */ diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h index 4ee06fe..d0e784e 100644 --- a/arch/powerpc/include/asm/sections.h +++ b/arch/powerpc/include/asm/sections.h @@ -8,6 +8,7 @@ #ifdef __powerpc64__ +extern char __start_interrupts[]; extern char __end_interrupts[]; extern char __prom_init_toc_start[]; @@ -21,6 +22,17 @@ static inline int in_kernel_text(unsigned long addr) return 0; } +static inline int overlaps_interrupt_vector_text(unsigned long start, + unsigned long end) +{ + unsigned long real_start, real_end; + real_start = __start_interrupts - _stext; + real_end = __end_interrupts - _stext; + + return start < (unsigned long)__va(real_end) && + (unsigned long)__va(real_start) < 
end; +} + static inline int overlaps_kernel_text(unsigned long start, unsigned long end) { return start < (unsigned long)__init_end && diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 8032b97..ee78f6e 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c @@ -191,12 +191,10 @@ EXPORT_SYMBOL(dma_direct_ops); #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) -int dma_set_mask(struct device *dev, u64 dma_mask) +int __dma_set_mask(struct device *dev, u64 dma_mask) { struct dma_map_ops *dma_ops = get_dma_ops(dev); - if (ppc_md.dma_set_mask) - return ppc_md.dma_set_mask(dev, dma_mask); if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL)) return dma_ops->set_dma_mask(dev, dma_mask); if (!dev->dma_mask || !dma_supported(dev, dma_mask)) @@ -204,6 +202,12 @@ int dma_set_mask(struct device *dev, u64 dma_mask) *dev->dma_mask = dma_mask; return 0; } +int dma_set_mask(struct device *dev, u64 dma_mask) +{ + if (ppc_md.dma_set_mask) + return ppc_md.dma_set_mask(dev, dma_mask); + return __dma_set_mask(dev, dma_mask); +} EXPORT_SYMBOL(dma_set_mask); u64 dma_get_required_mask(struct device *dev) diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 7bb30dc..fdc679d 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -362,9 +362,13 @@ static void *eeh_rmv_device(void *data, void *userdata) */ if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)) return NULL; + driver = eeh_pcid_get(dev); - if (driver && driver->err_handler) - return NULL; + if (driver) { + eeh_pcid_put(dev); + if (driver->err_handler) + return NULL; + } /* Remove it from PCI subsystem */ pr_debug("EEH: Removing %s without EEH sensitive driver\n", diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index d773dd4..88e3ec6 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -1088,6 +1088,14 @@ int iommu_take_ownership(struct iommu_table *tbl) memset(tbl->it_map, 0xff, sz); iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size); + /* + * Disable iommu bypass, otherwise the user can DMA to all of + * our physical memory via the bypass window instead of just + * the pages that has been explicitly mapped into the iommu + */ + if (tbl->set_bypass) + tbl->set_bypass(tbl, false); + return 0; } EXPORT_SYMBOL_GPL(iommu_take_ownership); @@ -1102,6 +1110,10 @@ void iommu_release_ownership(struct iommu_table *tbl) /* Restore bit#0 set by iommu_init_table() */ if (tbl->it_offset == 0) set_bit(0, tbl->it_map); + + /* The kernel owns the device now, we can restore the iommu bypass */ + if (tbl->set_bypass) + tbl->set_bypass(tbl, true); } EXPORT_SYMBOL_GPL(iommu_release_ownership); diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 9729b23..1d0848b 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -559,8 +559,13 @@ void exc_lvl_ctx_init(void) #ifdef CONFIG_PPC64 cpu_nr = i; #else +#ifdef CONFIG_SMP cpu_nr = get_hard_smp_processor_id(i); +#else + cpu_nr = 0; #endif +#endif + memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE); tp = critirq_ctx[cpu_nr]; tp->cpu = cpu_nr; diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index 75d4f73..015ae55 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -196,7 +196,9 @@ int overlaps_crashkernel(unsigned long start, unsigned long size) /* Values we need to export to the second kernel via the device tree. 
*/ static phys_addr_t kernel_end; +static phys_addr_t crashk_base; static phys_addr_t crashk_size; +static unsigned long long mem_limit; static struct property kernel_end_prop = { .name = "linux,kernel-end", @@ -207,7 +209,7 @@ static struct property kernel_end_prop = { static struct property crashk_base_prop = { .name = "linux,crashkernel-base", .length = sizeof(phys_addr_t), - .value = &crashk_res.start, + .value = &crashk_base }; static struct property crashk_size_prop = { @@ -219,9 +221,11 @@ static struct property crashk_size_prop = { static struct property memory_limit_prop = { .name = "linux,memory-limit", .length = sizeof(unsigned long long), - .value = &memory_limit, + .value = &mem_limit, }; +#define cpu_to_be_ulong __PASTE(cpu_to_be, BITS_PER_LONG) + static void __init export_crashk_values(struct device_node *node) { struct property *prop; @@ -237,8 +241,9 @@ static void __init export_crashk_values(struct device_node *node) of_remove_property(node, prop); if (crashk_res.start != 0) { + crashk_base = cpu_to_be_ulong(crashk_res.start), of_add_property(node, &crashk_base_prop); - crashk_size = resource_size(&crashk_res); + crashk_size = cpu_to_be_ulong(resource_size(&crashk_res)); of_add_property(node, &crashk_size_prop); } @@ -246,6 +251,7 @@ static void __init export_crashk_values(struct device_node *node) * memory_limit is required by the kexec-tools to limit the * crash regions to the actual memory used. */ + mem_limit = cpu_to_be_ulong(memory_limit); of_update_property(node, &memory_limit_prop); } @@ -264,7 +270,7 @@ static int __init kexec_setup(void) of_remove_property(node, prop); /* information needed by userspace when using default_machine_kexec */ - kernel_end = __pa(_end); + kernel_end = cpu_to_be_ulong(__pa(_end)); of_add_property(node, &kernel_end_prop); export_crashk_values(node); diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index be4e6d6..59d229a 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -369,6 +369,7 @@ void default_machine_kexec(struct kimage *image) /* Values we need to export to the second kernel via the device tree. */ static unsigned long htab_base; +static unsigned long htab_size; static struct property htab_base_prop = { .name = "linux,htab-base", @@ -379,7 +380,7 @@ static struct property htab_base_prop = { static struct property htab_size_prop = { .name = "linux,htab-size", .length = sizeof(unsigned long), - .value = &htab_size_bytes, + .value = &htab_size, }; static int __init export_htab_values(void) @@ -403,8 +404,9 @@ static int __init export_htab_values(void) if (prop) of_remove_property(node, prop); - htab_base = __pa(htab_address); + htab_base = cpu_to_be64(__pa(htab_address)); of_add_property(node, &htab_base_prop); + htab_size = cpu_to_be64(htab_size_bytes); of_add_property(node, &htab_size_prop); of_node_put(node); diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S index b47a0e1..1482327 100644 --- a/arch/powerpc/kernel/reloc_64.S +++ b/arch/powerpc/kernel/reloc_64.S @@ -69,8 +69,8 @@ _GLOBAL(relocate) * R_PPC64_RELATIVE ones. 
*/ mtctr r8 -5: lwz r0,12(9) /* ELF64_R_TYPE(reloc->r_info) */ - cmpwi r0,R_PPC64_RELATIVE +5: ld r0,8(9) /* ELF64_R_TYPE(reloc->r_info) */ + cmpdi r0,R_PPC64_RELATIVE bne 6f ld r6,0(r9) /* reloc->r_offset */ ld r0,16(r9) /* reloc->r_addend */ diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 2b0da27..04cc4fc 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -247,7 +247,12 @@ static void __init exc_lvl_early_init(void) /* interrupt stacks must be in lowmem, we get that for free on ppc32 * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */ for_each_possible_cpu(i) { +#ifdef CONFIG_SMP hw_cpu = get_hard_smp_processor_id(i); +#else + hw_cpu = 0; +#endif + critirq_ctx[hw_cpu] = (struct thread_info *) __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); #ifdef CONFIG_BOOKE diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index de68812..d766d6e 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -207,6 +207,20 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend, if (overlaps_kernel_text(vaddr, vaddr + step)) tprot &= ~HPTE_R_N; + /* + * If relocatable, check if it overlaps interrupt vectors that + * are copied down to real 0. For relocatable kernel + * (e.g. kdump case) we copy interrupt vectors down to real + * address 0. Mark that region as executable. This is + * because on p8 system with relocation on exception feature + * enabled, exceptions are raised with MMU (IR=DR=1) ON. Hence + * in order to execute the interrupt handlers in virtual + * mode the vector region need to be marked as executable. + */ + if ((PHYSICAL_START > MEMORY_START) && + overlaps_interrupt_vector_text(vaddr, vaddr + step)) + tprot &= ~HPTE_R_N; + hash = hpt_hash(vpn, shift, ssize); hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 29b89e8..67cf220 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -1147,6 +1147,9 @@ static void power_pmu_enable(struct pmu *pmu) mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]); mb(); + if (cpuhw->bhrb_users) + ppmu->config_bhrb(cpuhw->bhrb_filter); + write_mmcr0(cpuhw, mmcr0); /* @@ -1158,8 +1161,6 @@ static void power_pmu_enable(struct pmu *pmu) } out: - if (cpuhw->bhrb_users) - ppmu->config_bhrb(cpuhw->bhrb_filter); local_irq_restore(flags); } diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c index a3f7abd..96cee20 100644 --- a/arch/powerpc/perf/power8-pmu.c +++ b/arch/powerpc/perf/power8-pmu.c @@ -25,6 +25,37 @@ #define PM_BRU_FIN 0x10068 #define PM_BR_MPRED_CMPL 0x400f6 +/* All L1 D cache load references counted at finish, gated by reject */ +#define PM_LD_REF_L1 0x100ee +/* Load Missed L1 */ +#define PM_LD_MISS_L1 0x3e054 +/* Store Missed L1 */ +#define PM_ST_MISS_L1 0x300f0 +/* L1 cache data prefetches */ +#define PM_L1_PREF 0x0d8b8 +/* Instruction fetches from L1 */ +#define PM_INST_FROM_L1 0x04080 +/* Demand iCache Miss */ +#define PM_L1_ICACHE_MISS 0x200fd +/* Instruction Demand sectors wriittent into IL1 */ +#define PM_L1_DEMAND_WRITE 0x0408c +/* Instruction prefetch written into IL1 */ +#define PM_IC_PREF_WRITE 0x0408e +/* The data cache was reloaded from local core's L3 due to a demand load */ +#define PM_DATA_FROM_L3 0x4c042 +/* Demand LD - L3 Miss (not L2 hit and not L3 hit) */ +#define PM_DATA_FROM_L3MISS 0x300fe +/* All successful D-side store dispatches for this thread */ 
+#define PM_L2_ST 0x17080 +/* All successful D-side store dispatches for this thread that were L2 Miss */ +#define PM_L2_ST_MISS 0x17082 +/* Total HW L3 prefetches(Load+store) */ +#define PM_L3_PREF_ALL 0x4e052 +/* Data PTEG reload */ +#define PM_DTLB_MISS 0x300fc +/* ITLB Reloaded */ +#define PM_ITLB_MISS 0x400fc + /* * Raw event encoding for POWER8: @@ -557,6 +588,8 @@ static int power8_generic_events[] = { [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_FIN, [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL, + [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1, + [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1, }; static u64 power8_bhrb_filter_map(u64 branch_sample_type) @@ -596,6 +629,116 @@ static void power8_config_bhrb(u64 pmu_bhrb_filter) mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter)); } +#define C(x) PERF_COUNT_HW_CACHE_##x + +/* + * Table of generalized cache-related events. + * 0 means not supported, -1 means nonsensical, other values + * are event codes. + */ +static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { + [ C(L1D) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = PM_LD_REF_L1, + [ C(RESULT_MISS) ] = PM_LD_MISS_L1, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = PM_ST_MISS_L1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = PM_L1_PREF, + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(L1I) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = PM_INST_FROM_L1, + [ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE, + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(LL) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3, + [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = PM_L2_ST, + [ C(RESULT_MISS) ] = PM_L2_ST_MISS, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL, + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(DTLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = PM_DTLB_MISS, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + [ C(ITLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = PM_ITLB_MISS, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + [ C(BPU) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = PM_BRU_FIN, + [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, +}; + +#undef C + static struct power_pmu power8_pmu = { .name = "POWER8", .n_counter = 6, @@ -611,6 +754,7 @@ static struct power_pmu power8_pmu = { .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB, .n_generic = ARRAY_SIZE(power8_generic_events), .generic_events = power8_generic_events, + .cache_events = &power8_cache_events, 
.attr_groups = power8_pmu_attr_groups, .bhrb_nr = 32, }; diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 7d6dcc6..3b2b4fb 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -21,6 +21,7 @@ #include <linux/irq.h> #include <linux/io.h> #include <linux/msi.h> +#include <linux/memblock.h> #include <asm/sections.h> #include <asm/io.h> @@ -460,9 +461,39 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev return; pe = &phb->ioda.pe_array[pdn->pe_number]; + WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops); set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table); } +static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb, + struct pci_dev *pdev, u64 dma_mask) +{ + struct pci_dn *pdn = pci_get_pdn(pdev); + struct pnv_ioda_pe *pe; + uint64_t top; + bool bypass = false; + + if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE)) + return -ENODEV;; + + pe = &phb->ioda.pe_array[pdn->pe_number]; + if (pe->tce_bypass_enabled) { + top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1; + bypass = (dma_mask >= top); + } + + if (bypass) { + dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n"); + set_dma_ops(&pdev->dev, &dma_direct_ops); + set_dma_offset(&pdev->dev, pe->tce_bypass_base); + } else { + dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n"); + set_dma_ops(&pdev->dev, &dma_iommu_ops); + set_iommu_table_base(&pdev->dev, &pe->tce32_table); + } + return 0; +} + static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) { struct pci_dev *dev; @@ -657,6 +688,56 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb, __free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs)); } +static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable) +{ + struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe, + tce32_table); + uint16_t window_id = (pe->pe_number << 1 ) + 1; + int64_t rc; + + pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis"); + if (enable) { + phys_addr_t top = memblock_end_of_DRAM(); + + top = roundup_pow_of_two(top); + rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, + pe->pe_number, + window_id, + pe->tce_bypass_base, + top); + } else { + rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, + pe->pe_number, + window_id, + pe->tce_bypass_base, + 0); + + /* + * We might want to reset the DMA ops of all devices on + * this PE. 
However in theory, that shouldn't be necessary + * as this is used for VFIO/KVM pass-through and the device + * hasn't yet been returned to its kernel driver + */ + } + if (rc) + pe_err(pe, "OPAL error %lld configuring bypass window\n", rc); + else + pe->tce_bypass_enabled = enable; +} + +static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb, + struct pnv_ioda_pe *pe) +{ + /* TVE #1 is selected by PCI address bit 59 */ + pe->tce_bypass_base = 1ull << 59; + + /* Install set_bypass callback for VFIO */ + pe->tce32_table.set_bypass = pnv_pci_ioda2_set_bypass; + + /* Enable bypass by default */ + pnv_pci_ioda2_set_bypass(&pe->tce32_table, true); +} + static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) { @@ -727,6 +808,8 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, else pnv_ioda_setup_bus_dma(pe, pe->pbus); + /* Also create a bypass window */ + pnv_pci_ioda2_setup_bypass_pe(phb, pe); return; fail: if (pe->tce32_seg >= 0) @@ -1286,6 +1369,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, /* Setup TCEs */ phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup; + phb->dma_set_mask = pnv_pci_ioda_dma_set_mask; /* Setup shutdown function for kexec */ phb->shutdown = pnv_pci_ioda_shutdown; diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index b555ebc..95633d7 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -634,6 +634,16 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev) pnv_pci_dma_fallback_setup(hose, pdev); } +int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) +{ + struct pci_controller *hose = pci_bus_to_host(pdev->bus); + struct pnv_phb *phb = hose->private_data; + + if (phb && phb->dma_set_mask) + return phb->dma_set_mask(phb, pdev, dma_mask); + return __dma_set_mask(&pdev->dev, dma_mask); +} + void pnv_pci_shutdown(void) { struct pci_controller *hose; diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 13f1942..cde1694 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -54,7 +54,9 @@ struct pnv_ioda_pe { struct iommu_table tce32_table; phys_addr_t tce_inval_reg_phys; - /* XXX TODO: Add support for additional 64-bit iommus */ + /* 64-bit TCE bypass region */ + bool tce_bypass_enabled; + uint64_t tce_bypass_base; /* MSIs. MVE index is identical for for 32 and 64 bit MSI * and -1 if not supported. 
(It's actually identical to the @@ -113,6 +115,8 @@ struct pnv_phb { unsigned int hwirq, unsigned int virq, unsigned int is_64, struct msi_msg *msg); void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev); + int (*dma_set_mask)(struct pnv_phb *phb, struct pci_dev *pdev, + u64 dma_mask); void (*fixup_phb)(struct pci_controller *hose); u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn); void (*shutdown)(struct pnv_phb *phb); diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h index de6819b..0051e10 100644 --- a/arch/powerpc/platforms/powernv/powernv.h +++ b/arch/powerpc/platforms/powernv/powernv.h @@ -7,12 +7,20 @@ extern void pnv_smp_init(void); static inline void pnv_smp_init(void) { } #endif +struct pci_dev; + #ifdef CONFIG_PCI extern void pnv_pci_init(void); extern void pnv_pci_shutdown(void); +extern int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask); #else static inline void pnv_pci_init(void) { } static inline void pnv_pci_shutdown(void) { } + +static inline int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) +{ + return -ENODEV; +} #endif extern void pnv_lpc_init(void); diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 21166f6..110f4fb 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -27,6 +27,7 @@ #include <linux/interrupt.h> #include <linux/bug.h> #include <linux/cpuidle.h> +#include <linux/pci.h> #include <asm/machdep.h> #include <asm/firmware.h> @@ -141,6 +142,13 @@ static void pnv_progress(char *s, unsigned short hex) { } +static int pnv_dma_set_mask(struct device *dev, u64 dma_mask) +{ + if (dev_is_pci(dev)) + return pnv_pci_dma_set_mask(to_pci_dev(dev), dma_mask); + return __dma_set_mask(dev, dma_mask); +} + static void pnv_shutdown(void) { /* Let the PCI code clear up IODA tables */ @@ -238,6 +246,7 @@ define_machine(powernv) { .machine_shutdown = pnv_shutdown, .power_save = powernv_idle, .calibrate_decr = generic_calibrate_decr, + .dma_set_mask = pnv_dma_set_mask, #ifdef CONFIG_KEXEC .kexec_cpu_down = pnv_kexec_cpu_down, #endif diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index 37300f6..80b1d57 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -20,6 +20,7 @@ config PPC_PSERIES select PPC_DOORBELL select HAVE_CONTEXT_TRACKING select HOTPLUG_CPU if SMP + select ARCH_RANDOM default y config PPC_SPLPAR diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 8e639d7..972df0ff 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -430,8 +430,7 @@ static void pSeries_machine_kexec(struct kimage *image) { long rc; - if (firmware_has_feature(FW_FEATURE_SET_MODE) && - (image->type != KEXEC_TYPE_CRASH)) { + if (firmware_has_feature(FW_FEATURE_SET_MODE)) { rc = pSeries_disable_reloc_on_exc(); if (rc != H_SUCCESS) pr_warning("Warning: Failed to disable relocation on " diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 0e166ed..8209744 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -886,25 +886,25 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type) /* Default: read HW settings */ if (flow_type == IRQ_TYPE_DEFAULT) { - switch(vold & (MPIC_INFO(VECPRI_POLARITY_MASK) | - MPIC_INFO(VECPRI_SENSE_MASK))) { - case MPIC_INFO(VECPRI_SENSE_EDGE) | - 
MPIC_INFO(VECPRI_POLARITY_POSITIVE): - flow_type = IRQ_TYPE_EDGE_RISING; - break; - case MPIC_INFO(VECPRI_SENSE_EDGE) | - MPIC_INFO(VECPRI_POLARITY_NEGATIVE): - flow_type = IRQ_TYPE_EDGE_FALLING; - break; - case MPIC_INFO(VECPRI_SENSE_LEVEL) | - MPIC_INFO(VECPRI_POLARITY_POSITIVE): - flow_type = IRQ_TYPE_LEVEL_HIGH; - break; - case MPIC_INFO(VECPRI_SENSE_LEVEL) | - MPIC_INFO(VECPRI_POLARITY_NEGATIVE): - flow_type = IRQ_TYPE_LEVEL_LOW; - break; - } + int vold_ps; + + vold_ps = vold & (MPIC_INFO(VECPRI_POLARITY_MASK) | + MPIC_INFO(VECPRI_SENSE_MASK)); + + if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) | + MPIC_INFO(VECPRI_POLARITY_POSITIVE))) + flow_type = IRQ_TYPE_EDGE_RISING; + else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) | + MPIC_INFO(VECPRI_POLARITY_NEGATIVE))) + flow_type = IRQ_TYPE_EDGE_FALLING; + else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) | + MPIC_INFO(VECPRI_POLARITY_POSITIVE))) + flow_type = IRQ_TYPE_LEVEL_HIGH; + else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) | + MPIC_INFO(VECPRI_POLARITY_NEGATIVE))) + flow_type = IRQ_TYPE_LEVEL_LOW; + else + WARN_ONCE(1, "mpic: unknown IRQ type %d\n", vold); } /* Apply to irq desc */ diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index a90731b..b079098 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -309,16 +309,23 @@ static void get_output_lock(void) if (xmon_speaker == me) return; + for (;;) { - if (xmon_speaker == 0) { - last_speaker = cmpxchg(&xmon_speaker, 0, me); - if (last_speaker == 0) - return; - } - timeout = 10000000; + last_speaker = cmpxchg(&xmon_speaker, 0, me); + if (last_speaker == 0) + return; + + /* + * Wait a full second for the lock, we might be on a slow + * console, but check every 100us. + */ + timeout = 10000; while (xmon_speaker == last_speaker) { - if (--timeout > 0) + if (--timeout > 0) { + udelay(100); continue; + } + /* hostile takeover */ prev = cmpxchg(&xmon_speaker, last_speaker, me); if (prev == last_speaker) @@ -397,7 +404,6 @@ static int xmon_core(struct pt_regs *regs, int fromipi) } xmon_fault_jmp[cpu] = recurse_jmp; - cpumask_set_cpu(cpu, &cpus_in_xmon); bp = NULL; if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) @@ -419,6 +425,8 @@ static int xmon_core(struct pt_regs *regs, int fromipi) release_output_lock(); } + cpumask_set_cpu(cpu, &cpus_in_xmon); + waiting: secondary = 1; while (secondary && !xmon_gate) { diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index 4c4a1ce..47c8630 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -529,6 +529,7 @@ static int __init appldata_init(void) { int rc; + init_virt_timer(&appldata_timer); appldata_timer.function = appldata_timer_function; appldata_timer.data = (unsigned long) &appldata_work; diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index b3feabd..cf3c008 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -25,6 +25,7 @@ #include <linux/err.h> #include <linux/module.h> #include <linux/init.h> +#include <linux/spinlock.h> #include "crypt_s390.h" #define AES_KEYLEN_128 1 @@ -32,6 +33,7 @@ #define AES_KEYLEN_256 4 static u8 *ctrblk; +static DEFINE_SPINLOCK(ctrblk_lock); static char keylen_flag; struct s390_aes_ctx { @@ -758,43 +760,67 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, return aes_set_key(tfm, in_key, key_len); } +static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes) +{ + unsigned int i, n; + + /* only use complete blocks, 
max. PAGE_SIZE */ + n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1); + for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) { + memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE, + AES_BLOCK_SIZE); + crypto_inc(ctrptr + i, AES_BLOCK_SIZE); + } + return n; +} + static int ctr_aes_crypt(struct blkcipher_desc *desc, long func, struct s390_aes_ctx *sctx, struct blkcipher_walk *walk) { int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE); - unsigned int i, n, nbytes; - u8 buf[AES_BLOCK_SIZE]; - u8 *out, *in; + unsigned int n, nbytes; + u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE]; + u8 *out, *in, *ctrptr = ctrbuf; if (!walk->nbytes) return ret; - memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE); + if (spin_trylock(&ctrblk_lock)) + ctrptr = ctrblk; + + memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE); while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) { out = walk->dst.virt.addr; in = walk->src.virt.addr; while (nbytes >= AES_BLOCK_SIZE) { - /* only use complete blocks, max. PAGE_SIZE */ - n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : - nbytes & ~(AES_BLOCK_SIZE - 1); - for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) { - memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE, - AES_BLOCK_SIZE); - crypto_inc(ctrblk + i, AES_BLOCK_SIZE); - } - ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk); - if (ret < 0 || ret != n) + if (ctrptr == ctrblk) + n = __ctrblk_init(ctrptr, nbytes); + else + n = AES_BLOCK_SIZE; + ret = crypt_s390_kmctr(func, sctx->key, out, in, + n, ctrptr); + if (ret < 0 || ret != n) { + if (ctrptr == ctrblk) + spin_unlock(&ctrblk_lock); return -EIO; + } if (n > AES_BLOCK_SIZE) - memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE, + memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE, AES_BLOCK_SIZE); - crypto_inc(ctrblk, AES_BLOCK_SIZE); + crypto_inc(ctrptr, AES_BLOCK_SIZE); out += n; in += n; nbytes -= n; } ret = blkcipher_walk_done(desc, walk, nbytes); } + if (ctrptr == ctrblk) { + if (nbytes) + memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE); + else + memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE); + spin_unlock(&ctrblk_lock); + } /* * final block may be < AES_BLOCK_SIZE, copy only nbytes */ @@ -802,14 +828,15 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func, out = walk->dst.virt.addr; in = walk->src.virt.addr; ret = crypt_s390_kmctr(func, sctx->key, buf, in, - AES_BLOCK_SIZE, ctrblk); + AES_BLOCK_SIZE, ctrbuf); if (ret < 0 || ret != AES_BLOCK_SIZE) return -EIO; memcpy(out, buf, nbytes); - crypto_inc(ctrblk, AES_BLOCK_SIZE); + crypto_inc(ctrbuf, AES_BLOCK_SIZE); ret = blkcipher_walk_done(desc, walk, 0); + memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE); } - memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE); + return ret; } diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c index 200f2a1..0a5aac8 100644 --- a/arch/s390/crypto/des_s390.c +++ b/arch/s390/crypto/des_s390.c @@ -25,6 +25,7 @@ #define DES3_KEY_SIZE (3 * DES_KEY_SIZE) static u8 *ctrblk; +static DEFINE_SPINLOCK(ctrblk_lock); struct s390_des_ctx { u8 iv[DES_BLOCK_SIZE]; @@ -105,29 +106,35 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func, } static int cbc_desall_crypt(struct blkcipher_desc *desc, long func, - u8 *iv, struct blkcipher_walk *walk) + struct blkcipher_walk *walk) { + struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); int ret = blkcipher_walk_virt(desc, walk); unsigned int nbytes = walk->nbytes; + struct { + u8 iv[DES_BLOCK_SIZE]; + u8 key[DES3_KEY_SIZE]; + } param; if (!nbytes) goto out; - memcpy(iv, walk->iv, DES_BLOCK_SIZE); + memcpy(param.iv, walk->iv, DES_BLOCK_SIZE); + 
memcpy(param.key, ctx->key, DES3_KEY_SIZE); do { /* only use complete blocks */ unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1); u8 *out = walk->dst.virt.addr; u8 *in = walk->src.virt.addr; - ret = crypt_s390_kmc(func, iv, out, in, n); + ret = crypt_s390_kmc(func, &param, out, in, n); if (ret < 0 || ret != n) return -EIO; nbytes &= DES_BLOCK_SIZE - 1; ret = blkcipher_walk_done(desc, walk, nbytes); } while ((nbytes = walk->nbytes)); - memcpy(walk->iv, iv, DES_BLOCK_SIZE); + memcpy(walk->iv, param.iv, DES_BLOCK_SIZE); out: return ret; @@ -179,22 +186,20 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { - struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, ctx->iv, &walk); + return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk); } static int cbc_des_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { - struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, ctx->iv, &walk); + return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk); } static struct crypto_alg cbc_des_alg = { @@ -327,22 +332,20 @@ static int cbc_des3_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { - struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, ctx->iv, &walk); + return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk); } static int cbc_des3_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { - struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, ctx->iv, &walk); + return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk); } static struct crypto_alg cbc_des3_alg = { @@ -366,54 +369,80 @@ static struct crypto_alg cbc_des3_alg = { } }; +static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes) +{ + unsigned int i, n; + + /* align to block size, max. PAGE_SIZE */ + n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1); + for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) { + memcpy(ctrptr + i, ctrptr + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE); + crypto_inc(ctrptr + i, DES_BLOCK_SIZE); + } + return n; +} + static int ctr_desall_crypt(struct blkcipher_desc *desc, long func, - struct s390_des_ctx *ctx, struct blkcipher_walk *walk) + struct s390_des_ctx *ctx, + struct blkcipher_walk *walk) { int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE); - unsigned int i, n, nbytes; - u8 buf[DES_BLOCK_SIZE]; - u8 *out, *in; + unsigned int n, nbytes; + u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE]; + u8 *out, *in, *ctrptr = ctrbuf; + + if (!walk->nbytes) + return ret; - memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE); + if (spin_trylock(&ctrblk_lock)) + ctrptr = ctrblk; + + memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE); while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) { out = walk->dst.virt.addr; in = walk->src.virt.addr; while (nbytes >= DES_BLOCK_SIZE) { - /* align to block size, max. PAGE_SIZE */ - n = (nbytes > PAGE_SIZE) ?
PAGE_SIZE : - nbytes & ~(DES_BLOCK_SIZE - 1); - for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) { - memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE, - DES_BLOCK_SIZE); - crypto_inc(ctrblk + i, DES_BLOCK_SIZE); - } - ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk); - if (ret < 0 || ret != n) + if (ctrptr == ctrblk) + n = __ctrblk_init(ctrptr, nbytes); + else + n = DES_BLOCK_SIZE; + ret = crypt_s390_kmctr(func, ctx->key, out, in, + n, ctrptr); + if (ret < 0 || ret != n) { + if (ctrptr == ctrblk) + spin_unlock(&ctrblk_lock); return -EIO; + } if (n > DES_BLOCK_SIZE) - memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE, + memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE, DES_BLOCK_SIZE); - crypto_inc(ctrblk, DES_BLOCK_SIZE); + crypto_inc(ctrptr, DES_BLOCK_SIZE); out += n; in += n; nbytes -= n; } ret = blkcipher_walk_done(desc, walk, nbytes); } - + if (ctrptr == ctrblk) { + if (nbytes) + memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE); + else + memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE); + spin_unlock(&ctrblk_lock); + } /* final block may be < DES_BLOCK_SIZE, copy only nbytes */ if (nbytes) { out = walk->dst.virt.addr; in = walk->src.virt.addr; ret = crypt_s390_kmctr(func, ctx->key, buf, in, - DES_BLOCK_SIZE, ctrblk); + DES_BLOCK_SIZE, ctrbuf); if (ret < 0 || ret != DES_BLOCK_SIZE) return -EIO; memcpy(out, buf, nbytes); - crypto_inc(ctrblk, DES_BLOCK_SIZE); + crypto_inc(ctrbuf, DES_BLOCK_SIZE); ret = blkcipher_walk_done(desc, walk, 0); + memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE); } - memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE); return ret; } diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index b9e25ae..d7c0050 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -59,7 +59,7 @@ ENTRY(startup_continue) .quad 0 # cr12: tracing off .quad 0 # cr13: home space segment table .quad 0xc0000000 # cr14: machine check handling off - .quad 0 # cr15: linkage stack operations + .quad .Llinkage_stack # cr15: linkage stack operations .Lpcmsk:.quad 0x0000000180000000 .L4malign:.quad 0xffffffffffc00000 .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 @@ -67,12 +67,15 @@ ENTRY(startup_continue) .Lparmaddr: .quad PARMAREA .align 64 -.Lduct: .long 0,0,0,0,.Lduald,0,0,0 +.Lduct: .long 0,.Laste,.Laste,0,.Lduald,0,0,0 .long 0,0,0,0,0,0,0,0 +.Laste: .quad 0,0xffffffffffffffff,0,0,0,0,0,0 .align 128 .Lduald:.rept 8 .long 0x80000000,0,0,0 # invalid access-list entries .endr +.Llinkage_stack: + .long 0,0,0x89000000,0,0,0,0x8a000000,0 ENTRY(_ehead) diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c index a90d45e..27c50f4 100644 --- a/arch/s390/mm/page-states.c +++ b/arch/s390/mm/page-states.c @@ -12,6 +12,8 @@ #include <linux/mm.h> #include <linux/gfp.h> #include <linux/init.h> +#include <asm/setup.h> +#include <asm/ipl.h> #define ESSA_SET_STABLE 1 #define ESSA_SET_UNUSED 2 @@ -41,6 +43,14 @@ void __init cmma_init(void) if (!cmma_flag) return; + /* + * Disable CMM for dump, otherwise the tprot based memory + * detection can fail because of unstable pages. 
+ */ + if (OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP) { + cmma_flag = 0; + return; + } asm volatile( " .insn rrf,0xb9ab0000,%1,%1,0,0\n" "0: la %0,0\n" diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 940e50e..0af5250 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -444,6 +444,7 @@ config X86_INTEL_MID bool "Intel MID platform support" depends on X86_32 depends on X86_EXTENDED_PLATFORM + depends on X86_PLATFORM_DEVICES depends on PCI depends on PCI_GOANY depends on X86_IO_APIC @@ -1051,9 +1052,9 @@ config MICROCODE_INTEL This options enables microcode patch loading support for Intel processors. - For latest news and information on obtaining all the required - Intel ingredients for this driver, check: - <http://www.urbanmyth.org/microcode/>. + For the current Intel microcode data package go to + <https://downloadcenter.intel.com> and search for + 'Linux Processor Microcode Data File'. config MICROCODE_AMD bool "AMD microcode loading support" diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 0f3621e..321a52c 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -184,6 +184,7 @@ config HAVE_MMIOTRACE_SUPPORT config X86_DECODER_SELFTEST bool "x86 instruction decoder selftest" depends on DEBUG_KERNEL && KPROBES + depends on !COMPILE_TEST ---help--- Perform x86 instruction decoder selftests at build time. This option is useful for checking the sanity of x86 instruction diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h index a54ee1d..aaac3b2 100644 --- a/arch/x86/include/asm/amd_nb.h +++ b/arch/x86/include/asm/amd_nb.h @@ -19,7 +19,7 @@ extern int amd_cache_northbridges(void); extern void amd_flush_garts(void); extern int amd_numa_init(void); extern int amd_get_subcaches(int); -extern int amd_set_subcaches(int, int); +extern int amd_set_subcaches(int, unsigned long); struct amd_l3_cache { unsigned indices; diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 3b978c4..3d6b9f8 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -132,6 +132,8 @@ extern void __init efi_map_region_fixed(efi_memory_desc_t *md); extern void efi_sync_low_kernel_mappings(void); extern void efi_setup_page_tables(void); extern void __init old_map_region(efi_memory_desc_t *md); +extern void __init runtime_code_page_mkexec(void); +extern void __init efi_runtime_mkexec(void); struct efi_setup_data { u64 fw_vendor; diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index bbc8b12..5ad38ad 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -445,10 +445,20 @@ static inline int pte_same(pte_t a, pte_t b) return a.pte == b.pte; } +static inline int pteval_present(pteval_t pteval) +{ + /* + * Yes Linus, _PAGE_PROTNONE == _PAGE_NUMA. Expressing it this + * way clearly states that the intent is that protnone and numa + * hinting ptes are considered present for the purposes of + * pagetable operations like zapping, protection changes, gup etc. 
+ */ + return pteval & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA); +} + static inline int pte_present(pte_t a) { - return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE | - _PAGE_NUMA); + return pteval_present(pte_flags(a)); } #define pte_accessible pte_accessible diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index e6d90ba..04905bfc 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -62,7 +62,7 @@ static inline void __flush_tlb_all(void) static inline void __flush_tlb_one(unsigned long addr) { - count_vm_event(NR_TLB_LOCAL_FLUSH_ONE); + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); __flush_tlb_single(addr); } @@ -93,13 +93,13 @@ static inline void __flush_tlb_one(unsigned long addr) */ static inline void __flush_tlb_up(void) { - count_vm_event(NR_TLB_LOCAL_FLUSH_ALL); + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); __flush_tlb(); } static inline void flush_tlb_all(void) { - count_vm_event(NR_TLB_LOCAL_FLUSH_ALL); + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); __flush_tlb_all(); } diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index 787e1bb..3e276eb 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h @@ -52,8 +52,7 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s, extern int m2p_add_override(unsigned long mfn, struct page *page, struct gnttab_map_grant_ref *kmap_op); extern int m2p_remove_override(struct page *page, - struct gnttab_map_grant_ref *kmap_op, - unsigned long mfn); + struct gnttab_map_grant_ref *kmap_op); extern struct page *m2p_find_override(unsigned long mfn); extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); @@ -122,7 +121,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn) pfn = m2p_find_override_pfn(mfn, ~0); } - /* + /* * pfn is ~0 if there are no entries in the m2p for mfn or if the * entry doesn't map back to the mfn and m2p_override doesn't have a * valid entry for it. 
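The comment at the end of the hunk above spells out the mfn_to_pfn() fallback order: an M2P entry is only trusted when the P2M view maps the resulting pfn back to the same mfn, otherwise the m2p_override list is consulted, and ~0 is returned for a frame with no usable mapping. The sketch below mirrors that decision order using simplified, hypothetical tables (m2p, p2m) and a stand-in find_override_pfn(); it is an illustration of the described logic, not the kernel's implementation.

#include <stdio.h>

#define NFRAMES     16
#define INVALID_PFN (~0UL)

/* Hypothetical translation tables standing in for the real M2P/P2M state. */
static unsigned long m2p[NFRAMES];
static unsigned long p2m[NFRAMES];

/* Stand-in for the override lookup; returns INVALID_PFN when nothing matches. */
static unsigned long find_override_pfn(unsigned long mfn)
{
	(void)mfn;
	return INVALID_PFN;
}

/* Mirrors the fallback order described in the comment above. */
static unsigned long sketch_mfn_to_pfn(unsigned long mfn)
{
	unsigned long pfn = m2p[mfn];

	/* Trust the M2P entry only if the P2M view maps back to this mfn. */
	if (pfn < NFRAMES && p2m[pfn] == mfn)
		return pfn;

	/* Otherwise consult the override list; ~0 means "no valid mapping". */
	return find_override_pfn(mfn);
}

int main(void)
{
	/* Example: machine frame 3 maps to pfn 5 and the P2M view agrees. */
	m2p[3] = 5;
	p2m[5] = 3;

	printf("mfn 3 -> pfn %lu\n", sketch_mfn_to_pfn(3));
	printf("mfn 4 -> pfn %lu (~0 means unmapped)\n", sketch_mfn_to_pfn(4));
	return 0;
}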
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 59554dc..dec8de4 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -179,7 +179,7 @@ int amd_get_subcaches(int cpu) return (mask >> (4 * cuid)) & 0xf; } -int amd_set_subcaches(int cpu, int mask) +int amd_set_subcaches(int cpu, unsigned long mask) { static unsigned int reset, ban; struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu)); diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index d3153e2..c67ffa6 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -767,10 +767,7 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size) static void cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c) { - tlb_flushall_shift = 5; - - if (c->x86 <= 0x11) - tlb_flushall_shift = 4; + tlb_flushall_shift = 6; } static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 24b6fd1..8e28bf2 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -284,8 +284,13 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c) raw_local_save_flags(eflags); BUG_ON(eflags & X86_EFLAGS_AC); - if (cpu_has(c, X86_FEATURE_SMAP)) + if (cpu_has(c, X86_FEATURE_SMAP)) { +#ifdef CONFIG_X86_SMAP set_in_cr4(X86_CR4_SMAP); +#else + clear_in_cr4(X86_CR4_SMAP); +#endif + } } /* diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 3db61c6..5cd9bfa 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -640,21 +640,17 @@ static void intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c) case 0x61d: /* six-core 45 nm xeon "Dunnington" */ tlb_flushall_shift = -1; break; + case 0x63a: /* Ivybridge */ + tlb_flushall_shift = 2; + break; case 0x61a: /* 45 nm nehalem, "Bloomfield" */ case 0x61e: /* 45 nm nehalem, "Lynnfield" */ case 0x625: /* 32 nm nehalem, "Clarkdale" */ case 0x62c: /* 32 nm nehalem, "Gulftown" */ case 0x62e: /* 45 nm nehalem-ex, "Beckton" */ case 0x62f: /* 32 nm Xeon E7 */ - tlb_flushall_shift = 6; - break; case 0x62a: /* SandyBridge */ case 0x62d: /* SandyBridge, "Romely-EP" */ - tlb_flushall_shift = 5; - break; - case 0x63a: /* Ivybridge */ - tlb_flushall_shift = 1; - break; default: tlb_flushall_shift = 6; } diff --git a/arch/x86/kernel/cpu/microcode/amd_early.c b/arch/x86/kernel/cpu/microcode/amd_early.c index 8384c0f..617a9e2 100644 --- a/arch/x86/kernel/cpu/microcode/amd_early.c +++ b/arch/x86/kernel/cpu/microcode/amd_early.c @@ -285,6 +285,15 @@ static void __init collect_cpu_sig_on_bsp(void *arg) uci->cpu_sig.sig = cpuid_eax(0x00000001); } + +static void __init get_bsp_sig(void) +{ + unsigned int bsp = boot_cpu_data.cpu_index; + struct ucode_cpu_info *uci = ucode_cpu_info + bsp; + + if (!uci->cpu_sig.sig) + smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1); +} #else void load_ucode_amd_ap(void) { @@ -337,31 +346,37 @@ void load_ucode_amd_ap(void) int __init save_microcode_in_initrd_amd(void) { + unsigned long cont; enum ucode_state ret; u32 eax; -#ifdef CONFIG_X86_32 - unsigned int bsp = boot_cpu_data.cpu_index; - struct ucode_cpu_info *uci = ucode_cpu_info + bsp; - - if (!uci->cpu_sig.sig) - smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1); + if (!container) + return -EINVAL; +#ifdef CONFIG_X86_32 + get_bsp_sig(); + cont = (unsigned long)container; +#else /* - * Take into account the fact that the ramdisk might get relocated - * and therefore we need to recompute the container's position 
in - * virtual memory space. + * We need the physical address of the container for both bitness since + * boot_params.hdr.ramdisk_image is a physical address. */ - container = (u8 *)(__va((u32)relocated_ramdisk) + - ((u32)container - boot_params.hdr.ramdisk_image)); + cont = __pa(container); #endif + + /* + * Take into account the fact that the ramdisk might get relocated and + * therefore we need to recompute the container's position in virtual + * memory space. + */ + if (relocated_ramdisk) + container = (u8 *)(__va(relocated_ramdisk) + + (cont - boot_params.hdr.ramdisk_image)); + if (ucode_new_rev) pr_info("microcode: updated early to new patch_level=0x%08x\n", ucode_new_rev); - if (!container) - return -EINVAL; - eax = cpuid_eax(0x00000001); eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index ce2d0a2..0e25a1b 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c @@ -683,7 +683,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock) } /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */ - count_vm_event(NR_TLB_LOCAL_FLUSH_ALL); + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); __flush_tlb(); /* Save MTRR state */ @@ -697,7 +697,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock) static void post_set(void) __releases(set_atomicity_lock) { /* Flush TLBs (no need to flush caches - they are disabled) */ - count_vm_event(NR_TLB_LOCAL_FLUSH_ALL); + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); __flush_tlb(); /* Intel (P6) standard MTRRs */ diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index d4bdd25..e625319 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -77,8 +77,7 @@ within(unsigned long addr, unsigned long start, unsigned long end) return addr >= start && addr < end; } -static int -do_ftrace_mod_code(unsigned long ip, const void *new_code) +static unsigned long text_ip_addr(unsigned long ip) { /* * On x86_64, kernel text mappings are mapped read-only with @@ -91,7 +90,7 @@ do_ftrace_mod_code(unsigned long ip, const void *new_code) if (within(ip, (unsigned long)_text, (unsigned long)_etext)) ip = (unsigned long)__va(__pa_symbol(ip)); - return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE); + return ip; } static const unsigned char *ftrace_nop_replace(void) @@ -123,8 +122,10 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code, if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0) return -EINVAL; + ip = text_ip_addr(ip); + /* replace the text with the new text */ - if (do_ftrace_mod_code(ip, new_code)) + if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE)) return -EPERM; sync_core(); @@ -221,37 +222,51 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, return -EINVAL; } -int ftrace_update_ftrace_func(ftrace_func_t func) +static unsigned long ftrace_update_func; + +static int update_ftrace_func(unsigned long ip, void *new) { - unsigned long ip = (unsigned long)(&ftrace_call); - unsigned char old[MCOUNT_INSN_SIZE], *new; + unsigned char old[MCOUNT_INSN_SIZE]; int ret; - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); - new = ftrace_call_replace(ip, (unsigned long)func); + memcpy(old, (void *)ip, MCOUNT_INSN_SIZE); + + ftrace_update_func = ip; + /* Make sure the breakpoints see the ftrace_update_func update */ + smp_wmb(); /* See comment above by declaration of modifying_ftrace_code */ atomic_inc(&modifying_ftrace_code); ret = 
ftrace_modify_code(ip, old, new); + atomic_dec(&modifying_ftrace_code); + + return ret; +} + +int ftrace_update_ftrace_func(ftrace_func_t func) +{ + unsigned long ip = (unsigned long)(&ftrace_call); + unsigned char *new; + int ret; + + new = ftrace_call_replace(ip, (unsigned long)func); + ret = update_ftrace_func(ip, new); + /* Also update the regs callback function */ if (!ret) { ip = (unsigned long)(&ftrace_regs_call); - memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE); new = ftrace_call_replace(ip, (unsigned long)func); - ret = ftrace_modify_code(ip, old, new); + ret = update_ftrace_func(ip, new); } - atomic_dec(&modifying_ftrace_code); - return ret; } static int is_ftrace_caller(unsigned long ip) { - if (ip == (unsigned long)(&ftrace_call) || - ip == (unsigned long)(&ftrace_regs_call)) + if (ip == ftrace_update_func) return 1; return 0; @@ -677,45 +692,41 @@ int __init ftrace_dyn_arch_init(void *data) #ifdef CONFIG_DYNAMIC_FTRACE extern void ftrace_graph_call(void); -static int ftrace_mod_jmp(unsigned long ip, - int old_offset, int new_offset) +static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr) { - unsigned char code[MCOUNT_INSN_SIZE]; + static union ftrace_code_union calc; - if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE)) - return -EFAULT; + /* Jmp not a call (ignore the .e8) */ + calc.e8 = 0xe9; + calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr); - if (code[0] != 0xe9 || old_offset != *(int *)(&code[1])) - return -EINVAL; + /* + * ftrace external locks synchronize the access to the static variable. + */ + return calc.code; +} - *(int *)(&code[1]) = new_offset; +static int ftrace_mod_jmp(unsigned long ip, void *func) +{ + unsigned char *new; - if (do_ftrace_mod_code(ip, &code)) - return -EPERM; + new = ftrace_jmp_replace(ip, (unsigned long)func); - return 0; + return update_ftrace_func(ip, new); } int ftrace_enable_ftrace_graph_caller(void) { unsigned long ip = (unsigned long)(&ftrace_graph_call); - int old_offset, new_offset; - old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE); - new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE); - - return ftrace_mod_jmp(ip, old_offset, new_offset); + return ftrace_mod_jmp(ip, &ftrace_graph_caller); } int ftrace_disable_ftrace_graph_caller(void) { unsigned long ip = (unsigned long)(&ftrace_graph_call); - int old_offset, new_offset; - - old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE); - new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE); - return ftrace_mod_jmp(ip, old_offset, new_offset); + return ftrace_mod_jmp(ip, &ftrace_stub); } #endif /* !CONFIG_DYNAMIC_FTRACE */ diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index dbb6087..d99f31d 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -266,6 +266,14 @@ __visible void smp_trace_x86_platform_ipi(struct pt_regs *regs) EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); #ifdef CONFIG_HOTPLUG_CPU + +/* These two declarations are only used in check_irq_vectors_for_cpu_disable() + * below, which is protected by stop_machine(). Putting them on the stack + * results in a stack frame overflow. Dynamically allocating could result in a + * failure so declare these two cpumasks as global. + */ +static struct cpumask affinity_new, online_new; + /* * This cpu is going to be removed and its vectors migrated to the remaining * online cpus. Check to see if there are enough vectors in the remaining cpus. 
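The comment just added above explains why affinity_new and online_new become file-scope statics rather than locals: a struct cpumask scales with the configured CPU count, so two of them can eat a large share of the fixed per-task kernel stack, while a dynamic allocation could fail on the CPU-offline path. A small, self-contained C sketch of that size arithmetic follows; NR_CPUS and the stack size used here are assumed example values, not figures taken from this patch. The next hunk then drops the on-stack declaration.

/*
 * Size arithmetic only; NR_CPUS and the stack size are assumed example
 * values chosen for illustration, not taken from this patch.
 */
#include <stdio.h>

#define NR_CPUS            8192u          /* a large distro-style build      */
#define KERNEL_STACK_BYTES (8u * 1024u)   /* a typical per-task kernel stack */
#define CPUMASK_BYTES      (NR_CPUS / 8u) /* one bit per possible CPU        */

int main(void)
{
	unsigned int two_masks = 2u * CPUMASK_BYTES; /* affinity_new + online_new */

	printf("one cpumask       : %u bytes\n", CPUMASK_BYTES);
	printf("two on-stack masks: %u bytes (~%u%% of a %u-byte stack)\n",
	       two_masks, 100u * two_masks / KERNEL_STACK_BYTES,
	       KERNEL_STACK_BYTES);
	return 0;
}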
@@ -277,7 +285,6 @@ int check_irq_vectors_for_cpu_disable(void) unsigned int this_cpu, vector, this_count, count; struct irq_desc *desc; struct irq_data *data; - struct cpumask affinity_new, online_new; this_cpu = smp_processor_id(); cpumask_copy(&online_new, cpu_online_mask); diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 04ee1e2..7c6acd4 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c @@ -571,3 +571,40 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5, quirk_amd_nb_node); #endif + +#ifdef CONFIG_PCI +/* + * Processor does not ensure DRAM scrub read/write sequence + * is atomic wrt accesses to CC6 save state area. Therefore + * if a concurrent scrub read/write access is to same address + * the entry may appear as if it is not written. This quirk + * applies to Fam16h models 00h-0Fh + * + * See "Revision Guide" for AMD F16h models 00h-0fh, + * document 51810 rev. 3.04, Nov 2013 + */ +static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev) +{ + u32 val; + + /* + * Suggested workaround: + * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b + */ + pci_read_config_dword(dev, 0x58, &val); + if (val & 0x1F) { + val &= ~(0x1F); + pci_write_config_dword(dev, 0x58, val); + } + + pci_read_config_dword(dev, 0x5C, &val); + if (val & BIT(0)) { + val &= ~BIT(0); + pci_write_config_dword(dev, 0x5c, val); + } +} + +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3, + amd_disable_seq_and_redirect_scrub); + +#endif diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 19e5adb..acb3b60 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -209,7 +209,7 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc) * dance when its actually needed. */ - preempt_disable(); + preempt_disable_notrace(); data = this_cpu_read(cyc2ns.head); tail = this_cpu_read(cyc2ns.tail); @@ -229,7 +229,7 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc) if (!--data->__count) this_cpu_write(cyc2ns.tail, data); } - preempt_enable(); + preempt_enable_notrace(); return ns; } diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 9d591c8..6dea040 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1001,6 +1001,12 @@ static int fault_in_kernel_space(unsigned long address) static inline bool smap_violation(int error_code, struct pt_regs *regs) { + if (!IS_ENABLED(CONFIG_X86_SMAP)) + return false; + + if (!static_cpu_has(X86_FEATURE_SMAP)) + return false; + if (error_code & PF_USER) return false; @@ -1087,11 +1093,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code) if (unlikely(error_code & PF_RSVD)) pgtable_bad(regs, error_code, address); - if (static_cpu_has(X86_FEATURE_SMAP)) { - if (unlikely(smap_violation(error_code, regs))) { - bad_area_nosemaphore(regs, error_code, address); - return; - } + if (unlikely(smap_violation(error_code, regs))) { + bad_area_nosemaphore(regs, error_code, address); + return; } /* diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 81b2750..27aa0455 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -493,14 +493,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi) struct numa_memblk *mb = &mi->blk[i]; memblock_set_node(mb->start, mb->end - mb->start, &memblock.memory, mb->nid); - - /* - * At this time, all memory regions reserved by memblock are - * used by the kernel. Set the nid in memblock.reserved will - * mark out all the nodes the kernel resides in. 
- */ - memblock_set_node(mb->start, mb->end - mb->start, - &memblock.reserved, mb->nid); } /* @@ -565,10 +557,21 @@ static void __init numa_init_array(void) static void __init numa_clear_kernel_node_hotplug(void) { int i, nid; - nodemask_t numa_kernel_nodes; + nodemask_t numa_kernel_nodes = NODE_MASK_NONE; unsigned long start, end; struct memblock_type *type = &memblock.reserved; + /* + * At this time, all memory regions reserved by memblock are + * used by the kernel. Set the nid in memblock.reserved will + * mark out all the nodes the kernel resides in. + */ + for (i = 0; i < numa_meminfo.nr_blks; i++) { + struct numa_memblk *mb = &numa_meminfo.blk[i]; + memblock_set_node(mb->start, mb->end - mb->start, + &memblock.reserved, mb->nid); + } + /* Mark all kernel nodes. */ for (i = 0; i < type->cnt; i++) node_set(type->regions[i].nid, numa_kernel_nodes); diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 0342d27..47b6436 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -52,6 +52,8 @@ void memory_present(int nid, unsigned long start, unsigned long end) nid, start, end); printk(KERN_DEBUG " Setting physnode_map array to node %d for pfns:\n", nid); printk(KERN_DEBUG " "); + start = round_down(start, PAGES_PER_SECTION); + end = round_up(end, PAGES_PER_SECTION); for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) { physnode_map[pfn / PAGES_PER_SECTION] = nid; printk(KERN_CONT "%lx ", pfn); diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c index 1a25187..1953e9c 100644 --- a/arch/x86/mm/srat.c +++ b/arch/x86/mm/srat.c @@ -42,15 +42,25 @@ static __init inline int srat_disabled(void) return acpi_numa < 0; } -/* Callback for SLIT parsing */ +/* + * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for + * I/O localities since SRAT does not list them. I/O localities are + * not supported at this point. 
+ */ void __init acpi_numa_slit_init(struct acpi_table_slit *slit) { int i, j; - for (i = 0; i < slit->locality_count; i++) - for (j = 0; j < slit->locality_count; j++) + for (i = 0; i < slit->locality_count; i++) { + if (pxm_to_node(i) == NUMA_NO_NODE) + continue; + for (j = 0; j < slit->locality_count; j++) { + if (pxm_to_node(j) == NUMA_NO_NODE) + continue; numa_set_distance(pxm_to_node(i), pxm_to_node(j), slit->entry[slit->locality_count * i + j]); + } + } } /* Callback for Proximity Domain -> x2APIC mapping */ diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index ae699b3..dd8dda1 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -103,7 +103,7 @@ static void flush_tlb_func(void *info) if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm)) return; - count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED); + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) { if (f->flush_end == TLB_FLUSH_ALL) local_flush_tlb(); @@ -131,7 +131,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask, info.flush_start = start; info.flush_end = end; - count_vm_event(NR_TLB_REMOTE_FLUSH); + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); if (is_uv_system()) { unsigned int cpu; @@ -151,44 +151,19 @@ void flush_tlb_current_task(void) preempt_disable(); - count_vm_event(NR_TLB_LOCAL_FLUSH_ALL); + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); local_flush_tlb(); if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL); preempt_enable(); } -/* - * It can find out the THP large page, or - * HUGETLB page in tlb_flush when THP disabled - */ -static inline unsigned long has_large_page(struct mm_struct *mm, - unsigned long start, unsigned long end) -{ - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - unsigned long addr = ALIGN(start, HPAGE_SIZE); - for (; addr < end; addr += HPAGE_SIZE) { - pgd = pgd_offset(mm, addr); - if (likely(!pgd_none(*pgd))) { - pud = pud_offset(pgd, addr); - if (likely(!pud_none(*pud))) { - pmd = pmd_offset(pud, addr); - if (likely(!pmd_none(*pmd))) - if (pmd_large(*pmd)) - return addr; - } - } - } - return 0; -} - void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long vmflag) { unsigned long addr; unsigned act_entries, tlb_entries = 0; + unsigned long nr_base_pages; preempt_disable(); if (current->active_mm != mm) @@ -210,21 +185,20 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, tlb_entries = tlb_lli_4k[ENTRIES]; else tlb_entries = tlb_lld_4k[ENTRIES]; + /* Assume all of TLB entries was occupied by this task */ - act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm; + act_entries = tlb_entries >> tlb_flushall_shift; + act_entries = mm->total_vm > act_entries ? 
act_entries : mm->total_vm; + nr_base_pages = (end - start) >> PAGE_SHIFT; /* tlb_flushall_shift is on balance point, details in commit log */ - if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift) { - count_vm_event(NR_TLB_LOCAL_FLUSH_ALL); + if (nr_base_pages > act_entries) { + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); local_flush_tlb(); } else { - if (has_large_page(mm, start, end)) { - local_flush_tlb(); - goto flush_all; - } /* flush range by one by one 'invlpg' */ for (addr = start; addr < end; addr += PAGE_SIZE) { - count_vm_event(NR_TLB_LOCAL_FLUSH_ONE); + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); __flush_tlb_single(addr); } @@ -262,7 +236,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start) static void do_flush_tlb_all(void *info) { - count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED); + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); __flush_tlb_all(); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY) leave_mm(smp_processor_id()); @@ -270,7 +244,7 @@ static void do_flush_tlb_all(void *info) void flush_tlb_all(void) { - count_vm_event(NR_TLB_REMOTE_FLUSH); + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); on_each_cpu(do_flush_tlb_all, NULL, 1); } diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c index 7145ec6..f15103d 100644 --- a/arch/x86/platform/efi/efi-bgrt.c +++ b/arch/x86/platform/efi/efi-bgrt.c @@ -42,14 +42,15 @@ void __init efi_bgrt_init(void) if (bgrt_tab->header.length < sizeof(*bgrt_tab)) return; - if (bgrt_tab->version != 1) + if (bgrt_tab->version != 1 || bgrt_tab->status != 1) return; if (bgrt_tab->image_type != 0 || !bgrt_tab->image_address) return; image = efi_lookup_mapped_addr(bgrt_tab->image_address); if (!image) { - image = ioremap(bgrt_tab->image_address, sizeof(bmp_header)); + image = early_memremap(bgrt_tab->image_address, + sizeof(bmp_header)); ioremapped = true; if (!image) return; @@ -57,7 +58,7 @@ void __init efi_bgrt_init(void) memcpy_fromio(&bmp_header, image, sizeof(bmp_header)); if (ioremapped) - iounmap(image); + early_iounmap(image, sizeof(bmp_header)); bgrt_image_size = bmp_header.size; bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL); @@ -65,7 +66,8 @@ void __init efi_bgrt_init(void) return; if (ioremapped) { - image = ioremap(bgrt_tab->image_address, bmp_header.size); + image = early_memremap(bgrt_tab->image_address, + bmp_header.size); if (!image) { kfree(bgrt_image); bgrt_image = NULL; @@ -75,5 +77,5 @@ void __init efi_bgrt_init(void) memcpy_fromio(bgrt_image, image, bgrt_image_size); if (ioremapped) - iounmap(image); + early_iounmap(image, bmp_header.size); } diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index d62ec87..1a201ac 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -792,7 +792,7 @@ void __init efi_set_executable(efi_memory_desc_t *md, bool executable) set_memory_nx(addr, npages); } -static void __init runtime_code_page_mkexec(void) +void __init runtime_code_page_mkexec(void) { efi_memory_desc_t *md; void *p; @@ -1069,8 +1069,7 @@ void __init efi_enter_virtual_mode(void) efi.update_capsule = virt_efi_update_capsule; efi.query_capsule_caps = virt_efi_query_capsule_caps; - if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX)) - runtime_code_page_mkexec(); + efi_runtime_mkexec(); kfree(new_memmap); diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c index 249b183..0b74cdf 100644 --- a/arch/x86/platform/efi/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c @@ -77,3 +77,9 @@ void 
efi_call_phys_epilog(void) local_irq_restore(efi_rt_eflags); } + +void __init efi_runtime_mkexec(void) +{ + if (__supported_pte_mask & _PAGE_NX) + runtime_code_page_mkexec(); +} diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 6284f15..0c2a234 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -233,3 +233,12 @@ void __init parse_efi_setup(u64 phys_addr, u32 data_len) { efi_setup = phys_addr + sizeof(struct setup_data); } + +void __init efi_runtime_mkexec(void) +{ + if (!efi_enabled(EFI_OLD_MEMMAP)) + return; + + if (__supported_pte_mask & _PAGE_NX) + runtime_code_page_mkexec(); +} diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index a4d7b64..201d09a 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1473,6 +1473,18 @@ static void xen_pvh_set_cr_flags(int cpu) * X86_CR0_TS, X86_CR0_PE, X86_CR0_ET are set by Xen for HVM guests * (which PVH shared codepaths), while X86_CR0_PG is for PVH. */ write_cr0(read_cr0() | X86_CR0_MP | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM); + + if (!cpu) + return; + /* + * For BSP, PSE PGE are set in probe_page_size_mask(), for APs + * set them here. For all, OSFXSR OSXMMEXCPT are set in fpu_init. + */ + if (cpu_has_pse) + set_in_cr4(X86_CR4_PSE); + + if (cpu_has_pge) + set_in_cr4(X86_CR4_PGE); } /* diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 2423ef0..256282e 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -365,7 +365,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, /* Assume pteval_t is equivalent to all the other *val_t types. */ static pteval_t pte_mfn_to_pfn(pteval_t val) { - if (val & _PAGE_PRESENT) { + if (pteval_present(val)) { unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; unsigned long pfn = mfn_to_pfn(mfn); @@ -381,7 +381,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val) static pteval_t pte_pfn_to_mfn(pteval_t val) { - if (val & _PAGE_PRESENT) { + if (pteval_present(val)) { unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; pteval_t flags = val & PTE_FLAGS_MASK; unsigned long mfn; diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 8009acb..696c694 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -899,6 +899,13 @@ int m2p_add_override(unsigned long mfn, struct page *page, "m2p_add_override: pfn %lx not mapped", pfn)) return -EINVAL; } + WARN_ON(PagePrivate(page)); + SetPagePrivate(page); + set_page_private(page, mfn); + page->index = pfn_to_mfn(pfn); + + if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) + return -ENOMEM; if (kmap_op != NULL) { if (!PageHighMem(page)) { @@ -937,16 +944,19 @@ int m2p_add_override(unsigned long mfn, struct page *page, } EXPORT_SYMBOL_GPL(m2p_add_override); int m2p_remove_override(struct page *page, - struct gnttab_map_grant_ref *kmap_op, - unsigned long mfn) + struct gnttab_map_grant_ref *kmap_op) { unsigned long flags; + unsigned long mfn; unsigned long pfn; unsigned long uninitialized_var(address); unsigned level; pte_t *ptep = NULL; pfn = page_to_pfn(page); + mfn = get_phys_to_machine(pfn); + if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) + return -EINVAL; if (!PageHighMem(page)) { address = (unsigned long)__va(pfn << PAGE_SHIFT); @@ -960,7 +970,10 @@ int m2p_remove_override(struct page *page, spin_lock_irqsave(&m2p_override_lock, flags); list_del(&page->lru); spin_unlock_irqrestore(&m2p_override_lock, flags); + WARN_ON(!PagePrivate(page)); + ClearPagePrivate(page); + set_phys_to_machine(pfn, page->index); if 
(kmap_op != NULL) { if (!PageHighMem(page)) { struct multicall_space mcs;